Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_compositeexplicitautograd_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight_ops.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_atan_native.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erfc_ops.h +50 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_int_mm_cuda_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default.h +30 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_native.h +21 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csr_cpu_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_native.h +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_meta.h +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_meta_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/add_compositeexplicitautograd_dispatch.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_scatter_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce_ops.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conj.h +30 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose2d.h +47 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward_ops.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/erfinv_meta.h +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_cuda_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mH.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/neg_cpu_dispatch.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pairwise_distance.h +30 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/randn.h +377 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta_dispatch.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_cpu_dispatch.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_dim_native.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/zeros_like_ops.h +39 -0
- vllm/lib/python3.10/site-packages/py/_code/__init__.py +1 -0
- vllm/lib/python3.10/site-packages/py/_code/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/py/_code/__pycache__/_assertionnew.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/py/_code/__pycache__/_assertionold.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/py/_code/__pycache__/_py2traceback.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/py/_code/__pycache__/assertion.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/py/_code/__pycache__/code.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/py/_code/__pycache__/source.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/py/_code/_assertionnew.py +322 -0
- vllm/lib/python3.10/site-packages/py/_code/_assertionold.py +556 -0
- vllm/lib/python3.10/site-packages/py/_code/_py2traceback.py +79 -0
- vllm/lib/python3.10/site-packages/py/_code/assertion.py +90 -0
- vllm/lib/python3.10/site-packages/py/_code/code.py +796 -0
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor & _cdist_backward_out(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out);
|
| 20 |
+
TORCH_API at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & _ctc_loss_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false);
|
| 21 |
+
TORCH_API at::Tensor & _ctc_loss_backward_outf(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _cudnn_rnn_flatten_weight {
|
| 18 |
+
using schema = at::Tensor (at::TensorList, int64_t, c10::SymInt, int64_t, c10::SymInt, c10::SymInt, int64_t, bool, bool);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cudnn_rnn_flatten_weight")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor")
|
| 24 |
+
static at::Tensor call(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _cudnn_rnn_flatten_weight_out {
|
| 29 |
+
using schema = at::Tensor & (at::TensorList, int64_t, c10::SymInt, int64_t, c10::SymInt, c10::SymInt, int64_t, bool, bool, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cudnn_rnn_flatten_weight")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_atan_native.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API void _foreach_atan_out(at::TensorList self, at::TensorList out);
|
| 20 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_atan_slow(at::TensorList self);
|
| 21 |
+
TORCH_API void foreach_tensor_atan_slow_(at::TensorList self);
|
| 22 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_atan_cuda(at::TensorList self);
|
| 23 |
+
TORCH_API void foreach_tensor_atan_cuda_(at::TensorList self);
|
| 24 |
+
} // namespace native
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erfc_ops.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _foreach_erfc {
|
| 18 |
+
using schema = ::std::vector<at::Tensor> (at::TensorList);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erfc")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erfc(Tensor[] self) -> Tensor[]")
|
| 24 |
+
static ::std::vector<at::Tensor> call(at::TensorList self);
|
| 25 |
+
static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _foreach_erfc_ {
|
| 29 |
+
using schema = void (at::TensorList);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erfc_")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erfc_(Tensor(a!)[] self) -> ()")
|
| 35 |
+
static void call(at::TensorList self);
|
| 36 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API _foreach_erfc_out {
|
| 40 |
+
using schema = void (at::TensorList, at::TensorList);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erfc")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
|
| 46 |
+
static void call(at::TensorList self, at::TensorList out);
|
| 47 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_int_mm_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _int_mm(const at::Tensor & self, const at::Tensor & mat2);
|
| 21 |
+
TORCH_API at::Tensor & _int_mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2);
|
| 22 |
+
TORCH_API at::Tensor & _int_mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace cuda
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_sparse_csr_prod_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
|
| 26 |
+
inline at::Tensor _sparse_csr_prod(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
|
| 27 |
+
return at::_ops::_sparse_csr_prod_dim_dtype::call(self, dim, keepdim, dtype);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & _sparse_csr_prod_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
|
| 32 |
+
return at::_ops::_sparse_csr_prod_dim_dtype_out::call(self, dim, keepdim, dtype, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & _sparse_csr_prod_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
|
| 36 |
+
return at::_ops::_sparse_csr_prod_dim_dtype_out::call(self, dim, keepdim, dtype, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & _sparse_log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & _sparse_log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_test_string_default_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
|
| 26 |
+
inline at::Tensor _test_string_default(const at::Tensor & dummy, c10::string_view a="\"'\\", c10::string_view b="\"'\\") {
|
| 27 |
+
return at::_ops::_test_string_default::call(dummy, a, b);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor _test_string_default(const at::Tensor & dummy, c10::string_view a="\"'\\", c10::string_view b="\"'\\");
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csr_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt);
|
| 21 |
+
|
| 22 |
+
} // namespace cpu
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_native.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
#include <ATen/ops/_upsample_bicubic2d_aa_meta.h>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor _upsample_bicubic2d_aa(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors);
|
| 20 |
+
struct TORCH_API structured__upsample_bicubic2d_aa_out_cpu : public at::meta::structured__upsample_bicubic2d_aa {
|
| 21 |
+
void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & out);
|
| 22 |
+
};
|
| 23 |
+
struct TORCH_API structured__upsample_bicubic2d_aa_out_cuda : public at::meta::structured__upsample_bicubic2d_aa {
|
| 24 |
+
void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & out);
|
| 25 |
+
};
|
| 26 |
+
} // namespace native
|
| 27 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_meta.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeMetaFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/TensorIterator.h>
|
| 13 |
+
#include <ATen/TensorMeta.h>
|
| 14 |
+
#include <tuple>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
struct TORCH_API structured__upsample_nearest_exact2d_backward : public at::impl::MetaBase {
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
void meta(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, c10::optional<double> scales_h, c10::optional<double> scales_w);
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
} // namespace native
|
| 27 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
|
| 21 |
+
TORCH_API at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
|
| 22 |
+
TORCH_API at::Tensor & _upsample_nearest_exact3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
|
| 23 |
+
TORCH_API at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
|
| 24 |
+
TORCH_API at::Tensor & _upsample_nearest_exact3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
|
| 25 |
+
TORCH_API at::Tensor & _upsample_nearest_exact3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
|
| 26 |
+
|
| 27 |
+
} // namespace cpu
|
| 28 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input);
|
| 22 |
+
|
| 23 |
+
} // namespace cpu
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_meta_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
|
| 21 |
+
TORCH_API at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
|
| 22 |
+
TORCH_API at::Tensor & adaptive_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input);
|
| 23 |
+
|
| 24 |
+
} // namespace meta
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/add_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor add(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
|
| 21 |
+
TORCH_API at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
|
| 22 |
+
TORCH_API at::Tensor & add_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & add_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
|
| 24 |
+
|
| 25 |
+
} // namespace compositeexplicitautograd
|
| 26 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_scatter_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor & as_strided_scatter_out_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out);
|
| 20 |
+
TORCH_API at::Tensor as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API coalesce {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::coalesce")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "coalesce(Tensor(a) self) -> Tensor(a)")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conj.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/conj_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::conj(Tensor(a) self) -> Tensor(a)
|
| 26 |
+
inline at::Tensor __dispatch_conj(const at::Tensor & self) {
|
| 27 |
+
return at::_ops::conj::call(self);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose2d.h
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/conv_transpose2d_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor
|
| 26 |
+
inline at::Tensor conv_transpose2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) {
|
| 27 |
+
return at::_ops::conv_transpose2d_input::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), groups, c10::fromIntArrayRefSlow(dilation));
|
| 28 |
+
}
|
| 29 |
+
namespace symint {
|
| 30 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 31 |
+
at::Tensor conv_transpose2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) {
|
| 32 |
+
return at::_ops::conv_transpose2d_input::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), groups, c10::fromIntArrayRefSlow(dilation));
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor
|
| 37 |
+
inline at::Tensor conv_transpose2d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1)) {
|
| 38 |
+
return at::_ops::conv_transpose2d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
|
| 39 |
+
}
|
| 40 |
+
namespace symint {
|
| 41 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 42 |
+
at::Tensor conv_transpose2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1)) {
|
| 43 |
+
return at::_ops::conv_transpose2d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3);
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor & cudnn_convolution_transpose_out_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out);
|
| 20 |
+
TORCH_API at::Tensor cudnn_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API diagonal_backward {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, int64_t, int64_t, int64_t);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diagonal_backward")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API diagonal_backward_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, int64_t, int64_t, int64_t, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diagonal_backward")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/erfinv_meta.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeMetaFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/TensorIterator.h>
|
| 13 |
+
#include <ATen/TensorMeta.h>
|
| 14 |
+
#include <tuple>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
struct TORCH_API structured_erfinv : public TensorIteratorBase {
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
void meta(const at::Tensor & self);
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
} // namespace native
|
| 27 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other);
|
| 21 |
+
TORCH_API at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
|
| 22 |
+
TORCH_API at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other);
|
| 24 |
+
|
| 25 |
+
} // namespace cuda
|
| 26 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps=c10::nullopt);
|
| 21 |
+
TORCH_API at::Tensor & logit_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps=c10::nullopt);
|
| 22 |
+
TORCH_API at::Tensor & logit_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps, at::Tensor & grad_input);
|
| 23 |
+
|
| 24 |
+
} // namespace cuda
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mH.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/mH_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautogradnonfunctional {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeexplicitautogradnonfunctional
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/neg_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor neg(const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self);
|
| 22 |
+
TORCH_API at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & neg_(at::Tensor & self);
|
| 24 |
+
|
| 25 |
+
} // namespace cpu
|
| 26 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pairwise_distance.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/pairwise_distance_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
|
| 26 |
+
inline at::Tensor pairwise_distance(const at::Tensor & x1, const at::Tensor & x2, double p=2, double eps=1e-06, bool keepdim=false) {
|
| 27 |
+
return at::_ops::pairwise_distance::call(x1, x2, p, eps, keepdim);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/randn.h
ADDED
|
@@ -0,0 +1,377 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/randn_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 26 |
+
inline at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={}) {
|
| 27 |
+
return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 28 |
+
}
|
| 29 |
+
namespace symint {
|
| 30 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 31 |
+
at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={}) {
|
| 32 |
+
return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 37 |
+
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 38 |
+
return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
|
| 39 |
+
}
|
| 40 |
+
namespace symint {
|
| 41 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 42 |
+
at::Tensor randn(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 43 |
+
return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 48 |
+
inline at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) {
|
| 49 |
+
return at::_ops::randn::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 50 |
+
}
|
| 51 |
+
namespace symint {
|
| 52 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 53 |
+
at::Tensor randn(c10::SymIntArrayRef size, at::TensorOptions options={}) {
|
| 54 |
+
return at::_ops::randn::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 55 |
+
}
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 59 |
+
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 60 |
+
return at::_ops::randn::call(size, dtype, layout, device, pin_memory);
|
| 61 |
+
}
|
| 62 |
+
namespace symint {
|
| 63 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 64 |
+
at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 65 |
+
return at::_ops::randn::call(size, dtype, layout, device, pin_memory);
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 70 |
+
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
|
| 71 |
+
return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 72 |
+
}
|
| 73 |
+
namespace symint {
|
| 74 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 75 |
+
at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
|
| 76 |
+
return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 77 |
+
}
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 81 |
+
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 82 |
+
return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
|
| 83 |
+
}
|
| 84 |
+
namespace symint {
|
| 85 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 86 |
+
at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 87 |
+
return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 92 |
+
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
|
| 93 |
+
return at::_ops::randn_generator::call(size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 94 |
+
}
|
| 95 |
+
namespace symint {
|
| 96 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 97 |
+
at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
|
| 98 |
+
return at::_ops::randn_generator::call(size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 99 |
+
}
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 103 |
+
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 104 |
+
return at::_ops::randn_generator::call(size, generator, dtype, layout, device, pin_memory);
|
| 105 |
+
}
|
| 106 |
+
namespace symint {
|
| 107 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 108 |
+
at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 109 |
+
return at::_ops::randn_generator::call(size, generator, dtype, layout, device, pin_memory);
|
| 110 |
+
}
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 114 |
+
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
|
| 115 |
+
return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 116 |
+
}
|
| 117 |
+
namespace symint {
|
| 118 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 119 |
+
at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
|
| 120 |
+
return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 121 |
+
}
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 125 |
+
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 126 |
+
return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory);
|
| 127 |
+
}
|
| 128 |
+
namespace symint {
|
| 129 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 130 |
+
at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 131 |
+
return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory);
|
| 132 |
+
}
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 136 |
+
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
|
| 137 |
+
return at::_ops::randn_names::call(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 138 |
+
}
|
| 139 |
+
namespace symint {
|
| 140 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 141 |
+
at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
|
| 142 |
+
return at::_ops::randn_names::call(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 143 |
+
}
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 147 |
+
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 148 |
+
return at::_ops::randn_names::call(size, names, dtype, layout, device, pin_memory);
|
| 149 |
+
}
|
| 150 |
+
namespace symint {
|
| 151 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 152 |
+
at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 153 |
+
return at::_ops::randn_names::call(size, names, dtype, layout, device, pin_memory);
|
| 154 |
+
}
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 158 |
+
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
|
| 159 |
+
return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 160 |
+
}
|
| 161 |
+
namespace symint {
|
| 162 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 163 |
+
at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
|
| 164 |
+
return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 165 |
+
}
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 169 |
+
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 170 |
+
return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory);
|
| 171 |
+
}
|
| 172 |
+
namespace symint {
|
| 173 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 174 |
+
at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 175 |
+
return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory);
|
| 176 |
+
}
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 180 |
+
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
|
| 181 |
+
return at::_ops::randn_generator_with_names::call(size, generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 182 |
+
}
|
| 183 |
+
namespace symint {
|
| 184 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 185 |
+
at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
|
| 186 |
+
return at::_ops::randn_generator_with_names::call(size, generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 187 |
+
}
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 191 |
+
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 192 |
+
return at::_ops::randn_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory);
|
| 193 |
+
}
|
| 194 |
+
namespace symint {
|
| 195 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 196 |
+
at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 197 |
+
return at::_ops::randn_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory);
|
| 198 |
+
}
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
|
| 202 |
+
inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size) {
|
| 203 |
+
return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out);
|
| 204 |
+
}
|
| 205 |
+
namespace symint {
|
| 206 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 207 |
+
at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size) {
|
| 208 |
+
return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out);
|
| 209 |
+
}
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
|
| 213 |
+
inline at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out) {
|
| 214 |
+
return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out);
|
| 215 |
+
}
|
| 216 |
+
namespace symint {
|
| 217 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 218 |
+
at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out) {
|
| 219 |
+
return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out);
|
| 220 |
+
}
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
|
| 224 |
+
inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
|
| 225 |
+
return at::_ops::randn_out::call(size, out);
|
| 226 |
+
}
|
| 227 |
+
namespace symint {
|
| 228 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 229 |
+
at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size) {
|
| 230 |
+
return at::_ops::randn_out::call(size, out);
|
| 231 |
+
}
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
|
| 235 |
+
inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
|
| 236 |
+
return at::_ops::randn_out::call(size, out);
|
| 237 |
+
}
|
| 238 |
+
namespace symint {
|
| 239 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 240 |
+
at::Tensor & randn_outf(c10::SymIntArrayRef size, at::Tensor & out) {
|
| 241 |
+
return at::_ops::randn_out::call(size, out);
|
| 242 |
+
}
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
|
| 246 |
+
inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator) {
|
| 247 |
+
return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
|
| 248 |
+
}
|
| 249 |
+
namespace symint {
|
| 250 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 251 |
+
at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator) {
|
| 252 |
+
return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
|
| 253 |
+
}
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
|
| 257 |
+
inline at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
|
| 258 |
+
return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
|
| 259 |
+
}
|
| 260 |
+
namespace symint {
|
| 261 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 262 |
+
at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
|
| 263 |
+
return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
|
| 264 |
+
}
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
|
| 268 |
+
inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
|
| 269 |
+
return at::_ops::randn_generator_out::call(size, generator, out);
|
| 270 |
+
}
|
| 271 |
+
namespace symint {
|
| 272 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 273 |
+
at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
|
| 274 |
+
return at::_ops::randn_generator_out::call(size, generator, out);
|
| 275 |
+
}
|
| 276 |
+
}
|
| 277 |
+
|
| 278 |
+
// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
|
| 279 |
+
inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
|
| 280 |
+
return at::_ops::randn_generator_out::call(size, generator, out);
|
| 281 |
+
}
|
| 282 |
+
namespace symint {
|
| 283 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 284 |
+
at::Tensor & randn_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
|
| 285 |
+
return at::_ops::randn_generator_out::call(size, generator, out);
|
| 286 |
+
}
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
|
| 290 |
+
inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
|
| 291 |
+
return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
|
| 292 |
+
}
|
| 293 |
+
namespace symint {
|
| 294 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 295 |
+
at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
|
| 296 |
+
return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
|
| 297 |
+
}
|
| 298 |
+
}
|
| 299 |
+
|
| 300 |
+
// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
|
| 301 |
+
inline at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
|
| 302 |
+
return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
|
| 303 |
+
}
|
| 304 |
+
namespace symint {
|
| 305 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 306 |
+
at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
|
| 307 |
+
return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
|
| 308 |
+
}
|
| 309 |
+
}
|
| 310 |
+
|
| 311 |
+
// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
|
| 312 |
+
inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names) {
|
| 313 |
+
return at::_ops::randn_names_out::call(size, names, out);
|
| 314 |
+
}
|
| 315 |
+
namespace symint {
|
| 316 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 317 |
+
at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names) {
|
| 318 |
+
return at::_ops::randn_names_out::call(size, names, out);
|
| 319 |
+
}
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
|
| 323 |
+
inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
|
| 324 |
+
return at::_ops::randn_names_out::call(size, names, out);
|
| 325 |
+
}
|
| 326 |
+
namespace symint {
|
| 327 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 328 |
+
at::Tensor & randn_outf(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
|
| 329 |
+
return at::_ops::randn_names_out::call(size, names, out);
|
| 330 |
+
}
|
| 331 |
+
}
|
| 332 |
+
|
| 333 |
+
// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
|
| 334 |
+
inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
|
| 335 |
+
return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
|
| 336 |
+
}
|
| 337 |
+
namespace symint {
|
| 338 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 339 |
+
at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
|
| 340 |
+
return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
|
| 341 |
+
}
|
| 342 |
+
}
|
| 343 |
+
|
| 344 |
+
// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
|
| 345 |
+
inline at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
|
| 346 |
+
return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
|
| 347 |
+
}
|
| 348 |
+
namespace symint {
|
| 349 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 350 |
+
at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
|
| 351 |
+
return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
|
| 352 |
+
}
|
| 353 |
+
}
|
| 354 |
+
|
| 355 |
+
// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
|
| 356 |
+
inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
|
| 357 |
+
return at::_ops::randn_generator_with_names_out::call(size, generator, names, out);
|
| 358 |
+
}
|
| 359 |
+
namespace symint {
|
| 360 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 361 |
+
at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
|
| 362 |
+
return at::_ops::randn_generator_with_names_out::call(size, generator, names, out);
|
| 363 |
+
}
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
|
| 367 |
+
inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
|
| 368 |
+
return at::_ops::randn_generator_with_names_out::call(size, generator, names, out);
|
| 369 |
+
}
|
| 370 |
+
namespace symint {
|
| 371 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 372 |
+
at::Tensor & randn_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
|
| 373 |
+
return at::_ops::randn_generator_with_names_out::call(size, generator, names, out);
|
| 374 |
+
}
|
| 375 |
+
}
|
| 376 |
+
|
| 377 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor scatter_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true);
|
| 21 |
+
TORCH_API at::Tensor & scatter_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true);
|
| 22 |
+
TORCH_API at::Tensor & scatter_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & scatter_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true);
|
| 24 |
+
|
| 25 |
+
} // namespace meta
|
| 26 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor sigmoid(const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & sigmoid_out(at::Tensor & out, const at::Tensor & self);
|
| 22 |
+
TORCH_API at::Tensor & sigmoid_outf(const at::Tensor & self, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & sigmoid_(at::Tensor & self);
|
| 24 |
+
|
| 25 |
+
} // namespace cpu
|
| 26 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_dim_native.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API int64_t sparse_dim_strided(const at::Tensor & self);
|
| 20 |
+
TORCH_API int64_t sparse_dim_sparse(const at::Tensor & self);
|
| 21 |
+
TORCH_API int64_t sparse_dim_sparse_csr(const at::Tensor & self);
|
| 22 |
+
} // namespace native
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/zeros_like_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API zeros_like {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>, c10::optional<at::MemoryFormat>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::zeros_like")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API zeros_like_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, c10::optional<at::MemoryFormat>, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::zeros_like")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
vllm/lib/python3.10/site-packages/py/_code/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
""" python inspection/code generation API """
|
vllm/lib/python3.10/site-packages/py/_code/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (209 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/py/_code/__pycache__/_assertionnew.cpython-310.pyc
ADDED
|
Binary file (9.44 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/py/_code/__pycache__/_assertionold.cpython-310.pyc
ADDED
|
Binary file (15.8 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/py/_code/__pycache__/_py2traceback.cpython-310.pyc
ADDED
|
Binary file (2.13 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/py/_code/__pycache__/assertion.cpython-310.pyc
ADDED
|
Binary file (2.53 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/py/_code/__pycache__/code.cpython-310.pyc
ADDED
|
Binary file (26.3 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/py/_code/__pycache__/source.cpython-310.pyc
ADDED
|
Binary file (11.9 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/py/_code/_assertionnew.py
ADDED
|
@@ -0,0 +1,322 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Find intermediate evalutation results in assert statements through builtin AST.
|
| 3 |
+
This should replace _assertionold.py eventually.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
import ast
|
| 8 |
+
|
| 9 |
+
import py
|
| 10 |
+
from py._code.assertion import _format_explanation, BuiltinAssertionError
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _is_ast_expr(node):
|
| 14 |
+
return isinstance(node, ast.expr)
|
| 15 |
+
def _is_ast_stmt(node):
|
| 16 |
+
return isinstance(node, ast.stmt)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class Failure(Exception):
|
| 20 |
+
"""Error found while interpreting AST."""
|
| 21 |
+
|
| 22 |
+
def __init__(self, explanation=""):
|
| 23 |
+
self.cause = sys.exc_info()
|
| 24 |
+
self.explanation = explanation
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def interpret(source, frame, should_fail=False):
|
| 28 |
+
mod = ast.parse(source)
|
| 29 |
+
visitor = DebugInterpreter(frame)
|
| 30 |
+
try:
|
| 31 |
+
visitor.visit(mod)
|
| 32 |
+
except Failure:
|
| 33 |
+
failure = sys.exc_info()[1]
|
| 34 |
+
return getfailure(failure)
|
| 35 |
+
if should_fail:
|
| 36 |
+
return ("(assertion failed, but when it was re-run for "
|
| 37 |
+
"printing intermediate values, it did not fail. Suggestions: "
|
| 38 |
+
"compute assert expression before the assert or use --no-assert)")
|
| 39 |
+
|
| 40 |
+
def run(offending_line, frame=None):
|
| 41 |
+
if frame is None:
|
| 42 |
+
frame = py.code.Frame(sys._getframe(1))
|
| 43 |
+
return interpret(offending_line, frame)
|
| 44 |
+
|
| 45 |
+
def getfailure(failure):
|
| 46 |
+
explanation = _format_explanation(failure.explanation)
|
| 47 |
+
value = failure.cause[1]
|
| 48 |
+
if str(value):
|
| 49 |
+
lines = explanation.splitlines()
|
| 50 |
+
if not lines:
|
| 51 |
+
lines.append("")
|
| 52 |
+
lines[0] += " << %s" % (value,)
|
| 53 |
+
explanation = "\n".join(lines)
|
| 54 |
+
text = "%s: %s" % (failure.cause[0].__name__, explanation)
|
| 55 |
+
if text.startswith("AssertionError: assert "):
|
| 56 |
+
text = text[16:]
|
| 57 |
+
return text
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
operator_map = {
|
| 61 |
+
ast.BitOr : "|",
|
| 62 |
+
ast.BitXor : "^",
|
| 63 |
+
ast.BitAnd : "&",
|
| 64 |
+
ast.LShift : "<<",
|
| 65 |
+
ast.RShift : ">>",
|
| 66 |
+
ast.Add : "+",
|
| 67 |
+
ast.Sub : "-",
|
| 68 |
+
ast.Mult : "*",
|
| 69 |
+
ast.Div : "/",
|
| 70 |
+
ast.FloorDiv : "//",
|
| 71 |
+
ast.Mod : "%",
|
| 72 |
+
ast.Eq : "==",
|
| 73 |
+
ast.NotEq : "!=",
|
| 74 |
+
ast.Lt : "<",
|
| 75 |
+
ast.LtE : "<=",
|
| 76 |
+
ast.Gt : ">",
|
| 77 |
+
ast.GtE : ">=",
|
| 78 |
+
ast.Pow : "**",
|
| 79 |
+
ast.Is : "is",
|
| 80 |
+
ast.IsNot : "is not",
|
| 81 |
+
ast.In : "in",
|
| 82 |
+
ast.NotIn : "not in"
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
unary_map = {
|
| 86 |
+
ast.Not : "not %s",
|
| 87 |
+
ast.Invert : "~%s",
|
| 88 |
+
ast.USub : "-%s",
|
| 89 |
+
ast.UAdd : "+%s"
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class DebugInterpreter(ast.NodeVisitor):
|
| 94 |
+
"""Interpret AST nodes to gleam useful debugging information. """
|
| 95 |
+
|
| 96 |
+
def __init__(self, frame):
|
| 97 |
+
self.frame = frame
|
| 98 |
+
|
| 99 |
+
def generic_visit(self, node):
|
| 100 |
+
# Fallback when we don't have a special implementation.
|
| 101 |
+
if _is_ast_expr(node):
|
| 102 |
+
mod = ast.Expression(node)
|
| 103 |
+
co = self._compile(mod)
|
| 104 |
+
try:
|
| 105 |
+
result = self.frame.eval(co)
|
| 106 |
+
except Exception:
|
| 107 |
+
raise Failure()
|
| 108 |
+
explanation = self.frame.repr(result)
|
| 109 |
+
return explanation, result
|
| 110 |
+
elif _is_ast_stmt(node):
|
| 111 |
+
mod = ast.Module([node])
|
| 112 |
+
co = self._compile(mod, "exec")
|
| 113 |
+
try:
|
| 114 |
+
self.frame.exec_(co)
|
| 115 |
+
except Exception:
|
| 116 |
+
raise Failure()
|
| 117 |
+
return None, None
|
| 118 |
+
else:
|
| 119 |
+
raise AssertionError("can't handle %s" %(node,))
|
| 120 |
+
|
| 121 |
+
def _compile(self, source, mode="eval"):
|
| 122 |
+
return compile(source, "<assertion interpretation>", mode)
|
| 123 |
+
|
| 124 |
+
def visit_Expr(self, expr):
|
| 125 |
+
return self.visit(expr.value)
|
| 126 |
+
|
| 127 |
+
def visit_Module(self, mod):
|
| 128 |
+
for stmt in mod.body:
|
| 129 |
+
self.visit(stmt)
|
| 130 |
+
|
| 131 |
+
def visit_Name(self, name):
|
| 132 |
+
explanation, result = self.generic_visit(name)
|
| 133 |
+
# See if the name is local.
|
| 134 |
+
source = "%r in locals() is not globals()" % (name.id,)
|
| 135 |
+
co = self._compile(source)
|
| 136 |
+
try:
|
| 137 |
+
local = self.frame.eval(co)
|
| 138 |
+
except Exception:
|
| 139 |
+
# have to assume it isn't
|
| 140 |
+
local = False
|
| 141 |
+
if not local:
|
| 142 |
+
return name.id, result
|
| 143 |
+
return explanation, result
|
| 144 |
+
|
| 145 |
+
def visit_Compare(self, comp):
|
| 146 |
+
left = comp.left
|
| 147 |
+
left_explanation, left_result = self.visit(left)
|
| 148 |
+
for op, next_op in zip(comp.ops, comp.comparators):
|
| 149 |
+
next_explanation, next_result = self.visit(next_op)
|
| 150 |
+
op_symbol = operator_map[op.__class__]
|
| 151 |
+
explanation = "%s %s %s" % (left_explanation, op_symbol,
|
| 152 |
+
next_explanation)
|
| 153 |
+
source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
|
| 154 |
+
co = self._compile(source)
|
| 155 |
+
try:
|
| 156 |
+
result = self.frame.eval(co, __exprinfo_left=left_result,
|
| 157 |
+
__exprinfo_right=next_result)
|
| 158 |
+
except Exception:
|
| 159 |
+
raise Failure(explanation)
|
| 160 |
+
try:
|
| 161 |
+
if not result:
|
| 162 |
+
break
|
| 163 |
+
except KeyboardInterrupt:
|
| 164 |
+
raise
|
| 165 |
+
except:
|
| 166 |
+
break
|
| 167 |
+
left_explanation, left_result = next_explanation, next_result
|
| 168 |
+
|
| 169 |
+
rcomp = py.code._reprcompare
|
| 170 |
+
if rcomp:
|
| 171 |
+
res = rcomp(op_symbol, left_result, next_result)
|
| 172 |
+
if res:
|
| 173 |
+
explanation = res
|
| 174 |
+
return explanation, result
|
| 175 |
+
|
| 176 |
+
def visit_BoolOp(self, boolop):
|
| 177 |
+
is_or = isinstance(boolop.op, ast.Or)
|
| 178 |
+
explanations = []
|
| 179 |
+
for operand in boolop.values:
|
| 180 |
+
explanation, result = self.visit(operand)
|
| 181 |
+
explanations.append(explanation)
|
| 182 |
+
if result == is_or:
|
| 183 |
+
break
|
| 184 |
+
name = is_or and " or " or " and "
|
| 185 |
+
explanation = "(" + name.join(explanations) + ")"
|
| 186 |
+
return explanation, result
|
| 187 |
+
|
| 188 |
+
def visit_UnaryOp(self, unary):
|
| 189 |
+
pattern = unary_map[unary.op.__class__]
|
| 190 |
+
operand_explanation, operand_result = self.visit(unary.operand)
|
| 191 |
+
explanation = pattern % (operand_explanation,)
|
| 192 |
+
co = self._compile(pattern % ("__exprinfo_expr",))
|
| 193 |
+
try:
|
| 194 |
+
result = self.frame.eval(co, __exprinfo_expr=operand_result)
|
| 195 |
+
except Exception:
|
| 196 |
+
raise Failure(explanation)
|
| 197 |
+
return explanation, result
|
| 198 |
+
|
| 199 |
+
def visit_BinOp(self, binop):
|
| 200 |
+
left_explanation, left_result = self.visit(binop.left)
|
| 201 |
+
right_explanation, right_result = self.visit(binop.right)
|
| 202 |
+
symbol = operator_map[binop.op.__class__]
|
| 203 |
+
explanation = "(%s %s %s)" % (left_explanation, symbol,
|
| 204 |
+
right_explanation)
|
| 205 |
+
source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
|
| 206 |
+
co = self._compile(source)
|
| 207 |
+
try:
|
| 208 |
+
result = self.frame.eval(co, __exprinfo_left=left_result,
|
| 209 |
+
__exprinfo_right=right_result)
|
| 210 |
+
except Exception:
|
| 211 |
+
raise Failure(explanation)
|
| 212 |
+
return explanation, result
|
| 213 |
+
|
| 214 |
+
def visit_Call(self, call):
|
| 215 |
+
func_explanation, func = self.visit(call.func)
|
| 216 |
+
arg_explanations = []
|
| 217 |
+
ns = {"__exprinfo_func" : func}
|
| 218 |
+
arguments = []
|
| 219 |
+
for arg in call.args:
|
| 220 |
+
arg_explanation, arg_result = self.visit(arg)
|
| 221 |
+
arg_name = "__exprinfo_%s" % (len(ns),)
|
| 222 |
+
ns[arg_name] = arg_result
|
| 223 |
+
arguments.append(arg_name)
|
| 224 |
+
arg_explanations.append(arg_explanation)
|
| 225 |
+
for keyword in call.keywords:
|
| 226 |
+
arg_explanation, arg_result = self.visit(keyword.value)
|
| 227 |
+
arg_name = "__exprinfo_%s" % (len(ns),)
|
| 228 |
+
ns[arg_name] = arg_result
|
| 229 |
+
keyword_source = "%s=%%s" % (keyword.arg)
|
| 230 |
+
arguments.append(keyword_source % (arg_name,))
|
| 231 |
+
arg_explanations.append(keyword_source % (arg_explanation,))
|
| 232 |
+
if call.starargs:
|
| 233 |
+
arg_explanation, arg_result = self.visit(call.starargs)
|
| 234 |
+
arg_name = "__exprinfo_star"
|
| 235 |
+
ns[arg_name] = arg_result
|
| 236 |
+
arguments.append("*%s" % (arg_name,))
|
| 237 |
+
arg_explanations.append("*%s" % (arg_explanation,))
|
| 238 |
+
if call.kwargs:
|
| 239 |
+
arg_explanation, arg_result = self.visit(call.kwargs)
|
| 240 |
+
arg_name = "__exprinfo_kwds"
|
| 241 |
+
ns[arg_name] = arg_result
|
| 242 |
+
arguments.append("**%s" % (arg_name,))
|
| 243 |
+
arg_explanations.append("**%s" % (arg_explanation,))
|
| 244 |
+
args_explained = ", ".join(arg_explanations)
|
| 245 |
+
explanation = "%s(%s)" % (func_explanation, args_explained)
|
| 246 |
+
args = ", ".join(arguments)
|
| 247 |
+
source = "__exprinfo_func(%s)" % (args,)
|
| 248 |
+
co = self._compile(source)
|
| 249 |
+
try:
|
| 250 |
+
result = self.frame.eval(co, **ns)
|
| 251 |
+
except Exception:
|
| 252 |
+
raise Failure(explanation)
|
| 253 |
+
pattern = "%s\n{%s = %s\n}"
|
| 254 |
+
rep = self.frame.repr(result)
|
| 255 |
+
explanation = pattern % (rep, rep, explanation)
|
| 256 |
+
return explanation, result
|
| 257 |
+
|
| 258 |
+
def _is_builtin_name(self, name):
|
| 259 |
+
pattern = "%r not in globals() and %r not in locals()"
|
| 260 |
+
source = pattern % (name.id, name.id)
|
| 261 |
+
co = self._compile(source)
|
| 262 |
+
try:
|
| 263 |
+
return self.frame.eval(co)
|
| 264 |
+
except Exception:
|
| 265 |
+
return False
|
| 266 |
+
|
| 267 |
+
def visit_Attribute(self, attr):
|
| 268 |
+
if not isinstance(attr.ctx, ast.Load):
|
| 269 |
+
return self.generic_visit(attr)
|
| 270 |
+
source_explanation, source_result = self.visit(attr.value)
|
| 271 |
+
explanation = "%s.%s" % (source_explanation, attr.attr)
|
| 272 |
+
source = "__exprinfo_expr.%s" % (attr.attr,)
|
| 273 |
+
co = self._compile(source)
|
| 274 |
+
try:
|
| 275 |
+
result = self.frame.eval(co, __exprinfo_expr=source_result)
|
| 276 |
+
except Exception:
|
| 277 |
+
raise Failure(explanation)
|
| 278 |
+
explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
|
| 279 |
+
self.frame.repr(result),
|
| 280 |
+
source_explanation, attr.attr)
|
| 281 |
+
# Check if the attr is from an instance.
|
| 282 |
+
source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
|
| 283 |
+
source = source % (attr.attr,)
|
| 284 |
+
co = self._compile(source)
|
| 285 |
+
try:
|
| 286 |
+
from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
|
| 287 |
+
except Exception:
|
| 288 |
+
from_instance = True
|
| 289 |
+
if from_instance:
|
| 290 |
+
rep = self.frame.repr(result)
|
| 291 |
+
pattern = "%s\n{%s = %s\n}"
|
| 292 |
+
explanation = pattern % (rep, rep, explanation)
|
| 293 |
+
return explanation, result
|
| 294 |
+
|
| 295 |
+
def visit_Assert(self, assrt):
|
| 296 |
+
test_explanation, test_result = self.visit(assrt.test)
|
| 297 |
+
if test_explanation.startswith("False\n{False =") and \
|
| 298 |
+
test_explanation.endswith("\n"):
|
| 299 |
+
test_explanation = test_explanation[15:-2]
|
| 300 |
+
explanation = "assert %s" % (test_explanation,)
|
| 301 |
+
if not test_result:
|
| 302 |
+
try:
|
| 303 |
+
raise BuiltinAssertionError
|
| 304 |
+
except Exception:
|
| 305 |
+
raise Failure(explanation)
|
| 306 |
+
return explanation, test_result
|
| 307 |
+
|
| 308 |
+
def visit_Assign(self, assign):
|
| 309 |
+
value_explanation, value_result = self.visit(assign.value)
|
| 310 |
+
explanation = "... = %s" % (value_explanation,)
|
| 311 |
+
name = ast.Name("__exprinfo_expr", ast.Load(),
|
| 312 |
+
lineno=assign.value.lineno,
|
| 313 |
+
col_offset=assign.value.col_offset)
|
| 314 |
+
new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
|
| 315 |
+
col_offset=assign.col_offset)
|
| 316 |
+
mod = ast.Module([new_assign])
|
| 317 |
+
co = self._compile(mod, "exec")
|
| 318 |
+
try:
|
| 319 |
+
self.frame.exec_(co, __exprinfo_expr=value_result)
|
| 320 |
+
except Exception:
|
| 321 |
+
raise Failure(explanation)
|
| 322 |
+
return explanation, value_result
|
vllm/lib/python3.10/site-packages/py/_code/_assertionold.py
ADDED
|
@@ -0,0 +1,556 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import py
|
| 2 |
+
import sys, inspect
|
| 3 |
+
from compiler import parse, ast, pycodegen
|
| 4 |
+
from py._code.assertion import BuiltinAssertionError, _format_explanation
|
| 5 |
+
import types
|
| 6 |
+
|
| 7 |
+
passthroughex = py.builtin._sysex
|
| 8 |
+
|
| 9 |
+
class Failure:
|
| 10 |
+
def __init__(self, node):
|
| 11 |
+
self.exc, self.value, self.tb = sys.exc_info()
|
| 12 |
+
self.node = node
|
| 13 |
+
|
| 14 |
+
class View(object):
|
| 15 |
+
"""View base class.
|
| 16 |
+
|
| 17 |
+
If C is a subclass of View, then C(x) creates a proxy object around
|
| 18 |
+
the object x. The actual class of the proxy is not C in general,
|
| 19 |
+
but a *subclass* of C determined by the rules below. To avoid confusion
|
| 20 |
+
we call view class the class of the proxy (a subclass of C, so of View)
|
| 21 |
+
and object class the class of x.
|
| 22 |
+
|
| 23 |
+
Attributes and methods not found in the proxy are automatically read on x.
|
| 24 |
+
Other operations like setting attributes are performed on the proxy, as
|
| 25 |
+
determined by its view class. The object x is available from the proxy
|
| 26 |
+
as its __obj__ attribute.
|
| 27 |
+
|
| 28 |
+
The view class selection is determined by the __view__ tuples and the
|
| 29 |
+
optional __viewkey__ method. By default, the selected view class is the
|
| 30 |
+
most specific subclass of C whose __view__ mentions the class of x.
|
| 31 |
+
If no such subclass is found, the search proceeds with the parent
|
| 32 |
+
object classes. For example, C(True) will first look for a subclass
|
| 33 |
+
of C with __view__ = (..., bool, ...) and only if it doesn't find any
|
| 34 |
+
look for one with __view__ = (..., int, ...), and then ..., object,...
|
| 35 |
+
If everything fails the class C itself is considered to be the default.
|
| 36 |
+
|
| 37 |
+
Alternatively, the view class selection can be driven by another aspect
|
| 38 |
+
of the object x, instead of the class of x, by overriding __viewkey__.
|
| 39 |
+
See last example at the end of this module.
|
| 40 |
+
"""
|
| 41 |
+
|
| 42 |
+
_viewcache = {}
|
| 43 |
+
__view__ = ()
|
| 44 |
+
|
| 45 |
+
def __new__(rootclass, obj, *args, **kwds):
|
| 46 |
+
self = object.__new__(rootclass)
|
| 47 |
+
self.__obj__ = obj
|
| 48 |
+
self.__rootclass__ = rootclass
|
| 49 |
+
key = self.__viewkey__()
|
| 50 |
+
try:
|
| 51 |
+
self.__class__ = self._viewcache[key]
|
| 52 |
+
except KeyError:
|
| 53 |
+
self.__class__ = self._selectsubclass(key)
|
| 54 |
+
return self
|
| 55 |
+
|
| 56 |
+
def __getattr__(self, attr):
|
| 57 |
+
# attributes not found in the normal hierarchy rooted on View
|
| 58 |
+
# are looked up in the object's real class
|
| 59 |
+
return getattr(self.__obj__, attr)
|
| 60 |
+
|
| 61 |
+
def __viewkey__(self):
|
| 62 |
+
return self.__obj__.__class__
|
| 63 |
+
|
| 64 |
+
def __matchkey__(self, key, subclasses):
|
| 65 |
+
if inspect.isclass(key):
|
| 66 |
+
keys = inspect.getmro(key)
|
| 67 |
+
else:
|
| 68 |
+
keys = [key]
|
| 69 |
+
for key in keys:
|
| 70 |
+
result = [C for C in subclasses if key in C.__view__]
|
| 71 |
+
if result:
|
| 72 |
+
return result
|
| 73 |
+
return []
|
| 74 |
+
|
| 75 |
+
def _selectsubclass(self, key):
|
| 76 |
+
subclasses = list(enumsubclasses(self.__rootclass__))
|
| 77 |
+
for C in subclasses:
|
| 78 |
+
if not isinstance(C.__view__, tuple):
|
| 79 |
+
C.__view__ = (C.__view__,)
|
| 80 |
+
choices = self.__matchkey__(key, subclasses)
|
| 81 |
+
if not choices:
|
| 82 |
+
return self.__rootclass__
|
| 83 |
+
elif len(choices) == 1:
|
| 84 |
+
return choices[0]
|
| 85 |
+
else:
|
| 86 |
+
# combine the multiple choices
|
| 87 |
+
return type('?', tuple(choices), {})
|
| 88 |
+
|
| 89 |
+
def __repr__(self):
|
| 90 |
+
return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def enumsubclasses(cls):
|
| 94 |
+
for subcls in cls.__subclasses__():
|
| 95 |
+
for subsubclass in enumsubclasses(subcls):
|
| 96 |
+
yield subsubclass
|
| 97 |
+
yield cls
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
class Interpretable(View):
|
| 101 |
+
"""A parse tree node with a few extra methods."""
|
| 102 |
+
explanation = None
|
| 103 |
+
|
| 104 |
+
def is_builtin(self, frame):
|
| 105 |
+
return False
|
| 106 |
+
|
| 107 |
+
def eval(self, frame):
|
| 108 |
+
# fall-back for unknown expression nodes
|
| 109 |
+
try:
|
| 110 |
+
expr = ast.Expression(self.__obj__)
|
| 111 |
+
expr.filename = '<eval>'
|
| 112 |
+
self.__obj__.filename = '<eval>'
|
| 113 |
+
co = pycodegen.ExpressionCodeGenerator(expr).getCode()
|
| 114 |
+
result = frame.eval(co)
|
| 115 |
+
except passthroughex:
|
| 116 |
+
raise
|
| 117 |
+
except:
|
| 118 |
+
raise Failure(self)
|
| 119 |
+
self.result = result
|
| 120 |
+
self.explanation = self.explanation or frame.repr(self.result)
|
| 121 |
+
|
| 122 |
+
def run(self, frame):
|
| 123 |
+
# fall-back for unknown statement nodes
|
| 124 |
+
try:
|
| 125 |
+
expr = ast.Module(None, ast.Stmt([self.__obj__]))
|
| 126 |
+
expr.filename = '<run>'
|
| 127 |
+
co = pycodegen.ModuleCodeGenerator(expr).getCode()
|
| 128 |
+
frame.exec_(co)
|
| 129 |
+
except passthroughex:
|
| 130 |
+
raise
|
| 131 |
+
except:
|
| 132 |
+
raise Failure(self)
|
| 133 |
+
|
| 134 |
+
def nice_explanation(self):
|
| 135 |
+
return _format_explanation(self.explanation)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class Name(Interpretable):
|
| 139 |
+
__view__ = ast.Name
|
| 140 |
+
|
| 141 |
+
def is_local(self, frame):
|
| 142 |
+
source = '%r in locals() is not globals()' % self.name
|
| 143 |
+
try:
|
| 144 |
+
return frame.is_true(frame.eval(source))
|
| 145 |
+
except passthroughex:
|
| 146 |
+
raise
|
| 147 |
+
except:
|
| 148 |
+
return False
|
| 149 |
+
|
| 150 |
+
def is_global(self, frame):
|
| 151 |
+
source = '%r in globals()' % self.name
|
| 152 |
+
try:
|
| 153 |
+
return frame.is_true(frame.eval(source))
|
| 154 |
+
except passthroughex:
|
| 155 |
+
raise
|
| 156 |
+
except:
|
| 157 |
+
return False
|
| 158 |
+
|
| 159 |
+
def is_builtin(self, frame):
|
| 160 |
+
source = '%r not in locals() and %r not in globals()' % (
|
| 161 |
+
self.name, self.name)
|
| 162 |
+
try:
|
| 163 |
+
return frame.is_true(frame.eval(source))
|
| 164 |
+
except passthroughex:
|
| 165 |
+
raise
|
| 166 |
+
except:
|
| 167 |
+
return False
|
| 168 |
+
|
| 169 |
+
def eval(self, frame):
|
| 170 |
+
super(Name, self).eval(frame)
|
| 171 |
+
if not self.is_local(frame):
|
| 172 |
+
self.explanation = self.name
|
| 173 |
+
|
| 174 |
+
class Compare(Interpretable):
|
| 175 |
+
__view__ = ast.Compare
|
| 176 |
+
|
| 177 |
+
def eval(self, frame):
|
| 178 |
+
expr = Interpretable(self.expr)
|
| 179 |
+
expr.eval(frame)
|
| 180 |
+
for operation, expr2 in self.ops:
|
| 181 |
+
if hasattr(self, 'result'):
|
| 182 |
+
# shortcutting in chained expressions
|
| 183 |
+
if not frame.is_true(self.result):
|
| 184 |
+
break
|
| 185 |
+
expr2 = Interpretable(expr2)
|
| 186 |
+
expr2.eval(frame)
|
| 187 |
+
self.explanation = "%s %s %s" % (
|
| 188 |
+
expr.explanation, operation, expr2.explanation)
|
| 189 |
+
source = "__exprinfo_left %s __exprinfo_right" % operation
|
| 190 |
+
try:
|
| 191 |
+
self.result = frame.eval(source,
|
| 192 |
+
__exprinfo_left=expr.result,
|
| 193 |
+
__exprinfo_right=expr2.result)
|
| 194 |
+
except passthroughex:
|
| 195 |
+
raise
|
| 196 |
+
except:
|
| 197 |
+
raise Failure(self)
|
| 198 |
+
expr = expr2
|
| 199 |
+
|
| 200 |
+
class And(Interpretable):
|
| 201 |
+
__view__ = ast.And
|
| 202 |
+
|
| 203 |
+
def eval(self, frame):
|
| 204 |
+
explanations = []
|
| 205 |
+
for expr in self.nodes:
|
| 206 |
+
expr = Interpretable(expr)
|
| 207 |
+
expr.eval(frame)
|
| 208 |
+
explanations.append(expr.explanation)
|
| 209 |
+
self.result = expr.result
|
| 210 |
+
if not frame.is_true(expr.result):
|
| 211 |
+
break
|
| 212 |
+
self.explanation = '(' + ' and '.join(explanations) + ')'
|
| 213 |
+
|
| 214 |
+
class Or(Interpretable):
|
| 215 |
+
__view__ = ast.Or
|
| 216 |
+
|
| 217 |
+
def eval(self, frame):
|
| 218 |
+
explanations = []
|
| 219 |
+
for expr in self.nodes:
|
| 220 |
+
expr = Interpretable(expr)
|
| 221 |
+
expr.eval(frame)
|
| 222 |
+
explanations.append(expr.explanation)
|
| 223 |
+
self.result = expr.result
|
| 224 |
+
if frame.is_true(expr.result):
|
| 225 |
+
break
|
| 226 |
+
self.explanation = '(' + ' or '.join(explanations) + ')'
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
# == Unary operations ==
|
| 230 |
+
keepalive = []
|
| 231 |
+
for astclass, astpattern in {
|
| 232 |
+
ast.Not : 'not __exprinfo_expr',
|
| 233 |
+
ast.Invert : '(~__exprinfo_expr)',
|
| 234 |
+
}.items():
|
| 235 |
+
|
| 236 |
+
class UnaryArith(Interpretable):
|
| 237 |
+
__view__ = astclass
|
| 238 |
+
|
| 239 |
+
def eval(self, frame, astpattern=astpattern):
|
| 240 |
+
expr = Interpretable(self.expr)
|
| 241 |
+
expr.eval(frame)
|
| 242 |
+
self.explanation = astpattern.replace('__exprinfo_expr',
|
| 243 |
+
expr.explanation)
|
| 244 |
+
try:
|
| 245 |
+
self.result = frame.eval(astpattern,
|
| 246 |
+
__exprinfo_expr=expr.result)
|
| 247 |
+
except passthroughex:
|
| 248 |
+
raise
|
| 249 |
+
except:
|
| 250 |
+
raise Failure(self)
|
| 251 |
+
|
| 252 |
+
keepalive.append(UnaryArith)
|
| 253 |
+
|
| 254 |
+
# == Binary operations ==
|
| 255 |
+
for astclass, astpattern in {
|
| 256 |
+
ast.Add : '(__exprinfo_left + __exprinfo_right)',
|
| 257 |
+
ast.Sub : '(__exprinfo_left - __exprinfo_right)',
|
| 258 |
+
ast.Mul : '(__exprinfo_left * __exprinfo_right)',
|
| 259 |
+
ast.Div : '(__exprinfo_left / __exprinfo_right)',
|
| 260 |
+
ast.Mod : '(__exprinfo_left % __exprinfo_right)',
|
| 261 |
+
ast.Power : '(__exprinfo_left ** __exprinfo_right)',
|
| 262 |
+
}.items():
|
| 263 |
+
|
| 264 |
+
class BinaryArith(Interpretable):
|
| 265 |
+
__view__ = astclass
|
| 266 |
+
|
| 267 |
+
def eval(self, frame, astpattern=astpattern):
|
| 268 |
+
left = Interpretable(self.left)
|
| 269 |
+
left.eval(frame)
|
| 270 |
+
right = Interpretable(self.right)
|
| 271 |
+
right.eval(frame)
|
| 272 |
+
self.explanation = (astpattern
|
| 273 |
+
.replace('__exprinfo_left', left .explanation)
|
| 274 |
+
.replace('__exprinfo_right', right.explanation))
|
| 275 |
+
try:
|
| 276 |
+
self.result = frame.eval(astpattern,
|
| 277 |
+
__exprinfo_left=left.result,
|
| 278 |
+
__exprinfo_right=right.result)
|
| 279 |
+
except passthroughex:
|
| 280 |
+
raise
|
| 281 |
+
except:
|
| 282 |
+
raise Failure(self)
|
| 283 |
+
|
| 284 |
+
keepalive.append(BinaryArith)
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
class CallFunc(Interpretable):
|
| 288 |
+
__view__ = ast.CallFunc
|
| 289 |
+
|
| 290 |
+
def is_bool(self, frame):
|
| 291 |
+
source = 'isinstance(__exprinfo_value, bool)'
|
| 292 |
+
try:
|
| 293 |
+
return frame.is_true(frame.eval(source,
|
| 294 |
+
__exprinfo_value=self.result))
|
| 295 |
+
except passthroughex:
|
| 296 |
+
raise
|
| 297 |
+
except:
|
| 298 |
+
return False
|
| 299 |
+
|
| 300 |
+
def eval(self, frame):
|
| 301 |
+
node = Interpretable(self.node)
|
| 302 |
+
node.eval(frame)
|
| 303 |
+
explanations = []
|
| 304 |
+
vars = {'__exprinfo_fn': node.result}
|
| 305 |
+
source = '__exprinfo_fn('
|
| 306 |
+
for a in self.args:
|
| 307 |
+
if isinstance(a, ast.Keyword):
|
| 308 |
+
keyword = a.name
|
| 309 |
+
a = a.expr
|
| 310 |
+
else:
|
| 311 |
+
keyword = None
|
| 312 |
+
a = Interpretable(a)
|
| 313 |
+
a.eval(frame)
|
| 314 |
+
argname = '__exprinfo_%d' % len(vars)
|
| 315 |
+
vars[argname] = a.result
|
| 316 |
+
if keyword is None:
|
| 317 |
+
source += argname + ','
|
| 318 |
+
explanations.append(a.explanation)
|
| 319 |
+
else:
|
| 320 |
+
source += '%s=%s,' % (keyword, argname)
|
| 321 |
+
explanations.append('%s=%s' % (keyword, a.explanation))
|
| 322 |
+
if self.star_args:
|
| 323 |
+
star_args = Interpretable(self.star_args)
|
| 324 |
+
star_args.eval(frame)
|
| 325 |
+
argname = '__exprinfo_star'
|
| 326 |
+
vars[argname] = star_args.result
|
| 327 |
+
source += '*' + argname + ','
|
| 328 |
+
explanations.append('*' + star_args.explanation)
|
| 329 |
+
if self.dstar_args:
|
| 330 |
+
dstar_args = Interpretable(self.dstar_args)
|
| 331 |
+
dstar_args.eval(frame)
|
| 332 |
+
argname = '__exprinfo_kwds'
|
| 333 |
+
vars[argname] = dstar_args.result
|
| 334 |
+
source += '**' + argname + ','
|
| 335 |
+
explanations.append('**' + dstar_args.explanation)
|
| 336 |
+
self.explanation = "%s(%s)" % (
|
| 337 |
+
node.explanation, ', '.join(explanations))
|
| 338 |
+
if source.endswith(','):
|
| 339 |
+
source = source[:-1]
|
| 340 |
+
source += ')'
|
| 341 |
+
try:
|
| 342 |
+
self.result = frame.eval(source, **vars)
|
| 343 |
+
except passthroughex:
|
| 344 |
+
raise
|
| 345 |
+
except:
|
| 346 |
+
raise Failure(self)
|
| 347 |
+
if not node.is_builtin(frame) or not self.is_bool(frame):
|
| 348 |
+
r = frame.repr(self.result)
|
| 349 |
+
self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
|
| 350 |
+
|
| 351 |
+
class Getattr(Interpretable):
|
| 352 |
+
__view__ = ast.Getattr
|
| 353 |
+
|
| 354 |
+
def eval(self, frame):
|
| 355 |
+
expr = Interpretable(self.expr)
|
| 356 |
+
expr.eval(frame)
|
| 357 |
+
source = '__exprinfo_expr.%s' % self.attrname
|
| 358 |
+
try:
|
| 359 |
+
self.result = frame.eval(source, __exprinfo_expr=expr.result)
|
| 360 |
+
except passthroughex:
|
| 361 |
+
raise
|
| 362 |
+
except:
|
| 363 |
+
raise Failure(self)
|
| 364 |
+
self.explanation = '%s.%s' % (expr.explanation, self.attrname)
|
| 365 |
+
# if the attribute comes from the instance, its value is interesting
|
| 366 |
+
source = ('hasattr(__exprinfo_expr, "__dict__") and '
|
| 367 |
+
'%r in __exprinfo_expr.__dict__' % self.attrname)
|
| 368 |
+
try:
|
| 369 |
+
from_instance = frame.is_true(
|
| 370 |
+
frame.eval(source, __exprinfo_expr=expr.result))
|
| 371 |
+
except passthroughex:
|
| 372 |
+
raise
|
| 373 |
+
except:
|
| 374 |
+
from_instance = True
|
| 375 |
+
if from_instance:
|
| 376 |
+
r = frame.repr(self.result)
|
| 377 |
+
self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
|
| 378 |
+
|
| 379 |
+
# == Re-interpretation of full statements ==
|
| 380 |
+
|
| 381 |
+
class Assert(Interpretable):
|
| 382 |
+
__view__ = ast.Assert
|
| 383 |
+
|
| 384 |
+
def run(self, frame):
|
| 385 |
+
test = Interpretable(self.test)
|
| 386 |
+
test.eval(frame)
|
| 387 |
+
# simplify 'assert False where False = ...'
|
| 388 |
+
if (test.explanation.startswith('False\n{False = ') and
|
| 389 |
+
test.explanation.endswith('\n}')):
|
| 390 |
+
test.explanation = test.explanation[15:-2]
|
| 391 |
+
# print the result as 'assert <explanation>'
|
| 392 |
+
self.result = test.result
|
| 393 |
+
self.explanation = 'assert ' + test.explanation
|
| 394 |
+
if not frame.is_true(test.result):
|
| 395 |
+
try:
|
| 396 |
+
raise BuiltinAssertionError
|
| 397 |
+
except passthroughex:
|
| 398 |
+
raise
|
| 399 |
+
except:
|
| 400 |
+
raise Failure(self)
|
| 401 |
+
|
| 402 |
+
class Assign(Interpretable):
|
| 403 |
+
__view__ = ast.Assign
|
| 404 |
+
|
| 405 |
+
def run(self, frame):
|
| 406 |
+
expr = Interpretable(self.expr)
|
| 407 |
+
expr.eval(frame)
|
| 408 |
+
self.result = expr.result
|
| 409 |
+
self.explanation = '... = ' + expr.explanation
|
| 410 |
+
# fall-back-run the rest of the assignment
|
| 411 |
+
ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
|
| 412 |
+
mod = ast.Module(None, ast.Stmt([ass]))
|
| 413 |
+
mod.filename = '<run>'
|
| 414 |
+
co = pycodegen.ModuleCodeGenerator(mod).getCode()
|
| 415 |
+
try:
|
| 416 |
+
frame.exec_(co, __exprinfo_expr=expr.result)
|
| 417 |
+
except passthroughex:
|
| 418 |
+
raise
|
| 419 |
+
except:
|
| 420 |
+
raise Failure(self)
|
| 421 |
+
|
| 422 |
+
class Discard(Interpretable):
|
| 423 |
+
__view__ = ast.Discard
|
| 424 |
+
|
| 425 |
+
def run(self, frame):
|
| 426 |
+
expr = Interpretable(self.expr)
|
| 427 |
+
expr.eval(frame)
|
| 428 |
+
self.result = expr.result
|
| 429 |
+
self.explanation = expr.explanation
|
| 430 |
+
|
| 431 |
+
class Stmt(Interpretable):
|
| 432 |
+
__view__ = ast.Stmt
|
| 433 |
+
|
| 434 |
+
def run(self, frame):
|
| 435 |
+
for stmt in self.nodes:
|
| 436 |
+
stmt = Interpretable(stmt)
|
| 437 |
+
stmt.run(frame)
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
def report_failure(e):
|
| 441 |
+
explanation = e.node.nice_explanation()
|
| 442 |
+
if explanation:
|
| 443 |
+
explanation = ", in: " + explanation
|
| 444 |
+
else:
|
| 445 |
+
explanation = ""
|
| 446 |
+
sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
|
| 447 |
+
|
| 448 |
+
def check(s, frame=None):
|
| 449 |
+
if frame is None:
|
| 450 |
+
frame = sys._getframe(1)
|
| 451 |
+
frame = py.code.Frame(frame)
|
| 452 |
+
expr = parse(s, 'eval')
|
| 453 |
+
assert isinstance(expr, ast.Expression)
|
| 454 |
+
node = Interpretable(expr.node)
|
| 455 |
+
try:
|
| 456 |
+
node.eval(frame)
|
| 457 |
+
except passthroughex:
|
| 458 |
+
raise
|
| 459 |
+
except Failure:
|
| 460 |
+
e = sys.exc_info()[1]
|
| 461 |
+
report_failure(e)
|
| 462 |
+
else:
|
| 463 |
+
if not frame.is_true(node.result):
|
| 464 |
+
sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
###########################################################
|
| 468 |
+
# API / Entry points
|
| 469 |
+
# #########################################################
|
| 470 |
+
|
| 471 |
+
def interpret(source, frame, should_fail=False):
|
| 472 |
+
module = Interpretable(parse(source, 'exec').node)
|
| 473 |
+
#print "got module", module
|
| 474 |
+
if isinstance(frame, types.FrameType):
|
| 475 |
+
frame = py.code.Frame(frame)
|
| 476 |
+
try:
|
| 477 |
+
module.run(frame)
|
| 478 |
+
except Failure:
|
| 479 |
+
e = sys.exc_info()[1]
|
| 480 |
+
return getfailure(e)
|
| 481 |
+
except passthroughex:
|
| 482 |
+
raise
|
| 483 |
+
except:
|
| 484 |
+
import traceback
|
| 485 |
+
traceback.print_exc()
|
| 486 |
+
if should_fail:
|
| 487 |
+
return ("(assertion failed, but when it was re-run for "
|
| 488 |
+
"printing intermediate values, it did not fail. Suggestions: "
|
| 489 |
+
"compute assert expression before the assert or use --nomagic)")
|
| 490 |
+
else:
|
| 491 |
+
return None
|
| 492 |
+
|
| 493 |
+
def getmsg(excinfo):
|
| 494 |
+
if isinstance(excinfo, tuple):
|
| 495 |
+
excinfo = py.code.ExceptionInfo(excinfo)
|
| 496 |
+
#frame, line = gettbline(tb)
|
| 497 |
+
#frame = py.code.Frame(frame)
|
| 498 |
+
#return interpret(line, frame)
|
| 499 |
+
|
| 500 |
+
tb = excinfo.traceback[-1]
|
| 501 |
+
source = str(tb.statement).strip()
|
| 502 |
+
x = interpret(source, tb.frame, should_fail=True)
|
| 503 |
+
if not isinstance(x, str):
|
| 504 |
+
raise TypeError("interpret returned non-string %r" % (x,))
|
| 505 |
+
return x
|
| 506 |
+
|
| 507 |
+
def getfailure(e):
|
| 508 |
+
explanation = e.node.nice_explanation()
|
| 509 |
+
if str(e.value):
|
| 510 |
+
lines = explanation.split('\n')
|
| 511 |
+
lines[0] += " << %s" % (e.value,)
|
| 512 |
+
explanation = '\n'.join(lines)
|
| 513 |
+
text = "%s: %s" % (e.exc.__name__, explanation)
|
| 514 |
+
if text.startswith('AssertionError: assert '):
|
| 515 |
+
text = text[16:]
|
| 516 |
+
return text
|
| 517 |
+
|
| 518 |
+
def run(s, frame=None):
|
| 519 |
+
if frame is None:
|
| 520 |
+
frame = sys._getframe(1)
|
| 521 |
+
frame = py.code.Frame(frame)
|
| 522 |
+
module = Interpretable(parse(s, 'exec').node)
|
| 523 |
+
try:
|
| 524 |
+
module.run(frame)
|
| 525 |
+
except Failure:
|
| 526 |
+
e = sys.exc_info()[1]
|
| 527 |
+
report_failure(e)
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
if __name__ == '__main__':
|
| 531 |
+
# example:
|
| 532 |
+
def f():
|
| 533 |
+
return 5
|
| 534 |
+
def g():
|
| 535 |
+
return 3
|
| 536 |
+
def h(x):
|
| 537 |
+
return 'never'
|
| 538 |
+
check("f() * g() == 5")
|
| 539 |
+
check("not f()")
|
| 540 |
+
check("not (f() and g() or 0)")
|
| 541 |
+
check("f() == g()")
|
| 542 |
+
i = 4
|
| 543 |
+
check("i == f()")
|
| 544 |
+
check("len(f()) == 0")
|
| 545 |
+
check("isinstance(2+3+4, float)")
|
| 546 |
+
|
| 547 |
+
run("x = i")
|
| 548 |
+
check("x == 5")
|
| 549 |
+
|
| 550 |
+
run("assert not f(), 'oops'")
|
| 551 |
+
run("a, b, c = 1, 2")
|
| 552 |
+
run("a, b, c = f()")
|
| 553 |
+
|
| 554 |
+
check("max([f(),g()]) == 4")
|
| 555 |
+
check("'hello'[g()] == 'h'")
|
| 556 |
+
run("'guk%d' % h(f())")
|
vllm/lib/python3.10/site-packages/py/_code/_py2traceback.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# copied from python-2.7.3's traceback.py
|
| 2 |
+
# CHANGES:
|
| 3 |
+
# - some_str is replaced, trying to create unicode strings
|
| 4 |
+
#
|
| 5 |
+
import types
|
| 6 |
+
|
| 7 |
+
def format_exception_only(etype, value):
|
| 8 |
+
"""Format the exception part of a traceback.
|
| 9 |
+
|
| 10 |
+
The arguments are the exception type and value such as given by
|
| 11 |
+
sys.last_type and sys.last_value. The return value is a list of
|
| 12 |
+
strings, each ending in a newline.
|
| 13 |
+
|
| 14 |
+
Normally, the list contains a single string; however, for
|
| 15 |
+
SyntaxError exceptions, it contains several lines that (when
|
| 16 |
+
printed) display detailed information about where the syntax
|
| 17 |
+
error occurred.
|
| 18 |
+
|
| 19 |
+
The message indicating which exception occurred is always the last
|
| 20 |
+
string in the list.
|
| 21 |
+
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
# An instance should not have a meaningful value parameter, but
|
| 25 |
+
# sometimes does, particularly for string exceptions, such as
|
| 26 |
+
# >>> raise string1, string2 # deprecated
|
| 27 |
+
#
|
| 28 |
+
# Clear these out first because issubtype(string1, SyntaxError)
|
| 29 |
+
# would throw another exception and mask the original problem.
|
| 30 |
+
if (isinstance(etype, BaseException) or
|
| 31 |
+
isinstance(etype, types.InstanceType) or
|
| 32 |
+
etype is None or type(etype) is str):
|
| 33 |
+
return [_format_final_exc_line(etype, value)]
|
| 34 |
+
|
| 35 |
+
stype = etype.__name__
|
| 36 |
+
|
| 37 |
+
if not issubclass(etype, SyntaxError):
|
| 38 |
+
return [_format_final_exc_line(stype, value)]
|
| 39 |
+
|
| 40 |
+
# It was a syntax error; show exactly where the problem was found.
|
| 41 |
+
lines = []
|
| 42 |
+
try:
|
| 43 |
+
msg, (filename, lineno, offset, badline) = value.args
|
| 44 |
+
except Exception:
|
| 45 |
+
pass
|
| 46 |
+
else:
|
| 47 |
+
filename = filename or "<string>"
|
| 48 |
+
lines.append(' File "%s", line %d\n' % (filename, lineno))
|
| 49 |
+
if badline is not None:
|
| 50 |
+
lines.append(' %s\n' % badline.strip())
|
| 51 |
+
if offset is not None:
|
| 52 |
+
caretspace = badline.rstrip('\n')[:offset].lstrip()
|
| 53 |
+
# non-space whitespace (likes tabs) must be kept for alignment
|
| 54 |
+
caretspace = ((c.isspace() and c or ' ') for c in caretspace)
|
| 55 |
+
# only three spaces to account for offset1 == pos 0
|
| 56 |
+
lines.append(' %s^\n' % ''.join(caretspace))
|
| 57 |
+
value = msg
|
| 58 |
+
|
| 59 |
+
lines.append(_format_final_exc_line(stype, value))
|
| 60 |
+
return lines
|
| 61 |
+
|
| 62 |
+
def _format_final_exc_line(etype, value):
|
| 63 |
+
"""Return a list of a single line -- normal case for format_exception_only"""
|
| 64 |
+
valuestr = _some_str(value)
|
| 65 |
+
if value is None or not valuestr:
|
| 66 |
+
line = "%s\n" % etype
|
| 67 |
+
else:
|
| 68 |
+
line = "%s: %s\n" % (etype, valuestr)
|
| 69 |
+
return line
|
| 70 |
+
|
| 71 |
+
def _some_str(value):
|
| 72 |
+
try:
|
| 73 |
+
return unicode(value)
|
| 74 |
+
except Exception:
|
| 75 |
+
try:
|
| 76 |
+
return str(value)
|
| 77 |
+
except Exception:
|
| 78 |
+
pass
|
| 79 |
+
return '<unprintable %s object>' % type(value).__name__
|
vllm/lib/python3.10/site-packages/py/_code/assertion.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import py
|
| 3 |
+
|
| 4 |
+
BuiltinAssertionError = py.builtin.builtins.AssertionError
|
| 5 |
+
|
| 6 |
+
_reprcompare = None # if set, will be called by assert reinterp for comparison ops
|
| 7 |
+
|
| 8 |
+
def _format_explanation(explanation):
|
| 9 |
+
"""This formats an explanation
|
| 10 |
+
|
| 11 |
+
Normally all embedded newlines are escaped, however there are
|
| 12 |
+
three exceptions: \n{, \n} and \n~. The first two are intended
|
| 13 |
+
cover nested explanations, see function and attribute explanations
|
| 14 |
+
for examples (.visit_Call(), visit_Attribute()). The last one is
|
| 15 |
+
for when one explanation needs to span multiple lines, e.g. when
|
| 16 |
+
displaying diffs.
|
| 17 |
+
"""
|
| 18 |
+
raw_lines = (explanation or '').split('\n')
|
| 19 |
+
# escape newlines not followed by {, } and ~
|
| 20 |
+
lines = [raw_lines[0]]
|
| 21 |
+
for l in raw_lines[1:]:
|
| 22 |
+
if l.startswith('{') or l.startswith('}') or l.startswith('~'):
|
| 23 |
+
lines.append(l)
|
| 24 |
+
else:
|
| 25 |
+
lines[-1] += '\\n' + l
|
| 26 |
+
|
| 27 |
+
result = lines[:1]
|
| 28 |
+
stack = [0]
|
| 29 |
+
stackcnt = [0]
|
| 30 |
+
for line in lines[1:]:
|
| 31 |
+
if line.startswith('{'):
|
| 32 |
+
if stackcnt[-1]:
|
| 33 |
+
s = 'and '
|
| 34 |
+
else:
|
| 35 |
+
s = 'where '
|
| 36 |
+
stack.append(len(result))
|
| 37 |
+
stackcnt[-1] += 1
|
| 38 |
+
stackcnt.append(0)
|
| 39 |
+
result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
|
| 40 |
+
elif line.startswith('}'):
|
| 41 |
+
assert line.startswith('}')
|
| 42 |
+
stack.pop()
|
| 43 |
+
stackcnt.pop()
|
| 44 |
+
result[stack[-1]] += line[1:]
|
| 45 |
+
else:
|
| 46 |
+
assert line.startswith('~')
|
| 47 |
+
result.append(' '*len(stack) + line[1:])
|
| 48 |
+
assert len(stack) == 1
|
| 49 |
+
return '\n'.join(result)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class AssertionError(BuiltinAssertionError):
|
| 53 |
+
def __init__(self, *args):
|
| 54 |
+
BuiltinAssertionError.__init__(self, *args)
|
| 55 |
+
if args:
|
| 56 |
+
try:
|
| 57 |
+
self.msg = str(args[0])
|
| 58 |
+
except py.builtin._sysex:
|
| 59 |
+
raise
|
| 60 |
+
except:
|
| 61 |
+
self.msg = "<[broken __repr__] %s at %0xd>" %(
|
| 62 |
+
args[0].__class__, id(args[0]))
|
| 63 |
+
else:
|
| 64 |
+
f = py.code.Frame(sys._getframe(1))
|
| 65 |
+
try:
|
| 66 |
+
source = f.code.fullsource
|
| 67 |
+
if source is not None:
|
| 68 |
+
try:
|
| 69 |
+
source = source.getstatement(f.lineno, assertion=True)
|
| 70 |
+
except IndexError:
|
| 71 |
+
source = None
|
| 72 |
+
else:
|
| 73 |
+
source = str(source.deindent()).strip()
|
| 74 |
+
except py.error.ENOENT:
|
| 75 |
+
source = None
|
| 76 |
+
# this can also occur during reinterpretation, when the
|
| 77 |
+
# co_filename is set to "<run>".
|
| 78 |
+
if source:
|
| 79 |
+
self.msg = reinterpret(source, f, should_fail=True)
|
| 80 |
+
else:
|
| 81 |
+
self.msg = "<could not determine information>"
|
| 82 |
+
if not self.args:
|
| 83 |
+
self.args = (self.msg,)
|
| 84 |
+
|
| 85 |
+
if sys.version_info > (3, 0):
|
| 86 |
+
AssertionError.__module__ = "builtins"
|
| 87 |
+
reinterpret_old = "old reinterpretation not available for py3"
|
| 88 |
+
else:
|
| 89 |
+
from py._code._assertionold import interpret as reinterpret_old
|
| 90 |
+
from py._code._assertionnew import interpret as reinterpret
|
vllm/lib/python3.10/site-packages/py/_code/code.py
ADDED
|
@@ -0,0 +1,796 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import py
|
| 2 |
+
import sys
|
| 3 |
+
from inspect import CO_VARARGS, CO_VARKEYWORDS, isclass
|
| 4 |
+
|
| 5 |
+
builtin_repr = repr
|
| 6 |
+
|
| 7 |
+
reprlib = py.builtin._tryimport('repr', 'reprlib')
|
| 8 |
+
|
| 9 |
+
if sys.version_info[0] >= 3:
|
| 10 |
+
from traceback import format_exception_only
|
| 11 |
+
else:
|
| 12 |
+
from py._code._py2traceback import format_exception_only
|
| 13 |
+
|
| 14 |
+
import traceback
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class Code(object):
|
| 18 |
+
""" wrapper around Python code objects """
|
| 19 |
+
def __init__(self, rawcode):
|
| 20 |
+
if not hasattr(rawcode, "co_filename"):
|
| 21 |
+
rawcode = py.code.getrawcode(rawcode)
|
| 22 |
+
try:
|
| 23 |
+
self.filename = rawcode.co_filename
|
| 24 |
+
self.firstlineno = rawcode.co_firstlineno - 1
|
| 25 |
+
self.name = rawcode.co_name
|
| 26 |
+
except AttributeError:
|
| 27 |
+
raise TypeError("not a code object: %r" % (rawcode,))
|
| 28 |
+
self.raw = rawcode
|
| 29 |
+
|
| 30 |
+
def __eq__(self, other):
|
| 31 |
+
return self.raw == other.raw
|
| 32 |
+
|
| 33 |
+
def __ne__(self, other):
|
| 34 |
+
return not self == other
|
| 35 |
+
|
| 36 |
+
@property
|
| 37 |
+
def path(self):
|
| 38 |
+
""" return a path object pointing to source code (note that it
|
| 39 |
+
might not point to an actually existing file). """
|
| 40 |
+
p = py.path.local(self.raw.co_filename)
|
| 41 |
+
# maybe don't try this checking
|
| 42 |
+
if not p.check():
|
| 43 |
+
# XXX maybe try harder like the weird logic
|
| 44 |
+
# in the standard lib [linecache.updatecache] does?
|
| 45 |
+
p = self.raw.co_filename
|
| 46 |
+
return p
|
| 47 |
+
|
| 48 |
+
@property
|
| 49 |
+
def fullsource(self):
|
| 50 |
+
""" return a py.code.Source object for the full source file of the code
|
| 51 |
+
"""
|
| 52 |
+
from py._code import source
|
| 53 |
+
full, _ = source.findsource(self.raw)
|
| 54 |
+
return full
|
| 55 |
+
|
| 56 |
+
def source(self):
|
| 57 |
+
""" return a py.code.Source object for the code object's source only
|
| 58 |
+
"""
|
| 59 |
+
# return source only for that part of code
|
| 60 |
+
return py.code.Source(self.raw)
|
| 61 |
+
|
| 62 |
+
def getargs(self, var=False):
|
| 63 |
+
""" return a tuple with the argument names for the code object
|
| 64 |
+
|
| 65 |
+
if 'var' is set True also return the names of the variable and
|
| 66 |
+
keyword arguments when present
|
| 67 |
+
"""
|
| 68 |
+
# handfull shortcut for getting args
|
| 69 |
+
raw = self.raw
|
| 70 |
+
argcount = raw.co_argcount
|
| 71 |
+
if var:
|
| 72 |
+
argcount += raw.co_flags & CO_VARARGS
|
| 73 |
+
argcount += raw.co_flags & CO_VARKEYWORDS
|
| 74 |
+
return raw.co_varnames[:argcount]
|
| 75 |
+
|
| 76 |
+
class Frame(object):
|
| 77 |
+
"""Wrapper around a Python frame holding f_locals and f_globals
|
| 78 |
+
in which expressions can be evaluated."""
|
| 79 |
+
|
| 80 |
+
def __init__(self, frame):
|
| 81 |
+
self.lineno = frame.f_lineno - 1
|
| 82 |
+
self.f_globals = frame.f_globals
|
| 83 |
+
self.f_locals = frame.f_locals
|
| 84 |
+
self.raw = frame
|
| 85 |
+
self.code = py.code.Code(frame.f_code)
|
| 86 |
+
|
| 87 |
+
@property
|
| 88 |
+
def statement(self):
|
| 89 |
+
""" statement this frame is at """
|
| 90 |
+
if self.code.fullsource is None:
|
| 91 |
+
return py.code.Source("")
|
| 92 |
+
return self.code.fullsource.getstatement(self.lineno)
|
| 93 |
+
|
| 94 |
+
def eval(self, code, **vars):
|
| 95 |
+
""" evaluate 'code' in the frame
|
| 96 |
+
|
| 97 |
+
'vars' are optional additional local variables
|
| 98 |
+
|
| 99 |
+
returns the result of the evaluation
|
| 100 |
+
"""
|
| 101 |
+
f_locals = self.f_locals.copy()
|
| 102 |
+
f_locals.update(vars)
|
| 103 |
+
return eval(code, self.f_globals, f_locals)
|
| 104 |
+
|
| 105 |
+
def exec_(self, code, **vars):
|
| 106 |
+
""" exec 'code' in the frame
|
| 107 |
+
|
| 108 |
+
'vars' are optiona; additional local variables
|
| 109 |
+
"""
|
| 110 |
+
f_locals = self.f_locals.copy()
|
| 111 |
+
f_locals.update(vars)
|
| 112 |
+
py.builtin.exec_(code, self.f_globals, f_locals)
|
| 113 |
+
|
| 114 |
+
def repr(self, object):
|
| 115 |
+
""" return a 'safe' (non-recursive, one-line) string repr for 'object'
|
| 116 |
+
"""
|
| 117 |
+
return py.io.saferepr(object)
|
| 118 |
+
|
| 119 |
+
def is_true(self, object):
|
| 120 |
+
return object
|
| 121 |
+
|
| 122 |
+
def getargs(self, var=False):
|
| 123 |
+
""" return a list of tuples (name, value) for all arguments
|
| 124 |
+
|
| 125 |
+
if 'var' is set True also include the variable and keyword
|
| 126 |
+
arguments when present
|
| 127 |
+
"""
|
| 128 |
+
retval = []
|
| 129 |
+
for arg in self.code.getargs(var):
|
| 130 |
+
try:
|
| 131 |
+
retval.append((arg, self.f_locals[arg]))
|
| 132 |
+
except KeyError:
|
| 133 |
+
pass # this can occur when using Psyco
|
| 134 |
+
return retval
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class TracebackEntry(object):
|
| 138 |
+
""" a single entry in a traceback """
|
| 139 |
+
|
| 140 |
+
_repr_style = None
|
| 141 |
+
exprinfo = None
|
| 142 |
+
|
| 143 |
+
def __init__(self, rawentry):
|
| 144 |
+
self._rawentry = rawentry
|
| 145 |
+
self.lineno = rawentry.tb_lineno - 1
|
| 146 |
+
|
| 147 |
+
def set_repr_style(self, mode):
|
| 148 |
+
assert mode in ("short", "long")
|
| 149 |
+
self._repr_style = mode
|
| 150 |
+
|
| 151 |
+
@property
|
| 152 |
+
def frame(self):
|
| 153 |
+
return py.code.Frame(self._rawentry.tb_frame)
|
| 154 |
+
|
| 155 |
+
@property
|
| 156 |
+
def relline(self):
|
| 157 |
+
return self.lineno - self.frame.code.firstlineno
|
| 158 |
+
|
| 159 |
+
def __repr__(self):
|
| 160 |
+
return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno+1)
|
| 161 |
+
|
| 162 |
+
@property
|
| 163 |
+
def statement(self):
|
| 164 |
+
""" py.code.Source object for the current statement """
|
| 165 |
+
source = self.frame.code.fullsource
|
| 166 |
+
return source.getstatement(self.lineno)
|
| 167 |
+
|
| 168 |
+
@property
|
| 169 |
+
def path(self):
|
| 170 |
+
""" path to the source code """
|
| 171 |
+
return self.frame.code.path
|
| 172 |
+
|
| 173 |
+
def getlocals(self):
|
| 174 |
+
return self.frame.f_locals
|
| 175 |
+
locals = property(getlocals, None, None, "locals of underlaying frame")
|
| 176 |
+
|
| 177 |
+
def reinterpret(self):
|
| 178 |
+
"""Reinterpret the failing statement and returns a detailed information
|
| 179 |
+
about what operations are performed."""
|
| 180 |
+
if self.exprinfo is None:
|
| 181 |
+
source = str(self.statement).strip()
|
| 182 |
+
x = py.code._reinterpret(source, self.frame, should_fail=True)
|
| 183 |
+
if not isinstance(x, str):
|
| 184 |
+
raise TypeError("interpret returned non-string %r" % (x,))
|
| 185 |
+
self.exprinfo = x
|
| 186 |
+
return self.exprinfo
|
| 187 |
+
|
| 188 |
+
def getfirstlinesource(self):
|
| 189 |
+
# on Jython this firstlineno can be -1 apparently
|
| 190 |
+
return max(self.frame.code.firstlineno, 0)
|
| 191 |
+
|
| 192 |
+
def getsource(self, astcache=None):
|
| 193 |
+
""" return failing source code. """
|
| 194 |
+
# we use the passed in astcache to not reparse asttrees
|
| 195 |
+
# within exception info printing
|
| 196 |
+
from py._code.source import getstatementrange_ast
|
| 197 |
+
source = self.frame.code.fullsource
|
| 198 |
+
if source is None:
|
| 199 |
+
return None
|
| 200 |
+
key = astnode = None
|
| 201 |
+
if astcache is not None:
|
| 202 |
+
key = self.frame.code.path
|
| 203 |
+
if key is not None:
|
| 204 |
+
astnode = astcache.get(key, None)
|
| 205 |
+
start = self.getfirstlinesource()
|
| 206 |
+
try:
|
| 207 |
+
astnode, _, end = getstatementrange_ast(self.lineno, source,
|
| 208 |
+
astnode=astnode)
|
| 209 |
+
except SyntaxError:
|
| 210 |
+
end = self.lineno + 1
|
| 211 |
+
else:
|
| 212 |
+
if key is not None:
|
| 213 |
+
astcache[key] = astnode
|
| 214 |
+
return source[start:end]
|
| 215 |
+
|
| 216 |
+
source = property(getsource)
|
| 217 |
+
|
| 218 |
+
def ishidden(self):
    """Return the value of ``__tracebackhide__`` for this frame.

    The name is looked up first in the frame's locals, then its globals;
    False is returned when neither defines it.

    mostly for internal use
    """
    for namespace in (self.frame.f_locals, self.frame.f_globals):
        try:
            return namespace['__tracebackhide__']
        except KeyError:
            continue
    return False
|
| 231 |
+
|
| 232 |
+
def __str__(self):
    """Render this entry as a one-entry file/line/function snippet."""
    try:
        fn = str(self.path)
    except py.error.Error:
        fn = '???'
    name = self.frame.code.name
    try:
        line = str(self.statement).lstrip()
    except KeyboardInterrupt:
        raise
    except:
        # statement extraction is best-effort only
        line = "???"
    return " File %r:%d in %s\n %s\n" % (fn, self.lineno + 1, name, line)
|
| 245 |
+
|
| 246 |
+
def name(self):
    """Return ``co_name`` of the frame's raw code object."""
    raw_code = self.frame.code.raw
    return raw_code.co_name
name = property(name, None, None, "co_name of underlaying code")
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
class Traceback(list):
    """ Traceback objects encapsulate and offer higher level
        access to Traceback entries.
    """
    Entry = TracebackEntry

    def __init__(self, tb):
        """ initialize from given python traceback object. """
        if hasattr(tb, 'tb_next'):
            # a raw traceback object: walk the tb_next chain and wrap
            # each link in an Entry
            def entries(current):
                while current is not None:
                    yield self.Entry(current)
                    current = current.tb_next
            list.__init__(self, entries(tb))
        else:
            # already an iterable of entries
            list.__init__(self, tb)

    def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
        """ return a Traceback instance wrapping part of this Traceback

            by provding any combination of path, lineno and firstlineno, the
            first frame to start the to-be-returned traceback is determined

            this allows cutting the first part of a Traceback instance e.g.
            for formatting reasons (removing some uninteresting bits that deal
            with handling of the exception/traceback)
        """
        for entry in self:
            codepath = entry.frame.code.path
            # guard clauses: skip entries that fail any requested criterion
            if path is not None and codepath != path:
                continue
            if (excludepath is not None and hasattr(codepath, 'relto')
                    and codepath.relto(excludepath)):
                continue
            if lineno is not None and entry.lineno != lineno:
                continue
            if (firstlineno is not None
                    and entry.frame.code.firstlineno != firstlineno):
                continue
            return Traceback(entry._rawentry)
        return self

    def __getitem__(self, key):
        result = super(Traceback, self).__getitem__(key)
        if isinstance(key, slice):
            result = self.__class__(result)
        return result

    def filter(self, fn=lambda x: not x.ishidden()):
        """ return a Traceback instance with certain items removed

            fn is a function that gets a single argument, a TracebackItem
            instance, and should return True when the item should be added
            to the Traceback, False when not

            by default this removes all the TracebackItems which are hidden
            (see ishidden() above)
        """
        return Traceback(entry for entry in self if fn(entry))

    def getcrashentry(self):
        """ return last non-hidden traceback entry that lead
            to the exception of a traceback.
        """
        for entry in reversed(self):
            if not entry.ishidden():
                return entry
        return self[-1]

    def recursionindex(self):
        """ return the index of the frame/TracebackItem where recursion
            originates if appropriate, None if no recursion occurred
        """
        cache = {}
        for i, entry in enumerate(self):
            # id for the code.raw is needed to work around
            # the strange metaprogramming in the decorator lib from pypi
            # which generates code objects that have hash/value equality
            #XXX needs a test
            code = entry.frame.code
            key = code.path, id(code.raw), entry.lineno
            seen_locals = cache.setdefault(key, [])
            if seen_locals:
                frame = entry.frame
                current = frame.f_locals
                for previous in seen_locals:
                    if frame.is_true(frame.eval(co_equal,
                            __recursioncache_locals_1=current,
                            __recursioncache_locals_2=previous)):
                        return i
            seen_locals.append(entry.frame.f_locals)
        return None
|
| 340 |
+
|
| 341 |
+
# code object evaluated by Traceback.recursionindex() inside a target frame
# to compare two sets of frame locals for equality
co_equal = compile(
    '__recursioncache_locals_1 == __recursioncache_locals_2', '?', 'eval')
|
| 343 |
+
|
| 344 |
+
class ExceptionInfo(object):
    """ wraps sys.exc_info() objects and offers
        help for navigating the traceback.
    """
    _striptext = ''

    def __init__(self, tup=None, exprinfo=None):
        if tup is None:
            tup = sys.exc_info()
            if exprinfo is None and isinstance(tup[1], AssertionError):
                exprinfo = getattr(tup[1], 'msg', None)
                if exprinfo is None:
                    exprinfo = str(tup[1])
                if exprinfo and exprinfo.startswith('assert '):
                    self._striptext = 'AssertionError: '
        self._excinfo = tup
        #: the exception class
        self.type = tup[0]
        #: the exception instance
        self.value = tup[1]
        #: the exception raw traceback
        self.tb = tup[2]
        #: the exception type name
        self.typename = self.type.__name__
        #: the exception traceback (py.code.Traceback instance)
        self.traceback = py.code.Traceback(self.tb)

    def __repr__(self):
        return "<ExceptionInfo %s tblen=%d>" % (
            self.typename, len(self.traceback))

    def exconly(self, tryshort=False):
        """ return the exception as a string

            when 'tryshort' resolves to True, and the exception is a
            py.code._AssertionError, only the actual exception part of
            the exception representation is returned (so 'AssertionError: ' is
            removed from the beginning)
        """
        text = ''.join(format_exception_only(self.type, self.value)).rstrip()
        if tryshort and text.startswith(self._striptext):
            text = text[len(self._striptext):]
        return text

    def errisinstance(self, exc):
        """ return True if the exception is an instance of exc """
        return isinstance(self.value, exc)

    def _getreprcrash(self):
        """Build a ReprFileLocation pointing at the crash entry."""
        exconly = self.exconly(tryshort=True)
        entry = self.traceback.getcrashentry()
        path = entry.frame.code.raw.co_filename
        return ReprFileLocation(path, entry.lineno + 1, exconly)

    def getrepr(self, showlocals=False, style="long",
                abspath=False, tbfilter=True, funcargs=False):
        """ return str()able representation of this exception info.
            showlocals: show locals per traceback entry
            style: long|short|no|native traceback style
            tbfilter: hide entries (where __tracebackhide__ is true)

            in case of style==native, tbfilter and showlocals is ignored.
        """
        if style == 'native':
            tblines = traceback.format_exception(
                self.type, self.value, self.traceback[0]._rawentry)
            return ReprExceptionInfo(ReprTracebackNative(tblines),
                                     self._getreprcrash())
        fmt = FormattedExcinfo(
            showlocals=showlocals, style=style,
            abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
        return fmt.repr_excinfo(self)

    def _lastentryloc(self):
        # file location of the last traceback entry plus the exception text
        entry = self.traceback[-1]
        return ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())

    def __str__(self):
        return str(self._lastentryloc())

    def __unicode__(self):
        return self._lastentryloc().__unicode__()
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
class FormattedExcinfo(object):
    """ presenting information about failing Functions and Generators. """
    # markers used in front of traceback source lines
    flow_marker = ">"
    fail_marker = "E"

    def __init__(self, showlocals=False, style="long",
                 abspath=True, tbfilter=True, funcargs=False):
        self.showlocals = showlocals
        self.style = style
        self.tbfilter = tbfilter
        self.funcargs = funcargs
        self.abspath = abspath
        # path -> ast node, avoids reparsing within one formatting pass
        self.astcache = {}

    def _getindent(self, source):
        # figure out indent for given source (best effort, 0 on failure)
        try:
            stmt = str(source.getstatement(len(source) - 1))
        except KeyboardInterrupt:
            raise
        except:
            try:
                stmt = str(source[-1])
            except KeyboardInterrupt:
                raise
            except:
                return 0
        return 4 + (len(stmt) - len(stmt.lstrip()))

    def _getentrysource(self, entry):
        source = entry.getsource(self.astcache)
        if source is None:
            return None
        return source.deindent()

    def _saferepr(self, obj):
        return py.io.saferepr(obj)

    def repr_args(self, entry):
        if not self.funcargs:
            return None
        args = [(argname, self._saferepr(argvalue))
                for argname, argvalue in entry.frame.getargs(var=True)]
        return ReprFuncArgs(args)

    def get_source(self, source, line_index=-1, excinfo=None, short=False):
        """ return formatted and marked up source lines. """
        out = []
        if source is None or line_index >= len(source.lines):
            source = py.code.Source("???")
            line_index = 0
        if line_index < 0:
            line_index += len(source)
        space_prefix = " "
        if short:
            out.append(space_prefix + source.lines[line_index].strip())
        else:
            # context before, the flagged line, then context after
            for line in source.lines[:line_index]:
                out.append(space_prefix + line)
            out.append(self.flow_marker + " " + source.lines[line_index])
            for line in source.lines[line_index + 1:]:
                out.append(space_prefix + line)
        if excinfo is not None:
            indent = 4 if short else self._getindent(source)
            out.extend(self.get_exconly(excinfo, indent=indent, markall=True))
        return out

    def get_exconly(self, excinfo, indent=4, markall=False):
        """Return the exception-only lines, fail-marked and indented."""
        out = []
        indent = " " * indent
        # get the real exception information out
        failindent = self.fail_marker + indent[1:]
        for line in excinfo.exconly(tryshort=True).split('\n'):
            out.append(failindent + line)
            if not markall:
                # only the first line carries the fail marker
                failindent = indent
        return out

    def repr_locals(self, locals):
        if not self.showlocals:
            return None
        lines = []
        # names starting with "@" are internal and skipped
        for name in sorted(loc for loc in locals if loc[0] != "@"):
            value = locals[name]
            if name == '__builtins__':
                lines.append("__builtins__ = <builtins>")
            else:
                # This formatting could all be handled by the
                # _repr() function, which is only reprlib.Repr in
                # disguise, so is very configurable.
                str_repr = self._saferepr(value)
                #if len(str_repr) < 70 or not isinstance(value,
                # (list, tuple, dict)):
                lines.append("%-10s = %s" %(name, str_repr))
                #else:
                # self._line("%-10s =\\" % (name,))
                # # XXX
                # pprint.pprint(value, stream=self.excinfowriter)
        return ReprLocals(lines)

    def repr_traceback_entry(self, entry, excinfo=None):
        source = self._getentrysource(entry)
        if source is None:
            source = py.code.Source("???")
            line_index = 0
        else:
            # entry.getfirstlinesource() can be -1, should be 0 on jython
            line_index = entry.lineno - max(entry.getfirstlinesource(), 0)

        lines = []
        style = entry._repr_style
        if style is None:
            style = self.style
        if style in ("short", "long"):
            short = style == "short"
            reprargs = None if short else self.repr_args(entry)
            lines.extend(self.get_source(source, line_index, excinfo,
                                         short=short))
            if short:
                message = "in %s" %(entry.name)
            else:
                message = excinfo and excinfo.typename or ""
            filelocrepr = ReprFileLocation(self._makepath(entry.path),
                                           entry.lineno + 1, message)
            localsrepr = None if short else self.repr_locals(entry.locals)
            return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
        # style "no"/other: only the exception text, if any
        if excinfo:
            lines.extend(self.get_exconly(excinfo, indent=4))
        return ReprEntry(lines, None, None, None, style)

    def _makepath(self, path):
        if not self.abspath:
            try:
                relpath = py.path.local().bestrelpath(path)
            except OSError:
                return path
            # prefer the relative form only when it is actually shorter
            if len(relpath) < len(str(path)):
                path = relpath
        return path

    def repr_traceback(self, excinfo):
        traceback = excinfo.traceback
        if self.tbfilter:
            traceback = traceback.filter()
        recursionindex = None
        if excinfo.errisinstance(RuntimeError):
            if "maximum recursion depth exceeded" in str(excinfo.value):
                recursionindex = traceback.recursionindex()
        last = traceback[-1]
        entries = []
        extraline = None
        for index, entry in enumerate(traceback):
            # only the crashing (last) entry carries the excinfo
            einfo = excinfo if entry == last else None
            entries.append(self.repr_traceback_entry(entry, einfo))
            if index == recursionindex:
                extraline = "!!! Recursion detected (same locals & position)"
                break
        return ReprTraceback(entries, extraline, style=self.style)

    def repr_excinfo(self, excinfo):
        return ReprExceptionInfo(self.repr_traceback(excinfo),
                                 excinfo._getreprcrash())
|
| 602 |
+
|
| 603 |
+
class TerminalRepr:
    """Base class for objects that render themselves to a terminal writer."""

    def __str__(self):
        text = self.__unicode__()
        if sys.version_info[0] < 3:
            # Python 2: __str__ must return bytes
            text = text.encode('utf-8')
        return text

    def __unicode__(self):
        # FYI this is called from pytest-xdist's serialization of exception
        # information.
        buf = py.io.TextIO()
        writer = py.io.TerminalWriter(file=buf)
        self.toterminal(writer)
        return buf.getvalue().strip()

    def __repr__(self):
        return "<%s instance at %0x>" %(self.__class__, id(self))
|
| 620 |
+
|
| 621 |
+
|
| 622 |
+
class ReprExceptionInfo(TerminalRepr):
    """Formatted traceback plus crash location, with optional extra sections."""

    def __init__(self, reprtraceback, reprcrash):
        self.reprtraceback = reprtraceback
        self.reprcrash = reprcrash
        self.sections = []

    def addsection(self, name, content, sep="-"):
        self.sections.append((name, content, sep))

    def toterminal(self, tw):
        self.reprtraceback.toterminal(tw)
        for title, body, separator in self.sections:
            tw.sep(separator, title)
            tw.line(body)
|
| 636 |
+
|
| 637 |
+
class ReprTraceback(TerminalRepr):
    entrysep = "_ "

    def __init__(self, reprentries, extraline, style):
        self.reprentries = reprentries
        self.extraline = extraline
        self.style = style

    def toterminal(self, tw):
        # the entries might have different styles
        total = len(self.reprentries)
        for i, entry in enumerate(self.reprentries):
            if entry.style == "long":
                tw.line("")
            entry.toterminal(tw)
            if i + 1 < total:
                next_entry = self.reprentries[i + 1]
                needs_sep = (entry.style == "long"
                             or (entry.style == "short"
                                 and next_entry.style == "long"))
                if needs_sep:
                    tw.sep(self.entrysep)

        if self.extraline:
            tw.line(self.extraline)
|
| 660 |
+
|
| 661 |
+
class ReprTracebackNative(ReprTraceback):
    """ReprTraceback built from preformatted native traceback lines."""

    def __init__(self, tblines):
        self.style = "native"
        self.reprentries = [ReprEntryNative(tblines)]
        self.extraline = None
|
| 666 |
+
|
| 667 |
+
class ReprEntryNative(TerminalRepr):
    """A single entry rendered verbatim from native traceback lines."""
    style = "native"

    def __init__(self, tblines):
        self.lines = tblines

    def toterminal(self, tw):
        tw.write("".join(self.lines))
|
| 675 |
+
|
| 676 |
+
class ReprEntry(TerminalRepr):
    localssep = "_ "

    def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
        self.lines = lines
        self.reprfuncargs = reprfuncargs
        self.reprlocals = reprlocals
        self.reprfileloc = filelocrepr
        self.style = style

    def _writelines(self, tw):
        # failure lines (fail-marker prefix) are shown in red
        for line in self.lines:
            tw.line(line, bold=True, red=line.startswith("E "))

    def toterminal(self, tw):
        if self.style == "short":
            self.reprfileloc.toterminal(tw)
            self._writelines(tw)
            #tw.line("")
            return
        if self.reprfuncargs:
            self.reprfuncargs.toterminal(tw)
        self._writelines(tw)
        if self.reprlocals:
            #tw.sep(self.localssep, "Locals")
            tw.line("")
            self.reprlocals.toterminal(tw)
        if self.reprfileloc:
            if self.lines:
                tw.line("")
            self.reprfileloc.toterminal(tw)

    def __str__(self):
        return "%s\n%s\n%s" % ("\n".join(self.lines),
                               self.reprlocals,
                               self.reprfileloc)
|
| 712 |
+
|
| 713 |
+
class ReprFileLocation(TerminalRepr):
    """A path:lineno: message location line."""

    def __init__(self, path, lineno, message):
        self.path = str(path)
        self.lineno = lineno
        self.message = message

    def toterminal(self, tw):
        # filename and lineno output for each entry,
        # using an output format that most editors unterstand
        first_line = self.message.split("\n", 1)[0]
        tw.line("%s:%s: %s" % (self.path, self.lineno, first_line))
|
| 727 |
+
|
| 728 |
+
class ReprLocals(TerminalRepr):
    """Pre-rendered lines describing a frame's local variables."""

    def __init__(self, lines):
        self.lines = lines

    def toterminal(self, tw):
        for rendered in self.lines:
            tw.line(rendered)
|
| 735 |
+
|
| 736 |
+
class ReprFuncArgs(TerminalRepr):
    """Renders (name, value-repr) argument pairs, wrapped to terminal width."""

    def __init__(self, args):
        self.args = args

    def toterminal(self, tw):
        if not self.args:
            return
        linesofar = ""
        for name, value in self.args:
            ns = "%s = %s" %(name, value)
            if len(ns) + len(linesofar) + 2 > tw.fullwidth:
                # current pair does not fit: flush and start a new line
                if linesofar:
                    tw.line(linesofar)
                linesofar = ns
            elif linesofar:
                linesofar += ", " + ns
            else:
                linesofar = ns
        if linesofar:
            tw.line(linesofar)
        tw.line("")
|
| 757 |
+
|
| 758 |
+
|
| 759 |
+
|
| 760 |
+
# builtin name -> stack of original values saved by patch_builtins()
oldbuiltins = {}
|
| 761 |
+
|
| 762 |
+
def patch_builtins(assertion=True, compile=True):
    """ put compile and AssertionError builtins to Python's builtins. """
    if assertion:
        from py._code import assertion
        # remember the original so unpatch_builtins can restore it
        saved = oldbuiltins.setdefault('AssertionError', [])
        saved.append(py.builtin.builtins.AssertionError)
        py.builtin.builtins.AssertionError = assertion.AssertionError
    if compile:
        saved = oldbuiltins.setdefault('compile', [])
        saved.append(py.builtin.builtins.compile)
        py.builtin.builtins.compile = py.code.compile
|
| 773 |
+
|
| 774 |
+
def unpatch_builtins(assertion=True, compile=True):
    """ remove compile and AssertionError builtins from Python builtins. """
    builtins = py.builtin.builtins
    if assertion:
        builtins.AssertionError = oldbuiltins['AssertionError'].pop()
    if compile:
        builtins.compile = oldbuiltins['compile'].pop()
|
| 780 |
+
|
| 781 |
+
def getrawcode(obj, trycall=True):
    """ return code object for given function. """
    try:
        return obj.__code__
    except AttributeError:
        pass
    # unwrap bound methods / legacy attribute names step by step
    for attr in ('im_func', 'func_code', 'f_code', '__code__'):
        obj = getattr(obj, attr, obj)
    if trycall and not hasattr(obj, 'co_firstlineno'):
        # maybe a callable instance: try its __call__ (but not classes)
        if hasattr(obj, '__call__') and not isclass(obj):
            call_code = getrawcode(obj.__call__, trycall=False)
            if hasattr(call_code, 'co_firstlineno'):
                return call_code
    return obj
|
| 796 |
+
|