diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/CachedTensorUtils.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/CachedTensorUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..3413341e666b549226911a35dd84460376267f26 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/CachedTensorUtils.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +namespace at::caching { + +// Some systems (just cudagraphs currently) will persist a static tensor output +// whose TensorImpl does not change across iterations. For these tensors caching +// dtype conversions is invalid. Additionally, there will be an extra reference +// count to these cached tensors that would prevent buffer inplacing and other +// checks on tensor uniqueness. If we are not using these systems the enabled +// flag will be false and we will avoid the hash lookup. + +TORCH_API bool is_cached_tensor(const at::Tensor& t); +TORCH_API void add_cached_tensor(const at::Tensor& t); +TORCH_API void remove_cached_tensor(const at::Tensor& t); +TORCH_API void set_cached_tensors_enabled(bool enable); + +// For gradient buffer stealing we will adjust the use count of tensors +// which are persisted by cudagraphs, just as we need to adjust reference +// count of tensors with hooks. 
+TORCH_API size_t adjusted_use_count(const at::Tensor& t); + +} // namespace at::caching diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..f4c259e834904182d12463d679a870cee554e8d6 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h @@ -0,0 +1,321 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. 
+#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h new file mode 100644 index 0000000000000000000000000000000000000000..9b8fce1015fe43cac53b1e09488be942365d9d80 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include +#include + +// this convertor will: +// 1) take a Tensor object and wrap it in the DLPack tensor +// 2) take a dlpack tensor and convert it to the ATen Tensor + +namespace at { + +TORCH_API ScalarType toScalarType(const DLDataType& dtype); +TORCH_API DLManagedTensor* toDLPack(const Tensor& src); +TORCH_API Tensor fromDLPack(const DLManagedTensor* src); +TORCH_API Tensor +fromDLPack(const DLManagedTensor* src, std::function deleter); +TORCH_API DLDataType getDLDataType(const Tensor& t); +TORCH_API DLDevice getDLContext(const Tensor& tensor, const int64_t& 
device_id); + +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapTransforms.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapTransforms.h new file mode 100644 index 0000000000000000000000000000000000000000..b32b1820565565d02ce54f39fc744d357567b43e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapTransforms.h @@ -0,0 +1,183 @@ +#pragma once + +#include +#include + +namespace at { + +// This file contains abstractions used for transforming *logical* vmap +// arguments into *physical* arguments. (Keep reading for definitions of these +// terms). + +// NOTE: [Logical vs physical args] +// Consider the following vmap. +// vmap(vmap(func, in_dims=(2,)), in_dims=(0,))(torch.ones(2, 3, 4)) +// This would produce a BatchedTensor wrapping a Tensor of size [2, 3, 4], +// with batch dims 0 and 2: +// BatchedTensor(ones(2, 3, 4), bdims=[(lvl=1,dim=0),(lvl=2,dim=2)]) +// +// We say the *logical* view of the tensor has size [3] -- tensors inside +// `func` appear to have size [3]. +// However, the *physical* underlying tensor (the one passed to vmap) has size +// [2, 3, 4]. +// +// This notion of logical vs physical also extends to non-tensor arguments. +// Consider the previous tensor; let's assume the user called +// `torch.sum(tensor, dim=0)` inside of `func`. Then the logical +// dimension they are reducing over is dim 0 but the physical dim is dim 1 +// (the first non-batch dimension) + +// Forward declared; see NOTE: [What is a VmapPhysicalView?] +struct VmapPhysicalView; + +// Most PyTorch operators take 4 or fewer inputs. +constexpr int64_t kVmapTransformStaticInputSize = 4; +using VmapPhysicalViewVec = + SmallVector; + +// Pytorch generally advertises good performance for <= 5 dims. +// (see ATen/core/DimVector.h). We add a few extra dims (~3) for vmap +// dimensions to get 8. 
Adjust this number as necessary +constexpr int64_t kVmapStaticDimVecSize = 8; +using VmapDimVector = SmallVector; +using VmapSymDimVector = SmallVector; + +// NOTE: [What is an VmapTransform?] +// An *VmapTransform* converts logical views of tensors to physical views. +// +// Batching rules use VmapTransforms to convert logical arguments to +// physical arguments, then call one or more at:: operator that handles the +// physical arguments, and then converts the physical result back to a logical +// argument. + +// VmapTransform for operators that take tensors with multiple batch dims. +// Given one or more logical views on Tensors, `logicalToPhysical` +// permutes all of the batch dims to the front of the tensor, aligns +// and expands the batch dims to match each other (according to their `level`), +// and returns a VmapPhysicalView on the tensor(s). +struct TORCH_API MultiBatchVmapTransform { + static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor); + static VmapPhysicalViewVec logicalToPhysical(ITensorListRef logical_tensors); +}; + +// VmapTransform for operators that broadcast all inputs. +// Given some logical views on Tensors, `logicalToPhysical`: +// - permutes all of the batch dims to the front of the tensors +// - aligns all the batch dims to the collective levels of all of the tensors. +// If a tensor does not have a batch dim for a vmap level, then it receives +// a size-one dimension for said level. +// - aligns the non-batch dims to have the same dimensionality, adding extra +// size-1 dimensions in between the batch dimensions and the non-batch +// dimensions so that the batch dimensions are lined up from the right. +// +// For example: given inputs of size (B, 2) and (B, 3, 2) where B is the batch +// dimension, BroadcastingVmapTransform returns VmapPhysicalViews that wrap +// tensors of size (B, 1, 2) and (B, 3, 2). 
+// +// Given inputs of size (B, 2) and (2,), BroadcastingVmapTransform returns +// VmapPhysicalViews wrapping tensors of size (B, 2) and (1, 2). We don't +// actually *need* to return a tensor of size (1, 2) for the second tensor +// because the broadcasting operation takes care of that for us, but we do +// it anyways to keep things simple. +struct TORCH_API BroadcastingVmapTransform { + static VmapPhysicalViewVec logicalToPhysical(TensorList logical_tensors); +}; + +// Forward declared, if you're reading this file head to toe, don't worry about +// it yet. +struct VmapPhysicalToLogicalMap; + +// NOTE: [What is a VmapPhysicalView?] +// VmapPhysicalView represents a physical view on a Tensor. +// +// One can use it to further convert logical dimension indices, logical shapes, +// and more to their physical variants, or convert a new (physical) tensor into +// a logical BatchedTensor. (TODO(rzou): some of these are not yet implemented). +// +// VmapPhysicalView stores a physical tensor with all of its batch dimensions at +// the front and some levels that correspond to said batch dimensions. +// +// The levels bitset specifies which vmap levels correspond to the batch +// dimensions at the front of the tensor. In particular, the number of set bits +// corresponds to the number of batch dimensions on `tensor` and the rightmost +// bit of `levels` specifies the maximum number of nested vmaps we are in at +// this point in time. +// For example, given: +// physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5, 6), levels={1, 3}) +// +// Rightmost bit of `levels` is 3 indicating the number of nested vmaps less +// than or equal to 3. 
+// bitset: 010100 +// ^ +// | +// levels: 012345 +struct TORCH_API VmapPhysicalView { + VmapPhysicalView(Tensor&& tensor, std::bitset levels) + : levels_(levels), tensor_(tensor) { + TORCH_INTERNAL_ASSERT(!isBatchedTensor(tensor)); + } + + Tensor& tensor() { + return tensor_; + } + const Tensor& tensor() const { + return tensor_; + } + + // Maps logical dim indices to physical dim indices. Also does dim wrapping. + // + // For example, given: + // physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5), levels={1, 3}) + // + // Then physical_view.getPhysicalDims({0, 1}) returns {2, 3}. + // This is because the size of levels tell us that the first two dimensions + // of `tensor_` are batch dimensions, so a logical dim of `n` is actually + // a physical dim of `n + 2`. + VmapDimVector getPhysicalDims(OptionalIntArrayRef logical_dims) const; + int64_t getPhysicalDim(int64_t logical_dim) const; + + // Returns a VmapPhysicalToLogicalMap object. This can be used for + // mapping a physical tensor to a new logical tensor (BatchedTensor) + VmapPhysicalToLogicalMap getPhysicalToLogicalMap() const; + + // Maps a logical shape to a physical shape by pre-pending the batch + // sizes to the logical shape. + VmapDimVector getPhysicalShape(IntArrayRef logical_shape) const; + + int64_t numBatchDims() const; + + private: + int64_t numLogicalDims() const; + + std::bitset levels_; + Tensor tensor_; +}; + +// Convenience struct used for mapping a physical tensor (a non-BatchedTensor) +// to a logical one (BatchedTensor). It holds some levels that are used to do +// the mapping and assumes that the batch dimensions in the physical tensor all +// occur at the front of the tensor. +struct TORCH_API VmapPhysicalToLogicalMap { + VmapPhysicalToLogicalMap(std::bitset levels) + : levels_(levels) {} + + // Maps a physical tensor to a new logical tensor (BatchedTensor). + // Assumes that all of the "batch dimensions" are at the front + // of the physical tensor. 
For example, given: + // - x = rank-4 Tensor with size 2, 3, 5, 7 + // - levels = (2, 4) + // Returns: + // - BatchedTensor(x, bdims=[(dim=0,lvl=2), (dim=1, lvl=4)]) + Tensor apply(const Tensor& physical_tensor) const; + + // Given a vector of physical tensors, + // 1. maps each tensor to a new logical tensor. Assumes that all of the + // "batch dimensions" are at the front of the physical tensors. + // 2. stores the new logical tensors back into the passed-in vector. This is + // to avoid additional dynamic allocations. + void applyInplace(std::vector& physical_tensors) const; + + std::bitset levels_; +}; + +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..c1443b7eaa01b4d3215e14c478a5c38195e0a5c0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h @@ -0,0 +1,215 @@ +#pragma once +#include +#include +#include + +#include +#include +#include + +namespace at { + +using NameVector = SmallVector; + +inline bool has_names(const ITensorListRef& tensors) { + return std::any_of(tensors.begin(), tensors.end(), [](const Tensor& t) { + return t.has_names(); + }); +} + +// Converts dim to an positional index. Errors if `dim` cannot be used to +// refer to any dimension of tensor. +TORCH_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim); +TORCH_API std::vector dimnames_to_positions( + const Tensor& tensor, + DimnameList dims); + +// Unifies two DimnameList to produce a third. This is useful for implementing +// the named inference rule for binary broadcasting operations like add. +// +// There are three main constraints: +// 1) Check matching: Names must match positionally from the right. +// 2) Check misaligned: If a name `n` is in `names`, then it must appear at +// the same index from the right in other. 
+// 3) The output names are obtained by unifying the names individually from the +// right. +TORCH_API std::vector unify_from_right( + DimnameList names, + DimnameList other, + const char* action = "broadcast"); + +[[noreturn]] inline void reportNYIDimnameOverload(const char* op_name) { + TORCH_CHECK( + false, + op_name, + ": You passed a dimname (string) to this op in place of a dimension " + "index but it does not yet support this behavior. Please pass a dimension " + "index to work around this."); +} + +// [NOTE] Writing name inference rules +// +// Operators that support named tensors are either composed of operations that +// support named tensors or implement some name inference rule. An op that +// implements its own name inference rule generally looks like the following: +// +// Tensor op(...) { +// perform_shape_checks(...); +// # (1) +// auto maybe_outnames = compute_outnames(...); +// auto result = [&]() { +// NoNamesGuard guard; +// return op_impl(...); +// }(); +// # (2) +// propagate_names_if_nonempty(result, maybe_outnames); +// +// Each op has (1) a compute outnames step and (2) a propagate names step. +// +// compute_outnames is responsible for checking that input names match and +// determining what the output names should be. It returns either: +// - {} (if the inputs tensors are all unnamed) +// - non-empty outnames. +// +// propagate_names_if_nonempty propagates the outnames if they exist to the +// result tensors. +// +// The {} case is an optimization; if the user does not use named tensors they +// pay no perf cost for it. + +namespace namedinference { + +const Tensor& propagate_names_if_present_and_nonempty( + const Tensor& result, + c10::optional maybe_names, + bool validate_names = false); +// Propagates `names` to `result` if `names` is not empty. +// `names` can be empty; see [NOTE] Writing name inference rules +// If `names` is not empty, `names.size()` should equal `result.dim()`. 
+// When in doubt, use this overload instead of the others. +TORCH_API const Tensor& propagate_names_if_nonempty( + const Tensor& result, + DimnameList maybe_names, + bool validate_names = false); + +// Propagates `names` to `result`. Only use this if we are certain that there +// are names to propagate (that names is not empty). +TORCH_API const Tensor& propagate_names( + const Tensor& result, + DimnameList names, + bool validate_names = false); + +// Propagates all names from src to result. +TORCH_API void propagate_names(const Tensor& result, const Tensor& src); + +// Propagates all names except for those at the excluded_idxs. +TORCH_API void propagate_names_except( + const Tensor& result, + const Tensor& src, + IntArrayRef excluded_idxs); + +// Used for reduction ops that have a `keepdim` arg. +TORCH_API void propagate_names_for_reduction( + const Tensor& result, + const Tensor& src, + IntArrayRef excluded_idxs, + bool keepdim); + +TORCH_API void propagate_names_for_expand( + const Tensor& result, + const Tensor& self); + +TORCH_API std::vector compute_cat_outnames( + const MaterializedITensorListRef& tensors); + +TORCH_API std::vector compute_broadcast_outnames( + const Tensor& self, + const Tensor& other); + +TORCH_API std::vector broadcast_to_outnames( + const Tensor& tensor, + const Tensor& reference_tensor, + const char* op_name); + +TORCH_API std::vector compute_matmul_outnames( + const Tensor& self, + const Tensor& other); + +TORCH_API std::vector compute_cdist_outnames( + const Tensor& self, + const Tensor& other); + +TORCH_API std::vector compute_bmm_outnames( + const Tensor& result, + const Tensor& self, + const Tensor& other); + +TORCH_API std::vector compute_squeeze_outnames(const Tensor& tensor); +TORCH_API std::vector compute_squeeze_outnames( + const Tensor& tensor, + std::bitset dims); + +std::vector compute_diagonal_outnames( + const Tensor& tensor, + int64_t dim1, + int64_t dim2); + +// TensorImpl* overloads for Legacy TH/THC code. 
Use these sparingly. + +TORCH_API TensorImpl* propagate_names_if_nonempty( + TensorImpl* result, + DimnameList maybe_names, + bool validate_names = false); + +TORCH_API TensorImpl* propagate_names( + TensorImpl* result, + DimnameList names, + bool validate_names = false); + +TORCH_API void propagate_names(TensorImpl* result, /*const */ TensorImpl* src); + +TORCH_API inline void propagate_names( + const TensorBase& result, + DimnameList names, + bool validate_names = false) { + propagate_names(result.unsafeGetTensorImpl(), names, validate_names); +} + +TORCH_API inline void propagate_names_if_nonempty( + const TensorBase& result, + DimnameList names, + bool validate_names = false) { + propagate_names_if_nonempty( + result.unsafeGetTensorImpl(), names, validate_names); +} + +TORCH_API inline void propagate_names( + const TensorBase& result, + const TensorBase& src) { + propagate_names(result.unsafeGetTensorImpl(), src.unsafeGetTensorImpl()); +} + +// result = m1 @ m2 + bias +TORCH_API std::vector propagate_names_for_addmm( + const Tensor& m1, + const Tensor& m2, + const Tensor& bias); + +TORCH_API std::vector propagate_names_for_addmv( + const Tensor& mat, + const Tensor& vec, + const Tensor& bias); + +TORCH_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2); + +TORCH_API std::vector compute_baddbmm_outnames( + const Tensor& result, + const Tensor& self, + const Tensor& other, + const Tensor& bias); + +TORCH_API bool are_names_equal(TensorImpl* self, TensorImpl* other); + +} // namespace namedinference + +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..e6c6413815bbdb9fa5b88285a4b12e8659eb121a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h @@ -0,0 +1,186 @@ +#pragma once + +#include +#include 
+#include +#include + +namespace at { + +// An "Opaque" TensorImpl -- there are no strides and (for now) +// even data() is not supported (thus no pointer arithmetic). + +// NOTE: We could allow data() in the future, but would have to ensure pointer +// arithmetic code is properly guarded. +// +// NOTE: This does not support resize_ (and other metadata-changing ops) because +// of `shallow_copy_and_detach`. We would need to define an interface to +// "shallow copy" in order to add support. + +template +struct TORCH_API OpaqueTensorImpl : public TensorImpl { + // public constructor for now... + OpaqueTensorImpl( + at::DispatchKeySet key_set, + const caffe2::TypeMeta data_type, + c10::Device device, + OpaqueHandle opaque_handle, + c10::IntArrayRef sizes, + bool is_non_overlapping_and_dense = true) + : TensorImpl(key_set, data_type, device), + opaque_handle_(std::move(opaque_handle)) { + set_storage_access_should_throw(); + set_custom_sizes_strides(SizesStridesPolicy::CustomStrides); + sizes_and_strides_.set_sizes(sizes); + refresh_numel(); + is_non_overlapping_and_dense_ = is_non_overlapping_and_dense; + } + + // Destructor doesn't call release_resources because it's + // unnecessary; don't forget to change that if needed! + void release_resources() override { + TensorImpl::release_resources(); + opaque_handle_ = {}; + } + + void set_size(int64_t dim, int64_t new_size) override { + AT_ERROR("opaque tensors do not have set_size"); + } + + void set_stride(int64_t dim, int64_t new_stride) override { + AT_ERROR("opaque tensors do not have set_stride"); + } + + void set_storage_offset(int64_t storage_offset) override { + AT_ERROR("opaque tensors do not have set_storage_offset"); + } + +#ifdef DEBUG + bool has_storage() const override { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + !storage_, "OpaqueTensorImpl assumes that storage_ is never set"); + return false; + } +#endif + + /** + * Return a TensorImpl that is a shallow-copy of this TensorImpl. 
+ * + * For usage of `version_counter` and `allow_tensor_metadata_change`, + * see NOTE [ TensorImpl Shallow-Copying ]. + */ + c10::intrusive_ptr shallow_copy_and_detach( + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const override { + auto impl = c10::make_intrusive>( + key_set(), + dtype(), + device(), + opaque_handle_, + sizes_and_strides_.sizes_arrayref()); + copy_tensor_metadata( + /*src_opaque_impl=*/this, + /*dest_opaque_impl=*/impl.get(), + /*version_counter=*/version_counter, + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); + impl->refresh_numel(); + return impl; + } + + /** + * Return a TensorImpl that is a shallow-copy of this TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, + * see NOTE [ TensorImpl Shallow-Copying ]. + */ + c10::intrusive_ptr shallow_copy_and_detach( + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const override { + auto impl = c10::make_intrusive>( + key_set(), + dtype(), + device(), + opaque_handle_, + sizes_and_strides_.sizes_arrayref()); + copy_tensor_metadata( + /*src_opaque_impl=*/this, + /*dest_opaque_impl=*/impl.get(), + /*version_counter=*/std::move(version_counter), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); + impl->refresh_numel(); + return impl; + } + + /** + * Shallow-copies data from another TensorImpl into this TensorImpl. + * + * For why this function doesn't check this TensorImpl's + * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ]. 
+ */ + void shallow_copy_from(const c10::intrusive_ptr& impl) override { + AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set())); + auto opaque_impl = + static_cast*>(impl.get()); + copy_tensor_metadata( + /*src_impl=*/opaque_impl, + /*dest_impl=*/this, + /*version_counter=*/version_counter(), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change()); + refresh_numel(); + } + + const OpaqueHandle& opaque_handle() const { + return opaque_handle_; + } + + OpaqueHandle& unsafe_opaque_handle() { + return opaque_handle_; + } + + protected: + /** + * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer / + * storage_offset) from one TensorImpl to another TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE + * [ TensorImpl Shallow-Copying ]. + */ + static void copy_tensor_metadata( + const OpaqueTensorImpl* src_opaque_impl, + OpaqueTensorImpl* dest_opaque_impl, + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) { + TensorImpl::copy_tensor_metadata( + src_opaque_impl, + dest_opaque_impl, + version_counter, + allow_tensor_metadata_change); + + // OpaqueTensorImpl-specific fields. + dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_; + } + + static void copy_tensor_metadata( + const OpaqueTensorImpl* src_opaque_impl, + OpaqueTensorImpl* dest_opaque_impl, + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) { + TensorImpl::copy_tensor_metadata( + src_opaque_impl, + dest_opaque_impl, + std::move(version_counter), + allow_tensor_metadata_change); + + // OpaqueTensorImpl-specific fields. 
+ dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_; + } + + private: + const char* tensorimpl_type_name() const override { + return "OpaqueTensorImpl"; + } + + OpaqueHandle opaque_handle_; +}; + +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/Parallel.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/Parallel.h new file mode 100644 index 0000000000000000000000000000000000000000..ff14f568d22a6e0d319bedb4e68194cd0971259e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/Parallel.h @@ -0,0 +1,160 @@ +#pragma once +#include +#include +#include +#include + +namespace at { + +inline int64_t divup(int64_t x, int64_t y) { + return (x + y - 1) / y; +} + +// Called during new thread initialization +TORCH_API void init_num_threads(); + +// Sets the number of threads to be used in parallel region +TORCH_API void set_num_threads(int); + +// Returns the maximum number of threads that may be used in a parallel region +TORCH_API int get_num_threads(); + +// Returns the current thread number (starting from 0) +// in the current parallel region, or 0 in the sequential region +TORCH_API int get_thread_num(); + +// Checks whether the code runs in parallel region +TORCH_API bool in_parallel_region(); + +namespace internal { + +// Initialise num_threads lazily at first parallel call +inline void lazy_init_num_threads() { + thread_local bool init = false; + if (C10_UNLIKELY(!init)) { + at::init_num_threads(); + init = true; + } +} + +TORCH_API void set_thread_num(int); + +class TORCH_API ThreadIdGuard { + public: + ThreadIdGuard(int new_id) : old_id_(at::get_thread_num()) { + set_thread_num(new_id); + } + + ~ThreadIdGuard() { + set_thread_num(old_id_); + } + + private: + int old_id_; +}; + +} // namespace internal + +/* +parallel_for + +begin: index at which to start applying user function + +end: index at which to stop applying user function + +grain_size: number of elements per 
chunk. impacts the degree of parallelization + +f: user function applied in parallel to the chunks, signature: + void f(int64_t begin, int64_t end) + +Warning: parallel_for does NOT copy thread local +states from the current thread to the worker threads. +This means for example that Tensor operations CANNOT be used in the +body of your function, only data pointers. +*/ +template +inline void parallel_for( + const int64_t begin, + const int64_t end, + const int64_t grain_size, + const F& f); + +/* +parallel_reduce + +begin: index at which to start applying reduction + +end: index at which to stop applying reduction + +grain_size: number of elements per chunk. impacts number of elements in +intermediate results tensor and degree of parallelization. + +ident: identity for binary combination function sf. sf(ident, x) needs to return +x. + +f: function for reduction over a chunk. f needs to be of signature scalar_t +f(int64_t partial_begin, int64_t partial_end, scalar_t identifiy) + +sf: function to combine two partial results. sf needs to be of signature +scalar_t sf(scalar_t x, scalar_t y) + +For example, you might have a tensor of 10000 entires and want to sum together +all the elements. Parallel_reduce with a grain_size of 2500 will then allocate +an intermediate result tensor with 4 elements. Then it will execute the function +"f" you provide and pass the beginning and end index of these chunks, so +0-2499, 2500-4999, etc. and the combination identity. It will then write out +the result from each of these chunks into the intermediate result tensor. After +that it'll reduce the partial results from each chunk into a single number using +the combination function sf and the identity ident. For a total summation this +would be "+" and 0 respectively. This is similar to tbb's approach [1], where +you need to provide a function to accumulate a subrange, a function to combine +two partial results and an identity. 
+ +Warning: parallel_reduce does NOT copy thread local +states from the current thread to the worker threads. +This means for example that Tensor operations CANNOT be used in the +body of your function, only data pointers. + +[1] https://software.intel.com/en-us/node/506154 +*/ +template +inline scalar_t parallel_reduce( + const int64_t begin, + const int64_t end, + const int64_t grain_size, + const scalar_t ident, + const F& f, + const SF& sf); + +// Returns a detailed string describing parallelization settings +TORCH_API std::string get_parallel_info(); + +// Sets number of threads used for inter-op parallelism +TORCH_API void set_num_interop_threads(int); + +// Returns the number of threads used for inter-op parallelism +TORCH_API int get_num_interop_threads(); + +// Launches inter-op parallel task +TORCH_API void launch(std::function func); +namespace internal { +void launch_no_thread_state(std::function fn); +} // namespace internal + +// Launches intra-op parallel task +TORCH_API void intraop_launch(std::function func); + +// Returns number of intra-op threads used by default +TORCH_API int intraop_default_num_threads(); + +} // namespace at + +#if AT_PARALLEL_OPENMP +#include // IWYU pragma: keep +#elif AT_PARALLEL_NATIVE +#include // IWYU pragma: keep +#elif AT_PARALLEL_NATIVE_TBB +#include // IWYU pragma: keep +#endif + +#include // IWYU pragma: keep diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h new file mode 100644 index 0000000000000000000000000000000000000000..9193e06ed695233637fe5ee8344777e3e42c799b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h @@ -0,0 +1,52 @@ +#pragma once + +#include +#include +#include + +#include + +#ifdef _WIN32 +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#endif +#include + +#define INTRA_OP_PARALLEL + +namespace at::internal { 
+ +template +inline void invoke_parallel( + const int64_t begin, + const int64_t end, + const int64_t grain_size, + const F& f) { + // Choose number of tasks based on grain size and number of threads. + int64_t chunk_size = divup((end - begin), get_num_threads()); + // Make sure each task is at least grain_size size. + chunk_size = std::max(grain_size, chunk_size); + + std::atomic_flag err_flag = ATOMIC_FLAG_INIT; + std::exception_ptr eptr; + tbb::parallel_for( + tbb::blocked_range(begin, end, chunk_size), + [&eptr, &err_flag, f](const tbb::blocked_range& r) { + try { + internal::ThreadIdGuard tid_guard( + tbb::this_task_arena::current_thread_index()); + f(r.begin(), r.end()); + } catch (...) { + if (!err_flag.test_and_set()) { + eptr = std::current_exception(); + } + } + }, + tbb::static_partitioner{}); + if (eptr) { + std::rethrow_exception(eptr); + } +} + +} // namespace at::internal diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h new file mode 100644 index 0000000000000000000000000000000000000000..8c5003a676d80fea79e7facab42a2818d9e2aa74 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h @@ -0,0 +1,137 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { + +class Tensor; + +namespace impl { + +// Use this to define the prototype for a meta function. There are two +// versions; one that takes one argument (just the operator name), or FUNC2 +// variant that takes two arguments (operator name and overload name). +// +// Example usage: +// +// TORCH_META_FUNC2(add, Tensor) ( +// const Tensor& self, const Tensor& other +// ) { +// ... compute sizes and options ... 
+// set_output(sizes, options); +// } +// +#define TORCH_META_FUNC(name) void structured_##name::meta +#define TORCH_META_FUNC2(name, overload) \ + void structured_##name##_##overload::meta + +// These are versions of TORCH_META_FUNC(2) that include a precompute_out struct +// as a return value. They should be used when the kernel in question has +// precomputed values declared in native_functions.yaml and the corresponding +// implementation should return an instance of the aforementioned struct. +#define TORCH_PRECOMPUTE_META_FUNC(name) \ + structured_##name::meta_return_ty structured_##name::meta +#define TORCH_PRECOMPUTE_META_FUNC2(name, overload) \ + structured_##name##_##overload::meta_return_ty \ + structured_##name##_##overload::meta + +// Use this to create a precompute struct in a meta function. +#define TORCH_PRECOMPUTE_STRUCT(name) structured_##name::precompute_out<> +#define TORCH_PRECOMPUTE_STRUCT2(name, overload) \ + structured_##name##_##overload::precompute_out<> + +// Use this to define the prototype for an implementation. This takes only +// one argument, which is the name of the dispatch key entry you're +// implementing. +// +// Example usage: +// +// TORCH_IMPL_FUNC(add_cpu) ( +// Tensor& result, const Tensor& self, const Tensor& other +// ) { +// ... do the actual implementation ... +// } +// +#define TORCH_IMPL_FUNC(name) void structured_##name::impl + +// Base class for all structured kernel classes. The set_output virtual +// method is varied depending whether or not the operator is +// functional/out/inplace, and could also be specialized for CPU/CUDA/etc +// (although presently it isn't). +// +// A notable subclass of this interface is TensorIteratorBase. 
+struct TORCH_API MetaBase { + MetaBase() = default; + MetaBase(const MetaBase&) = default; + MetaBase& operator=(const MetaBase&) = default; + MetaBase(MetaBase&&) noexcept = default; + MetaBase& operator=(MetaBase&&) noexcept = default; + virtual const Tensor& maybe_get_output(int64_t output_idx) = 0; + + // Note: [set_output_*] + // See: https://github.com/pytorch/pytorch/issues/69813 + // Whenever defining the output properties in the META function of a + // structured kernel (what was usually done with `set_output`), use one of + // these 3 variants, instead. In order to decide which variant to use, check + // the following decision tree: + // + // - Can the kernel you are going to implement support output tensors + // with arbitrary strides? + // | + // -- YES: `set_output_raw_strided` + // | + // -- NO: Should the output tensor strides be contiguous? + // | + // -- YES: `set_output_contiguous` + // | + // -- NO: `set_output_strided` + // + // Use this function whenever the kernel requires specific strides for the + // output. If `strides` does not match the given output strides, proxy outputs + // will be created and passed to the IMPL function. + virtual void set_output_strided( + int64_t output_idx, + IntArrayRef sizes, + IntArrayRef strides, + TensorOptions options, + DimnameList names = {}) { + TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented."); + } + + // Use this function whenever the kernel knows how to handle arbitrary strided + // outputs. This function has the same behavior as the old `set_output`: it + // will only re-stride if the given output was resized. + virtual void set_output_raw_strided( + int64_t output_idx, + IntArrayRef sizes, + IntArrayRef strides_hint, + TensorOptions options, + DimnameList names = {}) { + TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented."); + } + + // Use this function if the kernel requires contiguous strides. + // Alias for `set_output_strided`, but with contiguous strides. 
+ void set_output_contiguous( + int64_t output_idx, + IntArrayRef sizes, + TensorOptions options, + DimnameList names = {}) { + auto strides = c10::contiguous_strides(sizes); + set_output_strided(output_idx, sizes, strides, options, names); + } + + // Returns a reference to an undefined tensor if there is no presupplied + // output + const Tensor& maybe_get_output() { + return maybe_get_output(0); + } + virtual ~MetaBase() = default; +}; + +} // namespace impl + +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h new file mode 100644 index 0000000000000000000000000000000000000000..4ec3d064867fb7ef9ec4c40abf7875de82f88b77 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h @@ -0,0 +1,75 @@ +#pragma once + +#include + +namespace at::namedinference { + +// TensorName and TensorNames are wrappers around Dimname and DimnameList +// that contain helper functions to make writing name inference rules easier. +// +// A TensorName represents a Dimname associated with some DimnameList (from a +// Tensor). This encapsulates all the information that is needed to check if +// names *match* and to *unify* names. +// +// Definition: Two names in two tensors *match* if they are equal, or if at +// least one of them is a wildcard that can be *refined* to the other name. +// +// Definition: unify(name, other) fails if the names do not match. Otherwise, +// it returns the most refined of name and other. +// +// Here is an example of checking if two names match. +// tensor: Tensor[A, None] +// other: Tensor[A] +// +// Let's say we wish to check if tensor.names[-1] matches other.names[-1]. +// None (in tensor) cannot match A (in other) because if the None were refined +// to A, `tensor` would have duplicate names [A, A]. Therefore we need to check +// tensor.names [A, None] for the existence of A. 
+struct TORCH_API TensorName { + explicit TensorName(ArrayRef origin, int origin_idx) + : origin_(origin), + name_(origin[maybe_wrap_dim( + origin_idx, + static_cast(origin.size()))]), + origin_idx_(origin_idx) {} + + // op_name is only used for error reporting. + const TensorName& unify(const TensorName& other, const char* op_name) const; + Dimname toDimname() const; + + private: + ArrayRef origin_; + Dimname name_; + int origin_idx_; // A named tensor can have at most 64 dims. + + TORCH_API friend std::ostream& operator<<( + std::ostream& out, + const TensorName& tensorname); +}; + +using TensorNameVec = SmallVector; + +struct TORCH_API TensorNames { + explicit TensorNames(ArrayRef names); + + // Create TensorNames from names[start:end]. Each individual TensorName stores + // `names`, NOT names[start:end], because the original tensor's names are + // `names`. + explicit TensorNames(ArrayRef names, int64_t start, int64_t end); + + // op_name is only used for error reporting. + TensorNames& unifyFromRightInplace( + const TensorNames& other, + const char* op_name = "unify"); + void checkUnique(const char* op_name) const; + + void append(TensorName&& name); + std::vector toDimnameVec() const; + + private: + explicit TensorNames(TensorNameVec&& names) : names_(names){}; + + TensorNameVec names_; +}; + +} // namespace at::namedinference diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ceil_div.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ceil_div.h new file mode 100644 index 0000000000000000000000000000000000000000..2c13ff8115a0921805db651bfd2d525e18cf85db --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ceil_div.h @@ -0,0 +1,24 @@ +#pragma once +#include +#include + +namespace at { + +/** + Computes ceil(a / b) +*/ +template ::value>> +C10_ALWAYS_INLINE C10_HOST_DEVICE T ceil_div(T a, T b) { + return (a + b - 1) / b; +} + +/** + Computes ceil(a / b) * b; i.e., rounds up `a` to the next highest + 
multiple of b +*/ +template +C10_ALWAYS_INLINE C10_HOST_DEVICE T round_up(T a, T b) { + return ceil_div(a, b) * b; +} + +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/jit_macros.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/jit_macros.h new file mode 100644 index 0000000000000000000000000000000000000000..9af826549021a0853beb83c74b6ac695728ab054 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/jit_macros.h @@ -0,0 +1,7 @@ +#pragma once +#include +#include + +// AT_USE_JITERATOR(), controls whether we jit some elementwise kernels +#define AT_USE_JITERATOR() true +#define jiterator_stringify(...) std::string(#__VA_ARGS__); diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h new file mode 100644 index 0000000000000000000000000000000000000000..a27b8b399ce475f614d6314e527847f8541ec155 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h @@ -0,0 +1,160 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/compute/exec.h" +#include "arrow/result.h" + +namespace arrow { +namespace acero { +namespace util { + +using arrow::compute::ExecBatch; + +/// \brief A container that accumulates batches until they are ready to +/// be processed. +class AccumulationQueue { + public: + AccumulationQueue() : row_count_(0) {} + ~AccumulationQueue() = default; + + // We should never be copying ExecBatch around + AccumulationQueue(const AccumulationQueue&) = delete; + AccumulationQueue& operator=(const AccumulationQueue&) = delete; + + AccumulationQueue(AccumulationQueue&& that); + AccumulationQueue& operator=(AccumulationQueue&& that); + + void Concatenate(AccumulationQueue&& that); + void InsertBatch(ExecBatch batch); + int64_t row_count() { return row_count_; } + size_t batch_count() { return batches_.size(); } + bool empty() const { return batches_.empty(); } + void Clear(); + ExecBatch& operator[](size_t i); + + private: + int64_t row_count_; + std::vector batches_; +}; + +/// A queue that sequences incoming batches +/// +/// This can be used when a node needs to do some kind of ordered processing on +/// the stream. +/// +/// Batches can be inserted in any order. The process_callback will be called on +/// the batches, in order, without reentrant calls. For this reason the callback +/// should be quick. +/// +/// For example, in a top-n node, the process callback should determine how many +/// rows need to be delivered for the given batch, and then return a task to actually +/// deliver those rows. +class SequencingQueue { + public: + using Task = std::function; + + /// Strategy that describes how to handle items + class Processor { + public: + /// Process the batch, potentially generating a task + /// + /// This method will be called on each batch in order. Calls to this method + /// will be serialized and it will not be called reentrantly. 
This makes it + /// safe to do things that rely on order but minimal time should be spent here + /// to avoid becoming a bottleneck. + /// + /// \return a follow-up task that will be scheduled. The follow-up task(s) are + /// is not guaranteed to run in any particular order. If nullopt is + /// returned then nothing will be scheduled. + virtual Result> Process(ExecBatch batch) = 0; + /// Schedule a task + virtual void Schedule(Task task) = 0; + }; + + virtual ~SequencingQueue() = default; + + /// Insert a batch into the queue + /// + /// This will insert the batch into the queue. If this batch was the next batch + /// to deliver then this will trigger 1+ calls to the process callback to generate + /// 1+ tasks. + /// + /// The task generated by this call will be executed immediately. The remaining + /// tasks will be scheduled using the schedule callback. + /// + /// From a data pipeline perspective the sequencing queue is a "sometimes" breaker. If + /// a task arrives in order then this call will usually execute the downstream pipeline. + /// If this task arrives early then this call will only queue the data. + virtual Status InsertBatch(ExecBatch batch) = 0; + + /// Create a queue + /// \param processor describes how to process the batches, must outlive the queue + static std::unique_ptr Make(Processor* processor); +}; + +/// A queue that sequences incoming batches +/// +/// Unlike SequencingQueue the Process method is not expected to schedule new tasks. +/// +/// If a batch arrives and another thread is currently processing then the batch +/// will be queued and control will return. In other words, delivery of batches will +/// not block on the Process method. 
+/// +/// It can be helpful to think of this as if a dedicated thread is running Process as +/// batches arrive +class SerialSequencingQueue { + public: + /// Strategy that describes how to handle items + class Processor { + public: + /// Process the batch + /// + /// This method will be called on each batch in order. Calls to this method + /// will be serialized and it will not be called reentrantly. This makes it + /// safe to do things that rely on order. + /// + /// If this falls behind then data may accumulate + /// + /// TODO: Could add backpressure if needed but right now all uses of this should + /// be pretty fast and so are unlikely to block. + virtual Status Process(ExecBatch batch) = 0; + }; + + virtual ~SerialSequencingQueue() = default; + + /// Insert a batch into the queue + /// + /// This will insert the batch into the queue. If this batch was the next batch + /// to deliver then this may trigger calls to the processor which will be run + /// as part of this call. + virtual Status InsertBatch(ExecBatch batch) = 0; + + /// Create a queue + /// \param processor describes how to process the batches, must outlive the queue + static std::unique_ptr Make(Processor* processor); +}; + +} // namespace util +} // namespace acero +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h new file mode 100644 index 0000000000000000000000000000000000000000..790264b2083052c4623e52718f569a65451475d9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#include +#include + +#include "arrow/acero/visibility.h" +#include "arrow/compute/api_aggregate.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" + +namespace arrow { +namespace acero { +namespace aggregate { + +using compute::Aggregate; +using compute::default_exec_context; +using compute::ExecContext; + +/// \brief Make the output schema of an aggregate node +/// +/// The output schema is determined by the aggregation kernels, which may depend on the +/// ExecContext argument. To guarantee correct results, the same ExecContext argument +/// should be used in execution. 
+/// +/// \param[in] input_schema the schema of the input to the node +/// \param[in] keys the grouping keys for the aggregation +/// \param[in] segment_keys the segmenting keys for the aggregation +/// \param[in] aggregates the aggregates for the aggregation +/// \param[in] exec_ctx the execution context for the aggregation +ARROW_ACERO_EXPORT Result> MakeOutputSchema( + const std::shared_ptr& input_schema, const std::vector& keys, + const std::vector& segment_keys, const std::vector& aggregates, + ExecContext* exec_ctx = default_exec_context()); + +} // namespace aggregate +} // namespace acero +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h new file mode 100644 index 0000000000000000000000000000000000000000..0ba8553887c03f876b6e08f031f5641170c2e09f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "benchmark/benchmark.h" + +#include "arrow/acero/exec_plan.h" +#include "arrow/acero/test_util_internal.h" +#include "arrow/compute/exec.h" + +namespace arrow { + +namespace acero { + +Status BenchmarkNodeOverhead(benchmark::State& state, int32_t num_batches, + int32_t batch_size, arrow::acero::BatchesWithSchema data, + std::vector& node_declarations, + arrow::MemoryPool* pool = default_memory_pool()); + +Status BenchmarkIsolatedNodeOverhead(benchmark::State& state, + arrow::compute::Expression expr, int32_t num_batches, + int32_t batch_size, + arrow::acero::BatchesWithSchema data, + std::string factory_name, + arrow::acero::ExecNodeOptions& options, + arrow::MemoryPool* pool = default_memory_pool()); + +} // namespace acero +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h new file mode 100644 index 0000000000000000000000000000000000000000..dba6c64ddc8379f7a8e6aa666f55555ced6c78aa --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h @@ -0,0 +1,819 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/acero/type_fwd.h" +#include "arrow/acero/visibility.h" +#include "arrow/compute/api_vector.h" +#include "arrow/compute/exec.h" +#include "arrow/compute/ordering.h" +#include "arrow/type_fwd.h" +#include "arrow/util/future.h" +#include "arrow/util/macros.h" +#include "arrow/util/tracing.h" +#include "arrow/util/type_fwd.h" + +namespace arrow { + +using compute::ExecBatch; +using compute::ExecContext; +using compute::FunctionRegistry; +using compute::GetFunctionRegistry; +using compute::Ordering; +using compute::threaded_exec_context; + +namespace acero { + +/// \addtogroup acero-internals +/// @{ + +class ARROW_ACERO_EXPORT ExecPlan : public std::enable_shared_from_this { + public: + // This allows operators to rely on signed 16-bit indices + static const uint32_t kMaxBatchSize = 1 << 15; + using NodeVector = std::vector; + + virtual ~ExecPlan() = default; + + QueryContext* query_context(); + + /// \brief retrieve the nodes in the plan + const NodeVector& nodes() const; + + /// Make an empty exec plan + static Result> Make( + QueryOptions options, ExecContext exec_context = *threaded_exec_context(), + std::shared_ptr metadata = NULLPTR); + + static Result> Make( + ExecContext exec_context = *threaded_exec_context(), + std::shared_ptr metadata = NULLPTR); + + static Result> Make( + QueryOptions options, ExecContext* exec_context, + std::shared_ptr metadata = NULLPTR); + + static Result> Make( + ExecContext* exec_context, + std::shared_ptr metadata = NULLPTR); + + ExecNode* AddNode(std::unique_ptr node); + + template + Node* EmplaceNode(Args&&... 
args) { + std::unique_ptr node{new Node{std::forward(args)...}}; + auto out = node.get(); + AddNode(std::move(node)); + return out; + } + + Status Validate(); + + /// \brief Start producing on all nodes + /// + /// Nodes are started in reverse topological order, such that any node + /// is started before all of its inputs. + void StartProducing(); + + /// \brief Stop producing on all nodes + /// + /// Triggers all sources to stop producing new data. In order to cleanly stop the plan + /// will continue to run any tasks that are already in progress. The caller should + /// still wait for `finished` to complete before destroying the plan. + void StopProducing(); + + /// \brief A future which will be marked finished when all tasks have finished. + Future<> finished(); + + /// \brief Return whether the plan has non-empty metadata + bool HasMetadata() const; + + /// \brief Return the plan's attached metadata + std::shared_ptr metadata() const; + + std::string ToString() const; +}; + +// Acero can be extended by providing custom implementations of ExecNode. The methods +// below are documented in detail and provide careful instruction on how to fulfill the +// ExecNode contract. It's suggested you familiarize yourself with the Acero +// documentation in the C++ user guide. +class ARROW_ACERO_EXPORT ExecNode { + public: + using NodeVector = std::vector; + + virtual ~ExecNode() = default; + + virtual const char* kind_name() const = 0; + + // The number of inputs expected by this node + int num_inputs() const { return static_cast(inputs_.size()); } + + /// This node's predecessors in the exec plan + const NodeVector& inputs() const { return inputs_; } + + /// True if the plan has no output schema (is a sink) + bool is_sink() const { return !output_schema_; } + + /// \brief Labels identifying the function of each input. 
+ const std::vector& input_labels() const { return input_labels_; } + + /// This node's successor in the exec plan + const ExecNode* output() const { return output_; } + + /// The datatypes for batches produced by this node + const std::shared_ptr& output_schema() const { return output_schema_; } + + /// This node's exec plan + ExecPlan* plan() { return plan_; } + + /// \brief An optional label, for display and debugging + /// + /// There is no guarantee that this value is non-empty or unique. + const std::string& label() const { return label_; } + void SetLabel(std::string label) { label_ = std::move(label); } + + virtual Status Validate() const; + + /// \brief the ordering of the output batches + /// + /// This does not guarantee the batches will be emitted by this node + /// in order. Instead it guarantees that the batches will have their + /// ExecBatch::index property set in a way that respects this ordering. + /// + /// In other words, given the ordering {{"x", SortOrder::Ascending}} we + /// know that all values of x in a batch with index N will be less than + /// or equal to all values of x in a batch with index N+k (assuming k > 0). + /// Furthermore, we also know that values will be sorted within a batch. + /// Any row N will have a value of x that is less than the value for + /// any row N+k. + /// + /// Note that an ordering can be both Ordering::Unordered and Ordering::Implicit. + /// A node's output should be marked Ordering::Unordered if the order is + /// non-deterministic. For example, a hash-join has no predictable output order. + /// + /// If the ordering is Ordering::Implicit then there is a meaningful order but that + /// ordering is not represented by any column in the data. The most common case for + /// this is when reading data from an in-memory table. The data has an implicit "row + /// order" which is not necessarily represented in the data set. + /// + /// A filter or project node will not modify the ordering. 
Nothing needs to be done + /// other than ensure the index assigned to output batches is the same as the + /// input batch that was mapped. + /// + /// Other nodes may introduce order. For example, an order-by node will emit + /// a brand new ordering independent of the input ordering. + /// + /// Finally, as described above, such as a hash-join or aggregation may may + /// destroy ordering (although these nodes could also choose to establish a + /// new ordering based on the hash keys). + /// + /// Some nodes will require an ordering. For example, a fetch node or an + /// asof join node will only function if the input data is ordered (for fetch + /// it is enough to be implicitly ordered. For an asof join the ordering must + /// be explicit and compatible with the on key.) + /// + /// Nodes that maintain ordering should be careful to avoid introducing gaps + /// in the batch index. This may require emitting empty batches in order to + /// maintain continuity. + virtual const Ordering& ordering() const; + + /// Upstream API: + /// These functions are called by input nodes that want to inform this node + /// about an updated condition (a new input batch or an impending + /// end of stream). + /// + /// Implementation rules: + /// - these may be called anytime after StartProducing() has succeeded + /// (and even during or after StopProducing()) + /// - these may be called concurrently + /// - these are allowed to call back into PauseProducing(), ResumeProducing() + /// and StopProducing() + + /// Transfer input batch to ExecNode + /// + /// A node will typically perform some kind of operation on the batch + /// and then call InputReceived on its outputs with the result. + /// + /// Other nodes may need to accumulate some number of inputs before any + /// output can be produced. These nodes will add the batch to some kind + /// of in-memory accumulation queue and return. 
+ virtual Status InputReceived(ExecNode* input, ExecBatch batch) = 0; + + /// Mark the inputs finished after the given number of batches. + /// + /// This may be called before all inputs are received. This simply fixes + /// the total number of incoming batches for an input, so that the ExecNode + /// knows when it has received all input, regardless of order. + virtual Status InputFinished(ExecNode* input, int total_batches) = 0; + + /// \brief Perform any needed initialization + /// + /// This hook performs any actions in between creation of ExecPlan and the call to + /// StartProducing. An example could be Bloom filter pushdown. The order of ExecNodes + /// that executes this method is undefined, but the calls are made synchronously. + /// + /// At this point a node can rely on all inputs & outputs (and the input schemas) + /// being well defined. + virtual Status Init(); + + /// Lifecycle API: + /// - start / stop to initiate and terminate production + /// - pause / resume to apply backpressure + /// + /// Implementation rules: + /// - StartProducing() should not recurse into the inputs, as it is + /// handled by ExecPlan::StartProducing() + /// - PauseProducing(), ResumeProducing(), StopProducing() may be called + /// concurrently, potentially even before the call to StartProducing + /// has finished. + /// - PauseProducing(), ResumeProducing(), StopProducing() may be called + /// by the downstream nodes' InputReceived(), InputFinished() methods + /// + /// StopProducing may be called due to an error, by the user (e.g. cancel), or + /// because a node has all the data it needs (e.g. limit, top-k on sorted data). 
+ /// This means the method may be called multiple times and we have the following + /// additional rules + /// - StopProducing() must be idempotent + /// - StopProducing() must be forwarded to inputs (this is needed for the limit/top-k + /// case because we may not be stopping the entire plan) + + // Right now, since synchronous calls happen in both directions (input to + // output and then output to input), a node must be careful to be reentrant + // against synchronous calls from its output, *and* also concurrent calls from + // other threads. The most reliable solution is to update the internal state + // first, and notify outputs only at the end. + // + // Concurrent calls to PauseProducing and ResumeProducing can be hard to sequence + // as they may travel at different speeds through the plan. + // + // For example, consider a resume that comes quickly after a pause. If the source + // receives the resume before the pause the source may think the destination is full + // and halt production which would lead to deadlock. + // + // To resolve this a counter is sent for all calls to pause/resume. Only the call with + // the highest counter value is valid. So if a call to PauseProducing(5) comes after + // a call to ResumeProducing(6) then the source should continue producing. + + /// \brief Start producing + /// + /// This must only be called once. + /// + /// This is typically called automatically by ExecPlan::StartProducing(). + virtual Status StartProducing() = 0; + + /// \brief Pause producing temporarily + /// + /// \param output Pointer to the output that is full + /// \param counter Counter used to sequence calls to pause/resume + /// + /// This call is a hint that an output node is currently not willing + /// to receive data. + /// + /// This may be called any number of times. + /// However, the node is still free to produce data (which may be difficult + /// to prevent anyway if data is produced using multiple threads). 
+ virtual void PauseProducing(ExecNode* output, int32_t counter) = 0; + + /// \brief Resume producing after a temporary pause + /// + /// \param output Pointer to the output that is now free + /// \param counter Counter used to sequence calls to pause/resume + /// + /// This call is a hint that an output node is willing to receive data again. + /// + /// This may be called any number of times. + virtual void ResumeProducing(ExecNode* output, int32_t counter) = 0; + + /// \brief Stop producing new data + /// + /// If this node is a source then the source should stop generating data + /// as quickly as possible. If this node is not a source then there is typically + /// nothing that needs to be done although a node may choose to start ignoring incoming + /// data. + /// + /// This method will be called when an error occurs in the plan + /// This method may also be called by the user if they wish to end a plan early + /// Finally, this method may be called if a node determines it no longer needs any more + /// input (for example, a limit node). + /// + /// This method may be called multiple times. + /// + /// This is not a pause. There will be no way to start the source again after this has + /// been called. + virtual Status StopProducing(); + + std::string ToString(int indent = 0) const; + + protected: + ExecNode(ExecPlan* plan, NodeVector inputs, std::vector input_labels, + std::shared_ptr output_schema); + + virtual Status StopProducingImpl() = 0; + + /// Provide extra info to include in the string representation. 
+ virtual std::string ToStringExtra(int indent = 0) const; + + std::atomic stopped_; + ExecPlan* plan_; + std::string label_; + + NodeVector inputs_; + std::vector input_labels_; + + std::shared_ptr output_schema_; + ExecNode* output_ = NULLPTR; +}; + +/// \brief An extensible registry for factories of ExecNodes +class ARROW_ACERO_EXPORT ExecFactoryRegistry { + public: + using Factory = std::function(ExecPlan*, std::vector, + const ExecNodeOptions&)>; + + virtual ~ExecFactoryRegistry() = default; + + /// \brief Get the named factory from this registry + /// + /// will raise if factory_name is not found + virtual Result GetFactory(const std::string& factory_name) = 0; + + /// \brief Add a factory to this registry with the provided name + /// + /// will raise if factory_name is already in the registry + virtual Status AddFactory(std::string factory_name, Factory factory) = 0; +}; + +/// The default registry, which includes built-in factories. +ARROW_ACERO_EXPORT +ExecFactoryRegistry* default_exec_factory_registry(); + +/// \brief Construct an ExecNode using the named factory +inline Result MakeExecNode( + const std::string& factory_name, ExecPlan* plan, std::vector inputs, + const ExecNodeOptions& options, + ExecFactoryRegistry* registry = default_exec_factory_registry()) { + ARROW_ASSIGN_OR_RAISE(auto factory, registry->GetFactory(factory_name)); + return factory(plan, std::move(inputs), options); +} + +/// @} + +/// \addtogroup acero-api +/// @{ + +/// \brief Helper class for declaring execution nodes +/// +/// A Declaration represents an unconstructed ExecNode (and potentially an entire graph +/// since its inputs may also be Declarations) +/// +/// A Declaration can be converted to a plan and executed using one of the +/// DeclarationToXyz methods. +/// +/// For more direct control, a Declaration can be added to an existing execution +/// plan with Declaration::AddToPlan, which will recursively construct any inputs as +/// necessary. 
+struct ARROW_ACERO_EXPORT Declaration { + using Input = std::variant; + + Declaration() {} + + /// \brief construct a declaration + /// \param factory_name the name of the exec node to construct. The node must have + /// been added to the exec node registry with this name. + /// \param inputs the inputs to the node, these should be other declarations + /// \param options options that control the behavior of the node. You must use + /// the appropriate subclass. For example, if `factory_name` is + /// "project" then `options` should be ProjectNodeOptions. + /// \param label a label to give the node. Can be used to distinguish it from other + /// nodes of the same type in the plan. + Declaration(std::string factory_name, std::vector inputs, + std::shared_ptr options, std::string label) + : factory_name{std::move(factory_name)}, + inputs{std::move(inputs)}, + options{std::move(options)}, + label{std::move(label)} {} + + template + Declaration(std::string factory_name, std::vector inputs, Options options, + std::string label) + : Declaration{std::move(factory_name), std::move(inputs), + std::shared_ptr( + std::make_shared(std::move(options))), + std::move(label)} {} + + template + Declaration(std::string factory_name, std::vector inputs, Options options) + : Declaration{std::move(factory_name), std::move(inputs), std::move(options), + /*label=*/""} {} + + template + Declaration(std::string factory_name, Options options) + : Declaration{std::move(factory_name), {}, std::move(options), /*label=*/""} {} + + template + Declaration(std::string factory_name, Options options, std::string label) + : Declaration{std::move(factory_name), {}, std::move(options), std::move(label)} {} + + /// \brief Convenience factory for the common case of a simple sequence of nodes. + /// + /// Each of decls will be appended to the inputs of the subsequent declaration, + /// and the final modified declaration will be returned. 
+ /// + /// Without this convenience factory, constructing a sequence would require explicit, + /// difficult-to-read nesting: + /// + /// Declaration{"n3", + /// { + /// Declaration{"n2", + /// { + /// Declaration{"n1", + /// { + /// Declaration{"n0", N0Opts{}}, + /// }, + /// N1Opts{}}, + /// }, + /// N2Opts{}}, + /// }, + /// N3Opts{}}; + /// + /// An equivalent Declaration can be constructed more tersely using Sequence: + /// + /// Declaration::Sequence({ + /// {"n0", N0Opts{}}, + /// {"n1", N1Opts{}}, + /// {"n2", N2Opts{}}, + /// {"n3", N3Opts{}}, + /// }); + static Declaration Sequence(std::vector decls); + + /// \brief add the declaration to an already created execution plan + /// \param plan the plan to add the node to + /// \param registry the registry to use to lookup the node factory + /// + /// This method will recursively call AddToPlan on all of the declaration's inputs. + /// This method is only for advanced use when the DeclarationToXyz methods are not + /// sufficient. + /// + /// \return the instantiated execution node + Result AddToPlan(ExecPlan* plan, ExecFactoryRegistry* registry = + default_exec_factory_registry()) const; + + // Validate a declaration + bool IsValid(ExecFactoryRegistry* registry = default_exec_factory_registry()) const; + + /// \brief the name of the factory to use when creating a node + std::string factory_name; + /// \brief the declarations's inputs + std::vector inputs; + /// \brief options to control the behavior of the node + std::shared_ptr options; + /// \brief a label to give the node in the plan + std::string label; +}; + +/// \brief How to handle unaligned buffers +enum class UnalignedBufferHandling { kWarn, kIgnore, kReallocate, kError }; + +/// \brief get the default behavior of unaligned buffer handling +/// +/// This is configurable via the ACERO_ALIGNMENT_HANDLING environment variable which +/// can be set to "warn", "ignore", "reallocate", or "error". 
If the environment +/// variable is not set, or is set to an invalid value, this will return kWarn +UnalignedBufferHandling GetDefaultUnalignedBufferHandling(); + +/// \brief plan-wide options that can be specified when executing an execution plan +struct ARROW_ACERO_EXPORT QueryOptions { + /// \brief Should the plan use a legacy batching strategy + /// + /// This is currently in place only to support the Scanner::ToTable + /// method. This method relies on batch indices from the scanner + /// remaining consistent. This is impractical in the ExecPlan which + /// might slice batches as needed (e.g. for a join) + /// + /// However, it still works for simple plans and this is the only way + /// we have at the moment for maintaining implicit order. + bool use_legacy_batching = false; + + /// If the output has a meaningful order then sequence the output of the plan + /// + /// The default behavior (std::nullopt) will sequence output batches if there + /// is a meaningful ordering in the final node and will emit batches immediately + /// otherwise. + /// + /// If explicitly set to true then plan execution will fail if there is no + /// meaningful ordering. This can be useful to validate a query that should + /// be emitting ordered results. + /// + /// If explicitly set to false then batches will be emit immediately even if there + /// is a meaningful ordering. This could cause batches to be emit out of order but + /// may offer a small decrease to latency. + std::optional sequence_output = std::nullopt; + + /// \brief should the plan use multiple background threads for CPU-intensive work + /// + /// If this is false then all CPU work will be done on the calling thread. I/O tasks + /// will still happen on the I/O executor and may be multi-threaded (but should not use + /// significant CPU resources). 
+ /// + /// Will be ignored if custom_cpu_executor is set + bool use_threads = true; + + /// \brief custom executor to use for CPU-intensive work + /// + /// Must be null or remain valid for the duration of the plan. If this is null then + /// a default thread pool will be chosen whose behavior will be controlled by + /// the `use_threads` option. + ::arrow::internal::Executor* custom_cpu_executor = NULLPTR; + + /// \brief custom executor to use for IO work + /// + /// Must be null or remain valid for the duration of the plan. If this is null then + /// the global io thread pool will be chosen whose behavior will be controlled by + /// the "ARROW_IO_THREADS" environment. + ::arrow::internal::Executor* custom_io_executor = NULLPTR; + + /// \brief a memory pool to use for allocations + /// + /// Must remain valid for the duration of the plan. + MemoryPool* memory_pool = default_memory_pool(); + + /// \brief a function registry to use for the plan + /// + /// Must remain valid for the duration of the plan. + FunctionRegistry* function_registry = GetFunctionRegistry(); + /// \brief the names of the output columns + /// + /// If this is empty then names will be generated based on the input columns + /// + /// If set then the number of names must equal the number of output columns + std::vector field_names; + + /// \brief Policy for unaligned buffers in source data + /// + /// Various compute functions and acero internals will type pun array + /// buffers from uint8_t* to some kind of value type (e.g. we might + /// cast to int32_t* to add two int32 arrays) + /// + /// If the buffer is poorly aligned (e.g. an int32 array is not aligned + /// on a 4-byte boundary) then this is technically undefined behavior in C++. + /// However, most modern compilers and CPUs are fairly tolerant of this + /// behavior and nothing bad (beyond a small hit to performance) is likely + /// to happen. + /// + /// Note that this only applies to source buffers. 
All buffers allocated internally + /// by Acero will be suitably aligned. + /// + /// If this field is set to kWarn then Acero will check if any buffers are unaligned + /// and, if they are, will emit a warning. + /// + /// If this field is set to kReallocate then Acero will allocate a new, suitably aligned + /// buffer and copy the contents from the old buffer into this new buffer. + /// + /// If this field is set to kError then Acero will gracefully abort the plan instead. + /// + /// If this field is set to kIgnore then Acero will not even check if the buffers are + /// unaligned. + /// + /// If this field is not set then it will be treated as kWarn unless overridden + /// by the ACERO_ALIGNMENT_HANDLING environment variable + std::optional unaligned_buffer_handling; +}; + +/// \brief Calculate the output schema of a declaration +/// +/// This does not actually execute the plan. This operation may fail if the +/// declaration represents an invalid plan (e.g. a project node with multiple inputs) +/// +/// \param declaration A declaration describing an execution plan +/// \param function_registry The function registry to use for function execution. If null +/// then the default function registry will be used. +/// +/// \return the schema that batches would have after going through the execution plan +ARROW_ACERO_EXPORT Result> DeclarationToSchema( + const Declaration& declaration, FunctionRegistry* function_registry = NULLPTR); + +/// \brief Create a string representation of a plan +/// +/// This representation is for debug purposes only. +/// +/// Conversion to a string may fail if the declaration represents an +/// invalid plan. +/// +/// Use Substrait for complete serialization of plans +/// +/// \param declaration A declaration describing an execution plan +/// \param function_registry The function registry to use for function execution. If null +/// then the default function registry will be used. 
+/// +/// \return a string representation of the plan suitable for debugging output +ARROW_ACERO_EXPORT Result DeclarationToString( + const Declaration& declaration, FunctionRegistry* function_registry = NULLPTR); + +/// \brief Utility method to run a declaration and collect the results into a table +/// +/// \param declaration A declaration describing the plan to run +/// \param use_threads If `use_threads` is false then all CPU work will be done on the +/// calling thread. I/O tasks will still happen on the I/O executor +/// and may be multi-threaded (but should not use significant CPU +/// resources). +/// \param memory_pool The memory pool to use for allocations made while running the plan. +/// \param function_registry The function registry to use for function execution. If null +/// then the default function registry will be used. +/// +/// This method will add a sink node to the declaration to collect results into a +/// table. It will then create an ExecPlan from the declaration, start the exec plan, +/// block until the plan has finished, and return the created table. +ARROW_ACERO_EXPORT Result> DeclarationToTable( + Declaration declaration, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool(), + FunctionRegistry* function_registry = NULLPTR); + +ARROW_ACERO_EXPORT Result> DeclarationToTable( + Declaration declaration, QueryOptions query_options); + +/// \brief Asynchronous version of \see DeclarationToTable +/// +/// \param declaration A declaration describing the plan to run +/// \param use_threads The behavior of use_threads is slightly different than the +/// synchronous version since we cannot run synchronously on the +/// calling thread. Instead, if use_threads=false then a new thread +/// pool will be created with a single thread and this will be used for +/// all compute work. +/// \param memory_pool The memory pool to use for allocations made while running the plan. 
+/// \param function_registry The function registry to use for function execution. If null +/// then the default function registry will be used. +ARROW_ACERO_EXPORT Future> DeclarationToTableAsync( + Declaration declaration, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool(), + FunctionRegistry* function_registry = NULLPTR); + +/// \brief Overload of \see DeclarationToTableAsync accepting a custom exec context +/// +/// The executor must be specified (cannot be null) and must be kept alive until the +/// returned future finishes. +ARROW_ACERO_EXPORT Future> DeclarationToTableAsync( + Declaration declaration, ExecContext custom_exec_context); + +/// \brief a collection of exec batches with a common schema +struct BatchesWithCommonSchema { + std::vector batches; + std::shared_ptr schema; +}; + +/// \brief Utility method to run a declaration and collect the results into ExecBatch +/// vector +/// +/// \see DeclarationToTable for details on threading & execution +ARROW_ACERO_EXPORT Result DeclarationToExecBatches( + Declaration declaration, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool(), + FunctionRegistry* function_registry = NULLPTR); + +ARROW_ACERO_EXPORT Result DeclarationToExecBatches( + Declaration declaration, QueryOptions query_options); + +/// \brief Asynchronous version of \see DeclarationToExecBatches +/// +/// \see DeclarationToTableAsync for details on threading & execution +ARROW_ACERO_EXPORT Future DeclarationToExecBatchesAsync( + Declaration declaration, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool(), + FunctionRegistry* function_registry = NULLPTR); + +/// \brief Overload of \see DeclarationToExecBatchesAsync accepting a custom exec context +/// +/// \see DeclarationToTableAsync for details on threading & execution +ARROW_ACERO_EXPORT Future DeclarationToExecBatchesAsync( + Declaration declaration, ExecContext custom_exec_context); + +/// \brief Utility method to run a 
declaration and collect the results into a vector +/// +/// \see DeclarationToTable for details on threading & execution +ARROW_ACERO_EXPORT Result>> DeclarationToBatches( + Declaration declaration, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool(), + FunctionRegistry* function_registry = NULLPTR); + +ARROW_ACERO_EXPORT Result>> DeclarationToBatches( + Declaration declaration, QueryOptions query_options); + +/// \brief Asynchronous version of \see DeclarationToBatches +/// +/// \see DeclarationToTableAsync for details on threading & execution +ARROW_ACERO_EXPORT Future>> +DeclarationToBatchesAsync(Declaration declaration, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool(), + FunctionRegistry* function_registry = NULLPTR); + +/// \brief Overload of \see DeclarationToBatchesAsync accepting a custom exec context +/// +/// \see DeclarationToTableAsync for details on threading & execution +ARROW_ACERO_EXPORT Future>> +DeclarationToBatchesAsync(Declaration declaration, ExecContext exec_context); + +/// \brief Utility method to run a declaration and return results as a RecordBatchReader +/// +/// If an exec context is not provided then a default exec context will be used based +/// on the value of `use_threads`. If `use_threads` is false then the CPU executor will +/// be a serial executor and all CPU work will be done on the calling thread. I/O tasks +/// will still happen on the I/O executor and may be multi-threaded. +/// +/// If `use_threads` is false then all CPU work will happen during the calls to +/// RecordBatchReader::Next and no CPU work will happen in the background. If +/// `use_threads` is true then CPU work will happen on the CPU thread pool and tasks may +/// run in between calls to RecordBatchReader::Next. If the returned reader is not +/// consumed quickly enough then the plan will eventually pause as the backpressure queue +/// fills up. 
+/// +/// If a custom exec context is provided then the value of `use_threads` will be ignored. +/// +/// The returned RecordBatchReader can be closed early to cancel the computation of record +/// batches. In this case, only errors encountered by the computation may be reported. In +/// particular, no cancellation error may be reported. +ARROW_ACERO_EXPORT Result> DeclarationToReader( + Declaration declaration, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool(), + FunctionRegistry* function_registry = NULLPTR); + +ARROW_ACERO_EXPORT Result> DeclarationToReader( + Declaration declaration, QueryOptions query_options); + +/// \brief Utility method to run a declaration and ignore results +/// +/// This can be useful when the data are consumed as part of the plan itself, for +/// example, when the plan ends with a write node. +/// +/// \see DeclarationToTable for details on threading & execution +ARROW_ACERO_EXPORT Status +DeclarationToStatus(Declaration declaration, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool(), + FunctionRegistry* function_registry = NULLPTR); + +ARROW_ACERO_EXPORT Status DeclarationToStatus(Declaration declaration, + QueryOptions query_options); + +/// \brief Asynchronous version of \see DeclarationToStatus +/// +/// This can be useful when the data are consumed as part of the plan itself, for +/// example, when the plan ends with a write node. 
+/// +/// \see DeclarationToTableAsync for details on threading & execution +ARROW_ACERO_EXPORT Future<> DeclarationToStatusAsync( + Declaration declaration, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool(), + FunctionRegistry* function_registry = NULLPTR); + +/// \brief Overload of \see DeclarationToStatusAsync accepting a custom exec context +/// +/// \see DeclarationToTableAsync for details on threading & execution +ARROW_ACERO_EXPORT Future<> DeclarationToStatusAsync(Declaration declaration, + ExecContext exec_context); + +/// @} + +/// \brief Wrap an ExecBatch generator in a RecordBatchReader. +/// +/// The RecordBatchReader does not impose any ordering on emitted batches. +ARROW_ACERO_EXPORT +std::shared_ptr MakeGeneratorReader( + std::shared_ptr, std::function>()>, + MemoryPool*); + +constexpr int kDefaultBackgroundMaxQ = 32; +constexpr int kDefaultBackgroundQRestart = 16; + +/// \brief Make a generator of RecordBatchReaders +/// +/// Useful as a source node for an Exec plan +ARROW_ACERO_EXPORT +Result>()>> MakeReaderGenerator( + std::shared_ptr reader, arrow::internal::Executor* io_executor, + int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart); + +} // namespace acero +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h new file mode 100644 index 0000000000000000000000000000000000000000..19745b8675cf0c63ed92c6e5448c9e6a68467f59 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/acero/options.h" +#include "arrow/acero/schema_util.h" +#include "arrow/result.h" +#include "arrow/status.h" + +namespace arrow { + +using compute::ExecContext; + +namespace acero { + +class ARROW_ACERO_EXPORT HashJoinSchema { + public: + Status Init(JoinType join_type, const Schema& left_schema, + const std::vector& left_keys, const Schema& right_schema, + const std::vector& right_keys, const Expression& filter, + const std::string& left_field_name_prefix, + const std::string& right_field_name_prefix); + + Status Init(JoinType join_type, const Schema& left_schema, + const std::vector& left_keys, + const std::vector& left_output, const Schema& right_schema, + const std::vector& right_keys, + const std::vector& right_output, const Expression& filter, + const std::string& left_field_name_prefix, + const std::string& right_field_name_prefix); + + static Status ValidateSchemas(JoinType join_type, const Schema& left_schema, + const std::vector& left_keys, + const std::vector& left_output, + const Schema& right_schema, + const std::vector& right_keys, + const std::vector& right_output, + const std::string& left_field_name_prefix, + const std::string& right_field_name_prefix); + + bool HasDictionaries() const; + + bool HasLargeBinary() const; + + Result BindFilter(Expression filter, const Schema& left_schema, + const Schema& 
right_schema, ExecContext* exec_context); + std::shared_ptr MakeOutputSchema(const std::string& left_field_name_suffix, + const std::string& right_field_name_suffix); + + bool LeftPayloadIsEmpty() const { return PayloadIsEmpty(0); } + + bool RightPayloadIsEmpty() const { return PayloadIsEmpty(1); } + + static int kMissingField() { + return SchemaProjectionMaps::kMissingField; + } + + SchemaProjectionMaps proj_maps[2]; + + private: + static bool IsTypeSupported(const DataType& type); + + Status CollectFilterColumns(std::vector& left_filter, + std::vector& right_filter, + const Expression& filter, const Schema& left_schema, + const Schema& right_schema); + + Expression RewriteFilterToUseFilterSchema(int right_filter_offset, + const SchemaProjectionMap& left_to_filter, + const SchemaProjectionMap& right_to_filter, + const Expression& filter); + + bool PayloadIsEmpty(int side) const { + assert(side == 0 || side == 1); + return proj_maps[side].num_cols(HashJoinProjection::PAYLOAD) == 0; + } + + static Result> ComputePayload(const Schema& schema, + const std::vector& output, + const std::vector& filter, + const std::vector& key); +}; + +} // namespace acero +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h new file mode 100644 index 0000000000000000000000000000000000000000..ddb4c120f2a877ffb794b8443f8af1f7707d2cf6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Often-used headers, for precompiling. +// If updating this header, please make sure you check compilation speed +// before checking in. Adding headers which are not used extremely often +// may incur a slowdown, since it makes the precompiled header heavier to load. + +#include "arrow/pch.h" diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h new file mode 100644 index 0000000000000000000000000000000000000000..db3076a58841a6cb85fcc3d5033ef3b74ed18898 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h @@ -0,0 +1,226 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/type.h" // for DataType, FieldRef, Field and Schema + +namespace arrow { + +using internal::checked_cast; + +namespace acero { + +// Identifiers for all different row schemas that are used in a join +// +enum class HashJoinProjection : int { + INPUT = 0, + KEY = 1, + PAYLOAD = 2, + FILTER = 3, + OUTPUT = 4 +}; + +struct SchemaProjectionMap { + static constexpr int kMissingField = -1; + int num_cols; + const int* source_to_base; + const int* base_to_target; + inline int get(int i) const { + assert(i >= 0 && i < num_cols); + assert(source_to_base[i] != kMissingField); + return base_to_target[source_to_base[i]]; + } +}; + +/// Helper class for managing different projections of the same row schema. +/// Used to efficiently map any field in one projection to a corresponding field in +/// another projection. +/// Materialized mappings are generated lazily at the time of the first access. +/// Thread-safe apart from initialization. 
+template +class SchemaProjectionMaps { + public: + static constexpr int kMissingField = -1; + + Status Init(ProjectionIdEnum full_schema_handle, const Schema& schema, + const std::vector& projection_handles, + const std::vector*>& projections) { + assert(projection_handles.size() == projections.size()); + ARROW_RETURN_NOT_OK(RegisterSchema(full_schema_handle, schema)); + for (size_t i = 0; i < projections.size(); ++i) { + ARROW_RETURN_NOT_OK( + RegisterProjectedSchema(projection_handles[i], *(projections[i]), schema)); + } + RegisterEnd(); + return Status::OK(); + } + + int num_cols(ProjectionIdEnum schema_handle) const { + int id = schema_id(schema_handle); + return static_cast(schemas_[id].second.data_types.size()); + } + + bool is_empty(ProjectionIdEnum schema_handle) const { + return num_cols(schema_handle) == 0; + } + + const std::string& field_name(ProjectionIdEnum schema_handle, int field_id) const { + int id = schema_id(schema_handle); + return schemas_[id].second.field_names[field_id]; + } + + const std::shared_ptr& data_type(ProjectionIdEnum schema_handle, + int field_id) const { + int id = schema_id(schema_handle); + return schemas_[id].second.data_types[field_id]; + } + + const std::vector>& data_types( + ProjectionIdEnum schema_handle) const { + int id = schema_id(schema_handle); + return schemas_[id].second.data_types; + } + + SchemaProjectionMap map(ProjectionIdEnum from, ProjectionIdEnum to) const { + int id_from = schema_id(from); + int id_to = schema_id(to); + SchemaProjectionMap result; + result.num_cols = num_cols(from); + result.source_to_base = mappings_[id_from].data(); + result.base_to_target = inverse_mappings_[id_to].data(); + return result; + } + + protected: + struct FieldInfos { + std::vector field_paths; + std::vector field_names; + std::vector> data_types; + }; + + Status RegisterSchema(ProjectionIdEnum handle, const Schema& schema) { + FieldInfos out_fields; + const FieldVector& in_fields = schema.fields(); + 
out_fields.field_paths.resize(in_fields.size()); + out_fields.field_names.resize(in_fields.size()); + out_fields.data_types.resize(in_fields.size()); + for (size_t i = 0; i < in_fields.size(); ++i) { + const std::string& name = in_fields[i]->name(); + const std::shared_ptr& type = in_fields[i]->type(); + out_fields.field_paths[i] = static_cast(i); + out_fields.field_names[i] = name; + out_fields.data_types[i] = type; + } + schemas_.push_back(std::make_pair(handle, out_fields)); + return Status::OK(); + } + + Status RegisterProjectedSchema(ProjectionIdEnum handle, + const std::vector& selected_fields, + const Schema& full_schema) { + FieldInfos out_fields; + const FieldVector& in_fields = full_schema.fields(); + out_fields.field_paths.resize(selected_fields.size()); + out_fields.field_names.resize(selected_fields.size()); + out_fields.data_types.resize(selected_fields.size()); + for (size_t i = 0; i < selected_fields.size(); ++i) { + // All fields must be found in schema without ambiguity + ARROW_ASSIGN_OR_RAISE(auto match, selected_fields[i].FindOne(full_schema)); + const std::string& name = in_fields[match[0]]->name(); + const std::shared_ptr& type = in_fields[match[0]]->type(); + out_fields.field_paths[i] = match[0]; + out_fields.field_names[i] = name; + out_fields.data_types[i] = type; + } + schemas_.push_back(std::make_pair(handle, out_fields)); + return Status::OK(); + } + + void RegisterEnd() { + size_t size = schemas_.size(); + mappings_.resize(size); + inverse_mappings_.resize(size); + int id_base = 0; + for (size_t i = 0; i < size; ++i) { + GenerateMapForProjection(static_cast(i), id_base); + } + } + + int schema_id(ProjectionIdEnum schema_handle) const { + for (size_t i = 0; i < schemas_.size(); ++i) { + if (schemas_[i].first == schema_handle) { + return static_cast(i); + } + } + // We should never get here + assert(false); + return -1; + } + + void GenerateMapForProjection(int id_proj, int id_base) { + int num_cols_proj = 
static_cast(schemas_[id_proj].second.data_types.size()); + int num_cols_base = static_cast(schemas_[id_base].second.data_types.size()); + + std::vector& mapping = mappings_[id_proj]; + std::vector& inverse_mapping = inverse_mappings_[id_proj]; + mapping.resize(num_cols_proj); + inverse_mapping.resize(num_cols_base); + + if (id_proj == id_base) { + for (int i = 0; i < num_cols_base; ++i) { + mapping[i] = inverse_mapping[i] = i; + } + } else { + const FieldInfos& fields_proj = schemas_[id_proj].second; + const FieldInfos& fields_base = schemas_[id_base].second; + for (int i = 0; i < num_cols_base; ++i) { + inverse_mapping[i] = SchemaProjectionMap::kMissingField; + } + for (int i = 0; i < num_cols_proj; ++i) { + int field_id = SchemaProjectionMap::kMissingField; + for (int j = 0; j < num_cols_base; ++j) { + if (fields_proj.field_paths[i] == fields_base.field_paths[j]) { + field_id = j; + // If there are multiple matches for the same input field, + // it will be mapped to the first match. + break; + } + } + assert(field_id != SchemaProjectionMap::kMissingField); + mapping[i] = field_id; + inverse_mapping[field_id] = i; + } + } + } + + // vector used as a mapping from ProjectionIdEnum to fields + std::vector> schemas_; + std::vector> mappings_; + std::vector> inverse_mappings_; +}; + +using HashJoinProjectionMaps = SchemaProjectionMaps; + +} // namespace acero +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h new file mode 100644 index 0000000000000000000000000000000000000000..97707f43bf20b95387f463a9c07e37f54c33998c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/record_batch.h" +#include "arrow/type_traits.h" + +namespace arrow::acero { + +// normalize the value to unsigned 64-bits while preserving ordering of values +template ::value, bool> = true> +uint64_t NormalizeTime(T t); + +uint64_t GetTime(const RecordBatch* batch, Type::type time_type, int col, uint64_t row); + +} // namespace arrow::acero diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h new file mode 100644 index 0000000000000000000000000000000000000000..21a697a56eca962602b34b2766d74442d185c3d7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#if defined(_WIN32) || defined(__CYGWIN__) +# if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4251) +# else +# pragma GCC diagnostic ignored "-Wattributes" +# endif + +# ifdef ARROW_ACERO_STATIC +# define ARROW_ACERO_EXPORT +# elif defined(ARROW_ACERO_EXPORTING) +# define ARROW_ACERO_EXPORT __declspec(dllexport) +# else +# define ARROW_ACERO_EXPORT __declspec(dllimport) +# endif + +# define ARROW_ACERO_NO_EXPORT +#else // Not Windows +# ifndef ARROW_ACERO_EXPORT +# define ARROW_ACERO_EXPORT __attribute__((visibility("default"))) +# endif +# ifndef ARROW_ACERO_NO_EXPORT +# define ARROW_ACERO_NO_EXPORT __attribute__((visibility("hidden"))) +# endif +#endif // Not-Windows + +#if defined(_MSC_VER) +# pragma warning(pop) +#endif diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/adapter.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/adapter.h new file mode 100644 index 0000000000000000000000000000000000000000..4ffff81f355f1ddcdc19516746c61b8021477de4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/adapter.h @@ -0,0 +1,323 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/adapters/orc/options.h" +#include "arrow/io/interfaces.h" +#include "arrow/memory_pool.h" +#include "arrow/record_batch.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace adapters { +namespace orc { + +/// \brief Information about an ORC stripe +struct StripeInformation { + /// \brief Offset of the stripe from the start of the file, in bytes + int64_t offset; + /// \brief Length of the stripe, in bytes + int64_t length; + /// \brief Number of rows in the stripe + int64_t num_rows; + /// \brief Index of the first row of the stripe + int64_t first_row_id; +}; + +/// \class ORCFileReader +/// \brief Read an Arrow Table or RecordBatch from an ORC file. 
+class ARROW_EXPORT ORCFileReader { + public: + ~ORCFileReader(); + + /// \brief Creates a new ORC reader + /// + /// \param[in] file the data source + /// \param[in] pool a MemoryPool to use for buffer allocations + /// \return the returned reader object + static Result> Open( + const std::shared_ptr& file, MemoryPool* pool); + + /// \brief Return the schema read from the ORC file + /// + /// \return the returned Schema object + Result> ReadSchema(); + + /// \brief Read the file as a Table + /// + /// The table will be composed of one record batch per stripe. + /// + /// \return the returned Table + Result> Read(); + + /// \brief Read the file as a Table + /// + /// The table will be composed of one record batch per stripe. + /// + /// \param[in] schema the Table schema + /// \return the returned Table + Result> Read(const std::shared_ptr& schema); + + /// \brief Read the file as a Table + /// + /// The table will be composed of one record batch per stripe. + /// + /// \param[in] include_indices the selected field indices to read + /// \return the returned Table + Result> Read(const std::vector& include_indices); + + /// \brief Read the file as a Table + /// + /// The table will be composed of one record batch per stripe. + /// + /// \param[in] include_names the selected field names to read + /// \return the returned Table + Result> Read(const std::vector& include_names); + + /// \brief Read the file as a Table + /// + /// The table will be composed of one record batch per stripe. 
+ /// + /// \param[in] schema the Table schema + /// \param[in] include_indices the selected field indices to read + /// \return the returned Table + Result> Read(const std::shared_ptr& schema, + const std::vector& include_indices); + + /// \brief Read a single stripe as a RecordBatch + /// + /// \param[in] stripe the stripe index + /// \return the returned RecordBatch + Result> ReadStripe(int64_t stripe); + + /// \brief Read a single stripe as a RecordBatch + /// + /// \param[in] stripe the stripe index + /// \param[in] include_indices the selected field indices to read + /// \return the returned RecordBatch + Result> ReadStripe( + int64_t stripe, const std::vector& include_indices); + + /// \brief Read a single stripe as a RecordBatch + /// + /// \param[in] stripe the stripe index + /// \param[in] include_names the selected field names to read + /// \return the returned RecordBatch + Result> ReadStripe( + int64_t stripe, const std::vector& include_names); + + /// \brief Seek to designated row. Invoke NextStripeReader() after seek + /// will return stripe reader starting from designated row. + /// + /// \param[in] row_number the rows number to seek + Status Seek(int64_t row_number); + + /// \brief Get a stripe level record batch iterator. + /// + /// Each record batch will have up to `batch_size` rows. + /// NextStripeReader serves as a fine-grained alternative to ReadStripe + /// which may cause OOM issues by loading the whole stripe into memory. + /// + /// Note this will only read rows for the current stripe, not the entire + /// file. + /// + /// \param[in] batch_size the maximum number of rows in each record batch + /// \return the returned stripe reader + Result> NextStripeReader(int64_t batch_size); + + /// \brief Get a stripe level record batch iterator. + /// + /// Each record batch will have up to `batch_size` rows. 
+ /// NextStripeReader serves as a fine-grained alternative to ReadStripe + /// which may cause OOM issues by loading the whole stripe into memory. + /// + /// Note this will only read rows for the current stripe, not the entire + /// file. + /// + /// \param[in] batch_size the maximum number of rows in each record batch + /// \param[in] include_indices the selected field indices to read + /// \return the stripe reader + Result> NextStripeReader( + int64_t batch_size, const std::vector& include_indices); + + /// \brief Get a record batch iterator for the entire file. + /// + /// Each record batch will have up to `batch_size` rows. + /// + /// \param[in] batch_size the maximum number of rows in each record batch + /// \param[in] include_names the selected field names to read, if not empty + /// (otherwise all fields are read) + /// \return the record batch iterator + Result> GetRecordBatchReader( + int64_t batch_size, const std::vector& include_names); + + /// \brief The number of stripes in the file + int64_t NumberOfStripes(); + + /// \brief The number of rows in the file + int64_t NumberOfRows(); + + /// \brief StripeInformation for each stripe. + StripeInformation GetStripeInformation(int64_t stripe); + + /// \brief Get the format version of the file. + /// Currently known values are 0.11 and 0.12. + /// + /// \return The FileVersion of the ORC file. + FileVersion GetFileVersion(); + + /// \brief Get the software instance and version that wrote this file. + /// + /// \return a user-facing string that specifies the software version + std::string GetSoftwareVersion(); + + /// \brief Get the compression kind of the file. + /// + /// \return The kind of compression in the ORC file. + Result GetCompression(); + + /// \brief Get the buffer size for the compression. + /// + /// \return Number of bytes to buffer for the compression codec. + int64_t GetCompressionSize(); + + /// \brief Get the number of rows per an entry in the row index. 
+ /// \return the number of rows per an entry in the row index or 0 if there + /// is no row index. + int64_t GetRowIndexStride(); + + /// \brief Get ID of writer that generated the file. + /// + /// \return UNKNOWN_WRITER if the writer ID is undefined + WriterId GetWriterId(); + + /// \brief Get the writer id value when getWriterId() returns an unknown writer. + /// + /// \return the integer value of the writer ID. + int32_t GetWriterIdValue(); + + /// \brief Get the version of the writer. + /// + /// \return the version of the writer. + + WriterVersion GetWriterVersion(); + + /// \brief Get the number of stripe statistics in the file. + /// + /// \return the number of stripe statistics + int64_t GetNumberOfStripeStatistics(); + + /// \brief Get the length of the data stripes in the file. + /// + /// \return return the number of bytes in stripes + int64_t GetContentLength(); + + /// \brief Get the length of the file stripe statistics. + /// + /// \return the number of compressed bytes in the file stripe statistics + int64_t GetStripeStatisticsLength(); + + /// \brief Get the length of the file footer. + /// + /// \return the number of compressed bytes in the file footer + int64_t GetFileFooterLength(); + + /// \brief Get the length of the file postscript. + /// + /// \return the number of bytes in the file postscript + int64_t GetFilePostscriptLength(); + + /// \brief Get the total length of the file. + /// + /// \return the number of bytes in the file + int64_t GetFileLength(); + + /// \brief Get the serialized file tail. + /// Useful if another reader of the same file wants to avoid re-reading + /// the file tail. See ReadOptions.SetSerializedFileTail(). 
+ /// + /// \return a string of bytes with the file tail + std::string GetSerializedFileTail(); + + /// \brief Return the metadata read from the ORC file + /// + /// \return A KeyValueMetadata object containing the ORC metadata + Result> ReadMetadata(); + + private: + class Impl; + std::unique_ptr impl_; + ORCFileReader(); +}; + +/// \class ORCFileWriter +/// \brief Write an Arrow Table or RecordBatch to an ORC file. +class ARROW_EXPORT ORCFileWriter { + public: + ~ORCFileWriter(); + /// \brief Creates a new ORC writer. + /// + /// \param[in] output_stream a pointer to the io::OutputStream to write into + /// \param[in] write_options the ORC writer options for Arrow + /// \return the returned writer object + static Result> Open( + io::OutputStream* output_stream, + const WriteOptions& write_options = WriteOptions()); + + /// \brief Write a table. This can be called multiple times. + /// + /// Tables passed in subsequent calls must match the schema of the table that was + /// written first. + /// + /// \param[in] table the Arrow table from which data is extracted. + /// \return Status + Status Write(const Table& table); + + /// \brief Write a RecordBatch. This can be called multiple times. + /// + /// RecordBatches passed in subsequent calls must match the schema of the + /// RecordBatch that was written first. + /// + /// \param[in] record_batch the Arrow RecordBatch from which data is extracted. 
+ /// \return Status + Status Write(const RecordBatch& record_batch); + + /// \brief Close an ORC writer (orc::Writer) + /// + /// \return Status + Status Close(); + + private: + class Impl; + std::unique_ptr impl_; + + private: + ORCFileWriter(); +}; + +} // namespace orc +} // namespace adapters +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/options.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/options.h new file mode 100644 index 0000000000000000000000000000000000000000..3a300da678db98c24949203be7ab471a57502640 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/options.h @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include "arrow/io/interfaces.h" +#include "arrow/status.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +namespace adapters { + +namespace orc { + +enum class WriterId : int32_t { + kOrcJava = 0, + kOrcCpp = 1, + kPresto = 2, + kScritchleyGo = 3, + kTrino = 4, + kUnknown = INT32_MAX +}; + +enum class WriterVersion : int32_t { + kOriginal = 0, + kHive8732 = 1, + kHive4243 = 2, + kHive12055 = 3, + kHive13083 = 4, + kOrc101 = 5, + kOrc135 = 6, + kOrc517 = 7, + kOrc203 = 8, + kOrc14 = 9, + kMax = INT32_MAX +}; + +enum class CompressionStrategy : int32_t { kSpeed = 0, kCompression }; + +class ARROW_EXPORT FileVersion { + private: + int32_t major_version_; + int32_t minor_version_; + + public: + static const FileVersion& v_0_11(); + static const FileVersion& v_0_12(); + + FileVersion(int32_t major, int32_t minor) + : major_version_(major), minor_version_(minor) {} + + /** + * Get major version + */ + int32_t major_version() const { return this->major_version_; } + + /** + * Get minor version + */ + int32_t minor_version() const { return this->minor_version_; } + + bool operator==(const FileVersion& right) const { + return this->major_version() == right.major_version() && + this->minor_version() == right.minor_version(); + } + + bool operator!=(const FileVersion& right) const { return !(*this == right); } + + std::string ToString() const; +}; + +/// Options for the ORC Writer +struct ARROW_EXPORT WriteOptions { + /// Number of rows the ORC writer writes at a time, default 1024 + int64_t batch_size = 1024; + /// Which ORC file version to use, default FileVersion(0, 12) + FileVersion file_version = FileVersion(0, 12); + /// Size of each ORC stripe in bytes, default 64 MiB + int64_t stripe_size = 64 * 1024 * 1024; + /// The compression codec of the ORC file, there is no compression by default + Compression::type compression = Compression::UNCOMPRESSED; + /// The size of each compression block in 
bytes, default 64 KiB + int64_t compression_block_size = 64 * 1024; + /// The compression strategy i.e. speed vs size reduction, default + /// CompressionStrategy::kSpeed + CompressionStrategy compression_strategy = CompressionStrategy::kSpeed; + /// The number of rows per an entry in the row index, default 10000 + int64_t row_index_stride = 10000; + /// The padding tolerance, default 0.0 + double padding_tolerance = 0.0; + /// The dictionary key size threshold. 0 to disable dictionary encoding. + /// 1 to always enable dictionary encoding, default 0.0 + double dictionary_key_size_threshold = 0.0; + /// The array of columns that use the bloom filter, default empty + std::vector bloom_filter_columns; + /// The upper limit of the false-positive rate of the bloom filter, default 0.05 + double bloom_filter_fpp = 0.05; +}; + +} // namespace orc +} // namespace adapters +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/tensorflow/convert.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/tensorflow/convert.h new file mode 100644 index 0000000000000000000000000000000000000000..9d093eddf6b598150ddb55da0e84699a5b7ef4b8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/tensorflow/convert.h @@ -0,0 +1,128 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "tensorflow/core/framework/op.h" + +#include "arrow/type.h" + +// These utilities are supposed to be included in TensorFlow operators +// that need to be compiled separately from Arrow because of ABI issues. +// They therefore need to be header-only. + +namespace arrow { + +namespace adapters { + +namespace tensorflow { + +Status GetArrowType(::tensorflow::DataType dtype, std::shared_ptr* out) { + switch (dtype) { + case ::tensorflow::DT_BOOL: + *out = arrow::boolean(); + break; + case ::tensorflow::DT_FLOAT: + *out = arrow::float32(); + break; + case ::tensorflow::DT_DOUBLE: + *out = arrow::float64(); + break; + case ::tensorflow::DT_HALF: + *out = arrow::float16(); + break; + case ::tensorflow::DT_INT8: + *out = arrow::int8(); + break; + case ::tensorflow::DT_INT16: + *out = arrow::int16(); + break; + case ::tensorflow::DT_INT32: + *out = arrow::int32(); + break; + case ::tensorflow::DT_INT64: + *out = arrow::int64(); + break; + case ::tensorflow::DT_UINT8: + *out = arrow::uint8(); + break; + case ::tensorflow::DT_UINT16: + *out = arrow::uint16(); + break; + case ::tensorflow::DT_UINT32: + *out = arrow::uint32(); + break; + case ::tensorflow::DT_UINT64: + *out = arrow::uint64(); + break; + default: + return Status::TypeError("TensorFlow data type is not supported"); + } + return Status::OK(); +} + +Status GetTensorFlowType(std::shared_ptr dtype, ::tensorflow::DataType* out) { + switch (dtype->id()) { + case Type::BOOL: + *out = ::tensorflow::DT_BOOL; + break; + case Type::UINT8: + *out = 
::tensorflow::DT_UINT8; + break; + case Type::INT8: + *out = ::tensorflow::DT_INT8; + break; + case Type::UINT16: + *out = ::tensorflow::DT_UINT16; + break; + case Type::INT16: + *out = ::tensorflow::DT_INT16; + break; + case Type::UINT32: + *out = ::tensorflow::DT_UINT32; + break; + case Type::INT32: + *out = ::tensorflow::DT_INT32; + break; + case Type::UINT64: + *out = ::tensorflow::DT_UINT64; + break; + case Type::INT64: + *out = ::tensorflow::DT_INT64; + break; + case Type::HALF_FLOAT: + *out = ::tensorflow::DT_HALF; + break; + case Type::FLOAT: + *out = ::tensorflow::DT_FLOAT; + break; + case Type::DOUBLE: + *out = ::tensorflow::DT_DOUBLE; + break; + default: + return Status::TypeError("Arrow data type is not supported"); + } + return arrow::Status::OK(); +} + +} // namespace tensorflow + +} // namespace adapters + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h new file mode 100644 index 0000000000000000000000000000000000000000..f122f9378b52592403633f62ff50d8e804b02d12 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h @@ -0,0 +1,887 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Array accessor classes for List, LargeList, ListView, LargeListView, FixedSizeList, +// Map, Struct, and Union + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/data.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup nested-arrays +/// +/// @{ + +// ---------------------------------------------------------------------- +// VarLengthListLikeArray + +template +class VarLengthListLikeArray; + +namespace internal { + +// Private helper for [Large]List[View]Array::SetData. +// Unfortunately, trying to define VarLengthListLikeArray::SetData outside of this header +// doesn't play well with MSVC. +template +void SetListData(VarLengthListLikeArray* self, + const std::shared_ptr& data, + Type::type expected_type_id = TYPE::type_id); + +/// \brief A version of Flatten that keeps recursively flattening until an array of +/// non-list values is reached. +/// +/// Array types considered to be lists by this function: +/// - list +/// - large_list +/// - list_view +/// - large_list_view +/// - fixed_size_list +/// +/// \see ListArray::Flatten +ARROW_EXPORT Result> FlattenLogicalListRecursively( + const Array& in_array, MemoryPool* memory_pool); + +} // namespace internal + +/// Base class for variable-sized list and list-view arrays, regardless of offset size. 
+template +class VarLengthListLikeArray : public Array { + public: + using TypeClass = TYPE; + using offset_type = typename TypeClass::offset_type; + + const TypeClass* var_length_list_like_type() const { return this->list_type_; } + + /// \brief Return array object containing the list's values + /// + /// Note that this buffer does not account for any slice offset or length. + const std::shared_ptr& values() const { return values_; } + + /// Note that this buffer does not account for any slice offset or length. + const std::shared_ptr& value_offsets() const { return data_->buffers[1]; } + + const std::shared_ptr& value_type() const { return list_type_->value_type(); } + + /// Return pointer to raw value offsets accounting for any slice offset + const offset_type* raw_value_offsets() const { return raw_value_offsets_; } + + // The following functions will not perform boundschecking + + offset_type value_offset(int64_t i) const { return raw_value_offsets_[i]; } + + /// \brief Return the size of the value at a particular index + /// + /// Since non-empty null lists and list-views are possible, avoid calling this + /// function when the list at slot i is null. + /// + /// \pre IsValid(i) + virtual offset_type value_length(int64_t i) const = 0; + + /// \pre IsValid(i) + std::shared_ptr value_slice(int64_t i) const { + return values_->Slice(value_offset(i), value_length(i)); + } + + /// \brief Flatten all level recursively until reach a non-list type, and return + /// a non-list type Array. 
+ /// + /// \see internal::FlattenLogicalListRecursively + Result> FlattenRecursively( + MemoryPool* memory_pool = default_memory_pool()) const { + return internal::FlattenLogicalListRecursively(*this, memory_pool); + } + + protected: + friend void internal::SetListData(VarLengthListLikeArray* self, + const std::shared_ptr& data, + Type::type expected_type_id); + + const TypeClass* list_type_ = NULLPTR; + std::shared_ptr values_; + const offset_type* raw_value_offsets_ = NULLPTR; +}; + +// ---------------------------------------------------------------------- +// ListArray / LargeListArray + +template +class BaseListArray : public VarLengthListLikeArray { + public: + using TypeClass = TYPE; + using offset_type = typename TYPE::offset_type; + + const TypeClass* list_type() const { return this->var_length_list_like_type(); } + + /// \brief Return the size of the value at a particular index + /// + /// Since non-empty null lists are possible, avoid calling this + /// function when the list at slot i is null. + /// + /// \pre IsValid(i) + offset_type value_length(int64_t i) const final { + return this->raw_value_offsets_[i + 1] - this->raw_value_offsets_[i]; + } +}; + +/// Concrete Array class for list data +class ARROW_EXPORT ListArray : public BaseListArray { + public: + explicit ListArray(std::shared_ptr data); + + ListArray(std::shared_ptr type, int64_t length, + std::shared_ptr value_offsets, std::shared_ptr values, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct ListArray from array of offsets and child value array + /// + /// This function does the bare minimum of validation of the offsets and + /// input types, and will allocate a new offsets array if necessary (i.e. if + /// the offsets contain any nulls). If the offsets do not have nulls, they + /// are assumed to be well-formed. 
+ /// + /// If a null_bitmap is not provided, the nulls will be inferred from the offsets' + /// null bitmap. But if a null_bitmap is provided, the offsets array can't have nulls. + /// + /// And when a null_bitmap is provided, the offsets array cannot be a slice (i.e. an + /// array with offset() > 0). + /// + /// \param[in] offsets Array containing n + 1 offsets encoding length and + /// size. Must be of int32 type + /// \param[in] values Array containing list values + /// \param[in] pool MemoryPool in case new offsets array needs to be + /// allocated because of null values + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + static Result> FromArrays( + const Array& offsets, const Array& values, MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + static Result> FromArrays( + std::shared_ptr type, const Array& offsets, const Array& values, + MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Build a ListArray from a ListViewArray + static Result> FromListView(const ListViewArray& source, + MemoryPool* pool); + + /// \brief Return an Array that is a concatenation of the lists in this array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration of this array's offsets as well as null elements backed + /// by non-empty lists (they are skipped, thus copying may be needed). + Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Return list offsets as an Int32Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to ListArray::FromArrays() and get back the same list array + /// if the original one has nulls. 
+ std::shared_ptr offsets() const; + + protected: + // This constructor defers SetData to a derived array class + ListArray() = default; + + void SetData(const std::shared_ptr& data); +}; + +/// Concrete Array class for large list data (with 64-bit offsets) +class ARROW_EXPORT LargeListArray : public BaseListArray { + public: + explicit LargeListArray(const std::shared_ptr& data); + + LargeListArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& value_offsets, + const std::shared_ptr& values, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct LargeListArray from array of offsets and child value array + /// + /// This function does the bare minimum of validation of the offsets and + /// input types, and will allocate a new offsets array if necessary (i.e. if + /// the offsets contain any nulls). If the offsets do not have nulls, they + /// are assumed to be well-formed. + /// + /// If a null_bitmap is not provided, the nulls will be inferred from the offsets' + /// null bitmap. But if a null_bitmap is provided, the offsets array can't have nulls. + /// + /// And when a null_bitmap is provided, the offsets array cannot be a slice (i.e. an + /// array with offset() > 0). + /// + /// \param[in] offsets Array containing n + 1 offsets encoding length and + /// size. 
Must be of int64 type + /// \param[in] values Array containing list values + /// \param[in] pool MemoryPool in case new offsets array needs to be + /// allocated because of null values + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + static Result> FromArrays( + const Array& offsets, const Array& values, MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + static Result> FromArrays( + std::shared_ptr type, const Array& offsets, const Array& values, + MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Build a LargeListArray from a LargeListViewArray + static Result> FromListView( + const LargeListViewArray& source, MemoryPool* pool); + + /// \brief Return an Array that is a concatenation of the lists in this array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration of this array's offsets as well as null elements backed + /// by non-empty lists (they are skipped, thus copying may be needed). + Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Return list offsets as an Int64Array + std::shared_ptr offsets() const; + + protected: + void SetData(const std::shared_ptr& data); +}; + +// ---------------------------------------------------------------------- +// ListViewArray / LargeListViewArray + +template +class BaseListViewArray : public VarLengthListLikeArray { + public: + using TypeClass = TYPE; + using offset_type = typename TYPE::offset_type; + + const TypeClass* list_view_type() const { return this->var_length_list_like_type(); } + + /// \brief Note that this buffer does not account for any slice offset or length. 
+ const std::shared_ptr& value_sizes() const { return this->data_->buffers[2]; } + + /// \brief Return pointer to raw value offsets accounting for any slice offset + const offset_type* raw_value_sizes() const { return raw_value_sizes_; } + + /// \brief Return the size of the value at a particular index + /// + /// This should not be called if the list-view at slot i is null. + /// The returned size in those cases could be any value from 0 to the + /// length of the child values array. + /// + /// \pre IsValid(i) + offset_type value_length(int64_t i) const final { return this->raw_value_sizes_[i]; } + + protected: + const offset_type* raw_value_sizes_ = NULLPTR; +}; + +/// \brief Concrete Array class for list-view data +class ARROW_EXPORT ListViewArray : public BaseListViewArray { + public: + explicit ListViewArray(std::shared_ptr data); + + ListViewArray(std::shared_ptr type, int64_t length, + std::shared_ptr value_offsets, + std::shared_ptr value_sizes, std::shared_ptr values, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct ListViewArray from array of offsets, sizes, and child + /// value array + /// + /// Construct a ListViewArray using buffers from offsets and sizes arrays + /// that project views into the child values array. + /// + /// This function does the bare minimum of validation of the offsets/sizes and + /// input types. The offset and length of the offsets and sizes arrays must + /// match and that will be checked, but their contents will be assumed to be + /// well-formed. + /// + /// If a null_bitmap is not provided, the nulls will be inferred from the + /// offsets's null bitmap. But if a null_bitmap is provided, the offsets array + /// can't have nulls. + /// + /// And when a null_bitmap is provided, neither the offsets or sizes array can be a + /// slice (i.e. an array with offset() > 0). + /// + /// \param[in] offsets An array of int32 offsets into the values array. 
NULL values are + /// supported if the corresponding values in sizes is NULL or 0. + /// \param[in] sizes An array containing the int32 sizes of every view. NULL values are + /// taken to represent a NULL list-view in the array being created. + /// \param[in] values Array containing list values + /// \param[in] pool MemoryPool + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + static Result> FromArrays( + const Array& offsets, const Array& sizes, const Array& values, + MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + static Result> FromArrays( + std::shared_ptr type, const Array& offsets, const Array& sizes, + const Array& values, MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Build a ListViewArray from a ListArray + static Result> FromList(const ListArray& list_array, + MemoryPool* pool); + + /// \brief Return an Array that is a concatenation of the list-views in this array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration this array's offsets (which can be in any order) + /// and sizes. Nulls are skipped. + /// + /// This function invokes Concatenate() if list-views are non-contiguous. It + /// will try to minimize the number of array slices passed to Concatenate() by + /// maximizing the size of each slice (containing as many contiguous + /// list-views as possible). + Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Return list-view offsets as an Int32Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to ListArray::FromArrays() and get back the same list array + /// if the original one has nulls. 
+ std::shared_ptr offsets() const; + + /// \brief Return list-view sizes as an Int32Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to ListViewArray::FromArrays() and get back the same list + /// array if the original one has nulls. + std::shared_ptr sizes() const; + + protected: + // This constructor defers SetData to a derived array class + ListViewArray() = default; + + void SetData(const std::shared_ptr& data); +}; + +/// \brief Concrete Array class for large list-view data (with 64-bit offsets +/// and sizes) +class ARROW_EXPORT LargeListViewArray : public BaseListViewArray { + public: + explicit LargeListViewArray(std::shared_ptr data); + + LargeListViewArray(std::shared_ptr type, int64_t length, + std::shared_ptr value_offsets, + std::shared_ptr value_sizes, std::shared_ptr values, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct LargeListViewArray from array of offsets, sizes, and child + /// value array + /// + /// Construct an LargeListViewArray using buffers from offsets and sizes arrays + /// that project views into the values array. + /// + /// This function does the bare minimum of validation of the offsets/sizes and + /// input types. The offset and length of the offsets and sizes arrays must + /// match and that will be checked, but their contents will be assumed to be + /// well-formed. + /// + /// If a null_bitmap is not provided, the nulls will be inferred from the offsets' or + /// sizes' null bitmap. Only one of these two is allowed to have a null bitmap. But if a + /// null_bitmap is provided, the offsets array and the sizes array can't have nulls. + /// + /// And when a null_bitmap is provided, neither the offsets or sizes array can be a + /// slice (i.e. an array with offset() > 0). + /// + /// \param[in] offsets An array of int64 offsets into the values array. 
NULL values are + /// supported if the corresponding values in sizes is NULL or 0. + /// \param[in] sizes An array containing the int64 sizes of every view. NULL values are + /// taken to represent a NULL list-view in the array being created. + /// \param[in] values Array containing list values + /// \param[in] pool MemoryPool + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + static Result> FromArrays( + const Array& offsets, const Array& sizes, const Array& values, + MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + static Result> FromArrays( + std::shared_ptr type, const Array& offsets, const Array& sizes, + const Array& values, MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Build a LargeListViewArray from a LargeListArray + static Result> FromList( + const LargeListArray& list_array, MemoryPool* pool); + + /// \brief Return an Array that is a concatenation of the large list-views in this + /// array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration this array's offsets (which can be in any order) + /// and sizes. Nulls are skipped. + Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Return list-view offsets as an Int64Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to LargeListArray::FromArrays() and get back the same list array + /// if the original one has nulls. + std::shared_ptr offsets() const; + + /// \brief Return list-view sizes as an Int64Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to LargeListViewArray::FromArrays() and get back the same list + /// array if the original one has nulls. 
+ std::shared_ptr sizes() const; + + protected: + // This constructor defers SetData to a derived array class + LargeListViewArray() = default; + + void SetData(const std::shared_ptr& data); +}; + +// ---------------------------------------------------------------------- +// MapArray + +/// Concrete Array class for map data +/// +/// NB: "value" in this context refers to a pair of a key and the corresponding item +class ARROW_EXPORT MapArray : public ListArray { + public: + using TypeClass = MapType; + + explicit MapArray(const std::shared_ptr& data); + + MapArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& value_offsets, + const std::shared_ptr& keys, const std::shared_ptr& items, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + MapArray(const std::shared_ptr& type, int64_t length, BufferVector buffers, + const std::shared_ptr& keys, const std::shared_ptr& items, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + MapArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& value_offsets, + const std::shared_ptr& values, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct MapArray from array of offsets and child key, item arrays + /// + /// This function does the bare minimum of validation of the offsets and + /// input types, and will allocate a new offsets array if necessary (i.e. if + /// the offsets contain any nulls). If the offsets do not have nulls, they + /// are assumed to be well-formed + /// + /// \param[in] offsets Array containing n + 1 offsets encoding length and + /// size. 
Must be of int32 type + /// \param[in] keys Array containing key values + /// \param[in] items Array containing item values + /// \param[in] pool MemoryPool in case new offsets array needs to be + /// \param[in] null_bitmap Optional validity bitmap + /// allocated because of null values + static Result> FromArrays( + const std::shared_ptr& offsets, const std::shared_ptr& keys, + const std::shared_ptr& items, MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR); + + static Result> FromArrays( + std::shared_ptr type, const std::shared_ptr& offsets, + const std::shared_ptr& keys, const std::shared_ptr& items, + MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR); + + const MapType* map_type() const { return map_type_; } + + /// \brief Return array object containing all map keys + const std::shared_ptr& keys() const { return keys_; } + + /// \brief Return array object containing all mapped items + const std::shared_ptr& items() const { return items_; } + + /// Validate child data before constructing the actual MapArray. 
+ static Status ValidateChildData( + const std::vector>& child_data); + + protected: + void SetData(const std::shared_ptr& data); + + static Result> FromArraysInternal( + std::shared_ptr type, const std::shared_ptr& offsets, + const std::shared_ptr& keys, const std::shared_ptr& items, + MemoryPool* pool, std::shared_ptr null_bitmap = NULLPTR); + + private: + const MapType* map_type_; + std::shared_ptr keys_, items_; +}; + +// ---------------------------------------------------------------------- +// FixedSizeListArray + +/// Concrete Array class for fixed size list data +class ARROW_EXPORT FixedSizeListArray : public Array { + public: + using TypeClass = FixedSizeListType; + using offset_type = TypeClass::offset_type; + + explicit FixedSizeListArray(const std::shared_ptr& data); + + FixedSizeListArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& values, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + const FixedSizeListType* list_type() const; + + /// \brief Return array object containing the list's values + const std::shared_ptr& values() const; + + const std::shared_ptr& value_type() const; + + // The following functions will not perform boundschecking + int64_t value_offset(int64_t i) const { + i += data_->offset; + return list_size_ * i; + } + /// \brief Return the fixed-size of the values + /// + /// No matter the value of the index parameter, the result is the same. + /// So even when the value at slot i is null, this function will return a + /// non-zero size. + /// + /// \pre IsValid(i) + int32_t value_length(int64_t i = 0) const { + ARROW_UNUSED(i); + return list_size_; + } + /// \pre IsValid(i) + std::shared_ptr value_slice(int64_t i) const { + return values_->Slice(value_offset(i), value_length(i)); + } + + /// \brief Return an Array that is a concatenation of the lists in this array. 
+ /// + /// Note that it's different from `values()` in that it takes into + /// consideration null elements (they are skipped, thus copying may be needed). + Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Flatten all level recursively until reach a non-list type, and return + /// a non-list type Array. + /// + /// \see internal::FlattenLogicalListRecursively + Result> FlattenRecursively( + MemoryPool* memory_pool = default_memory_pool()) const { + return internal::FlattenLogicalListRecursively(*this, memory_pool); + } + + /// \brief Construct FixedSizeListArray from child value array and value_length + /// + /// \param[in] values Array containing list values + /// \param[in] list_size The fixed length of each list + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + /// \return Will have length equal to values.length() / list_size + static Result> FromArrays( + const std::shared_ptr& values, int32_t list_size, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Construct FixedSizeListArray from child value array and type + /// + /// \param[in] values Array containing list values + /// \param[in] type The fixed sized list type + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + /// \return Will have length equal to values.length() / type.list_size() + static Result> FromArrays( + const std::shared_ptr& values, std::shared_ptr type, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + protected: + void SetData(const std::shared_ptr& data); + int32_t list_size_; + + private: + std::shared_ptr values_; +}; + +// ---------------------------------------------------------------------- +// Struct + +/// Concrete Array class for struct data +class ARROW_EXPORT StructArray : public Array { + public: + using TypeClass = 
StructType; + + explicit StructArray(const std::shared_ptr& data); + + StructArray(const std::shared_ptr& type, int64_t length, + const std::vector>& children, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Return a StructArray from child arrays and field names. + /// + /// The length and data type are automatically inferred from the arguments. + /// There should be at least one child array. + static Result> Make( + const ArrayVector& children, const std::vector& field_names, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Return a StructArray from child arrays and fields. + /// + /// The length is automatically inferred from the arguments. + /// There should be at least one child array. This method does not + /// check that field types and child array types are consistent. + static Result> Make( + const ArrayVector& children, const FieldVector& fields, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + const StructType* struct_type() const; + + // Return a shared pointer in case the requestor desires to share ownership + // with this array. The returned array has its offset, length and null + // count adjusted. + const std::shared_ptr& field(int pos) const; + + const ArrayVector& fields() const; + + /// Returns null if name not found + std::shared_ptr GetFieldByName(const std::string& name) const; + + /// Indicate if field named `name` can be found unambiguously in the struct. + Status CanReferenceFieldByName(const std::string& name) const; + + /// Indicate if fields named `names` can be found unambiguously in the struct. 
+ Status CanReferenceFieldsByNames(const std::vector& names) const; + + /// \brief Flatten this array as a vector of arrays, one for each field + /// + /// \param[in] pool The pool to allocate null bitmaps from, if necessary + Result Flatten(MemoryPool* pool = default_memory_pool()) const; + + /// \brief Get one of the child arrays, combining its null bitmap + /// with the parent struct array's bitmap. + /// + /// \param[in] index Which child array to get + /// \param[in] pool The pool to allocate null bitmaps from, if necessary + Result> GetFlattenedField( + int index, MemoryPool* pool = default_memory_pool()) const; + + private: + // For caching boxed child data + // XXX This is not handled in a thread-safe manner. + mutable ArrayVector boxed_fields_; +}; + +// ---------------------------------------------------------------------- +// Union + +/// Base class for SparseUnionArray and DenseUnionArray +class ARROW_EXPORT UnionArray : public Array { + public: + using type_code_t = int8_t; + + /// Note that this buffer does not account for any slice offset + const std::shared_ptr& type_codes() const { return data_->buffers[1]; } + + const type_code_t* raw_type_codes() const { return raw_type_codes_; } + + /// The logical type code of the value at index. + type_code_t type_code(int64_t i) const { return raw_type_codes_[i]; } + + /// The physical child id containing value at index. + int child_id(int64_t i) const { return union_type_->child_ids()[raw_type_codes_[i]]; } + + const UnionType* union_type() const { return union_type_; } + + UnionMode::type mode() const { return union_type_->mode(); } + + /// \brief Return the given field as an individual array. + /// + /// For sparse unions, the returned array has its offset, length and null + /// count adjusted. 
+ std::shared_ptr field(int pos) const; + + protected: + void SetData(std::shared_ptr data); + + const type_code_t* raw_type_codes_; + const UnionType* union_type_; + + // For caching boxed child data + mutable std::vector> boxed_fields_; +}; + +/// Concrete Array class for sparse union data +class ARROW_EXPORT SparseUnionArray : public UnionArray { + public: + using TypeClass = SparseUnionType; + + explicit SparseUnionArray(std::shared_ptr data); + + SparseUnionArray(std::shared_ptr type, int64_t length, ArrayVector children, + std::shared_ptr type_ids, int64_t offset = 0); + + /// \brief Construct SparseUnionArray from type_ids and children + /// + /// This function does the bare minimum of validation of the input types. + /// + /// \param[in] type_ids An array of logical type ids for the union type + /// \param[in] children Vector of children Arrays containing the data for each type. + /// \param[in] type_codes Vector of type codes. + static Result> Make(const Array& type_ids, ArrayVector children, + std::vector type_codes) { + return Make(std::move(type_ids), std::move(children), std::vector{}, + std::move(type_codes)); + } + + /// \brief Construct SparseUnionArray with custom field names from type_ids and children + /// + /// This function does the bare minimum of validation of the input types. + /// + /// \param[in] type_ids An array of logical type ids for the union type + /// \param[in] children Vector of children Arrays containing the data for each type. + /// \param[in] field_names Vector of strings containing the name of each field. + /// \param[in] type_codes Vector of type codes. + static Result> Make(const Array& type_ids, ArrayVector children, + std::vector field_names = {}, + std::vector type_codes = {}); + + const SparseUnionType* union_type() const { + return internal::checked_cast(union_type_); + } + + /// \brief Get one of the child arrays, adjusting its null bitmap + /// where the union array type code does not match. 
+ /// + /// \param[in] index Which child array to get (i.e. the physical index, not the type + /// code) \param[in] pool The pool to allocate null bitmaps from, if necessary + Result> GetFlattenedField( + int index, MemoryPool* pool = default_memory_pool()) const; + + protected: + void SetData(std::shared_ptr data); +}; + +/// \brief Concrete Array class for dense union data +/// +/// Note that union types do not have a validity bitmap +class ARROW_EXPORT DenseUnionArray : public UnionArray { + public: + using TypeClass = DenseUnionType; + + explicit DenseUnionArray(const std::shared_ptr& data); + + DenseUnionArray(std::shared_ptr type, int64_t length, ArrayVector children, + std::shared_ptr type_ids, + std::shared_ptr value_offsets = NULLPTR, int64_t offset = 0); + + /// \brief Construct DenseUnionArray from type_ids, value_offsets, and children + /// + /// This function does the bare minimum of validation of the offsets and + /// input types. + /// + /// \param[in] type_ids An array of logical type ids for the union type + /// \param[in] value_offsets An array of signed int32 values indicating the + /// relative offset into the respective child array for the type in a given slot. + /// The respective offsets for each child value array must be in order / increasing. + /// \param[in] children Vector of children Arrays containing the data for each type. + /// \param[in] type_codes Vector of type codes. + static Result> Make(const Array& type_ids, + const Array& value_offsets, + ArrayVector children, + std::vector type_codes) { + return Make(type_ids, value_offsets, std::move(children), std::vector{}, + std::move(type_codes)); + } + + /// \brief Construct DenseUnionArray with custom field names from type_ids, + /// value_offsets, and children + /// + /// This function does the bare minimum of validation of the offsets and + /// input types. 
+ /// + /// \param[in] type_ids An array of logical type ids for the union type + /// \param[in] value_offsets An array of signed int32 values indicating the + /// relative offset into the respective child array for the type in a given slot. + /// The respective offsets for each child value array must be in order / increasing. + /// \param[in] children Vector of children Arrays containing the data for each type. + /// \param[in] field_names Vector of strings containing the name of each field. + /// \param[in] type_codes Vector of type codes. + static Result> Make(const Array& type_ids, + const Array& value_offsets, + ArrayVector children, + std::vector field_names = {}, + std::vector type_codes = {}); + + const DenseUnionType* union_type() const { + return internal::checked_cast(union_type_); + } + + /// Note that this buffer does not account for any slice offset + const std::shared_ptr& value_offsets() const { return data_->buffers[2]; } + + int32_t value_offset(int64_t i) const { return raw_value_offsets_[i]; } + + const int32_t* raw_value_offsets() const { return raw_value_offsets_; } + + protected: + const int32_t* raw_value_offsets_; + + void SetData(const std::shared_ptr& data); +}; + +/// @} + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h new file mode 100644 index 0000000000000000000000000000000000000000..d11ccfc1fd72253600501d7de3a150944608ca06 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/array/array_base.h" +#include "arrow/c/dlpack_abi.h" + +namespace arrow::dlpack { + +/// \brief Export Arrow array as DLPack tensor. +/// +/// DLMangedTensor is produced as defined by the DLPack protocol, +/// see https://dmlc.github.io/dlpack/latest/. +/// +/// Data types for which the protocol is supported are +/// integer and floating-point data types. +/// +/// DLPack protocol only supports arrays with one contiguous +/// memory region which means Arrow Arrays with validity buffers +/// are not supported. +/// +/// \param[in] arr Arrow array +/// \return DLManagedTensor struct +ARROW_EXPORT +Result ExportArray(const std::shared_ptr& arr); + +/// \brief Get DLDevice with enumerator specifying the +/// type of the device data is stored on and index of the +/// device which is 0 by default for CPU. +/// +/// \param[in] arr Arrow array +/// \return DLDevice struct +ARROW_EXPORT +Result ExportDevice(const std::shared_ptr& arr); + +} // namespace arrow::dlpack diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h new file mode 100644 index 0000000000000000000000000000000000000000..b701d9928691f42b70a201569feb27d5ea86f8cd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle + +#pragma once + +/// \defgroup compute-functions Abstract compute function API +/// @{ +/// @} + +/// \defgroup compute-concrete-options Concrete option classes for compute functions +/// @{ +/// @} + +#include "arrow/compute/api_aggregate.h" // IWYU pragma: export +#include "arrow/compute/api_scalar.h" // IWYU pragma: export +#include "arrow/compute/api_vector.h" // IWYU pragma: export +#include "arrow/compute/cast.h" // IWYU pragma: export +#include "arrow/compute/function.h" // IWYU pragma: export +#include "arrow/compute/function_options.h" // IWYU pragma: export +#include "arrow/compute/kernel.h" // IWYU pragma: export +#include "arrow/compute/registry.h" // IWYU pragma: export +#include "arrow/datum.h" // IWYU pragma: export + +#include "arrow/compute/expression.h" // IWYU pragma: export + +/// \defgroup execnode-row Utilities for working with data in a row-major format +/// @{ +/// @} + +#include "arrow/compute/row/grouper.h" // IWYU pragma: export + +/// \defgroup acero-internals Acero internals, useful for those extending Acero +/// @{ +/// @} + +#include "arrow/compute/exec.h" // IWYU pragma: export diff --git 
a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h new file mode 100644 index 0000000000000000000000000000000000000000..e5bcc3732966185e00612619d64a86867e1f4fca --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h @@ -0,0 +1,709 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/compute/function_options.h" +#include "arrow/compute/ordering.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" + +namespace arrow { +namespace compute { + +class ExecContext; + +/// \addtogroup compute-concrete-options +/// @{ + +class ARROW_EXPORT FilterOptions : public FunctionOptions { + public: + /// Configure the action taken when a slot of the selection mask is null + enum NullSelectionBehavior { + /// The corresponding filtered value will be removed in the output. + DROP, + /// The corresponding filtered value will be null in the output. 
+ EMIT_NULL, + }; + + explicit FilterOptions(NullSelectionBehavior null_selection = DROP); + static constexpr char const kTypeName[] = "FilterOptions"; + static FilterOptions Defaults() { return FilterOptions(); } + + NullSelectionBehavior null_selection_behavior = DROP; +}; + +class ARROW_EXPORT TakeOptions : public FunctionOptions { + public: + explicit TakeOptions(bool boundscheck = true); + static constexpr char const kTypeName[] = "TakeOptions"; + static TakeOptions BoundsCheck() { return TakeOptions(true); } + static TakeOptions NoBoundsCheck() { return TakeOptions(false); } + static TakeOptions Defaults() { return BoundsCheck(); } + + bool boundscheck = true; +}; + +/// \brief Options for the dictionary encode function +class ARROW_EXPORT DictionaryEncodeOptions : public FunctionOptions { + public: + /// Configure how null values will be encoded + enum NullEncodingBehavior { + /// The null value will be added to the dictionary with a proper index. + ENCODE, + /// The null value will be masked in the indices array. 
+ MASK + }; + + explicit DictionaryEncodeOptions(NullEncodingBehavior null_encoding = MASK); + static constexpr char const kTypeName[] = "DictionaryEncodeOptions"; + static DictionaryEncodeOptions Defaults() { return DictionaryEncodeOptions(); } + + NullEncodingBehavior null_encoding_behavior = MASK; +}; + +/// \brief Options for the run-end encode function +class ARROW_EXPORT RunEndEncodeOptions : public FunctionOptions { + public: + explicit RunEndEncodeOptions(std::shared_ptr run_end_type = int32()); + static constexpr char const kTypeName[] = "RunEndEncodeOptions"; + static RunEndEncodeOptions Defaults() { return RunEndEncodeOptions(); } + + std::shared_ptr run_end_type; +}; + +class ARROW_EXPORT ArraySortOptions : public FunctionOptions { + public: + explicit ArraySortOptions(SortOrder order = SortOrder::Ascending, + NullPlacement null_placement = NullPlacement::AtEnd); + static constexpr char const kTypeName[] = "ArraySortOptions"; + static ArraySortOptions Defaults() { return ArraySortOptions(); } + + /// Sorting order + SortOrder order; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement; +}; + +class ARROW_EXPORT SortOptions : public FunctionOptions { + public: + explicit SortOptions(std::vector sort_keys = {}, + NullPlacement null_placement = NullPlacement::AtEnd); + explicit SortOptions(const Ordering& ordering); + static constexpr char const kTypeName[] = "SortOptions"; + static SortOptions Defaults() { return SortOptions(); } + /// Convenience constructor to create an ordering from SortOptions + /// + /// Note: Both classes contain the exact same information. However, + /// sort_options should only be used in a "function options" context while Ordering + /// is used more generally. 
+ Ordering AsOrdering() && { return Ordering(std::move(sort_keys), null_placement); } + Ordering AsOrdering() const& { return Ordering(sort_keys, null_placement); } + + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement; +}; + +/// \brief SelectK options +class ARROW_EXPORT SelectKOptions : public FunctionOptions { + public: + explicit SelectKOptions(int64_t k = -1, std::vector sort_keys = {}); + static constexpr char const kTypeName[] = "SelectKOptions"; + static SelectKOptions Defaults() { return SelectKOptions(); } + + static SelectKOptions TopKDefault(int64_t k, std::vector key_names = {}) { + std::vector keys; + for (const auto& name : key_names) { + keys.emplace_back(SortKey(name, SortOrder::Descending)); + } + if (key_names.empty()) { + keys.emplace_back(SortKey("not-used", SortOrder::Descending)); + } + return SelectKOptions{k, keys}; + } + static SelectKOptions BottomKDefault(int64_t k, + std::vector key_names = {}) { + std::vector keys; + for (const auto& name : key_names) { + keys.emplace_back(SortKey(name, SortOrder::Ascending)); + } + if (key_names.empty()) { + keys.emplace_back(SortKey("not-used", SortOrder::Ascending)); + } + return SelectKOptions{k, keys}; + } + + /// The number of `k` elements to keep. + int64_t k; + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys; +}; + +/// \brief Rank options +class ARROW_EXPORT RankOptions : public FunctionOptions { + public: + /// Configure how ties between equal values are handled + enum Tiebreaker { + /// Ties get the smallest possible rank in sorted order. + Min, + /// Ties get the largest possible rank in sorted order. + Max, + /// Ranks are assigned in order of when ties appear in the input. + /// This ensures the ranks are a stable permutation of the input. 
+ First, + /// The ranks span a dense [1, M] interval where M is the number + /// of distinct values in the input. + Dense + }; + + explicit RankOptions(std::vector sort_keys = {}, + NullPlacement null_placement = NullPlacement::AtEnd, + Tiebreaker tiebreaker = RankOptions::First); + /// Convenience constructor for array inputs + explicit RankOptions(SortOrder order, + NullPlacement null_placement = NullPlacement::AtEnd, + Tiebreaker tiebreaker = RankOptions::First) + : RankOptions({SortKey("", order)}, null_placement, tiebreaker) {} + + static constexpr char const kTypeName[] = "RankOptions"; + static RankOptions Defaults() { return RankOptions(); } + + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement; + /// Tiebreaker for dealing with equal values in ranks + Tiebreaker tiebreaker; +}; + +/// \brief Partitioning options for NthToIndices +class ARROW_EXPORT PartitionNthOptions : public FunctionOptions { + public: + explicit PartitionNthOptions(int64_t pivot, + NullPlacement null_placement = NullPlacement::AtEnd); + PartitionNthOptions() : PartitionNthOptions(0) {} + static constexpr char const kTypeName[] = "PartitionNthOptions"; + + /// The index into the equivalent sorted array of the partition pivot element. 
+ int64_t pivot; + /// Whether nulls and NaNs are partitioned at the start or at the end + NullPlacement null_placement; +}; + +/// \brief Options for cumulative functions +/// \note Also aliased as CumulativeSumOptions for backward compatibility +class ARROW_EXPORT CumulativeOptions : public FunctionOptions { + public: + explicit CumulativeOptions(bool skip_nulls = false); + explicit CumulativeOptions(double start, bool skip_nulls = false); + explicit CumulativeOptions(std::shared_ptr start, bool skip_nulls = false); + static constexpr char const kTypeName[] = "CumulativeOptions"; + static CumulativeOptions Defaults() { return CumulativeOptions(); } + + /// Optional starting value for cumulative operation computation, default depends on the + /// operation and input type. + /// - sum: 0 + /// - prod: 1 + /// - min: maximum of the input type + /// - max: minimum of the input type + /// - mean: start is ignored because it has no meaning for mean + std::optional> start; + + /// If true, nulls in the input are ignored and produce a corresponding null output. + /// When false, the first null encountered is propagated through the remaining output. + bool skip_nulls = false; +}; +using CumulativeSumOptions = CumulativeOptions; // For backward compatibility + +/// \brief Options for pairwise functions +class ARROW_EXPORT PairwiseOptions : public FunctionOptions { + public: + explicit PairwiseOptions(int64_t periods = 1); + static constexpr char const kTypeName[] = "PairwiseOptions"; + static PairwiseOptions Defaults() { return PairwiseOptions(); } + + /// Periods to shift for applying the binary operation, accepts negative values. 
+ int64_t periods = 1; +}; + +/// \brief Options for list_flatten function +class ARROW_EXPORT ListFlattenOptions : public FunctionOptions { + public: + explicit ListFlattenOptions(bool recursive = false); + static constexpr char const kTypeName[] = "ListFlattenOptions"; + static ListFlattenOptions Defaults() { return ListFlattenOptions(); } + + /// \brief If true, the list is flattened recursively until a non-list + /// array is formed. + bool recursive = false; +}; + +/// @} + +/// \brief Filter with a boolean selection filter +/// +/// The output will be populated with values from the input at positions +/// where the selection filter is not 0. Nulls in the filter will be handled +/// based on options.null_selection_behavior. +/// +/// For example given values = ["a", "b", "c", null, "e", "f"] and +/// filter = [0, 1, 1, 0, null, 1], the output will be +/// (null_selection_behavior == DROP) = ["b", "c", "f"] +/// (null_selection_behavior == EMIT_NULL) = ["b", "c", null, "f"] +/// +/// \param[in] values array to filter +/// \param[in] filter indicates which values should be filtered out +/// \param[in] options configures null_selection_behavior +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result Filter(const Datum& values, const Datum& filter, + const FilterOptions& options = FilterOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +namespace internal { + +// These internal functions are implemented in kernels/vector_selection.cc + +/// \brief Return the number of selected indices in the boolean filter +/// +/// \param filter a plain or run-end encoded boolean array with or without nulls +/// \param null_selection how to handle nulls in the filter +ARROW_EXPORT +int64_t GetFilterOutputSize(const ArraySpan& filter, + FilterOptions::NullSelectionBehavior null_selection); + +/// \brief Compute uint64 selection indices for use with Take given a boolean +/// filter +/// +/// \param filter a plain or 
run-end encoded boolean array with or without nulls +/// \param null_selection how to handle nulls in the filter +ARROW_EXPORT +Result> GetTakeIndices( + const ArraySpan& filter, FilterOptions::NullSelectionBehavior null_selection, + MemoryPool* memory_pool = default_memory_pool()); + +} // namespace internal + +/// \brief ReplaceWithMask replaces each value in the array corresponding +/// to a true value in the mask with the next element from `replacements`. +/// +/// \param[in] values Array input to replace +/// \param[in] mask Array or Scalar of Boolean mask values +/// \param[in] replacements The replacement values to draw from. There must +/// be as many replacement values as true values in the mask. +/// \param[in] ctx the function execution context, optional +/// +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result ReplaceWithMask(const Datum& values, const Datum& mask, + const Datum& replacements, ExecContext* ctx = NULLPTR); + +/// \brief FillNullForward fill null values in forward direction +/// +/// The output array will be of the same type as the input values +/// array, with replaced null values in forward direction. +/// +/// For example given values = ["a", "b", "c", null, null, "f"], +/// the output will be = ["a", "b", "c", "c", "c", "f"] +/// +/// \param[in] values datum from which to take +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result FillNullForward(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief FillNullBackward fill null values in backward direction +/// +/// The output array will be of the same type as the input values +/// array, with replaced null values in backward direction. 
+/// +/// For example given values = ["a", "b", "c", null, null, "f"], +/// the output will be = ["a", "b", "c", "f", "f", "f"] +/// +/// \param[in] values datum from which to take +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result FillNullBackward(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Take from an array of values at indices in another array +/// +/// The output array will be of the same type as the input values +/// array, with elements taken from the values array at the given +/// indices. If an index is null then the taken element will be null. +/// +/// For example given values = ["a", "b", "c", null, "e", "f"] and +/// indices = [2, 1, null, 3], the output will be +/// = [values[2], values[1], null, values[3]] +/// = ["c", "b", null, null] +/// +/// \param[in] values datum from which to take +/// \param[in] indices which values to take +/// \param[in] options options +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result Take(const Datum& values, const Datum& indices, + const TakeOptions& options = TakeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Take with Array inputs and output +ARROW_EXPORT +Result> Take(const Array& values, const Array& indices, + const TakeOptions& options = TakeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Drop Null from an array of values +/// +/// The output array will be of the same type as the input values +/// array, with elements taken from the values array without nulls. 
+/// +/// For example given values = ["a", "b", "c", null, "e", "f"], +/// the output will be = ["a", "b", "c", "e", "f"] +/// +/// \param[in] values datum from which to take +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result DropNull(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief DropNull with Array inputs and output +ARROW_EXPORT +Result> DropNull(const Array& values, ExecContext* ctx = NULLPTR); + +/// \brief Return indices that partition an array around n-th sorted element. +/// +/// Find index of n-th(0 based) smallest value and perform indirect +/// partition of an array around that element. Output indices[0 ~ n-1] +/// holds values no greater than n-th element, and indices[n+1 ~ end] +/// holds values no less than n-th element. Elements in each partition +/// is not sorted. Nulls will be partitioned to the end of the output. +/// Output is not guaranteed to be stable. +/// +/// \param[in] values array to be partitioned +/// \param[in] n pivot array around sorted n-th element +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would partition an array +ARROW_EXPORT +Result> NthToIndices(const Array& values, int64_t n, + ExecContext* ctx = NULLPTR); + +/// \brief Return indices that partition an array around n-th sorted element. +/// +/// This overload takes a PartitionNthOptions specifying the pivot index +/// and the null handling. +/// +/// \param[in] values array to be partitioned +/// \param[in] options options including pivot index and null handling +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would partition an array +ARROW_EXPORT +Result> NthToIndices(const Array& values, + const PartitionNthOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Return indices that would select the first `k` elements. 
+/// +/// Perform an indirect sort of the datum, keeping only the first `k` elements. The output +/// array will contain indices such that the item indicated by the k-th index will be in +/// the position it would be if the datum were sorted by `options.sort_keys`. However, +/// indices of null values will not be part of the output. The sort is not guaranteed to +/// be stable. +/// +/// \param[in] datum datum to be partitioned +/// \param[in] options options +/// \param[in] ctx the function execution context, optional +/// \return a datum with the same schema as the input +ARROW_EXPORT +Result> SelectKUnstable(const Datum& datum, + const SelectKOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort an array. +/// +/// Perform an indirect sort of array. The output array will contain +/// indices that would sort an array, which would be the same length +/// as input. Nulls will be stably partitioned to the end of the output +/// regardless of order. +/// +/// For example given array = [null, 1, 3.3, null, 2, 5.3] and order +/// = SortOrder::DESCENDING, the output will be [5, 2, 4, 1, 0, +/// 3]. +/// +/// \param[in] array array to sort +/// \param[in] order ascending or descending +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort an array +ARROW_EXPORT +Result> SortIndices(const Array& array, + SortOrder order = SortOrder::Ascending, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort an array. +/// +/// This overload takes a ArraySortOptions specifying the sort order +/// and the null handling. 
+/// +/// \param[in] array array to sort +/// \param[in] options options including sort order and null handling +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort an array +ARROW_EXPORT +Result> SortIndices(const Array& array, + const ArraySortOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort a chunked array. +/// +/// Perform an indirect sort of chunked array. The output array will +/// contain indices that would sort a chunked array, which would be +/// the same length as input. Nulls will be stably partitioned to the +/// end of the output regardless of order. +/// +/// For example given chunked_array = [[null, 1], [3.3], [null, 2, +/// 5.3]] and order = SortOrder::DESCENDING, the output will be [5, 2, +/// 4, 1, 0, 3]. +/// +/// \param[in] chunked_array chunked array to sort +/// \param[in] order ascending or descending +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort an array +ARROW_EXPORT +Result> SortIndices(const ChunkedArray& chunked_array, + SortOrder order = SortOrder::Ascending, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort a chunked array. +/// +/// This overload takes a ArraySortOptions specifying the sort order +/// and the null handling. +/// +/// \param[in] chunked_array chunked array to sort +/// \param[in] options options including sort order and null handling +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort an array +ARROW_EXPORT +Result> SortIndices(const ChunkedArray& chunked_array, + const ArraySortOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort an input in the +/// specified order. Input is one of array, chunked array record batch +/// or table. +/// +/// Perform an indirect sort of input. 
The output array will contain +/// indices that would sort an input, which would be the same length +/// as input. Nulls will be stably partitioned to the start or to the end +/// of the output depending on SortOrder::null_placement. +/// +/// For example given input (table) = { +/// "column1": [[null, 1], [ 3, null, 2, 1]], +/// "column2": [[ 5], [3, null, null, 5, 5]], +/// } and options = { +/// {"column1", SortOrder::Ascending}, +/// {"column2", SortOrder::Descending}, +/// }, the output will be [5, 1, 4, 2, 0, 3]. +/// +/// \param[in] datum array, chunked array, record batch or table to sort +/// \param[in] options options +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort a table +ARROW_EXPORT +Result> SortIndices(const Datum& datum, const SortOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Compute unique elements from an array-like object +/// +/// Note if a null occurs in the input it will NOT be included in the output. +/// +/// \param[in] datum array-like input +/// \param[in] ctx the function execution context, optional +/// \return result as Array +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result> Unique(const Datum& datum, ExecContext* ctx = NULLPTR); + +// Constants for accessing the output of ValueCounts +ARROW_EXPORT extern const char kValuesFieldName[]; +ARROW_EXPORT extern const char kCountsFieldName[]; +ARROW_EXPORT extern const int32_t kValuesFieldIndex; +ARROW_EXPORT extern const int32_t kCountsFieldIndex; + +/// \brief Return counts of unique elements from an array-like object. +/// +/// Note that the counts do not include counts for nulls in the array. These can be +/// obtained separately from metadata. +/// +/// For floating point arrays there is no attempt to normalize -0.0, 0.0 and NaN values +/// which can lead to unexpected results if the input Array has these values. 
+/// +/// \param[in] value array-like input +/// \param[in] ctx the function execution context, optional +/// \return counts An array of structs. +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result> ValueCounts(const Datum& value, + ExecContext* ctx = NULLPTR); + +/// \brief Dictionary-encode values in an array-like object +/// +/// Any nulls encountered in the dictionary will be handled according to the +/// specified null encoding behavior. +/// +/// For example, given values ["a", "b", null, "a", null] the output will be +/// (null_encoding == ENCODE) Indices: [0, 1, 2, 0, 2] / Dict: ["a", "b", null] +/// (null_encoding == MASK) Indices: [0, 1, null, 0, null] / Dict: ["a", "b"] +/// +/// If the input is already dictionary encoded this function is a no-op unless +/// it needs to modify the null_encoding (TODO) +/// +/// \param[in] data array-like input +/// \param[in] ctx the function execution context, optional +/// \param[in] options configures null encoding behavior +/// \return result with same shape and type as input +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result DictionaryEncode( + const Datum& data, + const DictionaryEncodeOptions& options = DictionaryEncodeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Run-end-encode values in an array-like object +/// +/// The returned run-end encoded type uses the same value type of the input and +/// run-end type defined in the options. 
+/// +/// \param[in] value array-like input +/// \param[in] options configures encoding behavior +/// \param[in] ctx the function execution context, optional +/// \return result with same shape but run-end encoded +/// +/// \since 12.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result RunEndEncode( + const Datum& value, + const RunEndEncodeOptions& options = RunEndEncodeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Decode a Run-End Encoded array to a plain array +/// +/// The output data type is the same as the values array type of run-end encoded +/// input. +/// +/// \param[in] value run-end-encoded input +/// \param[in] ctx the function execution context, optional +/// \return plain array resulting from decoding the run-end encoded input +/// +/// \since 12.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result RunEndDecode(const Datum& value, ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative sum of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative sum behavior +/// \param[in] check_overflow whether to check for overflow, if true, return Invalid +/// status on overflow, otherwise wrap around on overflow +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeSum( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + bool check_overflow = false, ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative product of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative prod behavior +/// \param[in] check_overflow whether to check for overflow, if true, return Invalid +/// status on overflow, otherwise wrap around on overflow +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeProd( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + bool 
check_overflow = false, ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative max of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative max behavior +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeMax( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative min of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative min behavior +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeMin( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative mean of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative mean behavior, `start` is ignored +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeMean( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Return the first order difference of an array. +/// +/// Computes the first order difference of an array, i.e. +/// output[i] = input[i] - input[i - p] if i >= p +/// output[i] = null otherwise +/// where p is the period. For example, with p = 1, +/// Diff([1, 4, 9, 10, 15]) = [null, 3, 5, 1, 5]. +/// With p = 2, +/// Diff([1, 4, 9, 10, 15]) = [null, null, 8, 6, 6] +/// p can also be negative, in which case the diff is computed in +/// the opposite direction. 
+/// \param[in] array array input +/// \param[in] options options, specifying overflow behavior and period +/// \param[in] check_overflow whether to return error on overflow +/// \param[in] ctx the function execution context, optional +/// \return result as array +ARROW_EXPORT +Result> PairwiseDiff(const Array& array, + const PairwiseOptions& options, + bool check_overflow = false, + ExecContext* ctx = NULLPTR); + +} // namespace compute +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h new file mode 100644 index 0000000000000000000000000000000000000000..3fbefe4a1ab7b7e432e07607f674b5de1c947cd5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h @@ -0,0 +1,489 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array/data.h" +#include "arrow/compute/expression.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/datum.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +// It seems like 64K might be a good default chunksize to use for execution +// based on the experience of other query processing systems. The current +// default is not to chunk contiguous arrays, though, but this may change in +// the future once parallel execution is implemented +static constexpr int64_t kDefaultExecChunksize = UINT16_MAX; + +/// \brief Context for expression-global variables and options used by +/// function evaluation +class ARROW_EXPORT ExecContext { + public: + // If no function registry passed, the default is used. + explicit ExecContext(MemoryPool* pool = default_memory_pool(), + ::arrow::internal::Executor* executor = NULLPTR, + FunctionRegistry* func_registry = NULLPTR); + + /// \brief The MemoryPool used for allocations, default is + /// default_memory_pool(). + MemoryPool* memory_pool() const { return pool_; } + + const ::arrow::internal::CpuInfo* cpu_info() const; + + /// \brief An Executor which may be used to parallelize execution. + ::arrow::internal::Executor* executor() const { return executor_; } + + /// \brief The FunctionRegistry for looking up functions by name and + /// selecting kernels for execution. Defaults to the library-global function + /// registry provided by GetFunctionRegistry. + FunctionRegistry* func_registry() const { return func_registry_; } + + // \brief Set maximum length unit of work for kernel execution. 
Larger + // contiguous array inputs will be split into smaller chunks, and, if + // possible and enabled, processed in parallel. The default chunksize is + // INT64_MAX, so contiguous arrays are not split. + void set_exec_chunksize(int64_t chunksize) { exec_chunksize_ = chunksize; } + + // \brief Maximum length for ExecBatch data chunks processed by + // kernels. Contiguous array inputs with longer length will be split into + // smaller chunks. + int64_t exec_chunksize() const { return exec_chunksize_; } + + /// \brief Set whether to use multiple threads for function execution. This + /// is not yet used. + void set_use_threads(bool use_threads = true) { use_threads_ = use_threads; } + + /// \brief If true, then utilize multiple threads where relevant for function + /// execution. This is not yet used. + bool use_threads() const { return use_threads_; } + + // Set the preallocation strategy for kernel execution as it relates to + // chunked execution. For chunked execution, whether via ChunkedArray inputs + // or splitting larger Array arguments into smaller pieces, contiguous + // allocation (if permitted by the kernel) will allocate one large array to + // write output into yielding it to the caller at the end. If this option is + // set to off, then preallocations will be performed independently for each + // chunk of execution + // + // TODO: At some point we might want the limit the size of contiguous + // preallocations. For example, even if the exec_chunksize is 64K or less, we + // might limit contiguous allocations to 1M records, say. + void set_preallocate_contiguous(bool preallocate) { + preallocate_contiguous_ = preallocate; + } + + /// \brief If contiguous preallocations should be used when doing chunked + /// execution as specified by exec_chunksize(). See + /// set_preallocate_contiguous() for more information. 
+ bool preallocate_contiguous() const { return preallocate_contiguous_; } + + private: + MemoryPool* pool_; + ::arrow::internal::Executor* executor_; + FunctionRegistry* func_registry_; + int64_t exec_chunksize_ = std::numeric_limits::max(); + bool preallocate_contiguous_ = true; + bool use_threads_ = true; +}; + +// TODO: Consider standardizing on uint16 selection vectors and only use them +// when we can ensure that each value is 64K length or smaller + +/// \brief Container for an array of value selection indices that were +/// materialized from a filter. +/// +/// Columnar query engines (see e.g. [1]) have found that rather than +/// materializing filtered data, the filter can instead be converted to an +/// array of the "on" indices and then "fusing" these indices in operator +/// implementations. This is especially relevant for aggregations but also +/// applies to scalar operations. +/// +/// We are not yet using this so this is mostly a placeholder for now. +/// +/// [1]: http://cidrdb.org/cidr2005/papers/P19.pdf +class ARROW_EXPORT SelectionVector { + public: + explicit SelectionVector(std::shared_ptr data); + + explicit SelectionVector(const Array& arr); + + /// \brief Create SelectionVector from boolean mask + static Result> FromMask(const BooleanArray& arr); + + const int32_t* indices() const { return indices_; } + int32_t length() const; + + private: + std::shared_ptr data_; + const int32_t* indices_; +}; + +/// An index to represent that a batch does not belong to an ordered stream +constexpr int64_t kUnsequencedIndex = -1; + +/// \brief A unit of work for kernel execution. It contains a collection of +/// Array and Scalar values and an optional SelectionVector indicating that +/// there is an unmaterialized filter that either must be materialized, or (if +/// the kernel supports it) pushed down into the kernel implementation. 
+/// +/// ExecBatch is semantically similar to RecordBatch in that in a SQL context +/// it represents a collection of records, but constant "columns" are +/// represented by Scalar values rather than having to be converted into arrays +/// with repeated values. +/// +/// TODO: Datum uses arrow/util/variant.h which may be a bit heavier-weight +/// than is desirable for this class. Microbenchmarks would help determine for +/// sure. See ARROW-8928. + +/// \addtogroup acero-internals +/// @{ + +struct ARROW_EXPORT ExecBatch { + ExecBatch() = default; + ExecBatch(std::vector values, int64_t length) + : values(std::move(values)), length(length) {} + + explicit ExecBatch(const RecordBatch& batch); + + /// \brief Infer the ExecBatch length from values. + static Result InferLength(const std::vector& values); + + /// Creates an ExecBatch with length-validation. + /// + /// If any value is given, then all values must have a common length. If the given + /// length is negative, then the length of the ExecBatch is set to this common length, + /// or to 1 if no values are given. Otherwise, the given length must equal the common + /// length, if any value is given. + static Result Make(std::vector values, int64_t length = -1); + + Result> ToRecordBatch( + std::shared_ptr schema, MemoryPool* pool = default_memory_pool()) const; + + /// The values representing positional arguments to be passed to a kernel's + /// exec function for processing. + std::vector values; + + /// A deferred filter represented as an array of indices into the values. + /// + /// For example, the filter [true, true, false, true] would be represented as + /// the selection vector [0, 1, 3]. When the selection vector is set, + /// ExecBatch::length is equal to the length of this array. + std::shared_ptr selection_vector; + + /// A predicate Expression guaranteed to evaluate to true for all rows in this batch. + Expression guarantee = literal(true); + + /// The semantic length of the ExecBatch. 
When the values are all scalars, + /// the length should be set to 1 for non-aggregate kernels, otherwise the + /// length is taken from the array values, except when there is a selection + /// vector. When there is a selection vector set, the length of the batch is + /// the length of the selection. Aggregate kernels can have an ExecBatch + /// formed by projecting just the partition columns from a batch in which + /// case, it would have scalar rows with length greater than 1. + /// + /// If the array values are of length 0 then the length is 0 regardless of + /// whether any values are Scalar. + int64_t length = 0; + + /// \brief index of this batch in a sorted stream of batches + /// + /// This index must be strictly monotonic starting at 0 without gaps or + /// it can be set to kUnsequencedIndex if there is no meaningful order + int64_t index = kUnsequencedIndex; + + /// \brief The sum of bytes in each buffer referenced by the batch + /// + /// Note: Scalars are not counted + /// Note: Some values may referenced only part of a buffer, for + /// example, an array with an offset. The actual data + /// visible to this batch will be smaller than the total + /// buffer size in this case. + int64_t TotalBufferSize() const; + + /// \brief Return the value at the i-th index + template + inline const Datum& operator[](index_type i) const { + return values[i]; + } + + bool Equals(const ExecBatch& other) const; + + /// \brief A convenience for the number of values / arguments. + int num_values() const { return static_cast(values.size()); } + + ExecBatch Slice(int64_t offset, int64_t length) const; + + Result SelectValues(const std::vector& ids) const; + + /// \brief A convenience for returning the types from the batch. 
+ std::vector GetTypes() const { + std::vector result; + for (const auto& value : this->values) { + result.emplace_back(value.type()); + } + return result; + } + + std::string ToString() const; +}; + +inline bool operator==(const ExecBatch& l, const ExecBatch& r) { return l.Equals(r); } +inline bool operator!=(const ExecBatch& l, const ExecBatch& r) { return !l.Equals(r); } + +ARROW_EXPORT void PrintTo(const ExecBatch&, std::ostream*); + +/// @} + +/// \defgroup compute-internals Utilities for calling functions, useful for those +/// extending the function registry +/// +/// @{ + +struct ExecValue { + ArraySpan array = {}; + const Scalar* scalar = NULLPTR; + + ExecValue(Scalar* scalar) // NOLINT implicit conversion + : scalar(scalar) {} + + ExecValue(ArraySpan array) // NOLINT implicit conversion + : array(std::move(array)) {} + + ExecValue(const ArrayData& array) { // NOLINT implicit conversion + this->array.SetMembers(array); + } + + ExecValue() = default; + ExecValue(const ExecValue& other) = default; + ExecValue& operator=(const ExecValue& other) = default; + ExecValue(ExecValue&& other) = default; + ExecValue& operator=(ExecValue&& other) = default; + + int64_t length() const { return this->is_array() ? this->array.length : 1; } + + bool is_array() const { return this->scalar == NULLPTR; } + bool is_scalar() const { return !this->is_array(); } + + void SetArray(const ArrayData& array) { + this->array.SetMembers(array); + this->scalar = NULLPTR; + } + + void SetScalar(const Scalar* scalar) { this->scalar = scalar; } + + template + const ExactType& scalar_as() const { + return ::arrow::internal::checked_cast(*this->scalar); + } + + /// XXX: here temporarily for compatibility with datum, see + /// e.g. MakeStructExec in scalar_nested.cc + int64_t null_count() const { + if (this->is_array()) { + return this->array.GetNullCount(); + } else { + return this->scalar->is_valid ? 
0 : 1; + } + } + + const DataType* type() const { + if (this->is_array()) { + return array.type; + } else { + return scalar->type.get(); + } + } +}; + +struct ARROW_EXPORT ExecResult { + // The default value of the variant is ArraySpan + std::variant> value; + + int64_t length() const { + if (this->is_array_span()) { + return this->array_span()->length; + } else { + return this->array_data()->length; + } + } + + const DataType* type() const { + if (this->is_array_span()) { + return this->array_span()->type; + } else { + return this->array_data()->type.get(); + } + } + + const ArraySpan* array_span() const { return &std::get(this->value); } + ArraySpan* array_span_mutable() { return &std::get(this->value); } + + bool is_array_span() const { return this->value.index() == 0; } + + const std::shared_ptr& array_data() const { + return std::get>(this->value); + } + ArrayData* array_data_mutable() { + return std::get>(this->value).get(); + } + + bool is_array_data() const { return this->value.index() == 1; } +}; + +/// \brief A "lightweight" column batch object which contains no +/// std::shared_ptr objects and does not have any memory ownership +/// semantics. Can represent a view onto an "owning" ExecBatch. 
+struct ARROW_EXPORT ExecSpan { + ExecSpan() = default; + ExecSpan(const ExecSpan& other) = default; + ExecSpan& operator=(const ExecSpan& other) = default; + ExecSpan(ExecSpan&& other) = default; + ExecSpan& operator=(ExecSpan&& other) = default; + + explicit ExecSpan(std::vector values, int64_t length) + : length(length), values(std::move(values)) {} + + explicit ExecSpan(const ExecBatch& batch) { + this->length = batch.length; + this->values.resize(batch.values.size()); + for (size_t i = 0; i < batch.values.size(); ++i) { + const Datum& in_value = batch[i]; + ExecValue* out_value = &this->values[i]; + if (in_value.is_array()) { + out_value->SetArray(*in_value.array()); + } else { + out_value->SetScalar(in_value.scalar().get()); + } + } + } + + /// \brief Return the value at the i-th index + template + inline const ExecValue& operator[](index_type i) const { + return values[i]; + } + + /// \brief A convenience for the number of values / arguments. + int num_values() const { return static_cast(values.size()); } + + std::vector GetTypes() const { + std::vector result; + for (const auto& value : this->values) { + result.emplace_back(value.type()); + } + return result; + } + + ExecBatch ToExecBatch() const { + ExecBatch result; + result.length = this->length; + for (const ExecValue& value : this->values) { + if (value.is_array()) { + result.values.push_back(value.array.ToArrayData()); + } else { + result.values.push_back(value.scalar->GetSharedPtr()); + } + } + return result; + } + + int64_t length = 0; + std::vector values; +}; + +/// \defgroup compute-call-function One-shot calls to compute functions +/// +/// @{ + +/// \brief One-shot invoker for all types of functions. +/// +/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs, +/// and wrapping of outputs. 
+ARROW_EXPORT +Result CallFunction(const std::string& func_name, const std::vector& args, + const FunctionOptions* options, ExecContext* ctx = NULLPTR); + +/// \brief Variant of CallFunction which uses a function's default options. +/// +/// NB: Some functions require FunctionOptions be provided. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const std::vector& args, + ExecContext* ctx = NULLPTR); + +/// \brief One-shot invoker for all types of functions. +/// +/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs, +/// and wrapping of outputs. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const ExecBatch& batch, + const FunctionOptions* options, ExecContext* ctx = NULLPTR); + +/// \brief Variant of CallFunction which uses a function's default options. +/// +/// NB: Some functions require FunctionOptions be provided. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const ExecBatch& batch, + ExecContext* ctx = NULLPTR); + +/// @} + +/// \defgroup compute-function-executor One-shot calls to obtain function executors +/// +/// @{ + +/// \brief One-shot executor provider for all types of functions. +/// +/// This function creates and initializes a `FunctionExecutor` appropriate +/// for the given function name, input types and function options. +ARROW_EXPORT +Result> GetFunctionExecutor( + const std::string& func_name, std::vector in_types, + const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR); + +/// \brief One-shot executor provider for all types of functions. +/// +/// This function creates and initializes a `FunctionExecutor` appropriate +/// for the given function name, input types (taken from the Datum arguments) +/// and function options. 
+ARROW_EXPORT +Result> GetFunctionExecutor( + const std::string& func_name, const std::vector& args, + const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR); + +/// @} + +} // namespace compute +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h new file mode 100644 index 0000000000000000000000000000000000000000..88ec2fd2d0679b5c849549179aa652bec9b37b56 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle. + +#pragma once + +#include "arrow/compute/type_fwd.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +/// \addtogroup compute-functions +/// @{ + +/// \brief Extension point for defining options outside libarrow (but +/// still within this project). 
+class ARROW_EXPORT FunctionOptionsType { + public: + virtual ~FunctionOptionsType() = default; + + virtual const char* type_name() const = 0; + virtual std::string Stringify(const FunctionOptions&) const = 0; + virtual bool Compare(const FunctionOptions&, const FunctionOptions&) const = 0; + virtual Result> Serialize(const FunctionOptions&) const; + virtual Result> Deserialize( + const Buffer& buffer) const; + virtual std::unique_ptr Copy(const FunctionOptions&) const = 0; +}; + +/// \brief Base class for specifying options configuring a function's behavior, +/// such as error handling. +class ARROW_EXPORT FunctionOptions : public util::EqualityComparable { + public: + virtual ~FunctionOptions() = default; + + const FunctionOptionsType* options_type() const { return options_type_; } + const char* type_name() const { return options_type()->type_name(); } + + bool Equals(const FunctionOptions& other) const; + std::string ToString() const; + std::unique_ptr Copy() const; + /// \brief Serialize an options struct to a buffer. + Result> Serialize() const; + /// \brief Deserialize an options struct from a buffer. + /// Note: this will only look for `type_name` in the default FunctionRegistry; + /// to use a custom FunctionRegistry, look up the FunctionOptionsType, then + /// call FunctionOptionsType::Deserialize(). 
+ static Result> Deserialize( + const std::string& type_name, const Buffer& buffer); + + protected: + explicit FunctionOptions(const FunctionOptionsType* type) : options_type_(type) {} + const FunctionOptionsType* options_type_; +}; + +ARROW_EXPORT void PrintTo(const FunctionOptions&, std::ostream*); + +/// @} + +} // namespace compute +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..cfb6265f12904bf3c7c16f272f942ead1765b444 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h @@ -0,0 +1,753 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/compute/exec.h" +#include "arrow/datum.h" +#include "arrow/device_allocation_type_set.h" +#include "arrow/memory_pool.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +// macOS defines PREALLOCATE as a preprocessor macro in the header sys/vnode.h. +// No other BSD seems to do so. The name is used as an identifier in MemAllocation enum. +#if defined(__APPLE__) && defined(PREALLOCATE) +# undef PREALLOCATE +#endif + +namespace arrow { +namespace compute { + +class FunctionOptions; + +/// \brief Base class for opaque kernel-specific state. For example, if there +/// is some kind of initialization required. +struct ARROW_EXPORT KernelState { + virtual ~KernelState() = default; +}; + +/// \brief Context/state for the execution of a particular kernel. +class ARROW_EXPORT KernelContext { + public: + // Can pass optional backreference; not used consistently for the + // moment but will be made so in the future + explicit KernelContext(ExecContext* exec_ctx, const Kernel* kernel = NULLPTR) + : exec_ctx_(exec_ctx), kernel_(kernel) {} + + /// \brief Allocate buffer from the context's memory pool. The contents are + /// not initialized. + Result> Allocate(int64_t nbytes); + + /// \brief Allocate buffer for bitmap from the context's memory pool. Like + /// Allocate, the contents of the buffer are not initialized but the last + /// byte is preemptively zeroed to help avoid ASAN or valgrind issues. + Result> AllocateBitmap(int64_t num_bits); + + /// \brief Assign the active KernelState to be utilized for each stage of + /// kernel execution. Ownership and memory lifetime of the KernelState must + /// be minded separately. 
+ void SetState(KernelState* state) { state_ = state; } + + // Set kernel that is being invoked since some kernel + // implementations will examine the kernel state. + void SetKernel(const Kernel* kernel) { kernel_ = kernel; } + + KernelState* state() { return state_; } + + /// \brief Configuration related to function execution that is to be shared + /// across multiple kernels. + ExecContext* exec_context() { return exec_ctx_; } + + /// \brief The memory pool to use for allocations. For now, it uses the + /// MemoryPool contained in the ExecContext used to create the KernelContext. + MemoryPool* memory_pool() { return exec_ctx_->memory_pool(); } + + const Kernel* kernel() const { return kernel_; } + + private: + ExecContext* exec_ctx_; + KernelState* state_ = NULLPTR; + const Kernel* kernel_ = NULLPTR; +}; + +/// \brief An type-checking interface to permit customizable validation rules +/// for use with InputType and KernelSignature. This is for scenarios where the +/// acceptance is not an exact type instance, such as a TIMESTAMP type for a +/// specific TimeUnit, but permitting any time zone. +struct ARROW_EXPORT TypeMatcher { + virtual ~TypeMatcher() = default; + + /// \brief Return true if this matcher accepts the data type. + virtual bool Matches(const DataType& type) const = 0; + + /// \brief A human-interpretable string representation of what the type + /// matcher checks for, usable when printing KernelSignature or formatting + /// error messages. + virtual std::string ToString() const = 0; + + /// \brief Return true if this TypeMatcher contains the same matching rule as + /// the other. Currently depends on RTTI. + virtual bool Equals(const TypeMatcher& other) const = 0; +}; + +namespace match { + +/// \brief Match any DataType instance having the same DataType::id. +ARROW_EXPORT std::shared_ptr SameTypeId(Type::type type_id); + +/// \brief Match any TimestampType instance having the same unit, but the time +/// zones can be different. 
+ARROW_EXPORT std::shared_ptr TimestampTypeUnit(TimeUnit::type unit); +ARROW_EXPORT std::shared_ptr Time32TypeUnit(TimeUnit::type unit); +ARROW_EXPORT std::shared_ptr Time64TypeUnit(TimeUnit::type unit); +ARROW_EXPORT std::shared_ptr DurationTypeUnit(TimeUnit::type unit); + +// \brief Match any integer type +ARROW_EXPORT std::shared_ptr Integer(); + +// Match types using 32-bit varbinary representation +ARROW_EXPORT std::shared_ptr BinaryLike(); + +// Match types using 64-bit varbinary representation +ARROW_EXPORT std::shared_ptr LargeBinaryLike(); + +// Match any fixed binary type +ARROW_EXPORT std::shared_ptr FixedSizeBinaryLike(); + +// \brief Match any primitive type (boolean or any type representable as a C +// Type) +ARROW_EXPORT std::shared_ptr Primitive(); + +// \brief Match any integer type that can be used as run-end in run-end encoded +// arrays +ARROW_EXPORT std::shared_ptr RunEndInteger(); + +/// \brief Match run-end encoded types that use any valid run-end type and +/// encode specific value types +/// +/// @param[in] value_type_matcher a matcher that is applied to the values field +ARROW_EXPORT std::shared_ptr RunEndEncoded( + std::shared_ptr value_type_matcher); + +/// \brief Match run-end encoded types that use any valid run-end type and +/// encode specific value types +/// +/// @param[in] value_type_id a type id that the type of the values field should match +ARROW_EXPORT std::shared_ptr RunEndEncoded(Type::type value_type_id); + +/// \brief Match run-end encoded types that encode specific run-end and value types +/// +/// @param[in] run_end_type_matcher a matcher that is applied to the run_ends field +/// @param[in] value_type_matcher a matcher that is applied to the values field +ARROW_EXPORT std::shared_ptr RunEndEncoded( + std::shared_ptr run_end_type_matcher, + std::shared_ptr value_type_matcher); + +} // namespace match + +/// \brief An object used for type-checking arguments to be passed to a kernel +/// and stored in a KernelSignature. 
The type-checking rule can be supplied +/// either with an exact DataType instance or a custom TypeMatcher. +class ARROW_EXPORT InputType { + public: + /// \brief The kind of type-checking rule that the InputType contains. + enum Kind { + /// \brief Accept any value type. + ANY_TYPE, + + /// \brief A fixed arrow::DataType and will only exact match having this + /// exact type (e.g. same TimestampType unit, same decimal scale and + /// precision, or same nested child types). + EXACT_TYPE, + + /// \brief Uses a TypeMatcher implementation to check the type. + USE_TYPE_MATCHER + }; + + /// \brief Accept any value type + InputType() : kind_(ANY_TYPE) {} + + /// \brief Accept an exact value type. + InputType(std::shared_ptr type) // NOLINT implicit construction + : kind_(EXACT_TYPE), type_(std::move(type)) {} + + /// \brief Use the passed TypeMatcher to type check. + InputType(std::shared_ptr type_matcher) // NOLINT implicit construction + : kind_(USE_TYPE_MATCHER), type_matcher_(std::move(type_matcher)) {} + + /// \brief Match any type with the given Type::type. Uses a TypeMatcher for + /// its implementation. + InputType(Type::type type_id) // NOLINT implicit construction + : InputType(match::SameTypeId(type_id)) {} + + InputType(const InputType& other) { CopyInto(other); } + + void operator=(const InputType& other) { CopyInto(other); } + + InputType(InputType&& other) { MoveInto(std::forward(other)); } + + void operator=(InputType&& other) { MoveInto(std::forward(other)); } + + // \brief Match any input (array, scalar of any type) + static InputType Any() { return InputType(); } + + /// \brief Return true if this input type matches the same type cases as the + /// other. + bool Equals(const InputType& other) const; + + bool operator==(const InputType& other) const { return this->Equals(other); } + + bool operator!=(const InputType& other) const { return !(*this == other); } + + /// \brief Return hash code. 
+ size_t Hash() const; + + /// \brief Render a human-readable string representation. + std::string ToString() const; + + /// \brief Return true if the Datum matches this argument kind in + /// type (and only allows scalar or array-like Datums). + bool Matches(const Datum& value) const; + + /// \brief Return true if the type matches this InputType + bool Matches(const DataType& type) const; + + /// \brief The type matching rule that this InputType uses. + Kind kind() const { return kind_; } + + /// \brief For InputType::EXACT_TYPE kind, the exact type that this InputType + /// must match. Otherwise this function should not be used and will assert in + /// debug builds. + const std::shared_ptr& type() const; + + /// \brief For InputType::USE_TYPE_MATCHER, the TypeMatcher to be used for + /// checking the type of a value. Otherwise this function should not be used + /// and will assert in debug builds. + const TypeMatcher& type_matcher() const; + + private: + void CopyInto(const InputType& other) { + this->kind_ = other.kind_; + this->type_ = other.type_; + this->type_matcher_ = other.type_matcher_; + } + + void MoveInto(InputType&& other) { + this->kind_ = other.kind_; + this->type_ = std::move(other.type_); + this->type_matcher_ = std::move(other.type_matcher_); + } + + Kind kind_; + + // For EXACT_TYPE Kind + std::shared_ptr type_; + + // For USE_TYPE_MATCHER Kind + std::shared_ptr type_matcher_; +}; + +/// \brief Container to capture both exact and input-dependent output types. +class ARROW_EXPORT OutputType { + public: + /// \brief An enum indicating whether the value type is an invariant fixed + /// value or one that's computed by a kernel-defined resolver function. + enum ResolveKind { FIXED, COMPUTED }; + + /// Type resolution function. Given input types, return output type. This + /// function MAY may use the kernel state to decide the output type based on + /// the FunctionOptions. 
+ /// + /// This function SHOULD _not_ be used to check for arity, that is to be + /// performed one or more layers above. + using Resolver = + std::function(KernelContext*, const std::vector&)>; + + /// \brief Output an exact type + OutputType(std::shared_ptr type) // NOLINT implicit construction + : kind_(FIXED), type_(std::move(type)) {} + + /// \brief Output a computed type depending on actual input types + template + OutputType(Fn resolver) // NOLINT implicit construction + : kind_(COMPUTED), resolver_(std::move(resolver)) {} + + OutputType(const OutputType& other) { + this->kind_ = other.kind_; + this->type_ = other.type_; + this->resolver_ = other.resolver_; + } + + OutputType(OutputType&& other) { + this->kind_ = other.kind_; + this->type_ = std::move(other.type_); + this->resolver_ = other.resolver_; + } + + OutputType& operator=(const OutputType&) = default; + OutputType& operator=(OutputType&&) = default; + + /// \brief Return the type of the expected output value of the kernel given + /// the input argument types. The resolver may make use of state information + /// kept in the KernelContext. + Result Resolve(KernelContext* ctx, + const std::vector& args) const; + + /// \brief The exact output value type for the FIXED kind. + const std::shared_ptr& type() const; + + /// \brief For use with COMPUTED resolution strategy. It may be more + /// convenient to invoke this with OutputType::Resolve returned from this + /// method. + const Resolver& resolver() const; + + /// \brief Render a human-readable string representation. + std::string ToString() const; + + /// \brief Return the kind of type resolution of this output type, whether + /// fixed/invariant or computed by a resolver. + ResolveKind kind() const { return kind_; } + + private: + ResolveKind kind_; + + // For FIXED resolution + std::shared_ptr type_; + + // For COMPUTED resolution + Resolver resolver_ = NULLPTR; +}; + +/// \brief Holds the input types and output type of the kernel. 
+/// +/// VarArgs functions with minimum N arguments should pass up to N input types to be +/// used to validate the input types of a function invocation. The first N-1 types +/// will be matched against the first N-1 arguments, and the last type will be +/// matched against the remaining arguments. +class ARROW_EXPORT KernelSignature { + public: + KernelSignature(std::vector in_types, OutputType out_type, + bool is_varargs = false); + + /// \brief Convenience ctor since make_shared can be awkward + static std::shared_ptr Make(std::vector in_types, + OutputType out_type, + bool is_varargs = false); + + /// \brief Return true if the signature if compatible with the list of input + /// value descriptors. + bool MatchesInputs(const std::vector& types) const; + + /// \brief Returns true if the input types of each signature are + /// equal. Well-formed functions should have a deterministic output type + /// given input types, but currently it is the responsibility of the + /// developer to ensure this. + bool Equals(const KernelSignature& other) const; + + bool operator==(const KernelSignature& other) const { return this->Equals(other); } + + bool operator!=(const KernelSignature& other) const { return !(*this == other); } + + /// \brief Compute a hash code for the signature + size_t Hash() const; + + /// \brief The input types for the kernel. For VarArgs functions, this should + /// generally contain a single validator to use for validating all of the + /// function arguments. + const std::vector& in_types() const { return in_types_; } + + /// \brief The output type for the kernel. Use Resolve to return the + /// exact output given input argument types, since many kernels' + /// output types depend on their input types (or their type + /// metadata). 
+ const OutputType& out_type() const { return out_type_; } + + /// \brief Render a human-readable string representation + std::string ToString() const; + + bool is_varargs() const { return is_varargs_; } + + private: + std::vector in_types_; + OutputType out_type_; + bool is_varargs_; + + // For caching the hash code after it's computed the first time + mutable uint64_t hash_code_; +}; + +/// \brief A function may contain multiple variants of a kernel for a given +/// type combination for different SIMD levels. Based on the active system's +/// CPU info or the user's preferences, we can elect to use one over the other. +struct SimdLevel { + enum type { NONE = 0, SSE4_2, AVX, AVX2, AVX512, NEON, MAX }; +}; + +/// \brief The strategy to use for propagating or otherwise populating the +/// validity bitmap of a kernel output. +struct NullHandling { + enum type { + /// Compute the output validity bitmap by intersecting the validity bitmaps + /// of the arguments using bitwise-and operations. This means that values + /// in the output are valid/non-null only if the corresponding values in + /// all input arguments were valid/non-null. Kernel generally need not + /// touch the bitmap thereafter, but a kernel's exec function is permitted + /// to alter the bitmap after the null intersection is computed if it needs + /// to. + INTERSECTION, + + /// Kernel expects a pre-allocated buffer to write the result bitmap + /// into. The preallocated memory is not zeroed (except for the last byte), + /// so the kernel should ensure to completely populate the bitmap. + COMPUTED_PREALLOCATE, + + /// Kernel allocates and sets the validity bitmap of the output. + COMPUTED_NO_PREALLOCATE, + + /// Kernel output is never null and a validity bitmap does not need to be + /// allocated. + OUTPUT_NOT_NULL + }; +}; + +/// \brief The preference for memory preallocation of fixed-width type outputs +/// in kernel execution. 
+struct MemAllocation { + enum type { + // For data types that support pre-allocation (i.e. fixed-width), the + // kernel expects to be provided a pre-allocated data buffer to write + // into. Non-fixed-width types must always allocate their own data + // buffers. The allocation made for the same length as the execution batch, + // so vector kernels yielding differently sized output should not use this. + // + // It is valid for the data to not be preallocated but the validity bitmap + // is (or is computed using the intersection/bitwise-and method). + // + // For variable-size output types like BinaryType or StringType, or for + // nested types, this option has no effect. + PREALLOCATE, + + // The kernel is responsible for allocating its own data buffer for + // fixed-width type outputs. + NO_PREALLOCATE + }; +}; + +struct Kernel; + +/// \brief Arguments to pass to an KernelInit function. A struct is used to help +/// avoid API breakage should the arguments passed need to be expanded. +struct KernelInitArgs { + /// \brief A pointer to the kernel being initialized. The init function may + /// depend on the kernel's KernelSignature or other data contained there. + const Kernel* kernel; + + /// \brief The types of the input arguments that the kernel is + /// about to be executed against. + const std::vector& inputs; + + /// \brief Opaque options specific to this kernel. May be nullptr for functions + /// that do not require options. + const FunctionOptions* options; +}; + +/// \brief Common initializer function for all kernel types. +using KernelInit = std::function>( + KernelContext*, const KernelInitArgs&)>; + +/// \brief Base type for kernels. 
Contains the function signature and +/// optionally the state initialization function, along with some common +/// attributes +struct ARROW_EXPORT Kernel { + Kernel() = default; + + Kernel(std::shared_ptr sig, KernelInit init) + : signature(std::move(sig)), init(std::move(init)) {} + + Kernel(std::vector in_types, OutputType out_type, KernelInit init) + : Kernel(KernelSignature::Make(std::move(in_types), std::move(out_type)), + std::move(init)) {} + + /// \brief The "signature" of the kernel containing the InputType input + /// argument validators and OutputType output type resolver. + std::shared_ptr signature; + + /// \brief Create a new KernelState for invocations of this kernel, e.g. to + /// set up any options or state relevant for execution. + KernelInit init; + + /// \brief Create a vector of new KernelState for invocations of this kernel. + static Status InitAll(KernelContext*, const KernelInitArgs&, + std::vector>*); + + /// \brief Indicates whether execution can benefit from parallelization + /// (splitting large chunks into smaller chunks and using multiple + /// threads). Some kernels may not support parallel execution at + /// all. Synchronization and concurrency-related issues are currently the + /// responsibility of the Kernel's implementation. + bool parallelizable = true; + + /// \brief Indicates the level of SIMD instruction support in the host CPU is + /// required to use the function. The intention is for functions to be able to + /// contain multiple kernels with the same signature but different levels of SIMD, + /// so that the most optimized kernel supported on a host's processor can be chosen. + SimdLevel::type simd_level = SimdLevel::NONE; + + // Additional kernel-specific data + std::shared_ptr data; +}; + +/// \brief The scalar kernel execution API that must be implemented for SCALAR +/// kernel types. This includes both stateless and stateful kernels. 
Kernels +/// depending on some execution state access that state via subclasses of +/// KernelState set on the KernelContext object. Implementations should +/// endeavor to write into pre-allocated memory if they are able, though for +/// some kernels (e.g. in cases when a builder like StringBuilder) must be +/// employed this may not be possible. +using ArrayKernelExec = Status (*)(KernelContext*, const ExecSpan&, ExecResult*); + +/// \brief Kernel data structure for implementations of ScalarFunction. In +/// addition to the members found in Kernel, contains the null handling +/// and memory pre-allocation preferences. +struct ARROW_EXPORT ScalarKernel : public Kernel { + ScalarKernel() = default; + + ScalarKernel(std::shared_ptr sig, ArrayKernelExec exec, + KernelInit init = NULLPTR) + : Kernel(std::move(sig), init), exec(exec) {} + + ScalarKernel(std::vector in_types, OutputType out_type, ArrayKernelExec exec, + KernelInit init = NULLPTR) + : Kernel(std::move(in_types), std::move(out_type), std::move(init)), exec(exec) {} + + /// \brief Perform a single invocation of this kernel. Depending on the + /// implementation, it may only write into preallocated memory, while in some + /// cases it will allocate its own memory. Any required state is managed + /// through the KernelContext. + ArrayKernelExec exec; + + /// \brief Writing execution results into larger contiguous allocations + /// requires that the kernel be able to write into sliced output ArrayData*, + /// including sliced output validity bitmaps. Some kernel implementations may + /// not be able to do this, so setting this to false disables this + /// functionality. 
+ bool can_write_into_slices = true; + + // For scalar functions preallocated data and intersecting arg validity + // bitmaps is a reasonable default + NullHandling::type null_handling = NullHandling::INTERSECTION; + MemAllocation::type mem_allocation = MemAllocation::PREALLOCATE; +}; + +// ---------------------------------------------------------------------- +// VectorKernel (for VectorFunction) + +/// \brief Kernel data structure for implementations of VectorFunction. In +/// contains an optional finalizer function, the null handling and memory +/// pre-allocation preferences (which have different defaults from +/// ScalarKernel), and some other execution-related options. +struct ARROW_EXPORT VectorKernel : public Kernel { + /// \brief See VectorKernel::finalize member for usage + using FinalizeFunc = std::function*)>; + + /// \brief Function for executing a stateful VectorKernel against a + /// ChunkedArray input. Does not need to be defined for all VectorKernels + using ChunkedExec = Status (*)(KernelContext*, const ExecBatch&, Datum* out); + + VectorKernel() = default; + + VectorKernel(std::vector in_types, OutputType out_type, ArrayKernelExec exec, + KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR) + : Kernel(std::move(in_types), std::move(out_type), std::move(init)), + exec(exec), + finalize(std::move(finalize)) {} + + VectorKernel(std::shared_ptr sig, ArrayKernelExec exec, + KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR) + : Kernel(std::move(sig), std::move(init)), + exec(exec), + finalize(std::move(finalize)) {} + + /// \brief Perform a single invocation of this kernel. Any required state is + /// managed through the KernelContext. + ArrayKernelExec exec; + + /// \brief Execute the kernel on a ChunkedArray. Does not need to be defined + ChunkedExec exec_chunked = NULLPTR; + + /// \brief For VectorKernel, convert intermediate results into finalized + /// results. Mutates input argument. 
Some kernels may accumulate state + /// (example: hashing-related functions) through processing chunked inputs, and + /// then need to attach some accumulated state to each of the outputs of + /// processing each chunk of data. + FinalizeFunc finalize; + + /// Since vector kernels generally are implemented rather differently from + /// scalar/elementwise kernels (and they may not even yield arrays of the same + /// size), so we make the developer opt-in to any memory preallocation rather + /// than having to turn it off. + NullHandling::type null_handling = NullHandling::COMPUTED_NO_PREALLOCATE; + MemAllocation::type mem_allocation = MemAllocation::NO_PREALLOCATE; + + /// \brief Writing execution results into larger contiguous allocations + /// requires that the kernel be able to write into sliced output ArrayData*, + /// including sliced output validity bitmaps. Some kernel implementations may + /// not be able to do this, so setting this to false disables this + /// functionality. + bool can_write_into_slices = true; + + /// Some vector kernels can do chunkwise execution using ExecSpanIterator, + /// in some cases accumulating some state. Other kernels (like Take) need to + /// be passed whole arrays and don't work on ChunkedArray inputs + bool can_execute_chunkwise = true; + + /// Some kernels (like unique and value_counts) yield non-chunked output from + /// chunked-array inputs. 
This option controls how the results are boxed when + /// returned from ExecVectorFunction + /// + /// true -> ChunkedArray + /// false -> Array + bool output_chunked = true; +}; + +// ---------------------------------------------------------------------- +// ScalarAggregateKernel (for ScalarAggregateFunction) + +using ScalarAggregateConsume = Status (*)(KernelContext*, const ExecSpan&); +using ScalarAggregateMerge = Status (*)(KernelContext*, KernelState&&, KernelState*); +// Finalize returns Datum to permit multiple return values +using ScalarAggregateFinalize = Status (*)(KernelContext*, Datum*); + +/// \brief Kernel data structure for implementations of +/// ScalarAggregateFunction. The four necessary components of an aggregation +/// kernel are the init, consume, merge, and finalize functions. +/// +/// * init: creates a new KernelState for a kernel. +/// * consume: processes an ExecSpan and updates the KernelState found in the +/// KernelContext. +/// * merge: combines one KernelState with another. +/// * finalize: produces the end result of the aggregation using the +/// KernelState in the KernelContext. +struct ARROW_EXPORT ScalarAggregateKernel : public Kernel { + ScalarAggregateKernel(std::shared_ptr sig, KernelInit init, + ScalarAggregateConsume consume, ScalarAggregateMerge merge, + ScalarAggregateFinalize finalize, const bool ordered) + : Kernel(std::move(sig), std::move(init)), + consume(consume), + merge(merge), + finalize(finalize), + ordered(ordered) {} + + ScalarAggregateKernel(std::vector in_types, OutputType out_type, + KernelInit init, ScalarAggregateConsume consume, + ScalarAggregateMerge merge, ScalarAggregateFinalize finalize, + const bool ordered) + : ScalarAggregateKernel( + KernelSignature::Make(std::move(in_types), std::move(out_type)), + std::move(init), consume, merge, finalize, ordered) {} + + /// \brief Merge a vector of KernelStates into a single KernelState. 
+ /// The merged state will be returned and will be set on the KernelContext. + static Result> MergeAll( + const ScalarAggregateKernel* kernel, KernelContext* ctx, + std::vector> states); + + ScalarAggregateConsume consume; + ScalarAggregateMerge merge; + ScalarAggregateFinalize finalize; + /// \brief Whether this kernel requires ordering + /// Some aggregations, such as, "first", requires some kind of input order. The + /// order can be implicit, e.g., the order of the input data, or explicit, e.g. + /// the ordering specified with a window aggregation. + /// The caller of the aggregate kernel is responsible for passing data in some + /// defined order to the kernel. The flag here is a way for the kernel to tell + /// the caller that data passed to the kernel must be defined in some order. + bool ordered = false; +}; + +// ---------------------------------------------------------------------- +// HashAggregateKernel (for HashAggregateFunction) + +using HashAggregateResize = Status (*)(KernelContext*, int64_t); +using HashAggregateConsume = Status (*)(KernelContext*, const ExecSpan&); +using HashAggregateMerge = Status (*)(KernelContext*, KernelState&&, const ArrayData&); + +// Finalize returns Datum to permit multiple return values +using HashAggregateFinalize = Status (*)(KernelContext*, Datum*); + +/// \brief Kernel data structure for implementations of +/// HashAggregateFunction. The four necessary components of an aggregation +/// kernel are the init, consume, merge, and finalize functions. +/// +/// * init: creates a new KernelState for a kernel. +/// * resize: ensure that the KernelState can accommodate the specified number of groups. +/// * consume: processes an ExecSpan (which includes the argument as well +/// as an array of group identifiers) and updates the KernelState found in the +/// KernelContext. +/// * merge: combines one KernelState with another. 
+/// * finalize: produces the end result of the aggregation using the +/// KernelState in the KernelContext. +struct ARROW_EXPORT HashAggregateKernel : public Kernel { + HashAggregateKernel() = default; + + HashAggregateKernel(std::shared_ptr sig, KernelInit init, + HashAggregateResize resize, HashAggregateConsume consume, + HashAggregateMerge merge, HashAggregateFinalize finalize, + const bool ordered) + : Kernel(std::move(sig), std::move(init)), + resize(resize), + consume(consume), + merge(merge), + finalize(finalize), + ordered(ordered) {} + + HashAggregateKernel(std::vector in_types, OutputType out_type, + KernelInit init, HashAggregateConsume consume, + HashAggregateResize resize, HashAggregateMerge merge, + HashAggregateFinalize finalize, const bool ordered) + : HashAggregateKernel( + KernelSignature::Make(std::move(in_types), std::move(out_type)), + std::move(init), resize, consume, merge, finalize, ordered) {} + + HashAggregateResize resize; + HashAggregateConsume consume; + HashAggregateMerge merge; + HashAggregateFinalize finalize; + /// @brief whether the summarizer requires ordering + /// This is similar to ScalarAggregateKernel. See ScalarAggregateKernel + /// for detailed doc of this variable. + bool ordered = false; +}; + +} // namespace compute +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h new file mode 100644 index 0000000000000000000000000000000000000000..61caa2b570dd31dc988d34406f9b05c3573333e2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/type.h" +#include "arrow/util/compare.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +enum class SortOrder { + /// Arrange values in increasing order + Ascending, + /// Arrange values in decreasing order + Descending, +}; + +enum class NullPlacement { + /// Place nulls and NaNs before any non-null values. + /// NaNs will come after nulls. + AtStart, + /// Place nulls and NaNs after any non-null values. + /// NaNs will come before nulls. + AtEnd, +}; + +/// \brief One sort key for PartitionNthIndices (TODO) and SortIndices +class ARROW_EXPORT SortKey : public util::EqualityComparable { + public: + explicit SortKey(FieldRef target, SortOrder order = SortOrder::Ascending) + : target(std::move(target)), order(order) {} + + bool Equals(const SortKey& other) const; + std::string ToString() const; + + /// A FieldRef targeting the sort column. + FieldRef target; + /// How to order by this sort key. 
+ SortOrder order; +}; + +class ARROW_EXPORT Ordering : public util::EqualityComparable { + public: + Ordering(std::vector sort_keys, + NullPlacement null_placement = NullPlacement::AtStart) + : sort_keys_(std::move(sort_keys)), null_placement_(null_placement) {} + /// true if data ordered by other is also ordered by this + /// + /// For example, if data is ordered by [a, b, c] then it is also ordered + /// by [a, b] but not by [b, c] or [a, b, c, d]. + /// + /// [a, b].IsSuborderOf([a, b, c]) - true + /// [a, b, c].IsSuborderOf([a, b, c]) - true + /// [b, c].IsSuborderOf([a, b, c]) - false + /// [a, b, c, d].IsSuborderOf([a, b, c]) - false + /// + /// The implicit ordering is not a suborder of any other ordering and + /// no other ordering is a suborder of it. The implicit ordering is not a + /// suborder of itself. + /// + /// The unordered ordering is a suborder of all other orderings but no + /// other ordering is a suborder of it. The unordered ordering is a suborder + /// of itself. + /// + /// The unordered ordering is a suborder of the implicit ordering. + bool IsSuborderOf(const Ordering& other) const; + + bool Equals(const Ordering& other) const; + std::string ToString() const; + + bool is_implicit() const { return is_implicit_; } + bool is_unordered() const { return !is_implicit_ && sort_keys_.empty(); } + + const std::vector& sort_keys() const { return sort_keys_; } + NullPlacement null_placement() const { return null_placement_; } + + static const Ordering& Implicit() { + static const Ordering kImplicit(true); + return kImplicit; + } + + static const Ordering& Unordered() { + static const Ordering kUnordered(false); + // It is also possible to get an unordered ordering by passing in an empty vector + // using the normal constructor. This is ok and useful when ordering comes from user + // input. 
+ return kUnordered; + } + + private: + explicit Ordering(bool is_implicit) + : null_placement_(NullPlacement::AtStart), is_implicit_(is_implicit) {} + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys_; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement_; + bool is_implicit_ = false; +}; + +} // namespace compute +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h new file mode 100644 index 0000000000000000000000000000000000000000..f31c4c1ba5920626578a4e4170e3cd2d28288545 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +class Function; +class FunctionOptionsType; + +/// \brief A mutable central function registry for built-in functions as well +/// as user-defined functions. Functions are implementations of +/// arrow::compute::Function. +/// +/// Generally, each function contains kernels which are implementations of a +/// function for a specific argument signature. After looking up a function in +/// the registry, one can either execute it eagerly with Function::Execute or +/// use one of the function's dispatch methods to pick a suitable kernel for +/// lower-level function execution. +class ARROW_EXPORT FunctionRegistry { + public: + ~FunctionRegistry(); + + /// \brief Construct a new registry. + /// + /// Most users only need to use the global registry. + static std::unique_ptr Make(); + + /// \brief Construct a new nested registry with the given parent. + /// + /// Most users only need to use the global registry. The returned registry never changes + /// its parent, even when an operation allows overwriting. + static std::unique_ptr Make(FunctionRegistry* parent); + + /// \brief Check whether a new function can be added to the registry. + /// + /// \returns Status::KeyError if a function with the same name is already registered. + Status CanAddFunction(std::shared_ptr function, bool allow_overwrite = false); + + /// \brief Add a new function to the registry. + /// + /// \returns Status::KeyError if a function with the same name is already registered. + Status AddFunction(std::shared_ptr function, bool allow_overwrite = false); + + /// \brief Check whether an alias can be added for the given function name. + /// + /// \returns Status::KeyError if the function with the given name is not registered. 
+ Status CanAddAlias(const std::string& target_name, const std::string& source_name); + + /// \brief Add alias for the given function name. + /// + /// \returns Status::KeyError if the function with the given name is not registered. + Status AddAlias(const std::string& target_name, const std::string& source_name); + + /// \brief Check whether a new function options type can be added to the registry. + /// + /// \return Status::KeyError if a function options type with the same name is already + /// registered. + Status CanAddFunctionOptionsType(const FunctionOptionsType* options_type, + bool allow_overwrite = false); + + /// \brief Add a new function options type to the registry. + /// + /// \returns Status::KeyError if a function options type with the same name is already + /// registered. + Status AddFunctionOptionsType(const FunctionOptionsType* options_type, + bool allow_overwrite = false); + + /// \brief Retrieve a function by name from the registry. + Result> GetFunction(const std::string& name) const; + + /// \brief Return vector of all entry names in the registry. + /// + /// Helpful for displaying a manifest of available functions. + std::vector GetFunctionNames() const; + + /// \brief Retrieve a function options type by name from the registry. + Result GetFunctionOptionsType( + const std::string& name) const; + + /// \brief The number of currently registered functions. + int num_functions() const; + + /// \brief The cast function object registered in AddFunction. + /// + /// Helpful for get cast function as needed. 
+ const Function* cast_function() const; + + private: + FunctionRegistry(); + + // Use PIMPL pattern to not have std::unordered_map here + class FunctionRegistryImpl; + std::unique_ptr impl_; + + explicit FunctionRegistry(FunctionRegistryImpl* impl); +}; + +} // namespace compute +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..89f32ceb0f906e0d50bf063da22f33c3a856fe5d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/visibility.h" + +namespace arrow { + +struct Datum; +struct TypeHolder; + +namespace compute { + +class Function; +class ScalarAggregateFunction; +class FunctionExecutor; +class FunctionOptions; +class FunctionRegistry; + +/// \brief Return the process-global function registry. 
+// Defined in registry.cc +ARROW_EXPORT FunctionRegistry* GetFunctionRegistry(); + +class CastOptions; + +struct ExecBatch; +class ExecContext; +class KernelContext; + +struct Kernel; +struct ScalarKernel; +struct ScalarAggregateKernel; +struct VectorKernel; + +struct KernelState; + +class Expression; + +ARROW_EXPORT ExecContext* default_exec_context(); +ARROW_EXPORT ExecContext* threaded_exec_context(); + +} // namespace compute +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h new file mode 100644 index 0000000000000000000000000000000000000000..1aaff43e10e1fd6b10a1e05eb1d33039b55b8563 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h @@ -0,0 +1,215 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/compute/expression.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/result.h" +#include "arrow/util/cpu_info.h" +#include "arrow/util/simd.h" + +#if defined(__clang__) || defined(__GNUC__) +# define BYTESWAP(x) __builtin_bswap64(x) +# define ROTL(x, n) (((x) << (n)) | ((x) >> ((-n) & 31))) +# define ROTL64(x, n) (((x) << (n)) | ((x) >> ((-n) & 63))) +#elif defined(_MSC_VER) +# include +# define BYTESWAP(x) _byteswap_uint64(x) +# define ROTL(x, n) _rotl((x), (n)) +# define ROTL64(x, n) _rotl64((x), (n)) +#endif + +namespace arrow { +namespace util { + +// Some platforms typedef int64_t as long int instead of long long int, +// which breaks the _mm256_i64gather_epi64 and _mm256_i32gather_epi64 intrinsics +// which need long long. +// We use the cast to the type below in these intrinsics to make the code +// compile in all cases. +// +using int64_for_gather_t = const long long int; // NOLINT runtime-int + +// All MiniBatch... classes use TempVectorStack for vector allocations and can +// only work with vectors up to 1024 elements. +// +// They should only be allocated on the stack to guarantee the right sequence +// of allocation and deallocation of vectors from TempVectorStack. +// +class MiniBatch { + public: + static constexpr int kLogMiniBatchLength = 10; + static constexpr int kMiniBatchLength = 1 << kLogMiniBatchLength; +}; + +namespace bit_util { + +ARROW_EXPORT void bits_to_indexes(int bit_to_search, int64_t hardware_flags, + const int num_bits, const uint8_t* bits, + int* num_indexes, uint16_t* indexes, + int bit_offset = 0); + +ARROW_EXPORT void bits_filter_indexes(int bit_to_search, int64_t hardware_flags, + const int num_bits, const uint8_t* bits, + const uint16_t* input_indexes, int* num_indexes, + uint16_t* indexes, int bit_offset = 0); + +// Input and output indexes may be pointing to the same data (in-place filtering). 
+ARROW_EXPORT void bits_split_indexes(int64_t hardware_flags, const int num_bits, + const uint8_t* bits, int* num_indexes_bit0, + uint16_t* indexes_bit0, uint16_t* indexes_bit1, + int bit_offset = 0); + +// Bit 1 is replaced with byte 0xFF. +ARROW_EXPORT void bits_to_bytes(int64_t hardware_flags, const int num_bits, + const uint8_t* bits, uint8_t* bytes, int bit_offset = 0); + +// Return highest bit of each byte. +ARROW_EXPORT void bytes_to_bits(int64_t hardware_flags, const int num_bits, + const uint8_t* bytes, uint8_t* bits, int bit_offset = 0); + +ARROW_EXPORT bool are_all_bytes_zero(int64_t hardware_flags, const uint8_t* bytes, + uint32_t num_bytes); + +#if defined(ARROW_HAVE_RUNTIME_AVX2) && defined(ARROW_HAVE_RUNTIME_BMI2) +// The functions below use BMI2 instructions, be careful before calling! + +namespace avx2 { +ARROW_EXPORT void bits_filter_indexes_avx2(int bit_to_search, const int num_bits, + const uint8_t* bits, + const uint16_t* input_indexes, + int* num_indexes, uint16_t* indexes); +ARROW_EXPORT void bits_to_indexes_avx2(int bit_to_search, const int num_bits, + const uint8_t* bits, int* num_indexes, + uint16_t* indexes, uint16_t base_index = 0); +ARROW_EXPORT void bits_to_bytes_avx2(const int num_bits, const uint8_t* bits, + uint8_t* bytes); +ARROW_EXPORT void bytes_to_bits_avx2(const int num_bits, const uint8_t* bytes, + uint8_t* bits); +ARROW_EXPORT bool are_all_bytes_zero_avx2(const uint8_t* bytes, uint32_t num_bytes); +} // namespace avx2 + +#endif + +} // namespace bit_util +} // namespace util + +namespace compute { + +/// Modify an Expression with pre-order and post-order visitation. +/// `pre` will be invoked on each Expression. `pre` will visit Calls before their +/// arguments, `post_call` will visit Calls (and no other Expressions) after their +/// arguments. 
Visitors should return the Identical expression to indicate no change; this +/// will prevent unnecessary construction in the common case where a modification is not +/// possible/necessary/... +/// +/// If an argument was modified, `post_call` visits a reconstructed Call with the modified +/// arguments but also receives a pointer to the unmodified Expression as a second +/// argument. If no arguments were modified the unmodified Expression* will be nullptr. +template +Result ModifyExpression(Expression expr, const PreVisit& pre, + const PostVisitCall& post_call) { + ARROW_ASSIGN_OR_RAISE(expr, Result(pre(std::move(expr)))); + + auto call = expr.call(); + if (!call) return expr; + + bool at_least_one_modified = false; + std::vector modified_arguments; + + for (size_t i = 0; i < call->arguments.size(); ++i) { + ARROW_ASSIGN_OR_RAISE(auto modified_argument, + ModifyExpression(call->arguments[i], pre, post_call)); + + if (Identical(modified_argument, call->arguments[i])) { + continue; + } + + if (!at_least_one_modified) { + modified_arguments = call->arguments; + at_least_one_modified = true; + } + + modified_arguments[i] = std::move(modified_argument); + } + + if (at_least_one_modified) { + // reconstruct the call expression with the modified arguments + auto modified_call = *call; + modified_call.arguments = std::move(modified_arguments); + return post_call(Expression(std::move(modified_call)), &expr); + } + + return post_call(std::move(expr), NULLPTR); +} + +// Helper class to calculate the modified number of rows to process using SIMD. +// +// Some array elements at the end will be skipped in order to avoid buffer +// overrun, when doing memory loads and stores using larger word size than a +// single array element. 
+// +class TailSkipForSIMD { + public: + static int64_t FixBitAccess(int num_bytes_accessed_together, int64_t num_rows, + int bit_offset) { + int64_t num_bytes = bit_util::BytesForBits(num_rows + bit_offset); + int64_t num_bytes_safe = + std::max(static_cast(0LL), num_bytes - num_bytes_accessed_together + 1); + int64_t num_rows_safe = + std::max(static_cast(0LL), 8 * num_bytes_safe - bit_offset); + return std::min(num_rows_safe, num_rows); + } + static int64_t FixBinaryAccess(int num_bytes_accessed_together, int64_t num_rows, + int64_t length) { + int64_t num_rows_to_skip = bit_util::CeilDiv(length, num_bytes_accessed_together); + int64_t num_rows_safe = + std::max(static_cast(0LL), num_rows - num_rows_to_skip); + return num_rows_safe; + } + static int64_t FixVarBinaryAccess(int num_bytes_accessed_together, int64_t num_rows, + const uint32_t* offsets) { + // Do not process rows that could read past the end of the buffer using N + // byte loads/stores. + // + int64_t num_rows_safe = num_rows; + while (num_rows_safe > 0 && + offsets[num_rows_safe] + num_bytes_accessed_together > offsets[num_rows]) { + --num_rows_safe; + } + return num_rows_safe; + } + static int FixSelection(int64_t num_rows_safe, int num_selected, + const uint16_t* selection) { + int num_selected_safe = num_selected; + while (num_selected_safe > 0 && selection[num_selected_safe - 1] >= num_rows_safe) { + --num_selected_safe; + } + return num_selected_safe; + } +}; + +} // namespace compute +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/api.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/api.h new file mode 100644 index 0000000000000000000000000000000000000000..38caa1cff19def66d09d0d6ed25c67ce52259f9a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/api.h @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#include "arrow/compute/expression.h" +#include "arrow/dataset/dataset.h" +#include "arrow/dataset/discovery.h" +#include "arrow/dataset/file_base.h" +#ifdef ARROW_CSV +# include "arrow/dataset/file_csv.h" +#endif +#ifdef ARROW_JSON +# include "arrow/dataset/file_json.h" +#endif +#include "arrow/dataset/file_ipc.h" +#ifdef ARROW_ORC +# include "arrow/dataset/file_orc.h" +#endif +#ifdef ARROW_PARQUET +# include "arrow/dataset/file_parquet.h" +#endif +#include "arrow/dataset/scanner.h" diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset.h new file mode 100644 index 0000000000000000000000000000000000000000..1cdd92d5c42f2717c00b7bdeb2c7adc6117754b5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset.h @@ -0,0 +1,481 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/compute/expression.h" +#include "arrow/dataset/type_fwd.h" +#include "arrow/dataset/visibility.h" +#include "arrow/util/async_generator_fwd.h" +#include "arrow/util/future.h" +#include "arrow/util/macros.h" +#include "arrow/util/mutex.h" + +namespace arrow { + +namespace internal { +class Executor; +} // namespace internal + +namespace dataset { + +using RecordBatchGenerator = std::function>()>; + +/// \brief Description of a column to scan +struct ARROW_DS_EXPORT FragmentSelectionColumn { + /// \brief The path to the column to load + FieldPath path; + /// \brief The type of the column in the dataset schema + /// + /// A format may choose to ignore this field completely. For example, when + /// reading from IPC the reader can just return the column in the data type + /// that is stored on disk. There is no point in doing anything special. + /// + /// However, some formats may be capable of casting on the fly. For example, + /// when reading from CSV, if we know the target type of the column, we can + /// convert from string to the target type as we read. + DataType* requested_type; +}; + +/// \brief A list of columns that should be loaded from a fragment +/// +/// The paths in this selection should be referring to the fragment schema. 
This class +/// contains a virtual destructor as it is expected evolution strategies will need to +/// extend this to add any information needed to later evolve the batches. +/// +/// For example, in the basic evolution strategy, we keep track of which columns +/// were missing from the file so that we can fill those in with null when evolving. +class ARROW_DS_EXPORT FragmentSelection { + public: + explicit FragmentSelection(std::vector columns) + : columns_(std::move(columns)) {} + virtual ~FragmentSelection() = default; + /// The columns that should be loaded from the fragment + const std::vector& columns() const { return columns_; } + + private: + std::vector columns_; +}; + +/// \brief Instructions for scanning a particular fragment +/// +/// The fragment scan request is derived from ScanV2Options. The main +/// difference is that the scan options are based on the dataset schema +/// while the fragment request is based on the fragment schema. +struct ARROW_DS_EXPORT FragmentScanRequest { + /// \brief A row filter + /// + /// The filter expression should be written against the fragment schema. + /// + /// \see ScanV2Options for details on how this filter should be applied + compute::Expression filter = compute::literal(true); + + /// \brief The columns to scan + /// + /// These indices refer to the fragment schema + /// + /// Note: This is NOT a simple list of top-level column indices. + /// For more details \see ScanV2Options + /// + /// If possible a fragment should only read from disk the data needed + /// to satisfy these columns. If a format cannot partially read a nested + /// column (e.g. JSON) then it must apply the column selection (in memory) + /// before returning the scanned batch. 
+ std::shared_ptr fragment_selection; + /// \brief Options specific to the format being scanned + const FragmentScanOptions* format_scan_options; +}; + +/// \brief An iterator-like object that can yield batches created from a fragment +class ARROW_DS_EXPORT FragmentScanner { + public: + /// This instance will only be destroyed after all ongoing scan futures + /// have been completed. + /// + /// This means any callbacks created as part of the scan can safely + /// capture `this` + virtual ~FragmentScanner() = default; + /// \brief Scan a batch of data from the file + /// \param batch_number The index of the batch to read + virtual Future> ScanBatch(int batch_number) = 0; + /// \brief Calculate an estimate of how many data bytes the given batch will represent + /// + /// "Data bytes" should be the total size of all the buffers once the data has been + /// decoded into the Arrow format. + virtual int64_t EstimatedDataBytes(int batch_number) = 0; + /// \brief The number of batches in the fragment to scan + virtual int NumBatches() = 0; +}; + +/// \brief Information learned about a fragment through inspection +/// +/// This information can be used to figure out which fields need +/// to be read from a file and how the data read in should be evolved +/// to match the dataset schema. +/// +/// For example, from a CSV file we can inspect and learn the column +/// names and use those column names to determine which columns to load +/// from the CSV file. +struct ARROW_DS_EXPORT InspectedFragment { + explicit InspectedFragment(std::vector column_names) + : column_names(std::move(column_names)) {} + std::vector column_names; +}; + +/// \brief A granular piece of a Dataset, such as an individual file. +/// +/// A Fragment can be read/scanned separately from other fragments. 
It yields a +/// collection of RecordBatches when scanned +/// +/// Note that Fragments have well defined physical schemas which are reconciled by +/// the Datasets which contain them; these physical schemas may differ from a parent +/// Dataset's schema and the physical schemas of sibling Fragments. +class ARROW_DS_EXPORT Fragment : public std::enable_shared_from_this { + public: + /// \brief An expression that represents no known partition information + static const compute::Expression kNoPartitionInformation; + + /// \brief Return the physical schema of the Fragment. + /// + /// The physical schema is also called the writer schema. + /// This method is blocking and may suffer from high latency filesystem. + /// The schema is cached after being read once, or may be specified at construction. + Result> ReadPhysicalSchema(); + + /// An asynchronous version of Scan + virtual Result ScanBatchesAsync( + const std::shared_ptr& options) = 0; + + /// \brief Inspect a fragment to learn basic information + /// + /// This will be called before a scan and a fragment should attach whatever + /// information will be needed to figure out an evolution strategy. This information + /// will then be passed to the call to BeginScan + virtual Future> InspectFragment( + const FragmentScanOptions* format_options, compute::ExecContext* exec_context); + + /// \brief Start a scan operation + virtual Future> BeginScan( + const FragmentScanRequest& request, const InspectedFragment& inspected_fragment, + const FragmentScanOptions* format_options, compute::ExecContext* exec_context); + + /// \brief Count the number of rows in this fragment matching the filter using metadata + /// only. That is, this method may perform I/O, but will not load data. + /// + /// If this is not possible, resolve with an empty optional. The fragment can perform + /// I/O (e.g. to read metadata) before it deciding whether it can satisfy the request. 
+ virtual Future> CountRows( + compute::Expression predicate, const std::shared_ptr& options); + + virtual std::string type_name() const = 0; + virtual std::string ToString() const { return type_name(); } + + /// \brief An expression which evaluates to true for all data viewed by this + /// Fragment. + const compute::Expression& partition_expression() const { + return partition_expression_; + } + + virtual ~Fragment() = default; + + protected: + Fragment() = default; + explicit Fragment(compute::Expression partition_expression, + std::shared_ptr physical_schema); + + virtual Result> ReadPhysicalSchemaImpl() = 0; + + util::Mutex physical_schema_mutex_; + compute::Expression partition_expression_ = compute::literal(true); + std::shared_ptr physical_schema_; +}; + +/// \brief Per-scan options for fragment(s) in a dataset. +/// +/// These options are not intrinsic to the format or fragment itself, but do affect +/// the results of a scan. These are options which make sense to change between +/// repeated reads of the same dataset, such as format-specific conversion options +/// (that do not affect the schema). +/// +/// \ingroup dataset-scanning +class ARROW_DS_EXPORT FragmentScanOptions { + public: + virtual std::string type_name() const = 0; + virtual std::string ToString() const { return type_name(); } + virtual ~FragmentScanOptions() = default; +}; + +/// \defgroup dataset-implementations Concrete implementations +/// +/// @{ + +/// \brief A trivial Fragment that yields ScanTask out of a fixed set of +/// RecordBatch. 
+class ARROW_DS_EXPORT InMemoryFragment : public Fragment { + public: + class Scanner; + InMemoryFragment(std::shared_ptr schema, RecordBatchVector record_batches, + compute::Expression = compute::literal(true)); + explicit InMemoryFragment(RecordBatchVector record_batches, + compute::Expression = compute::literal(true)); + + Result ScanBatchesAsync( + const std::shared_ptr& options) override; + Future> CountRows( + compute::Expression predicate, + const std::shared_ptr& options) override; + + Future> InspectFragment( + const FragmentScanOptions* format_options, + compute::ExecContext* exec_context) override; + Future> BeginScan( + const FragmentScanRequest& request, const InspectedFragment& inspected_fragment, + const FragmentScanOptions* format_options, + compute::ExecContext* exec_context) override; + + std::string type_name() const override { return "in-memory"; } + + protected: + Result> ReadPhysicalSchemaImpl() override; + + RecordBatchVector record_batches_; +}; + +/// @} + +using FragmentGenerator = AsyncGenerator>; + +/// \brief Rules for converting the dataset schema to and from fragment schemas +class ARROW_DS_EXPORT FragmentEvolutionStrategy { + public: + /// This instance will only be destroyed when all scan operations for the + /// fragment have completed. + virtual ~FragmentEvolutionStrategy() = default; + /// \brief A guarantee that applies to all batches of this fragment + /// + /// For example, if a fragment is missing one of the fields in the dataset + /// schema then a typical evolution strategy is to set that field to null. + /// + /// So if the column at index 3 is missing then the guarantee is + /// FieldRef(3) == null + /// + /// Individual field guarantees should be AND'd together and returned + /// as a single expression. 
+ virtual Result GetGuarantee( + const std::vector& dataset_schema_selection) const = 0; + + /// \brief Return a fragment schema selection given a dataset schema selection + /// + /// For example, if the user wants fields 2 & 4 of the dataset schema and + /// in this fragment the field 2 is missing and the field 4 is at index 1 then + /// this should return {1} + virtual Result> DevolveSelection( + const std::vector& dataset_schema_selection) const = 0; + + /// \brief Return a filter expression bound to the fragment schema given + /// a filter expression bound to the dataset schema + /// + /// The dataset scan filter will first be simplified by the guarantee returned + /// by GetGuarantee. This means an evolution that only handles dropping or casting + /// fields doesn't need to do anything here except return the given filter. + /// + /// On the other hand, an evolution that is doing some kind of aliasing will likely + /// need to convert field references in the filter to the aliased field references + /// where appropriate. + virtual Result DevolveFilter( + const compute::Expression& filter) const = 0; + + /// \brief Convert a batch from the fragment schema to the dataset schema + /// + /// Typically this involves casting columns from the data type stored on disk + /// to the data type of the dataset schema. For example, this fragment might + /// have columns stored as int32 and the dataset schema might have int64 for + /// the column. In this case we should cast the column from int32 to int64. + /// + /// Note: A fragment may perform this cast as the data is read from disk. In + /// that case a cast might not be needed. 
+ virtual Result EvolveBatch( + const std::shared_ptr& batch, + const std::vector& dataset_selection, + const FragmentSelection& selection) const = 0; + + /// \brief Return a string description of this strategy + virtual std::string ToString() const = 0; +}; + +/// \brief Lookup to create a FragmentEvolutionStrategy for a given fragment +class ARROW_DS_EXPORT DatasetEvolutionStrategy { + public: + virtual ~DatasetEvolutionStrategy() = default; + /// \brief Create a strategy for evolving from the given fragment + /// to the schema of the given dataset + virtual std::unique_ptr GetStrategy( + const Dataset& dataset, const Fragment& fragment, + const InspectedFragment& inspected_fragment) = 0; + + /// \brief Return a string description of this strategy + virtual std::string ToString() const = 0; +}; + +ARROW_DS_EXPORT std::unique_ptr +MakeBasicDatasetEvolutionStrategy(); + +/// \brief A container of zero or more Fragments. +/// +/// A Dataset acts as a union of Fragments, e.g. files deeply nested in a +/// directory. A Dataset has a schema to which Fragments must align during a +/// scan operation. This is analogous to Avro's reader and writer schema. +class ARROW_DS_EXPORT Dataset : public std::enable_shared_from_this { + public: + /// \brief Begin to build a new Scan operation against this Dataset + Result> NewScan(); + + /// \brief GetFragments returns an iterator of Fragments given a predicate. + Result GetFragments(compute::Expression predicate); + Result GetFragments(); + + /// \brief Async versions of `GetFragments`. + Result GetFragmentsAsync(compute::Expression predicate); + Result GetFragmentsAsync(); + + const std::shared_ptr& schema() const { return schema_; } + + /// \brief An expression which evaluates to true for all data viewed by this Dataset. + /// May be null, which indicates no information is available. 
+ const compute::Expression& partition_expression() const { + return partition_expression_; + } + + /// \brief The name identifying the kind of Dataset + virtual std::string type_name() const = 0; + + /// \brief Return a copy of this Dataset with a different schema. + /// + /// The copy will view the same Fragments. If the new schema is not compatible with the + /// original dataset's schema then an error will be raised. + virtual Result> ReplaceSchema( + std::shared_ptr schema) const = 0; + + /// \brief Rules used by this dataset to handle schema evolution + DatasetEvolutionStrategy* evolution_strategy() { return evolution_strategy_.get(); } + + virtual ~Dataset() = default; + + protected: + explicit Dataset(std::shared_ptr schema) : schema_(std::move(schema)) {} + + Dataset(std::shared_ptr schema, compute::Expression partition_expression); + + virtual Result GetFragmentsImpl(compute::Expression predicate) = 0; + /// \brief Default non-virtual implementation method for the base + /// `GetFragmentsAsyncImpl` method, which creates a fragment generator for + /// the dataset, possibly filtering results with a predicate (forwarding to + /// the synchronous `GetFragmentsImpl` method and moving the computations + /// to the background, using the IO thread pool). + /// + /// Currently, `executor` is always the same as `internal::GetCPUThreadPool()`, + /// which means the results from the underlying fragment generator will be + /// transferred to the default CPU thread pool. The generator itself is + /// offloaded to run on the default IO thread pool. 
+ virtual Result GetFragmentsAsyncImpl( + compute::Expression predicate, arrow::internal::Executor* executor); + + std::shared_ptr schema_; + compute::Expression partition_expression_ = compute::literal(true); + std::unique_ptr evolution_strategy_ = + MakeBasicDatasetEvolutionStrategy(); +}; + +/// \addtogroup dataset-implementations +/// +/// @{ + +/// \brief A Source which yields fragments wrapping a stream of record batches. +/// +/// The record batches must match the schema provided to the source at construction. +class ARROW_DS_EXPORT InMemoryDataset : public Dataset { + public: + class RecordBatchGenerator { + public: + virtual ~RecordBatchGenerator() = default; + virtual RecordBatchIterator Get() const = 0; + }; + + /// Construct a dataset from a schema and a factory of record batch iterators. + InMemoryDataset(std::shared_ptr schema, + std::shared_ptr get_batches) + : Dataset(std::move(schema)), get_batches_(std::move(get_batches)) {} + + /// Convenience constructor taking a fixed list of batches + InMemoryDataset(std::shared_ptr schema, RecordBatchVector batches); + + /// Convenience constructor taking a Table + explicit InMemoryDataset(std::shared_ptr table); + + std::string type_name() const override { return "in-memory"; } + + Result> ReplaceSchema( + std::shared_ptr schema) const override; + + protected: + Result GetFragmentsImpl(compute::Expression predicate) override; + + std::shared_ptr get_batches_; +}; + +/// \brief A Dataset wrapping child Datasets. +class ARROW_DS_EXPORT UnionDataset : public Dataset { + public: + /// \brief Construct a UnionDataset wrapping child Datasets. + /// + /// \param[in] schema the schema of the resulting dataset. + /// \param[in] children one or more child Datasets. Their schemas must be identical to + /// schema. 
+ static Result> Make(std::shared_ptr schema, + DatasetVector children); + + const DatasetVector& children() const { return children_; } + + std::string type_name() const override { return "union"; } + + Result> ReplaceSchema( + std::shared_ptr schema) const override; + + protected: + Result GetFragmentsImpl(compute::Expression predicate) override; + + explicit UnionDataset(std::shared_ptr schema, DatasetVector children) + : Dataset(std::move(schema)), children_(std::move(children)) {} + + DatasetVector children_; + + friend class UnionDatasetFactory; +}; + +/// @} + +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset_writer.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset_writer.h new file mode 100644 index 0000000000000000000000000000000000000000..edb1649b5f196aa3c6cd923c9e6540c4173fc102 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset_writer.h @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include "arrow/dataset/file_base.h" +#include "arrow/record_batch.h" +#include "arrow/status.h" +#include "arrow/util/async_util.h" +#include "arrow/util/future.h" + +namespace arrow { +namespace dataset { +namespace internal { + +// This lines up with our other defaults in the scanner and execution plan +constexpr uint64_t kDefaultDatasetWriterMaxRowsQueued = 8 * 1024 * 1024; + +/// \brief Utility class that manages a set of writers to different paths +/// +/// Writers may be closed and reopened (and a new file created) based on the dataset +/// write options (for example, max_rows_per_file or max_open_files) +/// +/// The dataset writer enforces its own back pressure based on the # of rows (as opposed +/// to # of batches which is how it is typically enforced elsewhere) and # of files. +class ARROW_DS_EXPORT DatasetWriter { + public: + /// \brief Create a dataset writer + /// + /// Will fail if basename_template is invalid or if there is existing data and + /// existing_data_behavior is kError + /// + /// \param write_options options to control how the data should be written + /// \param max_rows_queued max # of rows allowed to be queued before the dataset_writer + /// will ask for backpressure + static Result> Make( + FileSystemDatasetWriteOptions write_options, util::AsyncTaskScheduler* scheduler, + std::function pause_callback, std::function resume_callback, + std::function finish_callback, + uint64_t max_rows_queued = kDefaultDatasetWriterMaxRowsQueued); + + ~DatasetWriter(); + + /// \brief Write a batch to the dataset + /// \param[in] batch The batch to write + /// \param[in] directory The directory to write to + /// + /// Note: The written filename will be {directory}/{filename_factory(i)} where i is a + /// counter controlled by `max_open_files` and `max_rows_per_file` + /// + /// If multiple WriteRecordBatch calls arrive with the same `directory` then the batches + /// may be written to the same file. 
+ /// + /// The returned future will be marked finished when the record batch has been queued + /// to be written. If the returned future is unfinished then this indicates the dataset + /// writer's queue is full and the data provider should pause. + /// + /// This method is NOT async reentrant. The returned future will only be unfinished + /// if back pressure needs to be applied. Async reentrancy is not necessary for + /// concurrent writes to happen. Calling this method again before the previous future + /// completes will not just violate max_rows_queued but likely lead to race conditions. + /// + /// One thing to note is that the ordering of your data can affect your maximum + /// potential parallelism. If this seems odd then consider a dataset where the first + /// 1000 batches go to the same directory and then the 1001st batch goes to a different + /// directory. The only way to get two parallel writes immediately would be to queue + /// all 1000 pending writes to the first directory. 
+ void WriteRecordBatch(std::shared_ptr batch, const std::string& directory, + const std::string& prefix = ""); + + /// Finish all pending writes and close any open files + void Finish(); + + protected: + DatasetWriter(FileSystemDatasetWriteOptions write_options, + util::AsyncTaskScheduler* scheduler, std::function pause_callback, + std::function resume_callback, + std::function finish_callback, + uint64_t max_rows_queued = kDefaultDatasetWriterMaxRowsQueued); + + class DatasetWriterImpl; + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h new file mode 100644 index 0000000000000000000000000000000000000000..6d76dcef727e7643ba559d8802665755a4f8a870 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h @@ -0,0 +1,275 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +/// Logic for automatically determining the structure of multi-file +/// dataset with possible partitioning according to available +/// partitioning + +// This API is EXPERIMENTAL. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/dataset/partition.h" +#include "arrow/dataset/type_fwd.h" +#include "arrow/dataset/visibility.h" +#include "arrow/filesystem/type_fwd.h" +#include "arrow/result.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace dataset { + +/// \defgroup dataset-discovery Discovery API +/// +/// @{ + +struct InspectOptions { + /// See `fragments` property. + static constexpr int kInspectAllFragments = -1; + + /// Indicate how many fragments should be inspected to infer the unified dataset + /// schema. Limiting the number of fragments accessed improves the latency of + /// the discovery process when dealing with a high number of fragments and/or + /// high latency file systems. + /// + /// The default value of `1` inspects the schema of the first (in no particular + /// order) fragment only. If the dataset has a uniform schema for all fragments, + /// this default is the optimal value. In order to inspect all fragments and + /// robustly unify their potentially varying schemas, set this option to + /// `kInspectAllFragments`. A value of `0` disables inspection of fragments + /// altogether so only the partitioning schema will be inspected. + int fragments = 1; + + /// Control how to unify types. By default, types are merged strictly (the + /// type must match exactly, except nulls can be merged with other types). + Field::MergeOptions field_merge_options = Field::MergeOptions::Defaults(); +}; + +struct FinishOptions { + /// Finalize the dataset with this given schema. If the schema is not + /// provided, infer the schema via the Inspect, see the `inspect_options` + /// property. + std::shared_ptr schema = NULLPTR; + + /// If the schema is not provided, it will be discovered by passing the + /// following options to `DatasetDiscovery::Inspect`. + InspectOptions inspect_options{}; + + /// Indicate if the given Schema (when specified), should be validated against + /// the fragments' schemas. 
`inspect_options` will control how many fragments + /// are checked. + bool validate_fragments = false; +}; + +/// \brief DatasetFactory provides a way to inspect/discover a Dataset's expected +/// schema before materializing said Dataset. +class ARROW_DS_EXPORT DatasetFactory { + public: + /// \brief Get the schemas of the Fragments and Partitioning. + virtual Result>> InspectSchemas( + InspectOptions options) = 0; + + /// \brief Get unified schema for the resulting Dataset. + Result> Inspect(InspectOptions options = {}); + + /// \brief Create a Dataset + Result> Finish(); + /// \brief Create a Dataset with the given schema (see \a InspectOptions::schema) + Result> Finish(std::shared_ptr schema); + /// \brief Create a Dataset with the given options + virtual Result> Finish(FinishOptions options) = 0; + + /// \brief Optional root partition for the resulting Dataset. + const compute::Expression& root_partition() const { return root_partition_; } + /// \brief Set the root partition for the resulting Dataset. + Status SetRootPartition(compute::Expression partition) { + root_partition_ = std::move(partition); + return Status::OK(); + } + + virtual ~DatasetFactory() = default; + + protected: + DatasetFactory(); + + compute::Expression root_partition_; +}; + +/// @} + +/// \brief DatasetFactory provides a way to inspect/discover a Dataset's +/// expected schema before materialization. +/// \ingroup dataset-implementations +class ARROW_DS_EXPORT UnionDatasetFactory : public DatasetFactory { + public: + static Result> Make( + std::vector> factories); + + /// \brief Return the list of child DatasetFactory + const std::vector>& factories() const { + return factories_; + } + + /// \brief Get the schemas of the Datasets. + /// + /// Instead of applying options globally, it applies at each child factory. + /// This will not respect `options.fragments` exactly, but will respect the + /// spirit of peeking the first fragments or all of them. 
+ Result>> InspectSchemas( + InspectOptions options) override; + + /// \brief Create a Dataset. + Result> Finish(FinishOptions options) override; + + protected: + explicit UnionDatasetFactory(std::vector> factories); + + std::vector> factories_; +}; + +/// \ingroup dataset-filesystem +struct FileSystemFactoryOptions { + /// Either an explicit Partitioning or a PartitioningFactory to discover one. + /// + /// If a factory is provided, it will be used to infer a schema for partition fields + /// based on file and directory paths then construct a Partitioning. The default + /// is a Partitioning which will yield no partition information. + /// + /// The (explicit or discovered) partitioning will be applied to discovered files + /// and the resulting partition information embedded in the Dataset. + PartitioningOrFactory partitioning{Partitioning::Default()}; + + /// For the purposes of applying the partitioning, paths will be stripped + /// of the partition_base_dir. Files not matching the partition_base_dir + /// prefix will be skipped for partition discovery. The ignored files will still + /// be part of the Dataset, but will not have partition information. + /// + /// Example: + /// partition_base_dir = "/dataset"; + /// + /// - "/dataset/US/sales.csv" -> "US/sales.csv" will be given to the partitioning + /// + /// - "/home/john/late_sales.csv" -> Will be ignored for partition discovery. + /// + /// This is useful for partitioning which parses directory when ordering + /// is important, e.g. DirectoryPartitioning. + std::string partition_base_dir; + + /// Invalid files (via selector or explicitly) will be excluded by checking + /// with the FileFormat::IsSupported method. This will incur IO for each files + /// in a serial and single threaded fashion. Disabling this feature will skip the + /// IO, but unsupported files may be present in the Dataset + /// (resulting in an error at scan time). 
+ bool exclude_invalid_files = false; + + /// When discovering from a Selector (and not from an explicit file list), ignore + /// files and directories matching any of these prefixes. + /// + /// Example (with selector = "/dataset/**"): + /// selector_ignore_prefixes = {"_", ".DS_STORE" }; + /// + /// - "/dataset/data.csv" -> not ignored + /// - "/dataset/_metadata" -> ignored + /// - "/dataset/.DS_STORE" -> ignored + /// - "/dataset/_hidden/dat" -> ignored + /// - "/dataset/nested/.DS_STORE" -> ignored + std::vector selector_ignore_prefixes = { + ".", + "_", + }; +}; + +/// \brief FileSystemDatasetFactory creates a Dataset from a vector of +/// fs::FileInfo or a fs::FileSelector. +/// \ingroup dataset-filesystem +class ARROW_DS_EXPORT FileSystemDatasetFactory : public DatasetFactory { + public: + /// \brief Build a FileSystemDatasetFactory from an explicit list of + /// paths. + /// + /// \param[in] filesystem passed to FileSystemDataset + /// \param[in] paths passed to FileSystemDataset + /// \param[in] format passed to FileSystemDataset + /// \param[in] options see FileSystemFactoryOptions for more information. + static Result> Make( + std::shared_ptr filesystem, const std::vector& paths, + std::shared_ptr format, FileSystemFactoryOptions options); + + /// \brief Build a FileSystemDatasetFactory from a fs::FileSelector. + /// + /// The selector will expand to a vector of FileInfo. The expansion/crawling + /// is performed in this function call. Thus, the finalized Dataset is + /// working with a snapshot of the filesystem. + // + /// If options.partition_base_dir is not provided, it will be overwritten + /// with selector.base_dir. + /// + /// \param[in] filesystem passed to FileSystemDataset + /// \param[in] selector used to crawl and search files + /// \param[in] format passed to FileSystemDataset + /// \param[in] options see FileSystemFactoryOptions for more information. 
+ static Result> Make( + std::shared_ptr filesystem, fs::FileSelector selector, + std::shared_ptr format, FileSystemFactoryOptions options); + + /// \brief Build a FileSystemDatasetFactory from an uri including filesystem + /// information. + /// + /// \param[in] uri passed to FileSystemDataset + /// \param[in] format passed to FileSystemDataset + /// \param[in] options see FileSystemFactoryOptions for more information. + static Result> Make(std::string uri, + std::shared_ptr format, + FileSystemFactoryOptions options); + + /// \brief Build a FileSystemDatasetFactory from an explicit list of + /// file information. + /// + /// \param[in] filesystem passed to FileSystemDataset + /// \param[in] files passed to FileSystemDataset + /// \param[in] format passed to FileSystemDataset + /// \param[in] options see FileSystemFactoryOptions for more information. + static Result> Make( + std::shared_ptr filesystem, const std::vector& files, + std::shared_ptr format, FileSystemFactoryOptions options); + + Result>> InspectSchemas( + InspectOptions options) override; + + Result> Finish(FinishOptions options) override; + + protected: + FileSystemDatasetFactory(std::vector files, + std::shared_ptr filesystem, + std::shared_ptr format, + FileSystemFactoryOptions options); + + Result> PartitionSchema(); + + std::vector files_; + std::shared_ptr fs_; + std::shared_ptr format_; + FileSystemFactoryOptions options_; +}; + +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_base.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_base.h new file mode 100644 index 0000000000000000000000000000000000000000..46fc8ebc40db097a0bb3fc25f00351c68e36991f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_base.h @@ -0,0 +1,495 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/dataset/dataset.h" +#include "arrow/dataset/partition.h" +#include "arrow/dataset/scanner.h" +#include "arrow/dataset/type_fwd.h" +#include "arrow/dataset/visibility.h" +#include "arrow/filesystem/filesystem.h" +#include "arrow/io/file.h" +#include "arrow/type_fwd.h" +#include "arrow/util/compression.h" + +namespace arrow { + +namespace dataset { + +/// \defgroup dataset-file-formats File formats for reading and writing datasets +/// \defgroup dataset-filesystem File system datasets +/// +/// @{ + +/// \brief The path and filesystem where an actual file is located or a buffer which can +/// be read like a file +class ARROW_DS_EXPORT FileSource : public util::EqualityComparable { + public: + FileSource(std::string path, std::shared_ptr filesystem, + Compression::type compression = Compression::UNCOMPRESSED) + : file_info_(std::move(path)), + filesystem_(std::move(filesystem)), + compression_(compression) {} + + FileSource(fs::FileInfo info, std::shared_ptr filesystem, + Compression::type compression = Compression::UNCOMPRESSED) + : file_info_(std::move(info)), + filesystem_(std::move(filesystem)), + compression_(compression) {} + + 
explicit FileSource(std::shared_ptr buffer, + Compression::type compression = Compression::UNCOMPRESSED) + : buffer_(std::move(buffer)), compression_(compression) {} + + using CustomOpen = std::function>()>; + FileSource(CustomOpen open, int64_t size) + : custom_open_(std::move(open)), custom_size_(size) {} + + using CustomOpenWithCompression = + std::function>(Compression::type)>; + FileSource(CustomOpenWithCompression open_with_compression, int64_t size, + Compression::type compression = Compression::UNCOMPRESSED) + : custom_open_(std::bind(std::move(open_with_compression), compression)), + custom_size_(size), + compression_(compression) {} + + FileSource(std::shared_ptr file, int64_t size, + Compression::type compression = Compression::UNCOMPRESSED) + : custom_open_([=] { return ToResult(file); }), + custom_size_(size), + compression_(compression) {} + + explicit FileSource(std::shared_ptr file, + Compression::type compression = Compression::UNCOMPRESSED); + + FileSource() : custom_open_(CustomOpen{&InvalidOpen}) {} + + static std::vector FromPaths(const std::shared_ptr& fs, + std::vector paths) { + std::vector sources; + for (auto&& path : paths) { + sources.emplace_back(std::move(path), fs); + } + return sources; + } + + /// \brief Return the type of raw compression on the file, if any. + Compression::type compression() const { return compression_; } + + /// \brief Return the file path, if any. Only valid when file source wraps a path. + const std::string& path() const { + static std::string buffer_path = ""; + static std::string custom_open_path = ""; + return filesystem_ ? file_info_.path() : buffer_ ? buffer_path : custom_open_path; + } + + /// \brief Return the filesystem, if any. Otherwise returns nullptr + const std::shared_ptr& filesystem() const { return filesystem_; } + + /// \brief Return the buffer containing the file, if any. 
Otherwise returns nullptr + const std::shared_ptr& buffer() const { return buffer_; } + + /// \brief Get a RandomAccessFile which views this file source + Result> Open() const; + Future> OpenAsync() const; + + /// \brief Get the size (in bytes) of the file or buffer + /// If the file is compressed this should be the compressed (on-disk) size. + int64_t Size() const; + + /// \brief Get an InputStream which views this file source (and decompresses if needed) + /// \param[in] compression If nullopt, guess the compression scheme from the + /// filename, else decompress with the given codec + Result> OpenCompressed( + std::optional compression = std::nullopt) const; + + /// \brief equality comparison with another FileSource + bool Equals(const FileSource& other) const; + + private: + static Result> InvalidOpen() { + return Status::Invalid("Called Open() on an uninitialized FileSource"); + } + + fs::FileInfo file_info_; + std::shared_ptr filesystem_; + std::shared_ptr buffer_; + CustomOpen custom_open_; + int64_t custom_size_ = 0; + Compression::type compression_ = Compression::UNCOMPRESSED; +}; + +/// \brief Base class for file format implementation +class ARROW_DS_EXPORT FileFormat : public std::enable_shared_from_this { + public: + /// Options affecting how this format is scanned. + /// + /// The options here can be overridden at scan time. + std::shared_ptr default_fragment_scan_options; + + virtual ~FileFormat() = default; + + /// \brief The name identifying the kind of file format + virtual std::string type_name() const = 0; + + virtual bool Equals(const FileFormat& other) const = 0; + + /// \brief Indicate if the FileSource is supported/readable by this format. + virtual Result IsSupported(const FileSource& source) const = 0; + + /// \brief Return the schema of the file if possible. 
+ virtual Result> Inspect(const FileSource& source) const = 0; + + /// \brief Learn what we need about the file before we start scanning it + virtual Future> InspectFragment( + const FileSource& source, const FragmentScanOptions* format_options, + compute::ExecContext* exec_context) const; + + virtual Result ScanBatchesAsync( + const std::shared_ptr& options, + const std::shared_ptr& file) const = 0; + + virtual Future> CountRows( + const std::shared_ptr& file, compute::Expression predicate, + const std::shared_ptr& options); + + virtual Future> BeginScan( + const FragmentScanRequest& request, const InspectedFragment& inspected_fragment, + const FragmentScanOptions* format_options, + compute::ExecContext* exec_context) const; + + /// \brief Open a fragment + virtual Result> MakeFragment( + FileSource source, compute::Expression partition_expression, + std::shared_ptr physical_schema); + + /// \brief Create a FileFragment for a FileSource. + Result> MakeFragment( + FileSource source, compute::Expression partition_expression); + + /// \brief Create a FileFragment for a FileSource. + Result> MakeFragment( + FileSource source, std::shared_ptr physical_schema = NULLPTR); + + /// \brief Create a writer for this format. + virtual Result> MakeWriter( + std::shared_ptr destination, std::shared_ptr schema, + std::shared_ptr options, + fs::FileLocator destination_locator) const = 0; + + /// \brief Get default write options for this format. + /// + /// May return null shared_ptr if this file format does not yet support + /// writing datasets. 
+ virtual std::shared_ptr DefaultWriteOptions() = 0; + + protected: + explicit FileFormat(std::shared_ptr default_fragment_scan_options) + : default_fragment_scan_options(std::move(default_fragment_scan_options)) {} +}; + +/// \brief A Fragment that is stored in a file with a known format +class ARROW_DS_EXPORT FileFragment : public Fragment, + public util::EqualityComparable { + public: + Result ScanBatchesAsync( + const std::shared_ptr& options) override; + Future> CountRows( + compute::Expression predicate, + const std::shared_ptr& options) override; + Future> BeginScan( + const FragmentScanRequest& request, const InspectedFragment& inspected_fragment, + const FragmentScanOptions* format_options, + compute::ExecContext* exec_context) override; + Future> InspectFragment( + const FragmentScanOptions* format_options, + compute::ExecContext* exec_context) override; + + std::string type_name() const override { return format_->type_name(); } + std::string ToString() const override { return source_.path(); }; + + const FileSource& source() const { return source_; } + const std::shared_ptr& format() const { return format_; } + + bool Equals(const FileFragment& other) const; + + protected: + FileFragment(FileSource source, std::shared_ptr format, + compute::Expression partition_expression, + std::shared_ptr physical_schema) + : Fragment(std::move(partition_expression), std::move(physical_schema)), + source_(std::move(source)), + format_(std::move(format)) {} + + Result> ReadPhysicalSchemaImpl() override; + + FileSource source_; + std::shared_ptr format_; + + friend class FileFormat; +}; + +/// \brief A Dataset of FileFragments. +/// +/// A FileSystemDataset is composed of one or more FileFragment. The fragments +/// are independent and don't need to share the same format and/or filesystem. +class ARROW_DS_EXPORT FileSystemDataset : public Dataset { + public: + /// \brief Create a FileSystemDataset. 
+ /// + /// \param[in] schema the schema of the dataset + /// \param[in] root_partition the partition expression of the dataset + /// \param[in] format the format of each FileFragment. + /// \param[in] filesystem the filesystem of each FileFragment, or nullptr if the + /// fragments wrap buffers. + /// \param[in] fragments list of fragments to create the dataset from. + /// \param[in] partitioning the Partitioning object in case the dataset is created + /// with a known partitioning (e.g. from a discovered partitioning + /// through a DatasetFactory), or nullptr if not known. + /// + /// Note that fragments wrapping files resident in differing filesystems are not + /// permitted; to work with multiple filesystems use a UnionDataset. + /// + /// \return A constructed dataset. + static Result> Make( + std::shared_ptr schema, compute::Expression root_partition, + std::shared_ptr format, std::shared_ptr filesystem, + std::vector> fragments, + std::shared_ptr partitioning = NULLPTR); + + /// \brief Write a dataset. + static Status Write(const FileSystemDatasetWriteOptions& write_options, + std::shared_ptr scanner); + + /// \brief Return the type name of the dataset. + std::string type_name() const override { return "filesystem"; } + + /// \brief Replace the schema of the dataset. + Result> ReplaceSchema( + std::shared_ptr schema) const override; + + /// \brief Return the path of files. + std::vector files() const; + + /// \brief Return the format. + const std::shared_ptr& format() const { return format_; } + + /// \brief Return the filesystem. May be nullptr if the fragments wrap buffers. + const std::shared_ptr& filesystem() const { return filesystem_; } + + /// \brief Return the partitioning. May be nullptr if the dataset was not constructed + /// with a partitioning. 
+ const std::shared_ptr& partitioning() const { return partitioning_; } + + std::string ToString() const; + + protected: + struct FragmentSubtrees; + + explicit FileSystemDataset(std::shared_ptr schema) + : Dataset(std::move(schema)) {} + + FileSystemDataset(std::shared_ptr schema, + compute::Expression partition_expression) + : Dataset(std::move(schema), partition_expression) {} + + Result GetFragmentsImpl(compute::Expression predicate) override; + + void SetupSubtreePruning(); + + std::shared_ptr format_; + std::shared_ptr filesystem_; + std::vector> fragments_; + std::shared_ptr partitioning_; + + std::shared_ptr subtrees_; +}; + +/// \brief Options for writing a file of this format. +class ARROW_DS_EXPORT FileWriteOptions { + public: + virtual ~FileWriteOptions() = default; + + const std::shared_ptr& format() const { return format_; } + + std::string type_name() const { return format_->type_name(); } + + protected: + explicit FileWriteOptions(std::shared_ptr format) + : format_(std::move(format)) {} + + std::shared_ptr format_; +}; + +/// \brief A writer for this format. +class ARROW_DS_EXPORT FileWriter { + public: + virtual ~FileWriter() = default; + + /// \brief Write the given batch. + virtual Status Write(const std::shared_ptr& batch) = 0; + + /// \brief Write all batches from the reader. + Status Write(RecordBatchReader* batches); + + /// \brief Indicate that writing is done. + virtual Future<> Finish(); + + const std::shared_ptr& format() const { return options_->format(); } + const std::shared_ptr& schema() const { return schema_; } + const std::shared_ptr& options() const { return options_; } + const fs::FileLocator& destination() const { return destination_locator_; } + + /// \brief After Finish() is called, provides number of bytes written to file. 
+ Result GetBytesWritten() const; + + protected: + FileWriter(std::shared_ptr schema, std::shared_ptr options, + std::shared_ptr destination, + fs::FileLocator destination_locator) + : schema_(std::move(schema)), + options_(std::move(options)), + destination_(std::move(destination)), + destination_locator_(std::move(destination_locator)) {} + + virtual Future<> FinishInternal() = 0; + + std::shared_ptr schema_; + std::shared_ptr options_; + std::shared_ptr destination_; + fs::FileLocator destination_locator_; + std::optional bytes_written_; +}; + +/// \brief Options for writing a dataset. +struct ARROW_DS_EXPORT FileSystemDatasetWriteOptions { + /// Options for individual fragment writing. + std::shared_ptr file_write_options; + + /// FileSystem into which a dataset will be written. + std::shared_ptr filesystem; + + /// Root directory into which the dataset will be written. + std::string base_dir; + + /// Partitioning used to generate fragment paths. + std::shared_ptr partitioning; + + /// Maximum number of partitions any batch may be written into, default is 1K. + int max_partitions = 1024; + + /// Template string used to generate fragment basenames. + /// {i} will be replaced by an auto incremented integer. + std::string basename_template; + + /// A functor which will be applied on an incremented counter. The result will be + /// inserted into the basename_template in place of {i}. + /// + /// This can be used, for example, to left-pad the file counter. + std::function basename_template_functor; + + /// If greater than 0 then this will limit the maximum number of files that can be left + /// open. If an attempt is made to open too many files then the least recently used file + /// will be closed. If this setting is set too low you may end up fragmenting your data + /// into many small files. 
+ /// + /// The default is 900 which also allows some # of files to be open by the scanner + /// before hitting the default Linux limit of 1024 + uint32_t max_open_files = 900; + + /// If greater than 0 then this will limit how many rows are placed in any single file. + /// Otherwise there will be no limit and one file will be created in each output + /// directory unless files need to be closed to respect max_open_files + uint64_t max_rows_per_file = 0; + + /// If greater than 0 then this will cause the dataset writer to batch incoming data + /// and only write the row groups to the disk when sufficient rows have accumulated. + /// The final row group size may be less than this value and other options such as + /// `max_open_files` or `max_rows_per_file` lead to smaller row group sizes. + uint64_t min_rows_per_group = 0; + + /// If greater than 0 then the dataset writer may split up large incoming batches into + /// multiple row groups. If this value is set then min_rows_per_group should also be + /// set or else you may end up with very small row groups (e.g. if the incoming row + /// group size is just barely larger than this value). + uint64_t max_rows_per_group = 1 << 20; + + /// Controls what happens if an output directory already exists. + ExistingDataBehavior existing_data_behavior = ExistingDataBehavior::kError; + + /// \brief If false the dataset writer will not create directories + /// This is mainly intended for filesystems that do not require directories such as S3. + bool create_dir = true; + + /// Callback to be invoked against all FileWriters before + /// they are finalized with FileWriter::Finish(). + std::function writer_pre_finish = [](FileWriter*) { + return Status::OK(); + }; + + /// Callback to be invoked against all FileWriters after they have + /// called FileWriter::Finish(). 
+ std::function writer_post_finish = [](FileWriter*) { + return Status::OK(); + }; + + const std::shared_ptr& format() const { + return file_write_options->format(); + } +}; + +/// \brief Wraps FileSystemDatasetWriteOptions for consumption as compute::ExecNodeOptions +class ARROW_DS_EXPORT WriteNodeOptions : public acero::ExecNodeOptions { + public: + explicit WriteNodeOptions( + FileSystemDatasetWriteOptions options, + std::shared_ptr custom_metadata = NULLPTR) + : write_options(std::move(options)), custom_metadata(std::move(custom_metadata)) {} + + /// \brief Options to control how to write the dataset + FileSystemDatasetWriteOptions write_options; + /// \brief Optional schema to attach to all written batches + /// + /// By default, we will use the output schema of the input. + /// + /// This can be used to alter schema metadata, field nullability, or field metadata. + /// However, this cannot be used to change the type of data. If the custom schema does + /// not have the same number of fields and the same data types as the input then the + /// plan will fail. + std::shared_ptr custom_schema; + /// \brief Optional metadata to attach to written batches + std::shared_ptr custom_metadata; +}; + +/// @} + +namespace internal { +ARROW_DS_EXPORT void InitializeDatasetWriter(arrow::acero::ExecFactoryRegistry* registry); +} + +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_csv.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_csv.h new file mode 100644 index 0000000000000000000000000000000000000000..42e3fd7246988e625e0d2e69a29bd40c553e3219 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_csv.h @@ -0,0 +1,144 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/csv/options.h" +#include "arrow/dataset/dataset.h" +#include "arrow/dataset/file_base.h" +#include "arrow/dataset/type_fwd.h" +#include "arrow/dataset/visibility.h" +#include "arrow/ipc/type_fwd.h" +#include "arrow/status.h" +#include "arrow/util/compression.h" + +namespace arrow { +namespace dataset { + +constexpr char kCsvTypeName[] = "csv"; + +/// \addtogroup dataset-file-formats +/// +/// @{ + +/// \brief A FileFormat implementation that reads from and writes to Csv files +class ARROW_DS_EXPORT CsvFileFormat : public FileFormat { + public: + // TODO(ARROW-18328) Remove this, moved to CsvFragmentScanOptions + /// Options affecting the parsing of CSV files + csv::ParseOptions parse_options = csv::ParseOptions::Defaults(); + + CsvFileFormat(); + + std::string type_name() const override { return kCsvTypeName; } + + bool Equals(const FileFormat& other) const override; + + Result IsSupported(const FileSource& source) const override; + + /// \brief Return the schema of the file if possible. 
+ Result> Inspect(const FileSource& source) const override; + + Future> BeginScan( + const FragmentScanRequest& request, const InspectedFragment& inspected_fragment, + const FragmentScanOptions* format_options, + compute::ExecContext* exec_context) const override; + + Result ScanBatchesAsync( + const std::shared_ptr& scan_options, + const std::shared_ptr& file) const override; + + Future> InspectFragment( + const FileSource& source, const FragmentScanOptions* format_options, + compute::ExecContext* exec_context) const override; + + Future> CountRows( + const std::shared_ptr& file, compute::Expression predicate, + const std::shared_ptr& options) override; + + Result> MakeWriter( + std::shared_ptr destination, std::shared_ptr schema, + std::shared_ptr options, + fs::FileLocator destination_locator) const override; + + std::shared_ptr DefaultWriteOptions() override; +}; + +/// \brief Per-scan options for CSV fragments +struct ARROW_DS_EXPORT CsvFragmentScanOptions : public FragmentScanOptions { + std::string type_name() const override { return kCsvTypeName; } + + using StreamWrapFunc = std::function>( + std::shared_ptr)>; + + /// CSV conversion options + csv::ConvertOptions convert_options = csv::ConvertOptions::Defaults(); + + /// CSV reading options + /// + /// Note that use_threads is always ignored. + csv::ReadOptions read_options = csv::ReadOptions::Defaults(); + + /// CSV parse options + csv::ParseOptions parse_options = csv::ParseOptions::Defaults(); + + /// Optional stream wrapping function + /// + /// If defined, all open dataset file fragments will be passed + /// through this function. One possible use case is to transparently + /// transcode all input files from a given character set to utf8. + StreamWrapFunc stream_transform_func{}; +}; + +class ARROW_DS_EXPORT CsvFileWriteOptions : public FileWriteOptions { + public: + /// Options passed to csv::MakeCSVWriter. 
+ std::shared_ptr write_options; + + protected: + explicit CsvFileWriteOptions(std::shared_ptr format) + : FileWriteOptions(std::move(format)) {} + + friend class CsvFileFormat; +}; + +class ARROW_DS_EXPORT CsvFileWriter : public FileWriter { + public: + Status Write(const std::shared_ptr& batch) override; + + private: + CsvFileWriter(std::shared_ptr destination, + std::shared_ptr writer, + std::shared_ptr schema, + std::shared_ptr options, + fs::FileLocator destination_locator); + + Future<> FinishInternal() override; + + std::shared_ptr destination_; + std::shared_ptr batch_writer_; + + friend class CsvFileFormat; +}; + +/// @} + +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_ipc.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_ipc.h new file mode 100644 index 0000000000000000000000000000000000000000..0f7da82a0af5b1e58b724646853e8f482781778b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_ipc.h @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. 
+ +#pragma once + +#include +#include + +#include "arrow/dataset/file_base.h" +#include "arrow/dataset/type_fwd.h" +#include "arrow/dataset/visibility.h" +#include "arrow/io/type_fwd.h" +#include "arrow/ipc/type_fwd.h" +#include "arrow/result.h" + +namespace arrow { +namespace dataset { + +/// \addtogroup dataset-file-formats +/// +/// @{ + +constexpr char kIpcTypeName[] = "ipc"; + +/// \brief A FileFormat implementation that reads from and writes to Ipc files +class ARROW_DS_EXPORT IpcFileFormat : public FileFormat { + public: + std::string type_name() const override { return kIpcTypeName; } + + IpcFileFormat(); + + bool Equals(const FileFormat& other) const override { + return type_name() == other.type_name(); + } + + Result IsSupported(const FileSource& source) const override; + + /// \brief Return the schema of the file if possible. + Result> Inspect(const FileSource& source) const override; + + Result ScanBatchesAsync( + const std::shared_ptr& options, + const std::shared_ptr& file) const override; + + Future> CountRows( + const std::shared_ptr& file, compute::Expression predicate, + const std::shared_ptr& options) override; + + Result> MakeWriter( + std::shared_ptr destination, std::shared_ptr schema, + std::shared_ptr options, + fs::FileLocator destination_locator) const override; + + std::shared_ptr DefaultWriteOptions() override; +}; + +/// \brief Per-scan options for IPC fragments +class ARROW_DS_EXPORT IpcFragmentScanOptions : public FragmentScanOptions { + public: + std::string type_name() const override { return kIpcTypeName; } + + /// Options passed to the IPC file reader. + /// included_fields, memory_pool, and use_threads are ignored. + std::shared_ptr options; + /// If present, the async scanner will enable I/O coalescing. + /// This is ignored by the sync scanner. + std::shared_ptr cache_options; +}; + +class ARROW_DS_EXPORT IpcFileWriteOptions : public FileWriteOptions { + public: + /// Options passed to ipc::MakeFileWriter. 
use_threads is ignored + std::shared_ptr options; + + /// custom_metadata written to the file's footer + std::shared_ptr metadata; + + protected: + explicit IpcFileWriteOptions(std::shared_ptr format) + : FileWriteOptions(std::move(format)) {} + + friend class IpcFileFormat; +}; + +class ARROW_DS_EXPORT IpcFileWriter : public FileWriter { + public: + Status Write(const std::shared_ptr& batch) override; + + private: + IpcFileWriter(std::shared_ptr destination, + std::shared_ptr writer, + std::shared_ptr schema, + std::shared_ptr options, + fs::FileLocator destination_locator); + + Future<> FinishInternal() override; + + std::shared_ptr destination_; + std::shared_ptr batch_writer_; + + friend class IpcFileFormat; +}; + +/// @} + +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_json.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_json.h new file mode 100644 index 0000000000000000000000000000000000000000..4b8112d87095ccc9d02b0c52b4df2b1e674b8cc5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_json.h @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/dataset/dataset.h" +#include "arrow/dataset/file_base.h" +#include "arrow/dataset/type_fwd.h" +#include "arrow/dataset/visibility.h" +#include "arrow/ipc/type_fwd.h" +#include "arrow/json/options.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/future.h" +#include "arrow/util/macros.h" + +namespace arrow::dataset { + +/// \addtogroup dataset-file-formats +/// +/// @{ + +constexpr char kJsonTypeName[] = "json"; + +/// \brief A FileFormat implementation that reads from JSON files +class ARROW_DS_EXPORT JsonFileFormat : public FileFormat { + public: + JsonFileFormat(); + + std::string type_name() const override { return kJsonTypeName; } + + bool Equals(const FileFormat& other) const override; + + Result IsSupported(const FileSource& source) const override; + + Result> Inspect(const FileSource& source) const override; + + Future> InspectFragment( + const FileSource& source, const FragmentScanOptions* format_options, + compute::ExecContext* exec_context) const override; + + Future> BeginScan( + const FragmentScanRequest& scan_request, const InspectedFragment& inspected, + const FragmentScanOptions* format_options, + compute::ExecContext* exec_context) const override; + + Result ScanBatchesAsync( + const std::shared_ptr& scan_options, + const std::shared_ptr& file) const override; + + Future> CountRows( + const std::shared_ptr& file, compute::Expression predicate, + const std::shared_ptr& scan_options) override; + + Result> MakeWriter( + std::shared_ptr destination, std::shared_ptr schema, + std::shared_ptr options, + fs::FileLocator destination_locator) const override { + return Status::NotImplemented("Writing JSON files is not currently supported"); + } + + std::shared_ptr DefaultWriteOptions() override { return NULLPTR; } +}; + +/// \brief Per-scan options for JSON fragments +struct ARROW_DS_EXPORT JsonFragmentScanOptions : public FragmentScanOptions { + std::string 
type_name() const override { return kJsonTypeName; } + + /// @brief Options that affect JSON parsing + /// + /// Note: `explicit_schema` and `unexpected_field_behavior` are ignored. + json::ParseOptions parse_options = json::ParseOptions::Defaults(); + + /// @brief Options that affect JSON reading + json::ReadOptions read_options = json::ReadOptions::Defaults(); +}; + +/// @} + +} // namespace arrow::dataset diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_orc.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_orc.h new file mode 100644 index 0000000000000000000000000000000000000000..5bfefd1e02b5cccf74cf8ade579a937341aef013 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_orc.h @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. 
+ +#pragma once + +#include +#include + +#include "arrow/dataset/file_base.h" +#include "arrow/dataset/type_fwd.h" +#include "arrow/dataset/visibility.h" +#include "arrow/io/type_fwd.h" +#include "arrow/result.h" + +namespace arrow { +namespace dataset { + +/// \addtogroup dataset-file-formats +/// +/// @{ + +constexpr char kOrcTypeName[] = "orc"; + +/// \brief A FileFormat implementation that reads from and writes to ORC files +class ARROW_DS_EXPORT OrcFileFormat : public FileFormat { + public: + OrcFileFormat(); + + std::string type_name() const override { return kOrcTypeName; } + + bool Equals(const FileFormat& other) const override { + return type_name() == other.type_name(); + } + + Result IsSupported(const FileSource& source) const override; + + /// \brief Return the schema of the file if possible. + Result> Inspect(const FileSource& source) const override; + + Result ScanBatchesAsync( + const std::shared_ptr& options, + const std::shared_ptr& file) const override; + + Future> CountRows( + const std::shared_ptr& file, compute::Expression predicate, + const std::shared_ptr& options) override; + + Result> MakeWriter( + std::shared_ptr destination, std::shared_ptr schema, + std::shared_ptr options, + fs::FileLocator destination_locator) const override; + + std::shared_ptr DefaultWriteOptions() override; +}; + +/// @} + +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/parquet_encryption_config.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/parquet_encryption_config.h new file mode 100644 index 0000000000000000000000000000000000000000..96200b8a3118b82c92977d222ba8775f61a02b0b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/parquet_encryption_config.h @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/dataset/type_fwd.h" + +namespace parquet::encryption { +class CryptoFactory; +struct KmsConnectionConfig; +struct EncryptionConfiguration; +struct DecryptionConfiguration; +} // namespace parquet::encryption + +namespace arrow { +namespace dataset { + +/// \brief Core configuration class encapsulating parameters for high-level encryption +/// within Parquet framework. +/// +/// ParquetEncryptionConfig serves as a bridge, passing encryption-related +/// parameters to appropriate components within the Parquet library. It holds references +/// to objects defining encryption strategy, Key Management Service (KMS) configuration, +/// and specific encryption configurations for Parquet data. +struct ARROW_DS_EXPORT ParquetEncryptionConfig { + /// Shared pointer to CryptoFactory object, responsible for creating cryptographic + /// components like encryptors and decryptors. + std::shared_ptr crypto_factory; + + /// Shared pointer to KmsConnectionConfig object, holding configuration parameters for + /// connecting to a Key Management Service (KMS). + std::shared_ptr kms_connection_config; + + /// Shared pointer to EncryptionConfiguration object, defining specific encryption + /// settings for Parquet data, like keys for different columns. 
+ std::shared_ptr encryption_config; +}; + +/// \brief Core configuration class encapsulating parameters for high-level decryption +/// within Parquet framework. +/// +/// ParquetDecryptionConfig is designed to pass decryption-related parameters to +/// appropriate decryption components within Parquet library. It holds references to +/// objects defining decryption strategy, Key Management Service (KMS) configuration, +/// and specific decryption configurations for reading encrypted Parquet data. +struct ARROW_DS_EXPORT ParquetDecryptionConfig { + /// Shared pointer to CryptoFactory object, pivotal in creating cryptographic + /// components for decryption process. + std::shared_ptr crypto_factory; + + /// Shared pointer to KmsConnectionConfig object, containing parameters for connecting + /// to a Key Management Service (KMS) during decryption. + std::shared_ptr kms_connection_config; + + /// Shared pointer to DecryptionConfiguration object, specifying decryption settings + /// for reading encrypted Parquet data. + std::shared_ptr decryption_config; +}; + +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/partition.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/partition.h new file mode 100644 index 0000000000000000000000000000000000000000..315a3d384d28c1b313bf1483fb38ad99c6713663 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/partition.h @@ -0,0 +1,432 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/compute/expression.h" +#include "arrow/dataset/type_fwd.h" +#include "arrow/dataset/visibility.h" +#include "arrow/util/compare.h" + +namespace arrow { + +namespace dataset { + +constexpr char kFilenamePartitionSep = '_'; + +struct ARROW_DS_EXPORT PartitionPathFormat { + std::string directory, filename; +}; + +// ---------------------------------------------------------------------- +// Partitioning + +/// \defgroup dataset-partitioning Partitioning API +/// +/// @{ + +/// \brief Interface for parsing partition expressions from string partition +/// identifiers. +/// +/// For example, the identifier "foo=5" might be parsed to an equality expression +/// between the "foo" field and the value 5. +/// +/// Some partitionings may store the field names in a metadata +/// store instead of in file paths, for example +/// dataset_root/2009/11/... could be used when the partition fields +/// are "year" and "month" +/// +/// Paths are consumed from left to right. Paths must be relative to +/// the root of a partition; path prefixes must be removed before passing +/// the path to a partitioning for parsing. 
+class ARROW_DS_EXPORT Partitioning : public util::EqualityComparable { + public: + virtual ~Partitioning() = default; + + /// \brief The name identifying the kind of partitioning + virtual std::string type_name() const = 0; + + //// \brief Return whether the partitionings are equal + virtual bool Equals(const Partitioning& other) const { + return schema_->Equals(other.schema_, /*check_metadata=*/false); + } + + /// \brief If the input batch shares any fields with this partitioning, + /// produce sub-batches which satisfy mutually exclusive Expressions. + struct PartitionedBatches { + RecordBatchVector batches; + std::vector expressions; + }; + virtual Result Partition( + const std::shared_ptr& batch) const = 0; + + /// \brief Parse a path into a partition expression + virtual Result Parse(const std::string& path) const = 0; + + virtual Result Format(const compute::Expression& expr) const = 0; + + /// \brief A default Partitioning which is a DirectoryPartitioning + /// with an empty schema. + static std::shared_ptr Default(); + + /// \brief The partition schema. + const std::shared_ptr& schema() const { return schema_; } + + protected: + explicit Partitioning(std::shared_ptr schema) : schema_(std::move(schema)) {} + + std::shared_ptr schema_; +}; + +/// \brief The encoding of partition segments. +enum class SegmentEncoding : int8_t { + /// No encoding. + None = 0, + /// Segment values are URL-encoded. + Uri = 1, +}; + +ARROW_DS_EXPORT +std::ostream& operator<<(std::ostream& os, SegmentEncoding segment_encoding); + +/// \brief Options for key-value based partitioning (hive/directory). +struct ARROW_DS_EXPORT KeyValuePartitioningOptions { + /// After splitting a path into components, decode the path components + /// before parsing according to this scheme. + SegmentEncoding segment_encoding = SegmentEncoding::Uri; +}; + +/// \brief Options for inferring a partitioning. 
+struct ARROW_DS_EXPORT PartitioningFactoryOptions { + /// When inferring a schema for partition fields, yield dictionary encoded types + /// instead of plain. This can be more efficient when materializing virtual + /// columns, and Expressions parsed by the finished Partitioning will include + /// dictionaries of all unique inspected values for each field. + bool infer_dictionary = false; + /// Optionally, an expected schema can be provided, in which case inference + /// will only check discovered fields against the schema and update internal + /// state (such as dictionaries). + std::shared_ptr schema; + /// After splitting a path into components, decode the path components + /// before parsing according to this scheme. + SegmentEncoding segment_encoding = SegmentEncoding::Uri; + + KeyValuePartitioningOptions AsPartitioningOptions() const; +}; + +/// \brief Options for inferring a hive-style partitioning. +struct ARROW_DS_EXPORT HivePartitioningFactoryOptions : PartitioningFactoryOptions { + /// The hive partitioning scheme maps null to a hard coded fallback string. + std::string null_fallback; + + HivePartitioningOptions AsHivePartitioningOptions() const; +}; + +/// \brief PartitioningFactory provides creation of a partitioning when the +/// specific schema must be inferred from available paths (no explicit schema is known). +class ARROW_DS_EXPORT PartitioningFactory { + public: + virtual ~PartitioningFactory() = default; + + /// \brief The name identifying the kind of partitioning + virtual std::string type_name() const = 0; + + /// Get the schema for the resulting Partitioning. + /// This may reset internal state, for example dictionaries of unique representations. + virtual Result> Inspect( + const std::vector& paths) = 0; + + /// Create a partitioning using the provided schema + /// (fields may be dropped). 
+ virtual Result> Finish( + const std::shared_ptr& schema) const = 0; +}; + +/// \brief Subclass for the common case of a partitioning which yields an equality +/// expression for each segment +class ARROW_DS_EXPORT KeyValuePartitioning : public Partitioning { + public: + /// An unconverted equality expression consisting of a field name and the representation + /// of a scalar value + struct Key { + std::string name; + std::optional value; + }; + + Result Partition( + const std::shared_ptr& batch) const override; + + Result Parse(const std::string& path) const override; + + Result Format(const compute::Expression& expr) const override; + + const ArrayVector& dictionaries() const { return dictionaries_; } + + SegmentEncoding segment_encoding() const { return options_.segment_encoding; } + + bool Equals(const Partitioning& other) const override; + + protected: + KeyValuePartitioning(std::shared_ptr schema, ArrayVector dictionaries, + KeyValuePartitioningOptions options) + : Partitioning(std::move(schema)), + dictionaries_(std::move(dictionaries)), + options_(options) { + if (dictionaries_.empty()) { + dictionaries_.resize(schema_->num_fields()); + } + } + + virtual Result> ParseKeys(const std::string& path) const = 0; + + virtual Result FormatValues(const ScalarVector& values) const = 0; + + /// Convert a Key to a full expression. + Result ConvertKey(const Key& key) const; + + Result> FormatPartitionSegments( + const ScalarVector& values) const; + Result> ParsePartitionSegments( + const std::vector& segments) const; + + ArrayVector dictionaries_; + KeyValuePartitioningOptions options_; +}; + +/// \brief DirectoryPartitioning parses one segment of a path for each field in its +/// schema. All fields are required, so paths passed to DirectoryPartitioning::Parse +/// must contain segments for each field. 
+/// +/// For example given schema the path "/2009/11" would be +/// parsed to ("year"_ == 2009 and "month"_ == 11) +class ARROW_DS_EXPORT DirectoryPartitioning : public KeyValuePartitioning { + public: + /// If a field in schema is of dictionary type, the corresponding element of + /// dictionaries must be contain the dictionary of values for that field. + explicit DirectoryPartitioning(std::shared_ptr schema, + ArrayVector dictionaries = {}, + KeyValuePartitioningOptions options = {}); + + std::string type_name() const override { return "directory"; } + + bool Equals(const Partitioning& other) const override; + + /// \brief Create a factory for a directory partitioning. + /// + /// \param[in] field_names The names for the partition fields. Types will be + /// inferred. + static std::shared_ptr MakeFactory( + std::vector field_names, PartitioningFactoryOptions = {}); + + private: + Result> ParseKeys(const std::string& path) const override; + + Result FormatValues(const ScalarVector& values) const override; +}; + +/// \brief The default fallback used for null values in a Hive-style partitioning. +static constexpr char kDefaultHiveNullFallback[] = "__HIVE_DEFAULT_PARTITION__"; + +struct ARROW_DS_EXPORT HivePartitioningOptions : public KeyValuePartitioningOptions { + std::string null_fallback = kDefaultHiveNullFallback; + + static HivePartitioningOptions DefaultsWithNullFallback(std::string fallback) { + HivePartitioningOptions options; + options.null_fallback = std::move(fallback); + return options; + } +}; + +/// \brief Multi-level, directory based partitioning +/// originating from Apache Hive with all data files stored in the +/// leaf directories. Data is partitioned by static values of a +/// particular column in the schema. Partition keys are represented in +/// the form $key=$value in directory names. +/// Field order is ignored, as are missing or unrecognized field names. 
+/// +/// For example given schema the path +/// "/day=321/ignored=3.4/year=2009" parses to ("year"_ == 2009 and "day"_ == 321) +class ARROW_DS_EXPORT HivePartitioning : public KeyValuePartitioning { + public: + /// If a field in schema is of dictionary type, the corresponding element of + /// dictionaries must be contain the dictionary of values for that field. + explicit HivePartitioning(std::shared_ptr schema, ArrayVector dictionaries = {}, + std::string null_fallback = kDefaultHiveNullFallback) + : KeyValuePartitioning(std::move(schema), std::move(dictionaries), + KeyValuePartitioningOptions()), + hive_options_( + HivePartitioningOptions::DefaultsWithNullFallback(std::move(null_fallback))) { + } + + explicit HivePartitioning(std::shared_ptr schema, ArrayVector dictionaries, + HivePartitioningOptions options) + : KeyValuePartitioning(std::move(schema), std::move(dictionaries), options), + hive_options_(options) {} + + std::string type_name() const override { return "hive"; } + std::string null_fallback() const { return hive_options_.null_fallback; } + const HivePartitioningOptions& options() const { return hive_options_; } + + static Result> ParseKey(const std::string& segment, + const HivePartitioningOptions& options); + + bool Equals(const Partitioning& other) const override; + + /// \brief Create a factory for a hive partitioning. 
+ static std::shared_ptr MakeFactory( + HivePartitioningFactoryOptions = {}); + + private: + const HivePartitioningOptions hive_options_; + Result> ParseKeys(const std::string& path) const override; + + Result FormatValues(const ScalarVector& values) const override; +}; + +/// \brief Implementation provided by lambda or other callable +class ARROW_DS_EXPORT FunctionPartitioning : public Partitioning { + public: + using ParseImpl = std::function(const std::string&)>; + + using FormatImpl = + std::function(const compute::Expression&)>; + + FunctionPartitioning(std::shared_ptr schema, ParseImpl parse_impl, + FormatImpl format_impl = NULLPTR, std::string name = "function") + : Partitioning(std::move(schema)), + parse_impl_(std::move(parse_impl)), + format_impl_(std::move(format_impl)), + name_(std::move(name)) {} + + std::string type_name() const override { return name_; } + + bool Equals(const Partitioning& other) const override { return false; } + + Result Parse(const std::string& path) const override { + return parse_impl_(path); + } + + Result Format(const compute::Expression& expr) const override { + if (format_impl_) { + return format_impl_(expr); + } + return Status::NotImplemented("formatting paths from ", type_name(), " Partitioning"); + } + + Result Partition( + const std::shared_ptr& batch) const override { + return Status::NotImplemented("partitioning batches from ", type_name(), + " Partitioning"); + } + + private: + ParseImpl parse_impl_; + FormatImpl format_impl_; + std::string name_; +}; + +class ARROW_DS_EXPORT FilenamePartitioning : public KeyValuePartitioning { + public: + /// \brief Construct a FilenamePartitioning from its components. + /// + /// If a field in schema is of dictionary type, the corresponding element of + /// dictionaries must be contain the dictionary of values for that field. 
+ explicit FilenamePartitioning(std::shared_ptr schema, + ArrayVector dictionaries = {}, + KeyValuePartitioningOptions options = {}); + + std::string type_name() const override { return "filename"; } + + /// \brief Create a factory for a filename partitioning. + /// + /// \param[in] field_names The names for the partition fields. Types will be + /// inferred. + static std::shared_ptr MakeFactory( + std::vector field_names, PartitioningFactoryOptions = {}); + + bool Equals(const Partitioning& other) const override; + + private: + Result> ParseKeys(const std::string& path) const override; + + Result FormatValues(const ScalarVector& values) const override; +}; + +ARROW_DS_EXPORT std::string StripPrefix(const std::string& path, + const std::string& prefix); + +/// \brief Extracts the directory and filename and removes the prefix of a path +/// +/// e.g., `StripPrefixAndFilename("/data/year=2019/c.txt", "/data") -> +/// {"year=2019","c.txt"}` +ARROW_DS_EXPORT std::string StripPrefixAndFilename(const std::string& path, + const std::string& prefix); + +/// \brief Vector version of StripPrefixAndFilename. +ARROW_DS_EXPORT std::vector StripPrefixAndFilename( + const std::vector& paths, const std::string& prefix); + +/// \brief Vector version of StripPrefixAndFilename. 
+ARROW_DS_EXPORT std::vector StripPrefixAndFilename( + const std::vector& files, const std::string& prefix); + +/// \brief Either a Partitioning or a PartitioningFactory +class ARROW_DS_EXPORT PartitioningOrFactory { + public: + explicit PartitioningOrFactory(std::shared_ptr partitioning) + : partitioning_(std::move(partitioning)) {} + + explicit PartitioningOrFactory(std::shared_ptr factory) + : factory_(std::move(factory)) {} + + PartitioningOrFactory& operator=(std::shared_ptr partitioning) { + return *this = PartitioningOrFactory(std::move(partitioning)); + } + + PartitioningOrFactory& operator=(std::shared_ptr factory) { + return *this = PartitioningOrFactory(std::move(factory)); + } + + /// \brief The partitioning (if given). + const std::shared_ptr& partitioning() const { return partitioning_; } + + /// \brief The partition factory (if given). + const std::shared_ptr& factory() const { return factory_; } + + /// \brief Get the partition schema, inferring it with the given factory if needed. + Result> GetOrInferSchema(const std::vector& paths); + + private: + std::shared_ptr factory_; + std::shared_ptr partitioning_; +}; + +/// @} + +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/pch.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/pch.h new file mode 100644 index 0000000000000000000000000000000000000000..a74fd96e3554e660c7bd01fcbd07974af8b68c98 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/pch.h @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Often-used headers, for precompiling. +// If updating this header, please make sure you check compilation speed +// before checking in. Adding headers which are not used extremely often +// may incur a slowdown, since it makes the precompiled header heavier to load. + +// This API is EXPERIMENTAL. + +#include "arrow/dataset/dataset.h" +#include "arrow/dataset/scanner.h" +#include "arrow/pch.h" diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/plan.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/plan.h new file mode 100644 index 0000000000000000000000000000000000000000..10260ccec81d159ffd40d86144e39c4d91739db1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/plan.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#include "arrow/dataset/visibility.h" + +namespace arrow { +namespace dataset { +namespace internal { + +/// Register dataset-based exec nodes with the exec node registry +/// +/// This function must be called before using dataset ExecNode factories +ARROW_DS_EXPORT void Initialize(); + +} // namespace internal +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/projector.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/projector.h new file mode 100644 index 0000000000000000000000000000000000000000..86d38f0af23522a08dcebc1c290fe6bc25ae014e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/projector.h @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. 
+ +#pragma once + +#include "arrow/dataset/visibility.h" +#include "arrow/type_fwd.h" + +namespace arrow { +namespace dataset { + +// FIXME this is superceded by compute::Expression::Bind +ARROW_DS_EXPORT Status CheckProjectable(const Schema& from, const Schema& to); + +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/scanner.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/scanner.h new file mode 100644 index 0000000000000000000000000000000000000000..d2de267897180f138792d154c59d393f92832e21 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/scanner.h @@ -0,0 +1,583 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. 
+ +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/acero/options.h" +#include "arrow/compute/expression.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/dataset/dataset.h" +#include "arrow/dataset/projector.h" +#include "arrow/dataset/type_fwd.h" +#include "arrow/dataset/visibility.h" +#include "arrow/io/interfaces.h" +#include "arrow/memory_pool.h" +#include "arrow/type_fwd.h" +#include "arrow/util/async_generator.h" +#include "arrow/util/iterator.h" +#include "arrow/util/thread_pool.h" +#include "arrow/util/type_fwd.h" + +namespace arrow { + +using RecordBatchGenerator = std::function>()>; + +namespace dataset { + +/// \defgroup dataset-scanning Scanning API +/// +/// @{ + +constexpr int64_t kDefaultBatchSize = 1 << 17; // 128Ki rows +// This will yield 64 batches ~ 8Mi rows +constexpr int32_t kDefaultBatchReadahead = 16; +constexpr int32_t kDefaultFragmentReadahead = 4; +constexpr int32_t kDefaultBytesReadahead = 1 << 25; // 32MiB + +/// Scan-specific options, which can be changed between scans of the same dataset. +struct ARROW_DS_EXPORT ScanOptions { + /// A row filter (which will be pushed down to partitioning/reading if supported). + compute::Expression filter = compute::literal(true); + /// A projection expression (which can add/remove/rename columns). + compute::Expression projection; + + /// Schema with which batches will be read from fragments. This is also known as the + /// "reader schema" it will be used (for example) in constructing CSV file readers to + /// identify column types for parsing. Usually only a subset of its fields (see + /// MaterializedFields) will be materialized during a scan. + std::shared_ptr dataset_schema; + + /// Schema of projected record batches. This is independent of dataset_schema as its + /// fields are derived from the projection. 
For example, let + /// + /// dataset_schema = {"a": int32, "b": int32, "id": utf8} + /// projection = project({equal(field_ref("a"), field_ref("b"))}, {"a_plus_b"}) + /// + /// (no filter specified). In this case, the projected_schema would be + /// + /// {"a_plus_b": int32} + std::shared_ptr projected_schema; + + /// Maximum row count for scanned batches. + int64_t batch_size = kDefaultBatchSize; + + /// How many batches to read ahead within a fragment. + /// + /// Set to 0 to disable batch readahead + /// + /// Note: May not be supported by all formats + /// Note: Will be ignored if use_threads is set to false + int32_t batch_readahead = kDefaultBatchReadahead; + + /// How many files to read ahead + /// + /// Set to 0 to disable fragment readahead + /// + /// Note: May not be enforced by all scanners + /// Note: Will be ignored if use_threads is set to false + int32_t fragment_readahead = kDefaultFragmentReadahead; + + /// A pool from which materialized and scanned arrays will be allocated. + MemoryPool* pool = arrow::default_memory_pool(); + + /// IOContext for any IO tasks + /// + /// Note: The IOContext executor will be ignored if use_threads is set to false + io::IOContext io_context; + + /// If true the scanner will scan in parallel + /// + /// Note: If true, this will use threads from both the cpu_executor and the + /// io_context.executor + /// Note: This must be true in order for any readahead to happen + bool use_threads = false; + + /// If true the scanner will add augmented fields to the output schema. + bool add_augmented_fields = true; + + /// Fragment-specific scan options. + std::shared_ptr fragment_scan_options; + + /// Return a vector of FieldRefs that require materialization. + /// + /// This is usually the union of the fields referenced in the projection and the + /// filter expression. 
Examples: + /// + /// - `SELECT a, b WHERE a < 2 && c > 1` => ["a", "b", "a", "c"] + /// - `SELECT a + b < 3 WHERE a > 1` => ["a", "b", "a"] + /// + /// This is needed for expression where a field may not be directly + /// used in the final projection but is still required to evaluate the + /// expression. + /// + /// This is used by Fragment implementations to apply the column + /// sub-selection optimization. + std::vector MaterializedFields() const; + + /// Parameters which control when the plan should pause for a slow consumer + acero::BackpressureOptions backpressure = + acero::BackpressureOptions::DefaultBackpressure(); +}; + +/// Scan-specific options, which can be changed between scans of the same dataset. +/// +/// A dataset consists of one or more individual fragments. A fragment is anything +/// that is independently scannable, often a file. +/// +/// Batches from all fragments will be converted to a single schema. This unified +/// schema is referred to as the "dataset schema" and is the output schema for +/// this node. +/// +/// Individual fragments may have schemas that are different from the dataset +/// schema. This is sometimes referred to as the physical or fragment schema. +/// Conversion from the fragment schema to the dataset schema is a process +/// known as evolution. +struct ARROW_DS_EXPORT ScanV2Options : public acero::ExecNodeOptions { + explicit ScanV2Options(std::shared_ptr dataset) + : dataset(std::move(dataset)) {} + + /// \brief The dataset to scan + std::shared_ptr dataset; + /// \brief A row filter + /// + /// The filter expression should be written against the dataset schema. + /// The filter must be unbound. + /// + /// This is an opportunistic pushdown filter. Filtering capabilities will + /// vary between formats. If a format is not capable of applying the filter + /// then it will ignore it. 
+ /// + /// Each fragment will do its best to filter the data based on the information + /// (partitioning guarantees, statistics) available to it. If it is able to + /// apply some filtering then it will indicate what filtering it was able to + /// apply by attaching a guarantee to the batch. + /// + /// For example, if a filter is x < 50 && y > 40 then a batch may be able to + /// apply a guarantee x < 50. Post-scan filtering would then only need to + /// consider y > 40 (for this specific batch). The next batch may not be able + /// to attach any guarantee and both clauses would need to be applied to that batch. + /// + /// A single guarantee-aware filtering operation should generally be applied to all + /// resulting batches. The scan node is not responsible for this. + /// + /// Fields that are referenced by the filter should be included in the `columns` vector. + /// The scan node will not automatically fetch fields referenced by the filter + /// expression. \see AddFieldsNeededForFilter + /// + /// If the filter references fields that are not included in `columns` this may or may + /// not be an error, depending on the format. 
+ compute::Expression filter = compute::literal(true); + + /// \brief The columns to scan + /// + /// This is not a simple list of top-level column indices but instead a set of paths + /// allowing for partial selection of columns + /// + /// These paths refer to the dataset schema + /// + /// For example, consider the following dataset schema: + /// schema({ + /// field("score", int32()), + /// "marker", struct_({ + /// field("color", utf8()), + /// field("location", struct_({ + /// field("x", float64()), + /// field("y", float64()) + /// }) + /// }) + /// }) + /// + /// If `columns` is {{0}, {1,1,0}} then the output schema is: + /// schema({field("score", int32()), field("x", float64())}) + /// + /// If `columns` is {{1,1,1}, {1,1}} then the output schema is: + /// schema({ + /// field("y", float64()), + /// field("location", struct_({ + /// field("x", float64()), + /// field("y", float64()) + /// }) + /// }) + std::vector columns; + + /// \brief Target number of bytes to read ahead in a fragment + /// + /// This limit involves some amount of estimation. Formats typically only know + /// batch boundaries in terms of rows (not decoded bytes) and so an estimation + /// must be done to guess the average row size. Other formats like CSV and JSON + /// must make even more generalized guesses. + /// + /// This is a best-effort guide. Some formats may need to read ahead further, + /// for example, if scanning a parquet file that has batches with 100MiB of data + /// then the actual readahead will be at least 100MiB + /// + /// Set to 0 to disable readahead. When disabled, the scanner will read the + /// dataset one batch at a time + /// + /// This limit applies across all fragments. If the limit is 32MiB and the + /// fragment readahead allows for 20 fragments to be read at once then the + /// total readahead will still be 32MiB and NOT 20 * 32MiB. 
+ int32_t target_bytes_readahead = kDefaultBytesReadahead; + + /// \brief Number of fragments to read ahead + /// + /// Higher readahead will potentially lead to more efficient I/O but will lead + /// to the scan operation using more RAM. The default is fairly conservative + /// and designed for fast local disks (or slow local spinning disks which cannot + /// handle much parallelism anyways). When using a highly parallel remote filesystem + /// you will likely want to increase these values. + /// + /// Set to 0 to disable fragment readahead. When disabled the dataset will be scanned + /// one fragment at a time. + int32_t fragment_readahead = kDefaultFragmentReadahead; + /// \brief Options specific to the file format + const FragmentScanOptions* format_options = NULLPTR; + + /// \brief Utility method to get a selection representing all columns in a dataset + static std::vector AllColumns(const Schema& dataset_schema); + + /// \brief Utility method to add fields needed for the current filter + /// + /// This method adds any fields that are needed by `filter` which are not already + /// included in the list of columns. Any new fields added will be added to the end + /// in no particular order. + static Status AddFieldsNeededForFilter(ScanV2Options* options); +}; + +/// \brief Describes a projection +struct ARROW_DS_EXPORT ProjectionDescr { + /// \brief The projection expression itself + /// This expression must be a call to make_struct + compute::Expression expression; + /// \brief The output schema of the projection. + + /// This can be calculated from the input schema and the expression but it + /// is cached here for convenience. 
+ std::shared_ptr schema; + + /// \brief Create a ProjectionDescr by binding an expression to the dataset schema + /// + /// expression must return a struct type + static Result FromStructExpression( + const compute::Expression& expression, const Schema& dataset_schema); + + /// \brief Create a ProjectionDescr from expressions/names for each field + static Result FromExpressions(std::vector exprs, + std::vector names, + const Schema& dataset_schema); + + /// \brief Create a default projection referencing fields in the dataset schema + static Result FromNames(std::vector names, + const Schema& dataset_schema, + bool add_augmented_fields = true); + + /// \brief Make a projection that projects every field in the dataset schema + static Result Default(const Schema& dataset_schema, + bool add_augmented_fields = true); +}; + +/// \brief Utility method to set the projection expression and schema +ARROW_DS_EXPORT void SetProjection(ScanOptions* options, ProjectionDescr projection); + +/// \brief Combines a record batch with the fragment that the record batch originated +/// from +/// +/// Knowing the source fragment can be useful for debugging & understanding loaded +/// data +struct TaggedRecordBatch { + std::shared_ptr record_batch; + std::shared_ptr fragment; +}; +using TaggedRecordBatchGenerator = std::function()>; +using TaggedRecordBatchIterator = Iterator; + +/// \brief Combines a tagged batch with positional information +/// +/// This is returned when scanning batches in an unordered fashion. 
This information is +/// needed if you ever want to reassemble the batches in order +struct EnumeratedRecordBatch { + Enumerated> record_batch; + Enumerated> fragment; +}; +using EnumeratedRecordBatchGenerator = std::function()>; +using EnumeratedRecordBatchIterator = Iterator; + +/// @} + +} // namespace dataset + +template <> +struct IterationTraits { + static dataset::TaggedRecordBatch End() { + return dataset::TaggedRecordBatch{NULLPTR, NULLPTR}; + } + static bool IsEnd(const dataset::TaggedRecordBatch& val) { + return val.record_batch == NULLPTR; + } +}; + +template <> +struct IterationTraits { + static dataset::EnumeratedRecordBatch End() { + return dataset::EnumeratedRecordBatch{ + IterationEnd>>(), + IterationEnd>>()}; + } + static bool IsEnd(const dataset::EnumeratedRecordBatch& val) { + return IsIterationEnd(val.fragment); + } +}; + +namespace dataset { + +/// \defgroup dataset-scanning Scanning API +/// +/// @{ + +/// \brief A scanner glues together several dataset classes to load in data. +/// The dataset contains a collection of fragments and partitioning rules. +/// +/// The fragments identify independently loadable units of data (i.e. each fragment has +/// a potentially unique schema and possibly even format. It should be possible to read +/// fragments in parallel if desired). +/// +/// The fragment's format contains the logic necessary to actually create a task to load +/// the fragment into memory. That task may or may not support parallel execution of +/// its own. +/// +/// The scanner is then responsible for creating scan tasks from every fragment in the +/// dataset and (potentially) sequencing the loaded record batches together. +/// +/// The scanner should not buffer the entire dataset in memory (unless asked) instead +/// yielding record batches as soon as they are ready to scan. Various readahead +/// properties control how much data is allowed to be scanned before pausing to let a +/// slow consumer catchup. 
+
+/// Today the scanner also handles projection & filtering although that may change in
+/// the future.
+class ARROW_DS_EXPORT Scanner {
+ public:
+  virtual ~Scanner() = default;
+
+  /// \brief Apply a visitor to each RecordBatch as it is scanned. If multiple threads
+  /// are used (via use_threads), the visitor will be invoked from those threads and is
+  /// responsible for any synchronization.
+  virtual Status Scan(std::function visitor) = 0;
+  /// \brief Convert a Scanner into a Table.
+  ///
+  /// Use this convenience utility with care. This will serially materialize the
+  /// Scan result in memory before creating the Table.
+  virtual Result> ToTable() = 0;
+  /// \brief Scan the dataset into a stream of record batches. Each batch is tagged
+  /// with the fragment it originated from. The batches will arrive in order. The
+  /// order of fragments is determined by the dataset.
+  ///
+  /// Note: The scanner will perform some readahead but will avoid materializing too
+  /// much in memory (this is governed by the readahead options and use_threads option).
+  /// If the readahead queue fills up then I/O will pause until the calling thread catches
+  /// up.
+  virtual Result ScanBatches() = 0;
+  virtual Result ScanBatchesAsync() = 0;
+  virtual Result ScanBatchesAsync(
+      ::arrow::internal::Executor* cpu_thread_pool) = 0;
+  /// \brief Scan the dataset into a stream of record batches. Unlike ScanBatches this
+  /// method may allow record batches to be returned out of order. This allows for more
+  /// efficient scanning: some fragments may be accessed more quickly than others (e.g.
+  /// may be cached in RAM or just happen to get scheduled earlier by the I/O)
+  ///
+  /// To make up for the out-of-order iteration each batch is further tagged with
+  /// positional information. 
+ virtual Result ScanBatchesUnordered() = 0; + virtual Result ScanBatchesUnorderedAsync() = 0; + virtual Result ScanBatchesUnorderedAsync( + ::arrow::internal::Executor* cpu_thread_pool) = 0; + /// \brief A convenience to synchronously load the given rows by index. + /// + /// Will only consume as many batches as needed from ScanBatches(). + virtual Result> TakeRows(const Array& indices) = 0; + /// \brief Get the first N rows. + virtual Result> Head(int64_t num_rows) = 0; + /// \brief Count rows matching a predicate. + /// + /// This method will push down the predicate and compute the result based on fragment + /// metadata if possible. + virtual Result CountRows() = 0; + virtual Future CountRowsAsync() = 0; + /// \brief Convert the Scanner to a RecordBatchReader so it can be + /// easily used with APIs that expect a reader. + virtual Result> ToRecordBatchReader() = 0; + + /// \brief Get the options for this scan. + const std::shared_ptr& options() const { return scan_options_; } + /// \brief Get the dataset that this scanner will scan + virtual const std::shared_ptr& dataset() const = 0; + + protected: + explicit Scanner(std::shared_ptr scan_options) + : scan_options_(std::move(scan_options)) {} + + Result AddPositioningToInOrderScan( + TaggedRecordBatchIterator scan); + + const std::shared_ptr scan_options_; +}; + +/// \brief ScannerBuilder is a factory class to construct a Scanner. It is used +/// to pass information, notably a potential filter expression and a subset of +/// columns to materialize. +class ARROW_DS_EXPORT ScannerBuilder { + public: + explicit ScannerBuilder(std::shared_ptr dataset); + + ScannerBuilder(std::shared_ptr dataset, + std::shared_ptr scan_options); + + ScannerBuilder(std::shared_ptr schema, std::shared_ptr fragment, + std::shared_ptr scan_options); + + /// \brief Make a scanner from a record batch reader. + /// + /// The resulting scanner can be scanned only once. 
This is intended
+  /// to support writing data from streaming sources or other sources
+  /// that can be iterated only once.
+  static std::shared_ptr FromRecordBatchReader(
+      std::shared_ptr reader);
+
+  /// \brief Set the subset of columns to materialize.
+  ///
+  /// Columns which are not referenced may not be read from fragments.
+  ///
+  /// \param[in] columns list of columns to project. Order and duplicates will
+  /// be preserved.
+  ///
+  /// \return Failure if any column name does not exist in the dataset's
+  /// Schema.
+  Status Project(std::vector columns);
+
+  /// \brief Set expressions which will be evaluated to produce the materialized
+  /// columns.
+  ///
+  /// Columns which are not referenced may not be read from fragments.
+  ///
+  /// \param[in] exprs expressions to evaluate to produce columns.
+  /// \param[in] names list of names for the resulting columns.
+  ///
+  /// \return Failure if any referenced column does not exist in the dataset's
+  /// Schema.
+  Status Project(std::vector exprs, std::vector names);
+
+  /// \brief Set the filter expression to return only rows matching the filter.
+  ///
+  /// The predicate will be passed down to Sources and corresponding
+  /// Fragments to exploit predicate pushdown if possible using
+  /// partition information or Fragment internal metadata, e.g. Parquet statistics.
+  /// Columns which are not referenced may not be read from fragments.
+  ///
+  /// \param[in] filter expression to filter rows with.
+  ///
+  /// \return Failure if any referenced column does not exist in the dataset's
+  /// Schema.
+  Status Filter(const compute::Expression& filter);
+
+  /// \brief Indicate if the Scanner should make use of the available
+  /// ThreadPool found in ScanOptions;
+  Status UseThreads(bool use_threads = true);
+
+  /// \brief Set the maximum number of rows per RecordBatch.
+  ///
+  /// \param[in] batch_size the maximum number of rows.
+  /// \returns An error if the number for batch is not greater than 0. 
+ /// + /// This option provides a control limiting the memory owned by any RecordBatch. + Status BatchSize(int64_t batch_size); + + /// \brief Set the number of batches to read ahead within a fragment. + /// + /// \param[in] batch_readahead How many batches to read ahead within a fragment + /// \returns an error if this number is less than 0. + /// + /// This option provides a control on the RAM vs I/O tradeoff. + /// It might not be supported by all file formats, in which case it will + /// simply be ignored. + Status BatchReadahead(int32_t batch_readahead); + + /// \brief Set the number of fragments to read ahead + /// + /// \param[in] fragment_readahead How many fragments to read ahead + /// \returns an error if this number is less than 0. + /// + /// This option provides a control on the RAM vs I/O tradeoff. + Status FragmentReadahead(int32_t fragment_readahead); + + /// \brief Set the pool from which materialized and scanned arrays will be allocated. + Status Pool(MemoryPool* pool); + + /// \brief Set fragment-specific scan options. + Status FragmentScanOptions(std::shared_ptr fragment_scan_options); + + /// \brief Override default backpressure configuration + Status Backpressure(acero::BackpressureOptions backpressure); + + /// \brief Return the current scan options for the builder. + Result> GetScanOptions(); + + /// \brief Return the constructed now-immutable Scanner object + Result> Finish(); + + const std::shared_ptr& schema() const; + const std::shared_ptr& projected_schema() const; + + private: + std::shared_ptr dataset_; + std::shared_ptr scan_options_ = std::make_shared(); +}; + +/// \brief Construct a source ExecNode which yields batches from a dataset scan. +/// +/// Does not construct associated filter or project nodes. +/// Yielded batches will be augmented with fragment/batch indices to enable stable +/// ordering for simple ExecPlans. 
+class ARROW_DS_EXPORT ScanNodeOptions : public acero::ExecNodeOptions { + public: + explicit ScanNodeOptions(std::shared_ptr dataset, + std::shared_ptr scan_options, + bool require_sequenced_output = false) + : dataset(std::move(dataset)), + scan_options(std::move(scan_options)), + require_sequenced_output(require_sequenced_output) {} + + std::shared_ptr dataset; + std::shared_ptr scan_options; + bool require_sequenced_output; +}; + +/// @} + +namespace internal { +ARROW_DS_EXPORT void InitializeScanner(arrow::acero::ExecFactoryRegistry* registry); +ARROW_DS_EXPORT void InitializeScannerV2(arrow::acero::ExecFactoryRegistry* registry); +} // namespace internal +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/type_fwd.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..d58781e038de9ffc2686ebfda9f640eeacdd6668 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/type_fwd.h @@ -0,0 +1,113 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. 
+ +#pragma once + +#include +#include + +#include "arrow/compute/type_fwd.h" // IWYU pragma: export +#include "arrow/dataset/visibility.h" +#include "arrow/filesystem/type_fwd.h" // IWYU pragma: export +#include "arrow/type_fwd.h" // IWYU pragma: export + +namespace arrow { +namespace dataset { + +class Dataset; +class DatasetFactory; +using DatasetVector = std::vector>; + +class UnionDataset; +class UnionDatasetFactory; + +class Fragment; +using FragmentIterator = Iterator>; +using FragmentVector = std::vector>; + +class FragmentScanOptions; + +class FileSource; +class FileFormat; +class FileFragment; +class FileWriter; +class FileWriteOptions; +class FileSystemDataset; +class FileSystemDatasetFactory; +struct FileSystemDatasetWriteOptions; +class WriteNodeOptions; + +/// \brief Controls what happens if files exist in an output directory during a dataset +/// write +enum class ExistingDataBehavior : int8_t { + /// Deletes all files in a directory the first time that directory is encountered + kDeleteMatchingPartitions, + /// Ignores existing files, overwriting any that happen to have the same name as an + /// output file + kOverwriteOrIgnore, + /// Returns an error if there are any files or subdirectories in the output directory + kError, +}; + +class InMemoryDataset; + +class CsvFileFormat; +class CsvFileWriter; +class CsvFileWriteOptions; +struct CsvFragmentScanOptions; + +class JsonFileFormat; +class JsonFileWriter; +class JsonFileWriteOptions; +struct JsonFragmentScanOptions; + +class IpcFileFormat; +class IpcFileWriter; +class IpcFileWriteOptions; +class IpcFragmentScanOptions; + +class ParquetFileFormat; +class ParquetFileFragment; +class ParquetFragmentScanOptions; +class ParquetFileWriter; +class ParquetFileWriteOptions; + +class Partitioning; +class PartitioningFactory; +class PartitioningOrFactory; +struct KeyValuePartitioningOptions; +class DirectoryPartitioning; +class HivePartitioning; +struct HivePartitioningOptions; +class FilenamePartitioning; 
+struct FilenamePartitioningOptions; + +class ScanNodeOptions; +struct ScanOptions; + +class Scanner; + +class ScannerBuilder; + +class ScanTask; +using ScanTaskVector = std::vector>; +using ScanTaskIterator = Iterator>; + +} // namespace dataset +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h new file mode 100644 index 0000000000000000000000000000000000000000..7211ad5c2ccdbd20cad3599652766f7562cf5158 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/util/config.h" // IWYU pragma: export + +#include "arrow/filesystem/filesystem.h" // IWYU pragma: export +#ifdef ARROW_AZURE +# include "arrow/filesystem/azurefs.h" // IWYU pragma: export +#endif +#ifdef ARROW_GCS +# include "arrow/filesystem/gcsfs.h" // IWYU pragma: export +#endif +#include "arrow/filesystem/hdfs.h" // IWYU pragma: export +#include "arrow/filesystem/localfs.h" // IWYU pragma: export +#include "arrow/filesystem/mockfs.h" // IWYU pragma: export +#ifdef ARROW_S3 +# include "arrow/filesystem/s3fs.h" // IWYU pragma: export +#endif diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h new file mode 100644 index 0000000000000000000000000000000000000000..c5e5091256959959956cb5d15bdbfbf2e9930190 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h @@ -0,0 +1,375 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/util/macros.h" +#include "arrow/util/uri.h" + +namespace Azure::Core::Credentials { +class TokenCredential; +} + +namespace Azure::Storage { +class StorageSharedKeyCredential; +} + +namespace Azure::Storage::Blobs { +class BlobServiceClient; +} + +namespace Azure::Storage::Sas { +struct BlobSasBuilder; +} + +namespace Azure::Storage::Files::DataLake { +class DataLakeFileSystemClient; +class DataLakeServiceClient; +} // namespace Azure::Storage::Files::DataLake + +namespace arrow::fs { + +class TestAzureFileSystem; +class TestAzureOptions; + +/// Options for the AzureFileSystem implementation. +/// +/// By default, authentication is handled by the Azure SDK's credential chain +/// which may read from multiple environment variables, such as: +/// - `AZURE_TENANT_ID` +/// - `AZURE_CLIENT_ID` +/// - `AZURE_CLIENT_SECRET` +/// - `AZURE_AUTHORITY_HOST` +/// - `AZURE_CLIENT_CERTIFICATE_PATH` +/// - `AZURE_FEDERATED_TOKEN_FILE` +/// +/// Functions are provided for explicit configuration of credentials if that is preferred. +struct ARROW_EXPORT AzureOptions { + friend class TestAzureOptions; + + /// \brief The name of the Azure Storage Account being accessed. + /// + /// All service URLs will be constructed using this storage account name. + /// `ConfigureAccountKeyCredential` assumes the user wants to authenticate + /// this account. + std::string account_name; + + /// \brief hostname[:port] of the Azure Blob Storage Service. + /// + /// If the hostname is a relative domain name (one that starts with a '.'), then storage + /// account URLs will be constructed by prepending the account name to the hostname. + /// If the hostname is a fully qualified domain name, then the hostname will be used + /// as-is and the account name will follow the hostname in the URL path. 
+ /// + /// Default: ".blob.core.windows.net" + std::string blob_storage_authority = ".blob.core.windows.net"; + + /// \brief hostname[:port] of the Azure Data Lake Storage Gen 2 Service. + /// + /// If the hostname is a relative domain name (one that starts with a '.'), then storage + /// account URLs will be constructed by prepending the account name to the hostname. + /// If the hostname is a fully qualified domain name, then the hostname will be used + /// as-is and the account name will follow the hostname in the URL path. + /// + /// Default: ".dfs.core.windows.net" + std::string dfs_storage_authority = ".dfs.core.windows.net"; + + /// \brief Azure Blob Storage connection transport. + /// + /// Default: "https" + std::string blob_storage_scheme = "https"; + + /// \brief Azure Data Lake Storage Gen 2 connection transport. + /// + /// Default: "https" + std::string dfs_storage_scheme = "https"; + + // TODO(GH-38598): Add support for more auth methods. + // std::string connection_string; + // std::string sas_token; + + /// \brief Default metadata for OpenOutputStream. + /// + /// This will be ignored if non-empty metadata is passed to OpenOutputStream. + std::shared_ptr default_metadata; + + /// Whether OutputStream writes will be issued in the background, without blocking. + bool background_writes = true; + + private: + enum class CredentialKind { + kDefault, + kAnonymous, + kStorageSharedKey, + kClientSecret, + kManagedIdentity, + kCLI, + kWorkloadIdentity, + kEnvironment, + } credential_kind_ = CredentialKind::kDefault; + + std::shared_ptr + storage_shared_key_credential_; + mutable std::shared_ptr token_credential_; + + public: + AzureOptions(); + ~AzureOptions(); + + private: + void ExtractFromUriSchemeAndHierPart(const Uri& uri, std::string* out_path); + Status ExtractFromUriQuery(const Uri& uri); + + public: + /// \brief Construct a new AzureOptions from an URI. + /// + /// Supported formats: + /// + /// 1. 
abfs[s]://\.blob.core.windows.net[/\[/\]] + /// 2. abfs[s]://\\@\.dfs.core.windows.net[/path] + /// 3. abfs[s]://[\[\<:port\>][/\[/path]] + /// 4. abfs[s]://[\[/path] + /// + /// (1) and (2) are compatible with the Azure Data Lake Storage Gen2 URIs + /// [1], (3) is for Azure Blob Storage compatible service including Azurite, + /// and (4) is a shorter version of (1) and (2). + /// + /// Note that there is no difference between abfs and abfss. HTTPS is + /// used with abfs by default. You can force to use HTTP by specifying + /// "enable_tls=false" query. + /// + /// Supported query parameters: + /// + /// * blob_storage_authority: Set AzureOptions::blob_storage_authority + /// * dfs_storage_authority: Set AzureOptions::dfs_storage_authority + /// * enable_tls: If it's "false" or "0", HTTP not HTTPS is used. + /// * credential_kind: One of "default", "anonymous", "workload_identity", + /// "environment" or "cli". If "default" is specified, it's + /// just ignored. If "anonymous" is specified, + /// AzureOptions::ConfigureAnonymousCredential() is called. If + /// "workload_identity" is specified, + /// AzureOptions::ConfigureWorkloadIdentityCredential() is called. If + /// "environment" is specified, + /// AzureOptions::ConfigureEnvironmentCredential() is called. If "cli" is + /// specified, AzureOptions::ConfigureCLICredential() is called. + /// * tenant_id: You must specify "client_id" and "client_secret" + /// too. AzureOptions::ConfigureClientSecretCredential() is called. + /// * client_id: If you don't specify "tenant_id" and + /// "client_secret", + /// AzureOptions::ConfigureManagedIdentityCredential() is + /// called. If you specify "tenant_id" and "client_secret" too, + /// AzureOptions::ConfigureClientSecretCredential() is called. + /// * client_secret: You must specify "tenant_id" and "client_id" + /// too. AzureOptions::ConfigureClientSecretCredential() is called. 
+ /// + /// [1]: + /// https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri + static Result FromUri(const Uri& uri, std::string* out_path); + static Result FromUri(const std::string& uri, std::string* out_path); + + Status ConfigureDefaultCredential(); + Status ConfigureAnonymousCredential(); + Status ConfigureAccountKeyCredential(const std::string& account_key); + Status ConfigureClientSecretCredential(const std::string& tenant_id, + const std::string& client_id, + const std::string& client_secret); + Status ConfigureManagedIdentityCredential(const std::string& client_id = std::string()); + Status ConfigureCLICredential(); + Status ConfigureWorkloadIdentityCredential(); + Status ConfigureEnvironmentCredential(); + + bool Equals(const AzureOptions& other) const; + + std::string AccountBlobUrl(const std::string& account_name) const; + std::string AccountDfsUrl(const std::string& account_name) const; + + Result> + MakeBlobServiceClient() const; + + Result> + MakeDataLakeServiceClient() const; + + Result GenerateSASToken( + Azure::Storage::Sas::BlobSasBuilder* builder, + Azure::Storage::Blobs::BlobServiceClient* client) const; +}; + +/// \brief FileSystem implementation backed by Azure Blob Storage (ABS) [1] and +/// Azure Data Lake Storage Gen2 (ADLS Gen2) [2]. +/// +/// ADLS Gen2 isn't a dedicated service or account type. It's a set of capabilities that +/// support high throughput analytic workloads, built on Azure Blob Storage. All the data +/// ingested via the ADLS Gen2 APIs is persisted as blobs in the storage account. +/// ADLS Gen2 provides filesystem semantics, file-level security, and Hadoop +/// compatibility. ADLS Gen1 exists as a separate object that will retired on 2024-02-29 +/// and new ADLS accounts use Gen2 instead. +/// +/// ADLS Gen2 and Blob APIs can operate on the same data, but there are +/// some limitations [3]. 
The ones that are relevant to this +/// implementation are listed here: +/// +/// - You can't use Blob APIs, and ADLS APIs to write to the same instance of a file. If +/// you write to a file by using ADLS APIs then that file's blocks won't be visible +/// to calls to the GetBlockList Blob API. The only exception is when you're +/// overwriting. +/// - When you use the ListBlobs operation without specifying a delimiter, the results +/// include both directories and blobs. If you choose to use a delimiter, use only a +/// forward slash (/) \--- the only supported delimiter. +/// - If you use the DeleteBlob API to delete a directory, that directory is deleted only +/// if it's empty. This means that you can't use the Blob API delete directories +/// recursively. +/// +/// [1]: https://azure.microsoft.com/en-us/products/storage/blobs +/// [2]: https://azure.microsoft.com/en-us/products/storage/data-lake-storage +/// [3]: +/// https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-known-issues +class ARROW_EXPORT AzureFileSystem : public FileSystem { + private: + class Impl; + std::unique_ptr impl_; + + explicit AzureFileSystem(std::unique_ptr&& impl); + + friend class TestAzureFileSystem; + void ForceCachedHierarchicalNamespaceSupport(int hns_support); + + public: + ~AzureFileSystem() override = default; + + static Result> Make( + const AzureOptions& options, const io::IOContext& = io::default_io_context()); + + std::string type_name() const override { return "abfs"; } + + /// Return the original Azure options when constructing the filesystem + const AzureOptions& options() const; + + bool Equals(const FileSystem& other) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + + Result GetFileInfo(const FileSelector& 
select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + /// \brief Delete a directory and its contents recursively. + /// + /// Atomicity is guaranteed only on Hierarchical Namespace Storage accounts. + Status DeleteDir(const std::string& path) override; + + /// \brief Non-atomically deletes the contents of a directory. + /// + /// This function can return a bad Status after only partially deleting the + /// contents of the directory. + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + + /// \brief Deletion of all the containers in the storage account (not + /// implemented for safety reasons). + /// + /// \return Status::NotImplemented + Status DeleteRootDirContents() override; + + /// \brief Deletes a file. + /// + /// Supported on both flat namespace and Hierarchical Namespace storage + /// accounts. A check is made to guarantee the parent directory doesn't + /// disappear after the blob is deleted and while this operation is running, + /// no other client can delete the parent directory due to the use of leases. + /// + /// This means applications can safely retry this operation without coordination to + /// guarantee only one client/process is trying to delete the same file. + Status DeleteFile(const std::string& path) override; + + /// \brief Move/rename a file or directory. + /// + /// There are no files immediately at the root directory, so paths like + /// "/segment" always refer to a container of the storage account and are + /// treated as directories. + /// + /// If `dest` exists but the operation fails for some reason, `Move` + /// guarantees `dest` is not lost. + /// + /// Conditions for a successful move: + /// + /// 1. `src` must exist. + /// 2. `dest` can't contain a strict path prefix of `src`. More generally, + /// a directory can't be made a subdirectory of itself. + /// 3. If `dest` already exists and it's a file, `src` must also be a file. 
+ /// `dest` is then replaced by `src`. + /// 4. All components of `dest` must exist, except for the last. + /// 5. If `dest` already exists and it's a directory, `src` must also be a + /// directory and `dest` must be empty. `dest` is then replaced by `src` + /// and its contents. + /// + /// Leases are used to guarantee the pre-condition checks and the rename + /// operation are atomic: other clients can't invalidate the pre-condition in + /// the time between the checks and the actual rename operation. + /// + /// This is possible because Move() is only support on storage accounts with + /// Hierarchical Namespace Support enabled. + /// + /// ## Limitations + /// + /// - Moves are not supported on storage accounts without + /// Hierarchical Namespace support enabled + /// - Moves across different containers are not supported + /// - Moving a path of the form `/container` is not supported as it would + /// require moving all the files in a container to another container. + /// The only exception is a `Move("/container_a", "/container_b")` where + /// both containers are empty or `container_b` doesn't even exist. + /// The atomicity of the emptiness checks followed by the renaming operation + /// is guaranteed by the use of leases. 
+ Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + + Result> OpenInputStream(const FileInfo& info) override; + + Result> OpenInputFile( + const std::string& path) override; + + Result> OpenInputFile( + const FileInfo& info) override; + + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + Result PathFromUri(const std::string& uri_string) const override; +}; + +} // namespace arrow::fs diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h new file mode 100644 index 0000000000000000000000000000000000000000..d4f62f86a7482b5ab38cc118c249cfc4911c0fad --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h @@ -0,0 +1,723 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/filesystem/type_fwd.h" +#include "arrow/io/interfaces.h" +#include "arrow/type_fwd.h" +#include "arrow/util/compare.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" +#include "arrow/util/windows_fixup.h" + +namespace arrow { +namespace fs { + +using arrow::util::Uri; + +// A system clock time point expressed as a 64-bit (or more) number of +// nanoseconds since the epoch. +using TimePoint = + std::chrono::time_point; + +ARROW_EXPORT std::string ToString(FileType); + +ARROW_EXPORT std::ostream& operator<<(std::ostream& os, FileType); + +static const int64_t kNoSize = -1; +static const TimePoint kNoTime = TimePoint(TimePoint::duration(-1)); + +/// \brief FileSystem entry info +struct ARROW_EXPORT FileInfo : public util::EqualityComparable { + FileInfo() = default; + FileInfo(FileInfo&&) = default; + FileInfo& operator=(FileInfo&&) = default; + FileInfo(const FileInfo&) = default; + FileInfo& operator=(const FileInfo&) = default; + + explicit FileInfo(std::string path, FileType type = FileType::Unknown) + : path_(std::move(path)), type_(type) {} + + /// The file type + FileType type() const { return type_; } + void set_type(FileType type) { type_ = type; } + + /// The full file path in the filesystem + const std::string& path() const { return path_; } + void set_path(std::string path) { path_ = std::move(path); } + + /// The file base name (component after the last directory separator) + std::string base_name() const; + + // The directory base name (component before the file base name). + std::string dir_name() const; + + /// The size in bytes, if available + /// + /// Only regular files are guaranteed to have a size. 
+ int64_t size() const { return size_; } + void set_size(int64_t size) { size_ = size; } + + /// The file extension (excluding the dot) + std::string extension() const; + + /// The time of last modification, if available + TimePoint mtime() const { return mtime_; } + void set_mtime(TimePoint mtime) { mtime_ = mtime; } + + bool IsFile() const { return type_ == FileType::File; } + bool IsDirectory() const { return type_ == FileType::Directory; } + + bool Equals(const FileInfo& other) const { + return type() == other.type() && path() == other.path() && size() == other.size() && + mtime() == other.mtime(); + } + + std::string ToString() const; + + /// Function object implementing less-than comparison and hashing by + /// path, to support sorting infos, using them as keys, and other + /// interactions with the STL. + struct ByPath { + bool operator()(const FileInfo& l, const FileInfo& r) const { + return l.path() < r.path(); + } + + size_t operator()(const FileInfo& i) const { + return std::hash{}(i.path()); + } + }; + + protected: + std::string path_; + FileType type_ = FileType::Unknown; + int64_t size_ = kNoSize; + TimePoint mtime_ = kNoTime; +}; + +ARROW_EXPORT std::ostream& operator<<(std::ostream& os, const FileInfo&); + +/// \brief File selector for filesystem APIs +struct ARROW_EXPORT FileSelector { + /// The directory in which to select files. + /// If the path exists but doesn't point to a directory, this should be an error. + std::string base_dir; + /// The behavior if `base_dir` isn't found in the filesystem. If false, + /// an error is returned. If true, an empty selection is returned. + bool allow_not_found; + /// Whether to recurse into subdirectories. + bool recursive; + /// The maximum number of subdirectories to recurse into. 
+ int32_t max_recursion; + + FileSelector() : allow_not_found(false), recursive(false), max_recursion(INT32_MAX) {} +}; + +/// \brief FileSystem, path pair +struct ARROW_EXPORT FileLocator { + std::shared_ptr filesystem; + std::string path; +}; + +using FileInfoVector = std::vector; +using FileInfoGenerator = std::function()>; + +} // namespace fs + +template <> +struct IterationTraits { + static fs::FileInfoVector End() { return {}; } + static bool IsEnd(const fs::FileInfoVector& val) { return val.empty(); } +}; + +namespace fs { + +/// \brief Abstract file system API +class ARROW_EXPORT FileSystem + /// \cond false + : public std::enable_shared_from_this +/// \endcond +{ // NOLINT + public: + virtual ~FileSystem(); + + virtual std::string type_name() const = 0; + + /// EXPERIMENTAL: The IOContext associated with this filesystem. + const io::IOContext& io_context() const { return io_context_; } + + /// Normalize path for the given filesystem + /// + /// The default implementation of this method is a no-op, but subclasses + /// may allow normalizing irregular path forms (such as Windows local paths). + virtual Result NormalizePath(std::string path); + + /// \brief Ensure a URI (or path) is compatible with the given filesystem and return the + /// path + /// + /// \param uri_string A URI representing a resource in the given filesystem. + /// + /// This method will check to ensure the given filesystem is compatible with the + /// URI. This can be useful when the user provides both a URI and a filesystem or + /// when a user provides multiple URIs that should be compatible with the same + /// filesystem. + /// + /// uri_string can be an absolute path instead of a URI. In that case it will ensure + /// the filesystem (if supplied) is the local filesystem (or some custom filesystem that + /// is capable of reading local paths) and will normalize the path's file separators. + /// + /// Note, this method only checks to ensure the URI scheme is valid. 
It will not detect + /// inconsistencies like a mismatching region or endpoint override. + /// + /// \return The path inside the filesystem that is indicated by the URI. + virtual Result PathFromUri(const std::string& uri_string) const; + + /// \brief Make a URI from which FileSystemFromUri produces an equivalent filesystem + /// \param path The path component to use in the resulting URI + /// \return A URI string, or an error if an equivalent URI cannot be produced + virtual Result MakeUri(std::string path) const; + + virtual bool Equals(const FileSystem& other) const = 0; + + virtual bool Equals(const std::shared_ptr& other) const { + return Equals(*other); + } + + /// Get info for the given target. + /// + /// Any symlink is automatically dereferenced, recursively. + /// A nonexistent or unreachable file returns an Ok status and + /// has a FileType of value NotFound. An error status indicates + /// a truly exceptional condition (low-level I/O error, etc.). + virtual Result GetFileInfo(const std::string& path) = 0; + /// Same, for many targets at once. + virtual Result GetFileInfo(const std::vector& paths); + /// Same, according to a selector. + /// + /// The selector's base directory will not be part of the results, even if + /// it exists. + /// If it doesn't exist, see `FileSelector::allow_not_found`. + virtual Result GetFileInfo(const FileSelector& select) = 0; + + /// Async version of GetFileInfo + virtual Future GetFileInfoAsync(const std::vector& paths); + + /// Streaming async version of GetFileInfo + /// + /// The returned generator is not async-reentrant, i.e. you need to wait for + /// the returned future to complete before calling the generator again. + virtual FileInfoGenerator GetFileInfoGenerator(const FileSelector& select); + + /// Create a directory and subdirectories. + /// + /// This function succeeds if the directory already exists. 
+ virtual Status CreateDir(const std::string& path, bool recursive) = 0; + Status CreateDir(const std::string& path) { return CreateDir(path, true); } + + /// Delete a directory and its contents, recursively. + virtual Status DeleteDir(const std::string& path) = 0; + + /// Delete a directory's contents, recursively. + /// + /// Like DeleteDir, but doesn't delete the directory itself. + /// Passing an empty path ("" or "/") is disallowed, see DeleteRootDirContents. + virtual Status DeleteDirContents(const std::string& path, bool missing_dir_ok) = 0; + Status DeleteDirContents(const std::string& path) { + return DeleteDirContents(path, false); + } + + /// Async version of DeleteDirContents. + virtual Future<> DeleteDirContentsAsync(const std::string& path, bool missing_dir_ok); + + /// Async version of DeleteDirContents. + /// + /// This overload allows missing directories. + Future<> DeleteDirContentsAsync(const std::string& path); + + /// EXPERIMENTAL: Delete the root directory's contents, recursively. + /// + /// Implementations may decide to raise an error if this operation is + /// too dangerous. + // NOTE: may decide to remove this if it's deemed not useful + virtual Status DeleteRootDirContents() = 0; + + /// Delete a file. + virtual Status DeleteFile(const std::string& path) = 0; + /// Delete many files. + /// + /// The default implementation issues individual delete operations in sequence. + virtual Status DeleteFiles(const std::vector& paths); + + /// Move / rename a file or directory. + /// + /// If the destination exists: + /// - if it is a non-empty directory, an error is returned + /// - otherwise, if it has the same type as the source, it is replaced + /// - otherwise, behavior is unspecified (implementation-dependent). + virtual Status Move(const std::string& src, const std::string& dest) = 0; + + /// Copy a file. + /// + /// If the destination exists and is a directory, an error is returned. + /// Otherwise, it is replaced. 
+ virtual Status CopyFile(const std::string& src, const std::string& dest) = 0; + + /// Open an input stream for sequential reading. + virtual Result> OpenInputStream( + const std::string& path) = 0; + + /// Open an input stream for sequential reading. + /// + /// This override assumes the given FileInfo validly represents the file's + /// characteristics, and may optimize access depending on them (for example + /// avoid querying the file size or its existence). + virtual Result> OpenInputStream(const FileInfo& info); + + /// Open an input file for random access reading. + virtual Result> OpenInputFile( + const std::string& path) = 0; + + /// Open an input file for random access reading. + /// + /// This override assumes the given FileInfo validly represents the file's + /// characteristics, and may optimize access depending on them (for example + /// avoid querying the file size or its existence). + virtual Result> OpenInputFile( + const FileInfo& info); + + /// Async version of OpenInputStream + virtual Future> OpenInputStreamAsync( + const std::string& path); + + /// Async version of OpenInputStream + virtual Future> OpenInputStreamAsync( + const FileInfo& info); + + /// Async version of OpenInputFile + virtual Future> OpenInputFileAsync( + const std::string& path); + + /// Async version of OpenInputFile + virtual Future> OpenInputFileAsync( + const FileInfo& info); + + /// Open an output stream for sequential writing. + /// + /// If the target already exists, existing data is truncated. + virtual Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) = 0; + Result> OpenOutputStream(const std::string& path); + + /// Open an output stream for appending. + /// + /// If the target doesn't exist, a new empty file is created. + /// + /// Note: some filesystem implementations do not support efficient appending + /// to an existing file, in which case this method will return NotImplemented. 
+ /// Consider writing to multiple files (using e.g. the dataset layer) instead. + virtual Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) = 0; + Result> OpenAppendStream(const std::string& path); + + protected: + explicit FileSystem(io::IOContext io_context = io::default_io_context()) + : io_context_(std::move(io_context)) {} + + io::IOContext io_context_; + // Whether metadata operations (such as GetFileInfo or OpenInputStream) + // are cheap enough that the default async variants don't bother with + // a thread pool. + bool default_async_is_sync_ = true; +}; + +struct FileSystemFactory { + std::function>( + const Uri& uri, const io::IOContext& io_context, std::string* out_path)> + function; + std::string_view file; + int line; + + bool operator==(const FileSystemFactory& other) const { + // In the case where libarrow is linked statically both to the executable and to a + // dynamically loaded filesystem implementation library, the library contains a + // duplicate definition of the registry and duplicate definitions of any + // FileSystemRegistrars which are statically linked to libarrow. When retrieving + // factories from the filesystem implementation library, we use the file and line + // of the registrar's definition to determine equivalence of the duplicate factories. + return file == other.file && line == other.line; + } +}; + +/// \brief A FileSystem implementation that delegates to another +/// implementation after prepending a fixed base path. +/// +/// This is useful to expose a logical view of a subtree of a filesystem, +/// for example a directory in a LocalFileSystem. +/// This works on abstract paths, i.e. paths using forward slashes and +/// and a single root "/". Windows paths are not guaranteed to work. +/// This makes no security guarantee. For example, symlinks may allow to +/// "escape" the subtree and access other parts of the underlying filesystem. 
+class ARROW_EXPORT SubTreeFileSystem : public FileSystem { + public: + // This constructor may abort if base_path is invalid. + explicit SubTreeFileSystem(const std::string& base_path, + std::shared_ptr base_fs); + ~SubTreeFileSystem() override; + + std::string type_name() const override { return "subtree"; } + std::string base_path() const { return base_path_; } + std::shared_ptr base_fs() const { return base_fs_; } + + Result NormalizePath(std::string path) override; + Result PathFromUri(const std::string& uri_string) const override; + + bool Equals(const FileSystem& other) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result GetFileInfo(const FileSelector& select) override; + + FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputStream(const FileInfo& info) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenInputFile( + const FileInfo& info) override; + + Future> OpenInputStreamAsync( + const std::string& path) override; + Future> OpenInputStreamAsync( + const FileInfo& info) override; + Future> OpenInputFileAsync( + const std::string& path) override; + Future> OpenInputFileAsync( + const FileInfo& info) override; + + 
Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + protected: + SubTreeFileSystem() = default; + + const std::string base_path_; + std::shared_ptr base_fs_; + + Result PrependBase(const std::string& s) const; + Result PrependBaseNonEmpty(const std::string& s) const; + Result StripBase(const std::string& s) const; + Status FixInfo(FileInfo* info) const; + + static Result NormalizeBasePath( + std::string base_path, const std::shared_ptr& base_fs); +}; + +/// \brief A FileSystem implementation that delegates to another +/// implementation but inserts latencies at various points. +class ARROW_EXPORT SlowFileSystem : public FileSystem { + public: + SlowFileSystem(std::shared_ptr base_fs, + std::shared_ptr latencies); + SlowFileSystem(std::shared_ptr base_fs, double average_latency); + SlowFileSystem(std::shared_ptr base_fs, double average_latency, + int32_t seed); + + std::string type_name() const override { return "slow"; } + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result GetFileInfo(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) 
override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputStream(const FileInfo& info) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenInputFile( + const FileInfo& info) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + protected: + std::shared_ptr base_fs_; + std::shared_ptr latencies_; +}; + +/// \brief Ensure all registered filesystem implementations are finalized. +/// +/// Individual finalizers may wait for concurrent calls to finish so as to avoid +/// race conditions. After this function has been called, all filesystem APIs +/// will fail with an error. +/// +/// The user is responsible for synchronization of calls to this function. +void EnsureFinalized(); + +/// \defgroup filesystem-factories Functions for creating FileSystem instances +/// +/// @{ + +/// \brief Create a new FileSystem by URI +/// +/// Recognized schemes are "file", "mock", "hdfs", "viewfs", "s3", +/// "gs" and "gcs". +/// +/// Support for other schemes can be added using RegisterFileSystemFactory. +/// +/// \param[in] uri a URI-based path, ex: file:///some/local/path +/// \param[out] out_path (optional) Path inside the filesystem. +/// \return out_fs FileSystem instance. +ARROW_EXPORT +Result> FileSystemFromUri(const std::string& uri, + std::string* out_path = NULLPTR); + +/// \brief Create a new FileSystem by URI with a custom IO context +/// +/// Recognized schemes are "file", "mock", "hdfs", "viewfs", "s3", +/// "gs" and "gcs". +/// +/// Support for other schemes can be added using RegisterFileSystemFactory. +/// +/// \param[in] uri a URI-based path, ex: file:///some/local/path +/// \param[in] io_context an IOContext which will be associated with the filesystem +/// \param[out] out_path (optional) Path inside the filesystem. 
+/// \return out_fs FileSystem instance. +ARROW_EXPORT +Result> FileSystemFromUri(const std::string& uri, + const io::IOContext& io_context, + std::string* out_path = NULLPTR); + +/// \brief Create a new FileSystem by URI +/// +/// Support for other schemes can be added using RegisterFileSystemFactory. +/// +/// Same as FileSystemFromUri, but in addition also recognize non-URIs +/// and treat them as local filesystem paths. Only absolute local filesystem +/// paths are allowed. +ARROW_EXPORT +Result> FileSystemFromUriOrPath( + const std::string& uri, std::string* out_path = NULLPTR); + +/// \brief Create a new FileSystem by URI with a custom IO context +/// +/// Support for other schemes can be added using RegisterFileSystemFactory. +/// +/// Same as FileSystemFromUri, but in addition also recognize non-URIs +/// and treat them as local filesystem paths. Only absolute local filesystem +/// paths are allowed. +ARROW_EXPORT +Result> FileSystemFromUriOrPath( + const std::string& uri, const io::IOContext& io_context, + std::string* out_path = NULLPTR); + +/// @} + +/// \defgroup filesystem-factory-registration Helpers for FileSystem registration +/// +/// @{ + +/// \brief Register a FileSystem factory +/// +/// Support for custom URI schemes can be added by registering a factory +/// for the corresponding FileSystem. +/// +/// \param[in] scheme a Uri scheme which the factory will handle. +/// If a factory has already been registered for a scheme, +/// the new factory will be ignored. +/// \param[in] factory a function which can produce a FileSystem for Uris which match +/// scheme. +/// \param[in] finalizer a function which must be called to finalize the factory before +/// the process exits, or nullptr if no finalization is necessary. +/// \return raises KeyError if a name collision occurs. 
+ARROW_EXPORT Status RegisterFileSystemFactory(std::string scheme, + FileSystemFactory factory, + std::function finalizer = {}); + +/// \brief Register FileSystem factories from a shared library +/// +/// FileSystem implementations may be housed in separate shared libraries and only +/// registered when the shared library is explicitly loaded. FileSystemRegistrar is +/// provided to simplify definition of such libraries: each instance at namespace scope +/// in the library will register a factory for a scheme. Any library which uses +/// FileSystemRegistrars and which must be dynamically loaded should be loaded using +/// LoadFileSystemFactories(), which will additionally merge registries are if necessary +/// (static linkage to arrow can produce isolated registries). +ARROW_EXPORT Status LoadFileSystemFactories(const char* libpath); + +struct ARROW_EXPORT FileSystemRegistrar { + /// \brief Register a FileSystem factory at load time + /// + /// Support for custom URI schemes can be added by registering a factory for the + /// corresponding FileSystem. An instance of this helper can be defined at namespace + /// scope to cause the factory to be registered at load time. + /// + /// Global constructors will finish execution before main() starts if the registrar is + /// linked into the same binary as main(), or before dlopen()/LoadLibrary() returns if + /// the library in which the registrar is defined is dynamically loaded. 
+ /// + /// \code + /// FileSystemRegistrar kSlowFileSystemModule{ + /// "slowfile", + /// [](const Uri& uri, const io::IOContext& io_context, std::string* out_path) + /// ->Result> { + /// auto local_uri = "file" + uri.ToString().substr(uri.scheme().size()); + /// ARROW_ASSIGN_OR_RAISE(auto base_fs, + /// FileSystemFromUri(local_uri, io_context, out_path)); + /// double average_latency = 1; + /// int32_t seed = 0xDEADBEEF; + /// ARROW_ASSIGN_OR_RAISE(auto params, uri.query_item()); + /// for (const auto& [key, value] : params) { + /// if (key == "average_latency") { + /// average_latency = std::stod(value); + /// } + /// if (key == "seed") { + /// seed = std::stoi(value, nullptr, /*base=*/16); + /// } + /// } + /// return std::make_shared(base_fs, average_latency, seed); + /// })); + /// \endcode + /// + /// \param[in] scheme a Uri scheme which the factory will handle. + /// If a factory has already been registered for a scheme, the + /// new factory will be ignored. + /// \param[in] factory a function which can produce a FileSystem for Uris which match + /// scheme. + /// \param[in] finalizer a function which must be called to finalize the factory before + /// the process exits, or nullptr if no finalization is necessary. + FileSystemRegistrar(std::string scheme, FileSystemFactory factory, + std::function finalizer = {}); +}; + +#define ARROW_REGISTER_FILESYSTEM(scheme, factory_function, finalizer) \ + ::arrow::fs::FileSystemRegistrar { \ + scheme, ::arrow::fs::FileSystemFactory{factory_function, __FILE__, __LINE__}, \ + finalizer \ + } + +/// @} + +namespace internal { +ARROW_EXPORT void* GetFileSystemRegistry(); +} // namespace internal + +/// \brief Copy files, including from one FileSystem to another +/// +/// If a source and destination are resident in the same FileSystem FileSystem::CopyFile +/// will be used, otherwise the file will be opened as a stream in both FileSystems and +/// chunks copied from the source to the destination. 
No directories will be created. +ARROW_EXPORT +Status CopyFiles(const std::vector& sources, + const std::vector& destinations, + const io::IOContext& io_context = io::default_io_context(), + int64_t chunk_size = 1024 * 1024, bool use_threads = true); + +/// \brief Copy selected files, including from one FileSystem to another +/// +/// Directories will be created under the destination base directory as needed. +ARROW_EXPORT +Status CopyFiles(const std::shared_ptr& source_fs, + const FileSelector& source_sel, + const std::shared_ptr& destination_fs, + const std::string& destination_base_dir, + const io::IOContext& io_context = io::default_io_context(), + int64_t chunk_size = 1024 * 1024, bool use_threads = true); + +struct FileSystemGlobalOptions { + /// Path to a single PEM file holding all TLS CA certificates + /// + /// If empty, the underlying TLS library's defaults will be used. + std::string tls_ca_file_path; + + /// Path to a directory holding TLS CA certificates in individual PEM files + /// named along the OpenSSL "hashed" format. + /// + /// If empty, the underlying TLS library's defaults will be used. + std::string tls_ca_dir_path; +}; + +/// EXPERIMENTAL: optional global initialization routine +/// +/// This is for environments (such as manylinux) where the path +/// to TLS CA certificates needs to be configured at runtime. 
+ARROW_EXPORT +Status Initialize(const FileSystemGlobalOptions& options); + +} // namespace fs +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem_library.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem_library.h new file mode 100644 index 0000000000000000000000000000000000000000..d610c72237a5a6afdfa20a905bf7d2d1203b0b0b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem_library.h @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/filesystem/filesystem.h" + +namespace arrow::fs { +extern "C" { + +// ARROW_FORCE_EXPORT ensures this function's visibility is +// _declspec(dllexport)/[[gnu::visibility("default")]] even when +// this header is #included by a non-arrow source, as in a third +// party filesystem implementation. 
+ARROW_FORCE_EXPORT void* arrow_filesystem_get_registry() { + // In the case where libarrow is linked statically both to the executable and to a + // dynamically loaded filesystem implementation library, the library contains a + // duplicate definition of the registry into which the library's instances of + // FileSystemRegistrar insert their factories. This function is made accessible to + // dlsym/GetProcAddress to enable detection of such duplicate registries and merging + // into the registry accessible to the executable. + return internal::GetFileSystemRegistry(); +} +} +} // namespace arrow::fs diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h new file mode 100644 index 0000000000000000000000000000000000000000..f1fbc95bf957c850b9738561c07d09d258b367ab --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h @@ -0,0 +1,246 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/util/uri.h" + +namespace arrow { +namespace fs { +namespace internal { + +// Opaque wrapper for GCS's library credentials to avoid exposing in Arrow headers. +struct GcsCredentialsHolder; + +} // namespace internal + +class GcsFileSystem; + +/// \brief Container for GCS Credentials and information necessary to recreate them. +class ARROW_EXPORT GcsCredentials { + public: + bool Equals(const GcsCredentials& other) const; + bool anonymous() const { return anonymous_; } + const std::string& access_token() const { return access_token_; } + TimePoint expiration() const { return expiration_; } + const std::string& target_service_account() const { return target_service_account_; } + const std::string& json_credentials() const { return json_credentials_; } + const std::shared_ptr& holder() const { + return holder_; + } + + private: + GcsCredentials() = default; + bool anonymous_ = false; + std::string access_token_; + TimePoint expiration_; + std::string target_service_account_; + std::string json_credentials_; + std::shared_ptr holder_; + friend class GcsFileSystem; + friend struct GcsOptions; +}; + +/// Options for the GcsFileSystem implementation. +struct ARROW_EXPORT GcsOptions { + /// \brief Equivalent to GcsOptions::Defaults(). + GcsOptions(); + GcsCredentials credentials; + + std::string endpoint_override; + std::string scheme; + /// \brief Location to use for creating buckets. + std::string default_bucket_location; + + /// \brief If set used to control total time allowed for retrying underlying + /// errors. + /// + /// The default policy is to retry for up to 15 minutes. + std::optional retry_limit_seconds; + + /// \brief Default metadata for OpenOutputStream. + /// + /// This will be ignored if non-empty metadata is passed to OpenOutputStream. + std::shared_ptr default_metadata; + + /// \brief The project to use for creating buckets. 
+ /// + /// If not set, the library uses the GOOGLE_CLOUD_PROJECT environment + /// variable. Most I/O operations do not need a project id, only applications + /// that create new buckets need a project id. + std::optional project_id; + + bool Equals(const GcsOptions& other) const; + + /// \brief Initialize with Google Default Credentials + /// + /// Create options configured to use [Application Default Credentials][aip/4110]. The + /// details of this mechanism are too involved to describe here, but suffice is to say + /// that applications can override any defaults using an environment variable + /// (`GOOGLE_APPLICATION_CREDENTIALS`), and that the defaults work with most Google + /// Cloud Platform deployment environments (GCE, GKE, Cloud Run, etc.), and that have + /// the same behavior as the `gcloud` CLI tool on your workstation. + /// + /// \see https://cloud.google.com/docs/authentication + /// + /// [aip/4110]: https://google.aip.dev/auth/4110 + static GcsOptions Defaults(); + + /// \brief Initialize with anonymous credentials + static GcsOptions Anonymous(); + + /// \brief Initialize with access token + /// + /// These credentials are useful when using an out-of-band mechanism to fetch access + /// tokens. Note that access tokens are time limited, you will need to manually refresh + /// the tokens created by the out-of-band mechanism. + static GcsOptions FromAccessToken(const std::string& access_token, + TimePoint expiration); + + /// \brief Initialize with service account impersonation + /// + /// Service account impersonation allows one principal (a user or service account) to + /// impersonate a service account. It requires that the calling principal has the + /// necessary permissions *on* the service account. + static GcsOptions FromImpersonatedServiceAccount( + const GcsCredentials& base_credentials, const std::string& target_service_account); + + /// Creates service account credentials from a JSON object in string form. 
+ /// + /// The @p json_object is expected to be in the format described by [aip/4112]. Such an + /// object contains the identity of a service account, as well as a private key that can + /// be used to sign tokens, showing the caller was holding the private key. + /// + /// In GCP one can create several "keys" for each service account, and these keys are + /// downloaded as a JSON "key file". The contents of such a file are in the format + /// required by this function. Remember that key files and their contents should be + /// treated as any other secret with security implications, think of them as passwords + /// (because they are!), don't store them or output them where unauthorized persons may + /// read them. + /// + /// Most applications should probably use default credentials, maybe pointing them to a + /// file with these contents. Using this function may be useful when the json object is + /// obtained from a Cloud Secret Manager or a similar service. + /// + /// [aip/4112]: https://google.aip.dev/auth/4112 + static GcsOptions FromServiceAccountCredentials(const std::string& json_object); + + /// Initialize from URIs such as "gs://bucket/object". + static Result FromUri(const arrow::util::Uri& uri, std::string* out_path); + static Result FromUri(const std::string& uri, std::string* out_path); +}; + +/// \brief GCS-backed FileSystem implementation. +/// +/// GCS (Google Cloud Storage - https://cloud.google.com/storage) is a scalable object +/// storage system for any amount of data. The main abstractions in GCS are buckets and +/// objects. A bucket is a namespace for objects, buckets can store any number of objects, +/// tens of millions and even billions is not uncommon. Each object contains a single +/// blob of data, up to 5TiB in size. Buckets are typically configured to keep a single +/// version of each object, but versioning can be enabled. 
Versioning is important because +/// objects are immutable, once created one cannot append data to the object or modify the +/// object data in any way. +/// +/// GCS buckets are in a global namespace, if a Google Cloud customer creates a bucket +/// named `foo` no other customer can create a bucket with the same name. Note that a +/// principal (a user or service account) may only list the buckets they are entitled to, +/// and then only within a project. It is not possible to list "all" the buckets. +/// +/// Within each bucket objects are in flat namespace. GCS does not have folders or +/// directories. However, following some conventions it is possible to emulate +/// directories. To this end, this class: +/// +/// - All buckets are treated as directories at the "root" +/// - Creating a root directory results in a new bucket being created, this may be slower +/// than most GCS operations. +/// - The class creates marker objects for a directory, using a metadata attribute to +/// annotate the file. +/// - GCS can list all the objects with a given prefix, this is used to emulate listing +/// of directories. +/// - In object lists GCS can summarize all the objects with a common prefix as a single +/// entry, this is used to emulate non-recursive lists. Note that GCS list time is +/// proportional to the number of objects in the prefix. Listing recursively takes +/// almost the same time as non-recursive lists. 
+/// +class ARROW_EXPORT GcsFileSystem : public FileSystem { + public: + ~GcsFileSystem() override = default; + + std::string type_name() const override; + const GcsOptions& options() const; + + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + Result GetFileInfo(const std::string& path) override; + Result GetFileInfo(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + + Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override; + + /// This is not implemented in GcsFileSystem, as it would be too dangerous. + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputStream(const FileInfo& info) override; + + Result> OpenInputFile( + const std::string& path) override; + Result> OpenInputFile( + const FileInfo& info) override; + + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + ARROW_DEPRECATED( + "Deprecated. " + "OpenAppendStream is unsupported on the GCS FileSystem.") + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + /// Create a GcsFileSystem instance from the given options. 
+ // TODO(ARROW-16884): make this return Result for consistency + static std::shared_ptr Make( + const GcsOptions& options, const io::IOContext& = io::default_io_context()); + + private: + explicit GcsFileSystem(const GcsOptions& options, const io::IOContext& io_context); + + class Impl; + std::shared_ptr impl_; +}; + +} // namespace fs +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/hdfs.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/hdfs.h new file mode 100644 index 0000000000000000000000000000000000000000..25604a39e3aceb26b2e7da5dc72e97a0cbd635d5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/hdfs.h @@ -0,0 +1,117 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/io/hdfs.h" +#include "arrow/util/uri.h" + +namespace arrow::fs { + +/// Options for the HDFS implementation. 
+struct ARROW_EXPORT HdfsOptions { + HdfsOptions() = default; + ~HdfsOptions() = default; + + /// Hdfs configuration options, contains host, port, driver + io::HdfsConnectionConfig connection_config; + + /// Used by Hdfs OpenWritable Interface. + int32_t buffer_size = 0; + int16_t replication = 3; + int64_t default_block_size = 0; + + void ConfigureEndPoint(std::string host, int port); + void ConfigureReplication(int16_t replication); + void ConfigureUser(std::string user_name); + void ConfigureBufferSize(int32_t buffer_size); + void ConfigureBlockSize(int64_t default_block_size); + void ConfigureKerberosTicketCachePath(std::string path); + void ConfigureExtraConf(std::string key, std::string val); + + bool Equals(const HdfsOptions& other) const; + + static Result FromUri(const ::arrow::util::Uri& uri); + static Result FromUri(const std::string& uri); +}; + +/// HDFS-backed FileSystem implementation. +/// +/// implementation notes: +/// - This is a wrapper of arrow/io/hdfs, so we can use FileSystem API to handle hdfs. 
+class ARROW_EXPORT HadoopFileSystem : public FileSystem { + public: + ~HadoopFileSystem() override; + + std::string type_name() const override { return "hdfs"; } + HdfsOptions options() const; + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + /// Create a HdfsFileSystem instance from the given options. 
+ static Result> Make( + const HdfsOptions& options, const io::IOContext& = io::default_io_context()); + + protected: + HadoopFileSystem(const HdfsOptions& options, const io::IOContext&); + + class Impl; + std::unique_ptr impl_; +}; + +} // namespace arrow::fs diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/localfs.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/localfs.h new file mode 100644 index 0000000000000000000000000000000000000000..d72e8f7d74d51659b67355c2bdf6b7a107102b75 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/localfs.h @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" + +namespace arrow { +namespace internal { + +class Uri; + +} + +namespace fs { + +/// Options for the LocalFileSystem implementation. +struct ARROW_EXPORT LocalFileSystemOptions { + static constexpr int32_t kDefaultDirectoryReadahead = 16; + static constexpr int32_t kDefaultFileInfoBatchSize = 1000; + + /// Whether OpenInputStream and OpenInputFile return a mmap'ed file, + /// or a regular one. 
+ bool use_mmap = false; + + /// Options related to `GetFileInfoGenerator` interface. + + /// EXPERIMENTAL: The maximum number of directories processed in parallel + /// by `GetFileInfoGenerator`. + int32_t directory_readahead = kDefaultDirectoryReadahead; + + /// EXPERIMENTAL: The maximum number of entries aggregated into each + /// FileInfoVector chunk by `GetFileInfoGenerator`. + /// + /// Since each FileInfo entry needs a separate `stat` system call, a + /// directory with a very large number of files may take a lot of time to + /// process entirely. By generating a FileInfoVector after this chunk + /// size is reached, we ensure FileInfo entries can start being consumed + /// from the FileInfoGenerator with less initial latency. + int32_t file_info_batch_size = kDefaultFileInfoBatchSize; + + /// \brief Initialize with defaults + static LocalFileSystemOptions Defaults(); + + bool Equals(const LocalFileSystemOptions& other) const; + + static Result FromUri(const ::arrow::util::Uri& uri, + std::string* out_path); +}; + +/// \brief A FileSystem implementation accessing files on the local machine. +/// +/// This class handles only `/`-separated paths. If desired, conversion +/// from Windows backslash-separated paths should be done by the caller. +/// Details such as symlinks are abstracted away (symlinks are always +/// followed, except when deleting an entry). 
+class ARROW_EXPORT LocalFileSystem : public FileSystem { + public: + explicit LocalFileSystem(const io::IOContext& = io::default_io_context()); + explicit LocalFileSystem(const LocalFileSystemOptions&, + const io::IOContext& = io::default_io_context()); + ~LocalFileSystem() override; + + std::string type_name() const override { return "local"; } + + Result NormalizePath(std::string path) override; + Result PathFromUri(const std::string& uri_string) const override; + Result MakeUri(std::string path) const override; + + bool Equals(const FileSystem& other) const override; + + LocalFileSystemOptions options() const { return options_; } + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo(const FileSelector& select) override; + FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + protected: + LocalFileSystemOptions options_; +}; + +} // namespace fs +} // namespace arrow diff --git 
a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/mockfs.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/mockfs.h new file mode 100644 index 0000000000000000000000000000000000000000..5626560e08363f20c5479a1b5f540d6aed1a2d04 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/mockfs.h @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/util/windows_fixup.h" + +namespace arrow::fs::internal { + +struct MockDirInfo { + std::string full_path; + TimePoint mtime; + + bool operator==(const MockDirInfo& other) const { + return mtime == other.mtime && full_path == other.full_path; + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream&, const MockDirInfo&); +}; + +struct MockFileInfo { + std::string full_path; + TimePoint mtime; + std::string_view data; + + bool operator==(const MockFileInfo& other) const { + return mtime == other.mtime && full_path == other.full_path && data == other.data; + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream&, const MockFileInfo&); +}; + +/// A mock FileSystem implementation that holds its contents in memory. +/// +/// Useful for validating the FileSystem API, writing conformance suite, +/// and bootstrapping FileSystem-based APIs. 
+class ARROW_EXPORT MockFileSystem : public FileSystem { + public: + explicit MockFileSystem(TimePoint current_time, + const io::IOContext& = io::default_io_context()); + ~MockFileSystem() override; + + std::string type_name() const override { return "mock"; } + + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + // Contents-dumping helpers to ease testing. + // Output is lexicographically-ordered by full path. + std::vector AllDirs(); + std::vector AllFiles(); + + // Create a File with a content from a string. + Status CreateFile(const std::string& path, std::string_view content, + bool recursive = true); + + // Create a MockFileSystem out of (empty) FileInfo. The content of every + // file is empty and of size 0. All directories will be created recursively. 
+ static Result> Make(TimePoint current_time, + const std::vector& infos); + + class Impl; + + protected: + std::unique_ptr impl_; +}; + +class ARROW_EXPORT MockAsyncFileSystem : public MockFileSystem { + public: + explicit MockAsyncFileSystem(TimePoint current_time, + const io::IOContext& io_context = io::default_io_context()) + : MockFileSystem(current_time, io_context) { + default_async_is_sync_ = false; + } + + FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override; +}; + +} // namespace arrow::fs::internal diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/path_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/path_util.h new file mode 100644 index 0000000000000000000000000000000000000000..d49d9d2efa7f6aa92e568f8305c15dc06c86c806 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/path_util.h @@ -0,0 +1,178 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/type_fwd.h" + +namespace arrow { +namespace fs { +namespace internal { + +constexpr char kSep = '/'; + +// Computations on abstract paths (not local paths with system-dependent behaviour). +// Abstract paths are typically used in URIs. + +// Split an abstract path into its individual components. +ARROW_EXPORT +std::vector SplitAbstractPath(const std::string& path, char sep = kSep); + +// Slice the individual components of an abstract path and combine them +// +// If offset or length are negative then an empty string is returned +// If offset is >= the number of components then an empty string is returned +// If offset + length is >= the number of components then length is truncated +ARROW_EXPORT +std::string SliceAbstractPath(const std::string& path, int offset, int length, + char sep = kSep); + +// Return the extension of the file +ARROW_EXPORT std::string GetAbstractPathExtension(const std::string& s); + +// Return the depth (number of components) of an abstract path +// +// Trailing slashes do not count towards depth +// Leading slashes do not count towards depth +// +// The root path ("/") has depth 0 +ARROW_EXPORT int GetAbstractPathDepth(std::string_view path); + +// Return the parent directory and basename of an abstract path. Both values may be +// empty. +ARROW_EXPORT +std::pair GetAbstractPathParent(const std::string& s); + +// Validate an abstract path. +ARROW_EXPORT +Status ValidateAbstractPath(std::string_view path); + +// Validate the components of an abstract path. +ARROW_EXPORT +Status ValidateAbstractPathParts(const std::vector& parts); + +// Append a non-empty stem to an abstract path. +ARROW_EXPORT +std::string ConcatAbstractPath(std::string_view base, std::string_view stem); + +// Make path relative to base, if it starts with base. Otherwise error out. 
+ARROW_EXPORT +Result MakeAbstractPathRelative(const std::string& base, + const std::string& path); + +ARROW_EXPORT +std::string EnsureLeadingSlash(std::string_view s); + +ARROW_EXPORT +std::string_view RemoveLeadingSlash(std::string_view s); + +ARROW_EXPORT +std::string EnsureTrailingSlash(std::string_view s); + +/// \brief remove the forward slash (if any) from the given path +/// \param s the input path +/// \param preserve_root if true, allow a path of just "/" to remain unchanged +ARROW_EXPORT +std::string_view RemoveTrailingSlash(std::string_view s, bool preserve_root = false); + +ARROW_EXPORT +Status AssertNoTrailingSlash(std::string_view s); + +inline bool HasTrailingSlash(std::string_view s) { + return !s.empty() && s.back() == kSep; +} + +inline bool HasLeadingSlash(std::string_view s) { + return !s.empty() && s.front() == kSep; +} + +ARROW_EXPORT +bool IsAncestorOf(std::string_view ancestor, std::string_view descendant); + +ARROW_EXPORT +std::optional RemoveAncestor(std::string_view ancestor, + std::string_view descendant); + +/// Return a vector of ancestors between a base path and a descendant. +/// For example, +/// +/// AncestorsFromBasePath("a/b", "a/b/c/d/e") -> ["a/b/c", "a/b/c/d"] +ARROW_EXPORT +std::vector AncestorsFromBasePath(std::string_view base_path, + std::string_view descendant); + +/// Given a vector of paths of directories which must be created, produce a the minimal +/// subset for passing to CreateDir(recursive=true) by removing redundant parent +/// directories +ARROW_EXPORT +std::vector MinimalCreateDirSet(std::vector dirs); + +// Join the components of an abstract path. 
+template +std::string JoinAbstractPath(StringIt it, StringIt end, char sep = kSep) { + std::string path; + for (; it != end; ++it) { + if (it->empty()) continue; + + if (!path.empty()) { + path += sep; + } + path += *it; + } + return path; +} + +template +std::string JoinAbstractPath(const StringRange& range, char sep = kSep) { + return JoinAbstractPath(range.begin(), range.end(), sep); +} + +/// Convert slashes to backslashes, on all platforms. Mostly useful for testing. +ARROW_EXPORT +std::string ToBackslashes(std::string_view s); + +/// Ensure a local path is abstract, by converting backslashes to regular slashes +/// on Windows. Return the path unchanged on other systems. +ARROW_EXPORT +std::string ToSlashes(std::string_view s); + +ARROW_EXPORT +bool IsEmptyPath(std::string_view s); + +ARROW_EXPORT +bool IsLikelyUri(std::string_view s); + +class ARROW_EXPORT Globber { + public: + ~Globber(); + explicit Globber(std::string pattern); + bool Matches(const std::string& path); + + protected: + struct Impl; + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace fs +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3_test_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3_test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..e270a6e1c469abdc8905b6f00da6510bbb585258 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3_test_util.h @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include + +#include "arrow/filesystem/s3fs.h" +#include "arrow/status.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/testing/util.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace fs { + +// A minio test server, managed as a child process + +class MinioTestServer { + public: + MinioTestServer(); + ~MinioTestServer(); + + Status Start(); + + Status Stop(); + + std::string connect_string() const; + + std::string access_key() const; + + std::string secret_key() const; + + private: + struct Impl; + std::unique_ptr impl_; +}; + +// A Minio "environment" that spawns Minio processes in advances, such as +// to hide process launch latencies during testing. + +class MinioTestEnvironment : public ::testing::Environment { + public: + MinioTestEnvironment(); + ~MinioTestEnvironment(); + + void SetUp() override; + + Result> GetOneServer(); + + protected: + struct Impl; + std::unique_ptr impl_; +}; + +// A global test "environment", to ensure that the S3 API is initialized before +// running unit tests. + +class S3Environment : public ::testing::Environment { + public: + // We set this environment variable to speed up tests by ensuring + // DefaultAWSCredentialsProviderChain does not query (inaccessible) + // EC2 metadata endpoint. + // This must be done before spawning any Minio child process to avoid any race + // condition accessing environment variables. 
+ S3Environment() : ec2_metadata_disabled_guard_("AWS_EC2_METADATA_DISABLED", "true") {} + + void SetUp() override { + // Change this to increase logging during tests + S3GlobalOptions options; + options.log_level = S3LogLevel::Fatal; + ASSERT_OK(InitializeS3(options)); + } + + void TearDown() override { ASSERT_OK(FinalizeS3()); } + + private: + EnvVarGuard ec2_metadata_disabled_guard_; +}; + +} // namespace fs +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3fs.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3fs.h new file mode 100644 index 0000000000000000000000000000000000000000..85d5ff8fed5538d668dc1f63e9ce20fcfca2457e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3fs.h @@ -0,0 +1,422 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/util/macros.h" +#include "arrow/util/uri.h" + +namespace Aws { +namespace Auth { + +class AWSCredentialsProvider; +class STSAssumeRoleCredentialsProvider; + +} // namespace Auth +namespace STS { +class STSClient; +} +} // namespace Aws + +namespace arrow { +namespace fs { + +/// Options for using a proxy for S3 +struct ARROW_EXPORT S3ProxyOptions { + std::string scheme; + std::string host; + int port = -1; + std::string username; + std::string password; + + /// Initialize from URI such as http://username:password@host:port + /// or http://host:port + static Result FromUri(const std::string& uri); + static Result FromUri(const ::arrow::util::Uri& uri); + + bool Equals(const S3ProxyOptions& other) const; +}; + +enum class S3CredentialsKind : int8_t { + /// Anonymous access (no credentials used) + Anonymous, + /// Use default AWS credentials, configured through environment variables + Default, + /// Use explicitly-provided access key pair + Explicit, + /// Assume role through a role ARN + Role, + /// Use web identity token to assume role, configured through environment variables + WebIdentity +}; + +/// Pure virtual class for describing custom S3 retry strategies +class ARROW_EXPORT S3RetryStrategy { + public: + virtual ~S3RetryStrategy() = default; + + /// Simple struct where each field corresponds to a field in Aws::Client::AWSError + struct AWSErrorDetail { + /// Corresponds to AWSError::GetErrorType() + int error_type; + /// Corresponds to AWSError::GetMessage() + std::string message; + /// Corresponds to AWSError::GetExceptionName() + std::string exception_name; + /// Corresponds to AWSError::ShouldRetry() + bool should_retry; + }; + /// Returns true if the S3 request resulting in the provided error should be retried. 
+ virtual bool ShouldRetry(const AWSErrorDetail& error, int64_t attempted_retries) = 0; + /// Returns the time in milliseconds the S3 client should sleep for until retrying. + virtual int64_t CalculateDelayBeforeNextRetry(const AWSErrorDetail& error, + int64_t attempted_retries) = 0; + /// Returns a stock AWS Default retry strategy. + static std::shared_ptr GetAwsDefaultRetryStrategy( + int64_t max_attempts); + /// Returns a stock AWS Standard retry strategy. + static std::shared_ptr GetAwsStandardRetryStrategy( + int64_t max_attempts); +}; + +/// Options for the S3FileSystem implementation. +struct ARROW_EXPORT S3Options { + /// \brief AWS region to connect to. + /// + /// If unset, the AWS SDK will choose a default value. The exact algorithm + /// depends on the SDK version. Before 1.8, the default is hardcoded + /// to "us-east-1". Since 1.8, several heuristics are used to determine + /// the region (environment variables, configuration profile, EC2 metadata + /// server). + std::string region; + + /// \brief Socket connection timeout, in seconds + /// + /// If negative, the AWS SDK default value is used (typically 1 second). + double connect_timeout = -1; + + /// \brief Socket read timeout on Windows and macOS, in seconds + /// + /// If negative, the AWS SDK default value is used (typically 3 seconds). + /// This option is ignored on non-Windows, non-macOS systems. + double request_timeout = -1; + + /// If non-empty, override region with a connect string such as "localhost:9000" + // XXX perhaps instead take a URL like "http://localhost:9000"? + std::string endpoint_override; + /// S3 connection transport, default "https" + std::string scheme = "https"; + + /// ARN of role to assume + std::string role_arn; + /// Optional identifier for an assumed role session. 
+ std::string session_name; + /// Optional external identifier to pass to STS when assuming a role + std::string external_id; + /// Frequency (in seconds) to refresh temporary credentials from assumed role + int load_frequency = 900; + + /// If connection is through a proxy, set options here + S3ProxyOptions proxy_options; + + /// AWS credentials provider + std::shared_ptr credentials_provider; + + /// Type of credentials being used. Set along with credentials_provider. + S3CredentialsKind credentials_kind = S3CredentialsKind::Default; + + /// Whether to use virtual addressing of buckets + /// + /// If true, then virtual addressing is always enabled. + /// If false, then virtual addressing is only enabled if `endpoint_override` is empty. + /// + /// This can be used for non-AWS backends that only support virtual hosted-style access. + bool force_virtual_addressing = false; + + /// Whether OutputStream writes will be issued in the background, without blocking. + bool background_writes = true; + + /// Whether to allow creation of buckets + /// + /// When S3FileSystem creates new buckets, it does not pass any non-default settings. + /// In AWS S3, the bucket and all objects will be not publicly visible, and there + /// will be no bucket policies and no resource tags. To have more control over how + /// buckets are created, use a different API to create them. + bool allow_bucket_creation = false; + + /// Whether to allow deletion of buckets + bool allow_bucket_deletion = false; + + /// Whether to allow pessimistic directory creation in CreateDir function + /// + /// By default, CreateDir function will try to create the directory without checking its + /// existence. It's an optimization to try directory creation and catch the error, + /// rather than issue two dependent I/O calls. + /// Though for key/value storage like Google Cloud Storage, too many creation calls will + /// breach the rate limit for object mutation operations and cause serious consequences. 
+ /// It's also possible you don't have creation access for the parent directory. Set it + /// to be true to address these scenarios. + bool check_directory_existence_before_creation = false; + + /// Whether to allow file-open methods to return before the actual open. + /// + /// Enabling this may reduce the latency of `OpenInputStream`, `OpenOutputStream`, + /// and similar methods, by reducing the number of roundtrips necessary. It may also + /// allow usage of more efficient S3 APIs for small files. + /// The downside is that failure conditions such as attempting to open a file in a + /// non-existing bucket will only be reported when actual I/O is done (at worse, + /// when attempting to close the file). + bool allow_delayed_open = false; + + /// \brief Default metadata for OpenOutputStream. + /// + /// This will be ignored if non-empty metadata is passed to OpenOutputStream. + std::shared_ptr default_metadata; + + /// Optional retry strategy to determine which error types should be retried, and the + /// delay between retries. + std::shared_ptr retry_strategy; + + S3Options(); + + /// Configure with the default AWS credentials provider chain. + void ConfigureDefaultCredentials(); + + /// Configure with anonymous credentials. This will only let you access public buckets. + void ConfigureAnonymousCredentials(); + + /// Configure with explicit access and secret key. + void ConfigureAccessKey(const std::string& access_key, const std::string& secret_key, + const std::string& session_token = ""); + + /// Configure with credentials from an assumed role. 
+ void ConfigureAssumeRoleCredentials( + const std::string& role_arn, const std::string& session_name = "", + const std::string& external_id = "", int load_frequency = 900, + const std::shared_ptr& stsClient = NULLPTR); + + /// Configure with credentials from role assumed using a web identity token + void ConfigureAssumeRoleWithWebIdentityCredentials(); + + std::string GetAccessKey() const; + std::string GetSecretKey() const; + std::string GetSessionToken() const; + + bool Equals(const S3Options& other) const; + + /// \brief Initialize with default credentials provider chain + /// + /// This is recommended if you use the standard AWS environment variables + /// and/or configuration file. + static S3Options Defaults(); + + /// \brief Initialize with anonymous credentials. + /// + /// This will only let you access public buckets. + static S3Options Anonymous(); + + /// \brief Initialize with explicit access and secret key. + /// + /// Optionally, a session token may also be provided for temporary credentials + /// (from STS). + static S3Options FromAccessKey(const std::string& access_key, + const std::string& secret_key, + const std::string& session_token = ""); + + /// \brief Initialize from an assumed role. + static S3Options FromAssumeRole( + const std::string& role_arn, const std::string& session_name = "", + const std::string& external_id = "", int load_frequency = 900, + const std::shared_ptr& stsClient = NULLPTR); + + /// \brief Initialize from an assumed role with web-identity. + /// Uses the AWS SDK which uses environment variables to + /// generate temporary credentials. + static S3Options FromAssumeRoleWithWebIdentity(); + + static Result FromUri(const ::arrow::util::Uri& uri, + std::string* out_path = NULLPTR); + static Result FromUri(const std::string& uri, + std::string* out_path = NULLPTR); +}; + +/// S3-backed FileSystem implementation. 
+/// +/// Some implementation notes: +/// - buckets are special and the operations available on them may be limited +/// or more expensive than desired. +class ARROW_EXPORT S3FileSystem : public FileSystem { + public: + ~S3FileSystem() override; + + std::string type_name() const override { return "s3"; } + + /// Return the original S3 options when constructing the filesystem + S3Options options() const; + /// Return the actual region this filesystem connects to + std::string region() const; + + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::DeleteDirContentsAsync; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo(const FileSelector& select) override; + + FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + Future<> DeleteDirContentsAsync(const std::string& path, bool missing_dir_ok) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + /// Create a sequential input stream for reading from a S3 object. + /// + /// NOTE: Reads from the stream will be synchronous and unbuffered. + /// You way want to wrap the stream in a BufferedInputStream or use + /// a custom readahead strategy to avoid idle waits. 
+ Result> OpenInputStream( + const std::string& path) override; + /// Create a sequential input stream for reading from a S3 object. + /// + /// This override avoids a HEAD request by assuming the FileInfo + /// contains correct information. + Result> OpenInputStream(const FileInfo& info) override; + + /// Create a random access file for reading from a S3 object. + /// + /// See OpenInputStream for performance notes. + Result> OpenInputFile( + const std::string& path) override; + /// Create a random access file for reading from a S3 object. + /// + /// This override avoids a HEAD request by assuming the FileInfo + /// contains correct information. + Result> OpenInputFile( + const FileInfo& info) override; + + /// Create a sequential output stream for writing to a S3 object. + /// + /// NOTE: Writes to the stream will be buffered. Depending on + /// S3Options.background_writes, they can be synchronous or not. + /// It is recommended to enable background_writes unless you prefer + /// implementing your own background execution strategy. + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + /// Create a S3FileSystem instance from the given options. 
+ static Result> Make( + const S3Options& options, const io::IOContext& = io::default_io_context()); + + protected: + explicit S3FileSystem(const S3Options& options, const io::IOContext&); + + class Impl; + std::shared_ptr impl_; +}; + +enum class S3LogLevel : int8_t { Off, Fatal, Error, Warn, Info, Debug, Trace }; + +struct ARROW_EXPORT S3GlobalOptions { + S3LogLevel log_level; + /// The number of threads to configure when creating AWS' I/O event loop + /// + /// Defaults to 1 as recommended by AWS' doc when the # of connections is + /// expected to be, at most, in the hundreds + /// + /// For more details see Aws::Crt::Io::EventLoopGroup + int num_event_loop_threads = 1; + + /// \brief Initialize with default options + /// + /// For log_level, this method first tries to extract a suitable value from the + /// environment variable ARROW_S3_LOG_LEVEL. + static S3GlobalOptions Defaults(); +}; + +/// \brief Initialize the S3 APIs with the specified set of options. +/// +/// It is required to call this function at least once before using S3FileSystem. +/// +/// Once this function is called you MUST call FinalizeS3 before the end of the +/// application in order to avoid a segmentation fault at shutdown. +ARROW_EXPORT +Status InitializeS3(const S3GlobalOptions& options); + +/// \brief Ensure the S3 APIs are initialized, but only if not already done. +/// +/// If necessary, this will call InitializeS3() with some default options. +ARROW_EXPORT +Status EnsureS3Initialized(); + +/// Whether S3 was initialized, and not finalized. +ARROW_EXPORT +bool IsS3Initialized(); + +/// Whether S3 was finalized. +ARROW_EXPORT +bool IsS3Finalized(); + +/// \brief Shutdown the S3 APIs. +/// +/// This can wait for some S3 concurrent calls to finish so as to avoid +/// race conditions. +/// After this function has been called, all S3 calls will fail with an error. 
+/// +/// Calls to InitializeS3() and FinalizeS3() should be serialized by the +/// application (this also applies to EnsureS3Initialized() and +/// EnsureS3Finalized()). +ARROW_EXPORT +Status FinalizeS3(); + +/// \brief Ensure the S3 APIs are shutdown, but only if not already done. +/// +/// If necessary, this will call FinalizeS3(). +ARROW_EXPORT +Status EnsureS3Finalized(); + +ARROW_EXPORT +Result ResolveS3BucketRegion(const std::string& bucket); + +} // namespace fs +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/test_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..04000c14e9c2a2ce9493de69d7e9611913b69297 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/test_util.h @@ -0,0 +1,259 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/filesystem/mockfs.h" +#include "arrow/testing/visibility.h" +#include "arrow/util/counting_semaphore.h" + +namespace arrow { +namespace fs { + +static constexpr double kTimeSlack = 2.0; // In seconds + +static inline FileInfo File(std::string path) { + return FileInfo(std::move(path), FileType::File); +} + +static inline FileInfo Dir(std::string path) { + return FileInfo(std::move(path), FileType::Directory); +} + +// A subclass of MockFileSystem that blocks operations until an unlock method is +// called. +// +// This is intended for testing fine-grained ordering of filesystem operations. +// +// N.B. Only OpenOutputStream supports gating at the moment but this is simply because +// it is all that has been needed so far. Feel free to add support for more methods +// as required. +class ARROW_TESTING_EXPORT GatedMockFilesystem : public internal::MockFileSystem { + public: + GatedMockFilesystem(TimePoint current_time, + const io::IOContext& = io::default_io_context()); + ~GatedMockFilesystem() override; + + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata = {}) override; + + // Wait until at least num_waiters are waiting on OpenOutputStream + Status WaitForOpenOutputStream(uint32_t num_waiters); + // Unlock `num_waiters` individual calls to OpenOutputStream + Status UnlockOpenOutputStream(uint32_t num_waiters); + + private: + util::CountingSemaphore open_output_sem_; +}; + +ARROW_TESTING_EXPORT +void CreateFile(FileSystem* fs, const std::string& path, const std::string& data); + +// Sort a vector of FileInfo by lexicographic path order +ARROW_TESTING_EXPORT +void SortInfos(FileInfoVector* infos); + +// Create a copy of a FileInfo vector sorted by lexicographic path order +ARROW_TESTING_EXPORT +FileInfoVector SortedInfos(const FileInfoVector& infos); + +ARROW_TESTING_EXPORT +void 
CollectFileInfoGenerator(FileInfoGenerator gen, FileInfoVector* out_infos); + +ARROW_TESTING_EXPORT +void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type); + +ARROW_TESTING_EXPORT +void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type, + TimePoint mtime); + +ARROW_TESTING_EXPORT +void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type, + TimePoint mtime, int64_t size); + +ARROW_TESTING_EXPORT +void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type, + int64_t size); + +ARROW_TESTING_EXPORT +void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type); + +ARROW_TESTING_EXPORT +void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type, + TimePoint mtime); + +ARROW_TESTING_EXPORT +void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type, + TimePoint mtime, int64_t size); + +ARROW_TESTING_EXPORT +void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type, int64_t size); + +ARROW_TESTING_EXPORT +void AssertFileContents(FileSystem* fs, const std::string& path, + const std::string& expected_data); + +template +void AssertDurationBetween(Duration d, double min_secs, double max_secs) { + auto seconds = std::chrono::duration_cast>(d); + ASSERT_GE(seconds.count(), min_secs); + ASSERT_LE(seconds.count(), max_secs); +} + +// Generic tests for FileSystem implementations. +// To use this class, subclass both from it and ::testing::Test, +// implement GetEmptyFileSystem(), and use GENERIC_FS_TEST_FUNCTIONS() +// to define the various tests. 
+class ARROW_TESTING_EXPORT GenericFileSystemTest { + public: + virtual ~GenericFileSystemTest(); + + void TestEmpty(); + void TestNormalizePath(); + void TestCreateDir(); + void TestDeleteDir(); + void TestDeleteDirContents(); + void TestDeleteRootDirContents(); + void TestDeleteFile(); + void TestDeleteFiles(); + void TestMoveFile(); + void TestMoveDir(); + void TestCopyFile(); + void TestGetFileInfo(); + void TestGetFileInfoVector(); + void TestGetFileInfoSelector(); + void TestGetFileInfoSelectorWithRecursion(); + void TestGetFileInfoAsync(); + void TestGetFileInfoGenerator(); + void TestOpenOutputStream(); + void TestOpenAppendStream(); + void TestOpenInputStream(); + void TestOpenInputStreamWithFileInfo(); + void TestOpenInputStreamAsync(); + void TestOpenInputFile(); + void TestOpenInputFileWithFileInfo(); + void TestOpenInputFileAsync(); + void TestSpecialChars(); + + protected: + // This function should return the filesystem under test. + virtual std::shared_ptr GetEmptyFileSystem() = 0; + + // Override the following functions to specify deviations from expected + // filesystem semantics. + // - Whether the filesystem may "implicitly" create intermediate directories + virtual bool have_implicit_directories() const { return false; } + // - Whether the filesystem may allow writing a file "over" a directory + virtual bool allow_write_file_over_dir() const { return false; } + // - Whether the filesystem may allow writing a directory "over" a file, + // for example copying file "A" to "B/C" while "B" exists and is a file. 
+ virtual bool allow_write_implicit_dir_over_file() const { return false; } + // - Whether the filesystem allows reading a directory + virtual bool allow_read_dir_as_file() const { return false; } + // - Whether the filesystem allows moving a file + virtual bool allow_move_file() const { return true; } + // - Whether the filesystem allows moving a directory + virtual bool allow_move_dir() const { return true; } + // - Whether the filesystem allows moving a directory "over" a non-empty destination + virtual bool allow_move_dir_over_non_empty_dir() const { return false; } + // - Whether the filesystem allows appending to a file + virtual bool allow_append_to_file() const { return true; } + // - Whether the filesystem allows appending to a nonexistent file + virtual bool allow_append_to_new_file() const { return true; } + // - Whether the filesystem supports directory modification times + virtual bool have_directory_mtimes() const { return true; } + // - Whether some directory tree deletion tests may fail randomly + virtual bool have_flaky_directory_tree_deletion() const { return false; } + // - Whether the filesystem stores some metadata alongside files + virtual bool have_file_metadata() const { return false; } + // - Whether the filesystem has a false positive memory leak with generator + virtual bool have_false_positive_memory_leak_with_generator() const { return false; } + + void TestEmpty(FileSystem* fs); + void TestNormalizePath(FileSystem* fs); + void TestCreateDir(FileSystem* fs); + void TestDeleteDir(FileSystem* fs); + void TestDeleteDirContents(FileSystem* fs); + void TestDeleteRootDirContents(FileSystem* fs); + void TestDeleteFile(FileSystem* fs); + void TestDeleteFiles(FileSystem* fs); + void TestMoveFile(FileSystem* fs); + void TestMoveDir(FileSystem* fs); + void TestCopyFile(FileSystem* fs); + void TestGetFileInfo(FileSystem* fs); + void TestGetFileInfoVector(FileSystem* fs); + void TestGetFileInfoSelector(FileSystem* fs); + void 
TestGetFileInfoSelectorWithRecursion(FileSystem* fs); + void TestGetFileInfoAsync(FileSystem* fs); + void TestGetFileInfoGenerator(FileSystem* fs); + void TestOpenOutputStream(FileSystem* fs); + void TestOpenAppendStream(FileSystem* fs); + void TestOpenInputStream(FileSystem* fs); + void TestOpenInputStreamWithFileInfo(FileSystem* fs); + void TestOpenInputStreamAsync(FileSystem* fs); + void TestOpenInputFile(FileSystem* fs); + void TestOpenInputFileWithFileInfo(FileSystem* fs); + void TestOpenInputFileAsync(FileSystem* fs); + void TestSpecialChars(FileSystem* fs); +}; + +#define GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, NAME) \ + TEST_MACRO(TEST_CLASS, NAME) { this->Test##NAME(); } + +#define GENERIC_FS_TEST_FUNCTIONS_MACROS(TEST_MACRO, TEST_CLASS) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, Empty) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, NormalizePath) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, CreateDir) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteDir) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteDirContents) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteRootDirContents) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteFile) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteFiles) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, MoveFile) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, MoveDir) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, CopyFile) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfo) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoVector) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoSelector) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoSelectorWithRecursion) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoAsync) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoGenerator) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, 
OpenOutputStream) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenAppendStream) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputStream) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputStreamWithFileInfo) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputStreamAsync) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputFile) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputFileWithFileInfo) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputFileAsync) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, SpecialChars) + +#define GENERIC_FS_TEST_FUNCTIONS(TEST_CLASS) \ + GENERIC_FS_TEST_FUNCTIONS_MACROS(TEST_F, TEST_CLASS) + +#define GENERIC_FS_TYPED_TEST_FUNCTIONS(TEST_CLASS) \ + GENERIC_FS_TEST_FUNCTIONS_MACROS(TYPED_TEST, TEST_CLASS) + +} // namespace fs +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/type_fwd.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..92c70799be16c73804353a1f3bcae8b5a3674057 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/type_fwd.h @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +namespace arrow { +namespace fs { + +/// \brief FileSystem entry type +enum class FileType : int8_t { + /// Entry is not found + NotFound, + /// Entry exists but its type is unknown + /// + /// This can designate a special file such as a Unix socket or character + /// device, or Windows NUL / CON / ... + Unknown, + /// Entry is a regular file + File, + /// Entry is a directory + Directory +}; + +struct FileInfo; + +struct FileSelector; + +class FileSystem; +class AzureFileSystem; +class GcsFileSystem; +class LocalFileSystem; +class S3FileSystem; +class SlowFileSystem; +class SubTreeFileSystem; + +} // namespace fs +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_auth.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_auth.h new file mode 100644 index 0000000000000000000000000000000000000000..9dad36aa0948906ebb2447c0030cf117c8549c2c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_auth.h @@ -0,0 +1,62 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include "arrow/flight/visibility.h" +#include "arrow/status.h" + +namespace arrow { + +namespace flight { + +/// \brief A reader for messages from the server during an +/// authentication handshake. +class ARROW_FLIGHT_EXPORT ClientAuthReader { + public: + virtual ~ClientAuthReader() = default; + virtual Status Read(std::string* response) = 0; +}; + +/// \brief A writer for messages to the server during an +/// authentication handshake. +class ARROW_FLIGHT_EXPORT ClientAuthSender { + public: + virtual ~ClientAuthSender() = default; + virtual Status Write(const std::string& token) = 0; +}; + +/// \brief An authentication implementation for a Flight service. +/// Authentication includes both an initial negotiation and a per-call +/// token validation. Implementations may choose to use either or both +/// mechanisms. +class ARROW_FLIGHT_EXPORT ClientAuthHandler { + public: + virtual ~ClientAuthHandler() = default; + /// \brief Authenticate the client on initial connection. The client + /// can send messages to/read responses from the server at any time. + /// \return Status OK if authenticated successfully + virtual Status Authenticate(ClientAuthSender* outgoing, ClientAuthReader* incoming) = 0; + /// \brief Get a per-call token. + /// \param[out] token The token to send to the server. + virtual Status GetToken(std::string* token) = 0; +}; + +} // namespace flight +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_cookie_middleware.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_cookie_middleware.h new file mode 100644 index 0000000000000000000000000000000000000000..6a56a632dfbd220ee1aaf749f1c7fb2b9ab0852e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_cookie_middleware.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Middleware implementation for sending and receiving HTTP cookies. + +#pragma once + +#include + +#include "arrow/flight/client_middleware.h" + +namespace arrow { +namespace flight { + +/// \brief Returns a ClientMiddlewareFactory that handles sending and receiving cookies. +ARROW_FLIGHT_EXPORT std::shared_ptr GetCookieFactory(); + +} // namespace flight +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_middleware.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_middleware.h new file mode 100644 index 0000000000000000000000000000000000000000..8e3126553a953b9d8f2fcdb94b72f9214b690de1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_middleware.h @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Interfaces for defining middleware for Flight clients. Currently +// experimental. + +#pragma once + +#include + +#include "arrow/flight/middleware.h" +#include "arrow/flight/visibility.h" // IWYU pragma: keep +#include "arrow/status.h" + +namespace arrow { +namespace flight { + +/// \brief Client-side middleware for a call, instantiated per RPC. +/// +/// Middleware should be fast and must be infallible: there is no way +/// to reject the call or report errors from the middleware instance. +class ARROW_FLIGHT_EXPORT ClientMiddleware { + public: + virtual ~ClientMiddleware() = default; + + /// \brief A callback before headers are sent. Extra headers can be + /// added, but existing ones cannot be read. + virtual void SendingHeaders(AddCallHeaders* outgoing_headers) = 0; + + /// \brief A callback when headers are received from the server. + /// + /// This may be called more than once, since servers send both + /// headers and trailers. Some implementations (e.g. gRPC-Java, and + /// hence Arrow Flight in Java) may consolidate headers into + /// trailers if the RPC errored. + virtual void ReceivedHeaders(const CallHeaders& incoming_headers) = 0; + + /// \brief A callback after the call has completed. + virtual void CallCompleted(const Status& status) = 0; +}; + +/// \brief A factory for new middleware instances. +/// +/// If added to a client, this will be called for each RPC (including +/// Handshake) to give the opportunity to intercept the call. 
+/// +/// It is guaranteed that all client middleware methods are called +/// from the same thread that calls the RPC method implementation. +class ARROW_FLIGHT_EXPORT ClientMiddlewareFactory { + public: + virtual ~ClientMiddlewareFactory() = default; + + /// \brief A callback for the start of a new call. + /// + /// \param info Information about the call. + /// \param[out] middleware The middleware instance for this call. If + /// unset, will not add middleware to this call instance from + /// this factory. + virtual void StartCall(const CallInfo& info, + std::unique_ptr* middleware) = 0; +}; + +} // namespace flight +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_tracing_middleware.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_tracing_middleware.h new file mode 100644 index 0000000000000000000000000000000000000000..3a8b665ed6c0f0021abedea1917a4b4501157179 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_tracing_middleware.h @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Middleware implementation for propagating OpenTelemetry spans. 
+ +#pragma once + +#include + +#include "arrow/flight/client_middleware.h" + +namespace arrow { +namespace flight { + +/// \brief Returns a ClientMiddlewareFactory that handles sending OpenTelemetry spans. +ARROW_FLIGHT_EXPORT std::shared_ptr +MakeTracingClientMiddlewareFactory(); + +} // namespace flight +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/middleware.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/middleware.h new file mode 100644 index 0000000000000000000000000000000000000000..d717e396a8b68c749e53eeb241599ae28986d6da --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/middleware.h @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Interfaces for defining middleware for Flight clients and +// servers. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/flight/types.h" +#include "arrow/status.h" + +namespace arrow { +namespace flight { + +/// \brief A write-only wrapper around headers for an RPC call. +class ARROW_FLIGHT_EXPORT AddCallHeaders { + public: + virtual ~AddCallHeaders() = default; + + /// \brief Add a header to be sent to the client. 
+ /// + /// \param[in] key The header name. Must be lowercase ASCII; some + /// transports may reject invalid header names. + /// \param[in] value The header value. Some transports may only + /// accept binary header values if the header name ends in "-bin". + virtual void AddHeader(const std::string& key, const std::string& value) = 0; +}; + +/// \brief An enumeration of the RPC methods Flight implements. +enum class FlightMethod : char { + Invalid = 0, + Handshake = 1, + ListFlights = 2, + GetFlightInfo = 3, + GetSchema = 4, + DoGet = 5, + DoPut = 6, + DoAction = 7, + ListActions = 8, + DoExchange = 9, + PollFlightInfo = 10, +}; + +/// \brief Get a human-readable name for a Flight method. +ARROW_FLIGHT_EXPORT +std::string ToString(FlightMethod method); + +/// \brief Information about an instance of a Flight RPC. +struct ARROW_FLIGHT_EXPORT CallInfo { + public: + /// \brief The RPC method of this call. + FlightMethod method; +}; + +} // namespace flight +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/pch.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/pch.h new file mode 100644 index 0000000000000000000000000000000000000000..fff107fa8fcf4b3871cf48266ac858db33e5f5c2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/pch.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Often-used headers, for precompiling. +// If updating this header, please make sure you check compilation speed +// before checking in. Adding headers which are not used extremely often +// may incur a slowdown, since it makes the precompiled header heavier to load. + +#include "arrow/flight/client.h" +#include "arrow/flight/server.h" +#include "arrow/flight/types.h" +#include "arrow/pch.h" diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/platform.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/platform.h new file mode 100644 index 0000000000000000000000000000000000000000..498c87c5b7dc9ae94cb1fc1fa59e79338350493e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/platform.h @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Internal header. Platform-specific definitions for Flight. + +#pragma once + +#ifdef _MSC_VER + +// The protobuf documentation says that C4251 warnings when using the +// library are spurious and suppressed when the build the library and +// compiler, but must be also suppressed in downstream projects +# pragma warning(disable : 4251) + +#endif // _MSC_VER + +#include "arrow/util/config.h" // IWYU pragma: keep diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h new file mode 100644 index 0000000000000000000000000000000000000000..8d73353ab16c10dcc6742632f082a2b4aca907b8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h @@ -0,0 +1,327 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Interfaces to use for defining Flight RPC servers. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/flight/server_auth.h" +#include "arrow/flight/type_fwd.h" +#include "arrow/flight/types.h" // IWYU pragma: keep +#include "arrow/flight/visibility.h" // IWYU pragma: keep +#include "arrow/ipc/dictionary.h" +#include "arrow/ipc/options.h" +#include "arrow/record_batch.h" + +namespace arrow { + +class Schema; +class Status; + +namespace flight { + +/// \brief Interface that produces a sequence of IPC payloads to be sent in +/// FlightData protobuf messages +class ARROW_FLIGHT_EXPORT FlightDataStream { + public: + virtual ~FlightDataStream(); + + virtual std::shared_ptr schema() = 0; + + /// \brief Compute FlightPayload containing serialized RecordBatch schema + virtual arrow::Result GetSchemaPayload() = 0; + + // When the stream is completed, the last payload written will have null + // metadata + virtual arrow::Result Next() = 0; + + virtual Status Close(); +}; + +/// \brief A basic implementation of FlightDataStream that will provide +/// a sequence of FlightData messages to be written to a stream +class ARROW_FLIGHT_EXPORT RecordBatchStream : public FlightDataStream { + public: + /// \param[in] reader produces a sequence of record batches + /// \param[in] options IPC options for writing + explicit RecordBatchStream( + const std::shared_ptr& reader, + const ipc::IpcWriteOptions& options = ipc::IpcWriteOptions::Defaults()); + ~RecordBatchStream() override; + + // inherit deprecated API + using FlightDataStream::GetSchemaPayload; + using FlightDataStream::Next; + + std::shared_ptr schema() override; + arrow::Result GetSchemaPayload() override; + + arrow::Result Next() override; + Status Close() override; + + private: + class RecordBatchStreamImpl; + std::unique_ptr impl_; +}; + +/// \brief A reader for IPC payloads uploaded by a client. Also allows +/// reading application-defined metadata via the Flight protocol. 
+class ARROW_FLIGHT_EXPORT FlightMessageReader : public MetadataRecordBatchReader { + public: + /// \brief Get the descriptor for this upload. + virtual const FlightDescriptor& descriptor() const = 0; +}; + +/// \brief A writer for application-specific metadata sent back to the +/// client during an upload. +class ARROW_FLIGHT_EXPORT FlightMetadataWriter { + public: + virtual ~FlightMetadataWriter(); + /// \brief Send a message to the client. + virtual Status WriteMetadata(const Buffer& app_metadata) = 0; +}; + +/// \brief A writer for IPC payloads to a client. Also allows sending +/// application-defined metadata via the Flight protocol. +/// +/// This class offers more control compared to FlightDataStream, +/// including the option to write metadata without data and the +/// ability to interleave reading and writing. +class ARROW_FLIGHT_EXPORT FlightMessageWriter : public MetadataRecordBatchWriter { + public: + virtual ~FlightMessageWriter() = default; +}; + +/// \brief Call state/contextual data. +class ARROW_FLIGHT_EXPORT ServerCallContext { + public: + virtual ~ServerCallContext() = default; + /// \brief The name of the authenticated peer (may be the empty string) + virtual const std::string& peer_identity() const = 0; + /// \brief The peer address (not validated) + virtual const std::string& peer() const = 0; + /// \brief Add a response header. This is only valid before the server + /// starts sending the response; generally this isn't an issue unless you + /// are implementing FlightDataStream, ResultStream, or similar interfaces + /// yourself, or during a DoExchange or DoPut. + virtual void AddHeader(const std::string& key, const std::string& value) const = 0; + /// \brief Add a response trailer. This is only valid before the server + /// sends the final status; generally this isn't an issue unless your RPC + /// handler launches a thread or similar. 
+ virtual void AddTrailer(const std::string& key, const std::string& value) const = 0; + /// \brief Look up a middleware by key. Do not maintain a reference + /// to the object beyond the request body. + /// \return The middleware, or nullptr if not found. + virtual ServerMiddleware* GetMiddleware(const std::string& key) const = 0; + /// \brief Check if the current RPC has been cancelled (by the client, by + /// a network error, etc.). + virtual bool is_cancelled() const = 0; + /// \brief The headers sent by the client for this call. + virtual const CallHeaders& incoming_headers() const = 0; +}; + +class ARROW_FLIGHT_EXPORT FlightServerOptions { + public: + explicit FlightServerOptions(const Location& location_); + + ~FlightServerOptions(); + + /// \brief The host & port (or domain socket path) to listen on. + /// Use port 0 to bind to an available port. + Location location; + /// \brief The authentication handler to use. + std::shared_ptr auth_handler; + /// \brief A list of TLS certificate+key pairs to use. + std::vector tls_certificates; + /// \brief Enable mTLS and require that the client present a certificate. + bool verify_client; + /// \brief If using mTLS, the PEM-encoded root certificate to use. + std::string root_certificates; + /// \brief A list of server middleware to apply, along with a key to + /// identify them by. + /// + /// Middleware are always applied in the order provided. Duplicate + /// keys are an error. + std::vector>> + middleware; + + /// \brief An optional memory manager to control where to allocate incoming data. + std::shared_ptr memory_manager; + + /// \brief A Flight implementation-specific callback to customize + /// transport-specific options. + /// + /// Not guaranteed to be called. The type of the parameter is + /// specific to the Flight implementation. Users should take care to + /// link to the same transport implementation as Flight to avoid + /// runtime problems. 
See "Using Arrow C++ in your own project" in + /// the documentation for more details. + std::function builder_hook; +}; + +/// \brief Skeleton RPC server implementation which can be used to create +/// custom servers by implementing its abstract methods +class ARROW_FLIGHT_EXPORT FlightServerBase { + public: + FlightServerBase(); + virtual ~FlightServerBase(); + + // Lifecycle methods. + + /// \brief Initialize a Flight server listening at the given location. + /// This method must be called before any other method. + /// \param[in] options The configuration for this server. + Status Init(const FlightServerOptions& options); + + /// \brief Get the port that the Flight server is listening on. + /// This method must only be called after Init(). Will return a + /// non-positive value if no port exists (e.g. when listening on a + /// domain socket). + int port() const; + + /// \brief Get the address that the Flight server is listening on. + /// This method must only be called after Init(). + Location location() const; + + /// \brief Set the server to stop when receiving any of the given signal + /// numbers. + /// This method must be called before Serve(). + Status SetShutdownOnSignals(const std::vector sigs); + + /// \brief Start serving. + /// This method blocks until the server shuts down. + /// + /// The server will start to shut down when either Shutdown() is called + /// or one of the signals registered in SetShutdownOnSignals() is received. + Status Serve(); + + /// \brief Query whether Serve() was interrupted by a signal. + /// This method must be called after Serve() has returned. + /// + /// \return int the signal number that interrupted Serve(), if any, otherwise 0 + int GotSignal() const; + + /// \brief Shut down the server, blocking until current requests finish. + /// + /// Can be called from a signal handler or another thread while Serve() + /// blocks. Optionally a deadline can be set. 
Once the deadline expires + /// server will wait until remaining running calls complete. + /// + /// Should only be called once. + Status Shutdown(const std::chrono::system_clock::time_point* deadline = NULLPTR); + + /// \brief Block until server shuts down with Shutdown. + /// + /// Does not respond to signals like Serve(). + Status Wait(); + + // Implement these methods to create your own server. The default + // implementations will return a not-implemented result to the client + + /// \brief Retrieve a list of available fields given an optional opaque + /// criteria + /// \param[in] context The call context. + /// \param[in] criteria may be null + /// \param[out] listings the returned listings iterator + /// \return Status + virtual Status ListFlights(const ServerCallContext& context, const Criteria* criteria, + std::unique_ptr* listings); + + /// \brief Retrieve the schema and an access plan for the indicated + /// descriptor + /// \param[in] context The call context. + /// \param[in] request the dataset request, whether a named dataset or command + /// \param[out] info the returned flight info provider + /// \return Status + virtual Status GetFlightInfo(const ServerCallContext& context, + const FlightDescriptor& request, + std::unique_ptr* info); + + /// \brief Retrieve the current status of the target query + /// \param[in] context The call context. + /// \param[in] request the dataset request or a descriptor returned by a + /// prior PollFlightInfo call + /// \param[out] info the returned retry info provider + /// \return Status + virtual Status PollFlightInfo(const ServerCallContext& context, + const FlightDescriptor& request, + std::unique_ptr* info); + + /// \brief Retrieve the schema for the indicated descriptor + /// \param[in] context The call context. 
+ /// \param[in] request the dataset request, whether a named dataset or command + /// \param[out] schema the returned flight schema provider + /// \return Status + virtual Status GetSchema(const ServerCallContext& context, + const FlightDescriptor& request, + std::unique_ptr* schema); + + /// \brief Get a stream of IPC payloads to put on the wire + /// \param[in] context The call context. + /// \param[in] request an opaque ticket + /// \param[out] stream the returned stream provider + /// \return Status + virtual Status DoGet(const ServerCallContext& context, const Ticket& request, + std::unique_ptr* stream); + + /// \brief Process a stream of IPC payloads sent from a client + /// \param[in] context The call context. + /// \param[in] reader a sequence of uploaded record batches + /// \param[in] writer send metadata back to the client + /// \return Status + virtual Status DoPut(const ServerCallContext& context, + std::unique_ptr reader, + std::unique_ptr writer); + + /// \brief Process a bidirectional stream of IPC payloads + /// \param[in] context The call context. + /// \param[in] reader a sequence of uploaded record batches + /// \param[in] writer send data back to the client + /// \return Status + virtual Status DoExchange(const ServerCallContext& context, + std::unique_ptr reader, + std::unique_ptr writer); + + /// \brief Execute an action, return stream of zero or more results + /// \param[in] context The call context. + /// \param[in] action the action to execute, with type and body + /// \param[out] result the result iterator + /// \return Status + virtual Status DoAction(const ServerCallContext& context, const Action& action, + std::unique_ptr* result); + + /// \brief Retrieve the list of available actions + /// \param[in] context The call context. 
+ /// \param[out] actions a vector of available action types + /// \return Status + virtual Status ListActions(const ServerCallContext& context, + std::vector* actions); + + private: + struct Impl; + std::unique_ptr impl_; +}; + +} // namespace flight +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types_async.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types_async.h new file mode 100644 index 0000000000000000000000000000000000000000..d5ed48d8a6438b5199fe7cf602ee2c9380326f67 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types_async.h @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/flight/type_fwd.h" +#include "arrow/flight/types.h" +#include "arrow/ipc/options.h" +#include "arrow/type_fwd.h" + +namespace arrow::flight { + +/// \defgroup flight-async Async Flight Types +/// Common types used for asynchronous Flight APIs. +/// @{ + +/// \brief Non-templated state for an async RPC. +class ARROW_FLIGHT_EXPORT AsyncListenerBase { + public: + AsyncListenerBase(); + virtual ~AsyncListenerBase(); + + /// \brief Request cancellation of the RPC. 
+ /// + /// The RPC is not cancelled until AsyncListener::OnFinish is called. + void TryCancel(); + + private: + friend class arrow::flight::internal::ClientTransport; + + /// Transport-specific state for this RPC. Transport + /// implementations may store and retrieve state here via + /// ClientTransport::SetAsyncRpc and ClientTransport::GetAsyncRpc. + std::unique_ptr rpc_state_; +}; + +/// \brief Callbacks for results from async RPCs. +/// +/// A single listener may not be used for multiple concurrent RPC +/// calls. The application MUST hold the listener alive until +/// OnFinish() is called and has finished. +template +class ARROW_FLIGHT_EXPORT AsyncListener : public AsyncListenerBase { + public: + /// \brief Get the next server result. + /// + /// This will never be called concurrently with itself or OnFinish. + virtual void OnNext(T message) = 0; + /// \brief Get the final status. + /// + /// This will never be called concurrently with itself or OnNext. If the + /// error comes from the remote server, then a TransportStatusDetail will be + /// attached. Otherwise, the error is generated by the client-side + /// transport and will not have a TransportStatusDetail. + virtual void OnFinish(Status status) = 0; +}; + +/// @} + +} // namespace arrow::flight diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/visibility.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/visibility.h new file mode 100644 index 0000000000000000000000000000000000000000..06f864ba8cffc16520e7768d51f43cfb25a72dd0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/flight/visibility.h @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#if defined(_WIN32) || defined(__CYGWIN__) +# if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4251) +# else +# pragma GCC diagnostic ignored "-Wattributes" +# endif + +# ifdef ARROW_FLIGHT_STATIC +# define ARROW_FLIGHT_EXPORT +# elif defined(ARROW_FLIGHT_EXPORTING) +# define ARROW_FLIGHT_EXPORT __declspec(dllexport) +# else +# define ARROW_FLIGHT_EXPORT __declspec(dllimport) +# endif + +# define ARROW_FLIGHT_NO_EXPORT +#else // Not Windows +# ifndef ARROW_FLIGHT_EXPORT +# define ARROW_FLIGHT_EXPORT __attribute__((visibility("default"))) +# endif +# ifndef ARROW_FLIGHT_NO_EXPORT +# define ARROW_FLIGHT_NO_EXPORT __attribute__((visibility("hidden"))) +# endif +#endif // Non-Windows + +#if defined(_MSC_VER) +# pragma warning(pop) +#endif diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/tensor/converter.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/tensor/converter.h new file mode 100644 index 0000000000000000000000000000000000000000..408ab22305fff1665956ee8bb831fbc062b9994c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/tensor/converter.h @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/sparse_tensor.h" // IWYU pragma: export + +#include + +namespace arrow { +namespace internal { + +struct SparseTensorConverterMixin { + static bool IsNonZero(const uint8_t val) { return val != 0; } + + static void AssignIndex(uint8_t* indices, int64_t val, const int elsize); + + static int64_t GetIndexValue(const uint8_t* value_ptr, const int elsize); +}; + +Status MakeSparseCOOTensorFromTensor(const Tensor& tensor, + const std::shared_ptr& index_value_type, + MemoryPool* pool, + std::shared_ptr* out_sparse_index, + std::shared_ptr* out_data); + +Status MakeSparseCSXMatrixFromTensor(SparseMatrixCompressedAxis axis, + const Tensor& tensor, + const std::shared_ptr& index_value_type, + MemoryPool* pool, + std::shared_ptr* out_sparse_index, + std::shared_ptr* out_data); + +Status MakeSparseCSFTensorFromTensor(const Tensor& tensor, + const std::shared_ptr& index_value_type, + MemoryPool* pool, + std::shared_ptr* out_sparse_index, + std::shared_ptr* out_data); + +Result> MakeTensorFromSparseCOOTensor( + MemoryPool* pool, const SparseCOOTensor* sparse_tensor); + +Result> MakeTensorFromSparseCSRMatrix( + MemoryPool* pool, const SparseCSRMatrix* sparse_tensor); + +Result> MakeTensorFromSparseCSCMatrix( + MemoryPool* pool, const SparseCSCMatrix* sparse_tensor); + +Result> MakeTensorFromSparseCSFTensor( + MemoryPool* pool, const SparseCSFTensor* sparse_tensor); 
+ +} // namespace internal +} // namespace arrow