diff --git a/parrot/lib/python3.10/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8c5323178f60f895691e9b307a1d40053d236b9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/numpy/_core/__pycache__/strings.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/_core/__pycache__/strings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbedb4ca7a458e97482f9d0164a2473e612b1352 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/_core/__pycache__/strings.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h new file mode 100644 index 0000000000000000000000000000000000000000..82e0a600513d4abd9bb956053a2a7e94a1033f39 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h @@ -0,0 +1,146 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Functions for converting between pandas's NumPy-based data representation +// and Arrow data structures + +#pragma once + +#include "arrow/python/platform.h" + +#include +#include +#include + +#include "arrow/memory_pool.h" +#include "arrow/python/visibility.h" + +namespace arrow { + +class Array; +class ChunkedArray; +class Column; +class DataType; +class MemoryPool; +class Status; +class Table; + +namespace py { + +enum class MapConversionType { + DEFAULT, // convert arrow maps to assoc lists (list of kev-value tuples) in Pandas + LOSSY, // report warnings when lossiness is encountered due to duplicate keys + STRICT_, // raise a Python exception when lossiness is encountered due to duplicate + // keys +}; + +struct PandasOptions { + /// arrow::MemoryPool to use for memory allocations + MemoryPool* pool = default_memory_pool(); + + /// If true, we will convert all string columns to categoricals + bool strings_to_categorical = false; + bool zero_copy_only = false; + bool integer_object_nulls = false; + bool date_as_object = false; + bool timestamp_as_object = false; + bool use_threads = false; + + /// Coerce all date and timestamp to datetime64[ns] + bool coerce_temporal_nanoseconds = false; + + /// Used to maintain backwards compatibility for + /// timezone bugs (see ARROW-9528). Should be removed + /// after Arrow 2.0 release. + bool ignore_timezone = false; + + /// \brief If true, do not create duplicate PyObject versions of equal + /// objects. This only applies to immutable objects like strings or datetime + /// objects + bool deduplicate_objects = false; + + /// \brief For certain data types, a cast is needed in order to store the + /// data in a pandas DataFrame or Series (e.g. timestamps are always stored + /// as nanoseconds in pandas). This option controls whether it is a safe + /// cast or not. 
+ bool safe_cast = true; + + /// \brief If true, create one block per column rather than consolidated + /// blocks (1 per data type). Do zero-copy wrapping when there are no + /// nulls. pandas currently will consolidate the blocks on its own, causing + /// increased memory use, so keep this in mind if you are working on a + /// memory-constrained situation. + bool split_blocks = false; + + /// \brief If true, allow non-writable zero-copy views to be created for + /// single column blocks. This option is also used to provide zero copy for + /// Series data + bool allow_zero_copy_blocks = false; + + /// \brief If true, attempt to deallocate buffers in passed Arrow object if + /// it is the only remaining shared_ptr copy of it. See ARROW-3789 for + /// original context for this feature. Only currently implemented for Table + /// conversions + bool self_destruct = false; + + /// \brief The default behavior (DEFAULT), is to convert Arrow Map arrays to + /// Python association lists (list-of-tuples) in the same order as the Arrow + /// Map, as in [(key1, value1), (key2, value2), ...] + /// If LOSSY or STRICT, convert Arrow Map arrays to native Python dicts. + /// This can change the ordering of (key, value) pairs, and will deduplicate + /// multiple keys, resulting in a possible loss of data. + /// If 'lossy', this key deduplication results in a warning printed + /// when detected. If 'strict', this instead results in an exception + /// being raised when detected. + MapConversionType maps_as_pydicts = MapConversionType::DEFAULT; + + // Used internally for nested arrays. 
+ bool decode_dictionaries = false; + + // Columns that should be casted to categorical + std::unordered_set categorical_columns; + + // Columns that should be passed through to be converted to + // ExtensionArray/Block + std::unordered_set extension_columns; + + // Used internally to decipher between to_numpy() and to_pandas() when + // the expected output differs + bool to_numpy = false; +}; + +ARROW_PYTHON_EXPORT +Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr arr, + PyObject* py_ref, PyObject** out); + +ARROW_PYTHON_EXPORT +Status ConvertChunkedArrayToPandas(const PandasOptions& options, + std::shared_ptr col, PyObject* py_ref, + PyObject** out); + +// Convert a whole table as efficiently as possible to a pandas.DataFrame. +// +// The returned Python object is a list of tuples consisting of the exact 2D +// BlockManager structure of the pandas.DataFrame used as of pandas 0.19.x. +// +// tuple item: (indices: ndarray[int32], block: ndarray[TYPE, ndim=2]) +ARROW_PYTHON_EXPORT +Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr table, + PyObject** out); + +} // namespace py +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/async.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/async.h new file mode 100644 index 0000000000000000000000000000000000000000..1568d21938e6e79e724d957120e68a7576ba9c2a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/async.h @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/python/common.h" +#include "arrow/status.h" +#include "arrow/util/future.h" + +namespace arrow::py { + +/// \brief Bind a Python callback to an arrow::Future. +/// +/// If the Future finishes successfully, py_wrapper is called with its +/// result value and should return a PyObject*. If py_wrapper is successful, +/// py_cb is called with its return value. +/// +/// If either the Future or py_wrapper fails, py_cb is called with the +/// associated Python exception. +/// +/// \param future The future to bind to. +/// \param py_cb The Python callback function. Will be passed the result of +/// py_wrapper, or a Python exception if the future failed or one was +/// raised by py_wrapper. +/// \param py_wrapper A function (likely defined in Cython) to convert the C++ +/// result of the future to a Python object. 
+template +void BindFuture(Future future, PyObject* py_cb, PyWrapper py_wrapper) { + Py_INCREF(py_cb); + OwnedRefNoGIL cb_ref(py_cb); + + auto future_cb = [cb_ref = std::move(cb_ref), + py_wrapper = std::move(py_wrapper)](Result result) { + SafeCallIntoPythonVoid([&]() { + OwnedRef py_value_or_exc{WrapResult(std::move(result), std::move(py_wrapper))}; + Py_XDECREF( + PyObject_CallFunctionObjArgs(cb_ref.obj(), py_value_or_exc.obj(), NULLPTR)); + ARROW_WARN_NOT_OK(CheckPyError(), "Internal error in async call"); + }); + }; + future.AddCallback(std::move(future_cb)); +} + +} // namespace arrow::py diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h new file mode 100644 index 0000000000000000000000000000000000000000..8060dd33722a08eb0935687ea5cb306dbd38a9f0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/python/platform.h" + +#include "arrow/python/visibility.h" + +namespace arrow { +namespace py { +namespace benchmark { + +// Micro-benchmark routines for use from ASV + +// Run PandasObjectIsNull() once over every object in *list* +ARROW_PYTHON_EXPORT +void Benchmark_PandasObjectIsNull(PyObject* list); + +} // namespace benchmark +} // namespace py +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h new file mode 100644 index 0000000000000000000000000000000000000000..fe1d73622a3dbe79fa8bb530b355d080e66132c3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/python/serialize.h" +#include "arrow/python/visibility.h" +#include "arrow/status.h" +#include "arrow/util/macros.h" + +namespace arrow { + +class RecordBatch; +class Tensor; + +namespace io { + +class RandomAccessFile; + +} // namespace io + +namespace py { + +struct ARROW_PYTHON_EXPORT SparseTensorCounts { + int coo; + int csr; + int csc; + int csf; + int ndim_csf; + + int num_total_tensors() const { return coo + csr + csc + csf; } + int num_total_buffers() const { + return coo * 3 + csr * 4 + csc * 4 + 2 * ndim_csf + csf; + } +}; + +/// \brief Read serialized Python sequence from file interface using Arrow IPC +/// \param[in] src a RandomAccessFile +/// \param[out] out the reconstructed data +/// \return Status +ARROW_DEPRECATED("Deprecated in 18.0.0. Will be removed in 20.0.0") +ARROW_PYTHON_EXPORT +Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out); + +/// \brief Reconstruct SerializedPyObject from representation produced by +/// SerializedPyObject::GetComponents. +/// +/// \param[in] num_tensors number of tensors in the object +/// \param[in] num_sparse_tensors number of sparse tensors in the object +/// \param[in] num_ndarrays number of numpy Ndarrays in the object +/// \param[in] num_buffers number of buffers in the object +/// \param[in] data a list containing pyarrow.Buffer instances. It must be 1 + +/// num_tensors * 2 + num_coo_tensors * 3 + num_csr_tensors * 4 + num_csc_tensors * 4 + +/// num_csf_tensors * (2 * ndim_csf + 3) + num_buffers in length +/// \param[out] out the reconstructed object +/// \return Status +ARROW_DEPRECATED("Deprecated in 18.0.0. 
Will be removed in 20.0.0") +ARROW_PYTHON_EXPORT +Status GetSerializedFromComponents(int num_tensors, + const SparseTensorCounts& num_sparse_tensors, + int num_ndarrays, int num_buffers, PyObject* data, + SerializedPyObject* out); + +/// \brief Reconstruct Python object from Arrow-serialized representation +/// \param[in] context Serialization context which contains custom serialization +/// and deserialization callbacks. Can be any Python object with a +/// _serialize_callback method for serialization and a _deserialize_callback +/// method for deserialization. If context is None, no custom serialization +/// will be attempted. +/// \param[in] object Object to deserialize +/// \param[in] base a Python object holding the underlying data that any NumPy +/// arrays will reference, to avoid premature deallocation +/// \param[out] out The returned object +/// \return Status +/// This acquires the GIL +ARROW_DEPRECATED("Deprecated in 18.0.0. Will be removed in 20.0.0") +ARROW_PYTHON_EXPORT +Status DeserializeObject(PyObject* context, const SerializedPyObject& object, + PyObject* base, PyObject** out); + +/// \brief Reconstruct Ndarray from Arrow-serialized representation +/// \param[in] object Object to deserialize +/// \param[out] out The deserialized tensor +/// \return Status +ARROW_DEPRECATED("Deprecated in 18.0.0. Will be removed in 20.0.0") +ARROW_PYTHON_EXPORT +Status DeserializeNdarray(const SerializedPyObject& object, std::shared_ptr* out); + +ARROW_DEPRECATED("Deprecated in 18.0.0. 
Will be removed in 20.0.0") +ARROW_PYTHON_EXPORT +Status NdarrayFromBuffer(std::shared_ptr src, std::shared_ptr* out); + +} // namespace py +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h new file mode 100644 index 0000000000000000000000000000000000000000..e6523824eb9634c18b87e4e3e5c827d8be43f8a8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include "arrow/extension_type.h" +#include "arrow/python/common.h" +#include "arrow/python/visibility.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace py { + +class ARROW_PYTHON_EXPORT PyExtensionType : public ExtensionType { + public: + // Implement extensionType API + std::string extension_name() const override { return extension_name_; } + + std::string ToString(bool show_metadata = false) const override; + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override; + + // For use from Cython + // Assumes that `typ` is borrowed + static Status FromClass(const std::shared_ptr storage_type, + const std::string extension_name, PyObject* typ, + std::shared_ptr* out); + + // Return new ref + PyObject* GetInstance() const; + Status SetInstance(PyObject*) const; + + protected: + PyExtensionType(std::shared_ptr storage_type, PyObject* typ, + PyObject* inst = NULLPTR); + PyExtensionType(std::shared_ptr storage_type, std::string extension_name, + PyObject* typ, PyObject* inst = NULLPTR); + + std::string extension_name_; + + // These fields are mutable because of two-step initialization. + mutable OwnedRefNoGIL type_class_; + // A weakref or null. Storing a strong reference to the Python extension type + // instance would create an unreclaimable reference cycle between Python and C++ + // (the Python instance has to keep a strong reference to the C++ ExtensionType + // in other direction). Instead, we store a weakref to the instance. + // If the weakref is dead, we reconstruct the instance from its serialized form. 
+ mutable OwnedRefNoGIL type_instance_; + // Empty if type_instance_ is null + mutable std::string serialized_; +}; + +ARROW_PYTHON_EXPORT std::string PyExtensionName(); + +ARROW_PYTHON_EXPORT Status RegisterPyExtensionType(const std::shared_ptr&); + +ARROW_PYTHON_EXPORT Status UnregisterPyExtensionType(const std::string& type_name); + +} // namespace py +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h new file mode 100644 index 0000000000000000000000000000000000000000..983384db118a16141e49a679388b83c75d1d77d6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Functions for converting between CPython built-in data structures and Arrow +// data structures + +#pragma once + +#include "arrow/python/platform.h" + +#include + +#include "arrow/python/visibility.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" + +#include "common.h" + +namespace arrow { + +class Array; +class Status; + +namespace py { + +// These functions take a sequence input, not arbitrary iterables + +/// \brief Infer Arrow type from a Python sequence +/// \param[in] obj the sequence of values +/// \param[in] mask an optional mask where True values are null. May +/// be nullptr +/// \param[in] pandas_null_sentinels use pandas's null value markers +ARROW_PYTHON_EXPORT +Result> InferArrowType(PyObject* obj, PyObject* mask, + bool pandas_null_sentinels); + +/// Checks whether the passed Python object is a boolean scalar +ARROW_PYTHON_EXPORT +bool IsPyBool(PyObject* obj); + +/// Checks whether the passed Python object is an integer scalar +ARROW_PYTHON_EXPORT +bool IsPyInt(PyObject* obj); + +/// Checks whether the passed Python object is a float scalar +ARROW_PYTHON_EXPORT +bool IsPyFloat(PyObject* obj); + +} // namespace py +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib.h new file mode 100644 index 0000000000000000000000000000000000000000..1b0635effb371e981ba63328160e99d8e7c88059 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib.h @@ -0,0 +1,83 @@ +/* Generated by Cython 3.0.11 */ + +#ifndef __PYX_HAVE__pyarrow__lib +#define __PYX_HAVE__pyarrow__lib + +#include "Python.h" + +#ifndef __PYX_HAVE_API__pyarrow__lib + +#ifdef CYTHON_EXTERN_C + #undef __PYX_EXTERN_C + #define __PYX_EXTERN_C CYTHON_EXTERN_C +#elif defined(__PYX_EXTERN_C) + #ifdef _MSC_VER + #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. 
Use 'CYTHON_EXTERN_C' instead.") + #else + #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead. + #endif +#else + #define __PYX_EXTERN_C extern "C++" +#endif + +#ifndef DL_IMPORT + #define DL_IMPORT(_T) _T +#endif + +__PYX_EXTERN_C PyObject *pyarrow_wrap_buffer(std::shared_ptr< arrow::Buffer> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_resizable_buffer(std::shared_ptr< arrow::ResizableBuffer> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_data_type(std::shared_ptr< arrow::DataType> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_field(std::shared_ptr< arrow::Field> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_schema(std::shared_ptr< arrow::Schema> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_scalar(std::shared_ptr< arrow::Scalar> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_array(std::shared_ptr< arrow::Array> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_chunked_array(std::shared_ptr< arrow::ChunkedArray> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_coo_tensor(std::shared_ptr< arrow::SparseCOOTensor> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csc_matrix(std::shared_ptr< arrow::SparseCSCMatrix> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csf_tensor(std::shared_ptr< arrow::SparseCSFTensor> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csr_matrix(std::shared_ptr< arrow::SparseCSRMatrix> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_tensor(std::shared_ptr< arrow::Tensor> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_batch(std::shared_ptr< arrow::RecordBatch> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_table(std::shared_ptr< arrow::Table> const &); +__PYX_EXTERN_C std::shared_ptr< arrow::Buffer> pyarrow_unwrap_buffer(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::DataType> pyarrow_unwrap_data_type(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Field> pyarrow_unwrap_field(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Schema> 
pyarrow_unwrap_schema(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Scalar> pyarrow_unwrap_scalar(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Array> pyarrow_unwrap_array(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::ChunkedArray> pyarrow_unwrap_chunked_array(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::SparseCOOTensor> pyarrow_unwrap_sparse_coo_tensor(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSCMatrix> pyarrow_unwrap_sparse_csc_matrix(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSFTensor> pyarrow_unwrap_sparse_csf_tensor(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSRMatrix> pyarrow_unwrap_sparse_csr_matrix(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Tensor> pyarrow_unwrap_tensor(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::RecordBatch> pyarrow_unwrap_batch(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Table> pyarrow_unwrap_table(PyObject *); + +#endif /* !__PYX_HAVE_API__pyarrow__lib */ + +/* WARNING: the interface of the module init function changed in CPython 3.5. */ +/* It now returns a PyModuleDef instance instead of a PyModule instance. 
*/ + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC initlib(void); +#else +/* WARNING: Use PyImport_AppendInittab("lib", PyInit_lib) instead of calling PyInit_lib directly from Python 3.5 */ +PyMODINIT_FUNC PyInit_lib(void); + +#if PY_VERSION_HEX >= 0x03050000 && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 201402L)) +#if defined(__cplusplus) && __cplusplus >= 201402L +[[deprecated("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly.")]] inline +#elif defined(__GNUC__) || defined(__clang__) +__attribute__ ((__deprecated__("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly."), __unused__)) __inline__ +#elif defined(_MSC_VER) +__declspec(deprecated("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly.")) __inline +#endif +static PyObject* __PYX_WARN_IF_PyInit_lib_INIT_CALLED(PyObject* res) { + return res; +} +#define PyInit_lib() __PYX_WARN_IF_PyInit_lib_INIT_CALLED(PyInit_lib()) +#endif +#endif + +#endif /* !__PYX_HAVE__pyarrow__lib */ diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib_api.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib_api.h new file mode 100644 index 0000000000000000000000000000000000000000..73792095d38c89b733f7ffac7b9b25acabb2a1c2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib_api.h @@ -0,0 +1,201 @@ +/* Generated by Cython 3.0.11 */ + +#ifndef __PYX_HAVE_API__pyarrow__lib +#define __PYX_HAVE_API__pyarrow__lib +#ifdef __MINGW64__ +#define MS_WIN64 +#endif +#include "Python.h" +#include "lib.h" + +static PyObject *(*__pyx_api_f_7pyarrow_3lib_box_memory_pool)( arrow::MemoryPool *) = 0; +#define box_memory_pool __pyx_api_f_7pyarrow_3lib_box_memory_pool +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer)(std::shared_ptr< arrow::Buffer> const &) = 0; +#define pyarrow_wrap_buffer 
__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer)(std::shared_ptr< arrow::ResizableBuffer> const &) = 0; +#define pyarrow_wrap_resizable_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type)(std::shared_ptr< arrow::DataType> const &) = 0; +#define pyarrow_wrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field)(std::shared_ptr< arrow::Field> const &) = 0; +#define pyarrow_wrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema)(std::shared_ptr< arrow::Schema> const &) = 0; +#define pyarrow_wrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar)(std::shared_ptr< arrow::Scalar> const &) = 0; +#define pyarrow_wrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array)(std::shared_ptr< arrow::Array> const &) = 0; +#define pyarrow_wrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array)(std::shared_ptr< arrow::ChunkedArray> const &) = 0; +#define pyarrow_wrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor)(std::shared_ptr< arrow::SparseCOOTensor> const &) = 0; +#define pyarrow_wrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix)(std::shared_ptr< arrow::SparseCSCMatrix> const &) = 0; +#define pyarrow_wrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor)(std::shared_ptr< 
arrow::SparseCSFTensor> const &) = 0; +#define pyarrow_wrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix)(std::shared_ptr< arrow::SparseCSRMatrix> const &) = 0; +#define pyarrow_wrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor)(std::shared_ptr< arrow::Tensor> const &) = 0; +#define pyarrow_wrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch)(std::shared_ptr< arrow::RecordBatch> const &) = 0; +#define pyarrow_wrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table)(std::shared_ptr< arrow::Table> const &) = 0; +#define pyarrow_wrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table +static std::shared_ptr< arrow::Buffer> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer)(PyObject *) = 0; +#define pyarrow_unwrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer +static std::shared_ptr< arrow::DataType> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type)(PyObject *) = 0; +#define pyarrow_unwrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type +static std::shared_ptr< arrow::Field> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field)(PyObject *) = 0; +#define pyarrow_unwrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field +static std::shared_ptr< arrow::Schema> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema)(PyObject *) = 0; +#define pyarrow_unwrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema +static std::shared_ptr< arrow::Scalar> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar)(PyObject *) = 0; +#define pyarrow_unwrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar +static std::shared_ptr< arrow::Array> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array)(PyObject *) = 0; +#define 
pyarrow_unwrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array +static std::shared_ptr< arrow::ChunkedArray> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array)(PyObject *) = 0; +#define pyarrow_unwrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array +static std::shared_ptr< arrow::SparseCOOTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor +static std::shared_ptr< arrow::SparseCSCMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix +static std::shared_ptr< arrow::SparseCSFTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor +static std::shared_ptr< arrow::SparseCSRMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix +static std::shared_ptr< arrow::Tensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor)(PyObject *) = 0; +#define pyarrow_unwrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor +static std::shared_ptr< arrow::RecordBatch> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch)(PyObject *) = 0; +#define pyarrow_unwrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch +static std::shared_ptr< arrow::Table> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table)(PyObject *) = 0; +#define pyarrow_unwrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status)(arrow::Status const &) = 0; +#define pyarrow_internal_check_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status +static PyObject 
*(*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status)(arrow::Status const &) = 0; +#define pyarrow_internal_convert_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer)(PyObject *) = 0; +#define pyarrow_is_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type)(PyObject *) = 0; +#define pyarrow_is_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata)(PyObject *) = 0; +#define pyarrow_is_metadata __pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_field)(PyObject *) = 0; +#define pyarrow_is_field __pyx_api_f_7pyarrow_3lib_pyarrow_is_field +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema)(PyObject *) = 0; +#define pyarrow_is_schema __pyx_api_f_7pyarrow_3lib_pyarrow_is_schema +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_array)(PyObject *) = 0; +#define pyarrow_is_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_array +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array)(PyObject *) = 0; +#define pyarrow_is_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar)(PyObject *) = 0; +#define pyarrow_is_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor)(PyObject *) = 0; +#define pyarrow_is_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor)(PyObject *) = 0; +#define pyarrow_is_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix)(PyObject *) = 0; +#define pyarrow_is_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix)(PyObject *) = 0; +#define 
pyarrow_is_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor)(PyObject *) = 0; +#define pyarrow_is_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_table)(PyObject *) = 0; +#define pyarrow_is_table __pyx_api_f_7pyarrow_3lib_pyarrow_is_table +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch)(PyObject *) = 0; +#define pyarrow_is_batch __pyx_api_f_7pyarrow_3lib_pyarrow_is_batch +#ifndef __PYX_HAVE_RT_ImportFunction_3_0_11 +#define __PYX_HAVE_RT_ImportFunction_3_0_11 +static int __Pyx_ImportFunction_3_0_11(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { + PyObject *d = 0; + PyObject *cobj = 0; + union { + void (*fp)(void); + void *p; + } tmp; + d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); + if (!d) + goto bad; + cobj = PyDict_GetItemString(d, funcname); + if (!cobj) { + PyErr_Format(PyExc_ImportError, + "%.200s does not export expected C function %.200s", + PyModule_GetName(module), funcname); + goto bad; + } + if (!PyCapsule_IsValid(cobj, sig)) { + PyErr_Format(PyExc_TypeError, + "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", + PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); + goto bad; + } + tmp.p = PyCapsule_GetPointer(cobj, sig); + *f = tmp.fp; + if (!(*f)) + goto bad; + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(d); + return -1; +} +#endif + + +static int import_pyarrow__lib(void) { + PyObject *module = 0; + module = PyImport_ImportModule("pyarrow.lib"); + if (!module) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "box_memory_pool", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_box_memory_pool, "PyObject *( arrow::MemoryPool *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer, "PyObject 
*(std::shared_ptr< arrow::Buffer> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_resizable_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer, "PyObject *(std::shared_ptr< arrow::ResizableBuffer> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type, "PyObject *(std::shared_ptr< arrow::DataType> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field, "PyObject *(std::shared_ptr< arrow::Field> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema, "PyObject *(std::shared_ptr< arrow::Schema> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar, "PyObject *(std::shared_ptr< arrow::Scalar> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array, "PyObject *(std::shared_ptr< arrow::Array> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array, "PyObject *(std::shared_ptr< arrow::ChunkedArray> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor, "PyObject *(std::shared_ptr< arrow::SparseCOOTensor> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSCMatrix> const &)") < 0) goto bad; + if 
(__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor, "PyObject *(std::shared_ptr< arrow::SparseCSFTensor> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSRMatrix> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor, "PyObject *(std::shared_ptr< arrow::Tensor> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch, "PyObject *(std::shared_ptr< arrow::RecordBatch> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table, "PyObject *(std::shared_ptr< arrow::Table> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer, "std::shared_ptr< arrow::Buffer> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type, "std::shared_ptr< arrow::DataType> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field, "std::shared_ptr< arrow::Field> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema, "std::shared_ptr< arrow::Schema> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar, 
"std::shared_ptr< arrow::Scalar> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array, "std::shared_ptr< arrow::Array> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array, "std::shared_ptr< arrow::ChunkedArray> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor, "std::shared_ptr< arrow::SparseCOOTensor> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix, "std::shared_ptr< arrow::SparseCSCMatrix> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor, "std::shared_ptr< arrow::SparseCSFTensor> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix, "std::shared_ptr< arrow::SparseCSRMatrix> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor, "std::shared_ptr< arrow::Tensor> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch, "std::shared_ptr< arrow::RecordBatch> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table, "std::shared_ptr< arrow::Table> (PyObject *)") < 0) goto bad; + if 
(__Pyx_ImportFunction_3_0_11(module, "pyarrow_internal_check_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status, "int (arrow::Status const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_internal_convert_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status, "PyObject *(arrow::Status const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_metadata", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_field, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_array, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_sparse_coo_tensor", (void 
(**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_table, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch, "int (PyObject *)") < 0) goto bad; + Py_DECREF(module); module = 0; + return 0; + bad: + Py_XDECREF(module); + return -1; +} + +#endif /* !__PYX_HAVE_API__pyarrow__lib */ diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h new file mode 100644 index 0000000000000000000000000000000000000000..a83ae4a62b944c71af70d58c7107befd659baa8c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/python/platform.h" // IWYU pragma: export + +#include // IWYU pragma: export + +// Don't use the deprecated Numpy functions +#ifdef NPY_1_7_API_VERSION +# define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION +#else +# define NPY_ARRAY_NOTSWAPPED NPY_NOTSWAPPED +# define NPY_ARRAY_ALIGNED NPY_ALIGNED +# define NPY_ARRAY_WRITEABLE NPY_WRITEABLE +# define NPY_ARRAY_UPDATEIFCOPY NPY_UPDATEIFCOPY +#endif + +// This is required to be able to access the NumPy C API properly in C++ files +// other than init.cc. +#define PY_ARRAY_UNIQUE_SYMBOL arrow_ARRAY_API +#ifndef NUMPY_IMPORT_ARRAY +# define NO_IMPORT_ARRAY +#endif + +#include // IWYU pragma: export +#include // IWYU pragma: export +#include // IWYU pragma: export + +// A bit subtle. Numpy has 5 canonical integer types: +// (or, rather, type pairs: signed and unsigned) +// NPY_BYTE, NPY_SHORT, NPY_INT, NPY_LONG, NPY_LONGLONG +// It also has 4 fixed-width integer aliases. +// When mapping Arrow integer types to these 4 fixed-width aliases, +// we always miss one of the canonical types (even though it may +// have the same width as one of the aliases). +// Which one depends on the platform... +// On a LP64 system, NPY_INT64 maps to NPY_LONG and +// NPY_LONGLONG needs to be handled separately. +// On a LLP64 system, NPY_INT32 maps to NPY_LONG and +// NPY_INT needs to be handled separately. 
+ +#if NPY_BITSOF_LONG == 32 && NPY_BITSOF_LONGLONG == 64 +# define NPY_INT64_IS_LONG_LONG 1 +#else +# define NPY_INT64_IS_LONG_LONG 0 +#endif + +#if NPY_BITSOF_INT == 32 && NPY_BITSOF_LONG == 64 +# define NPY_INT32_IS_INT 1 +#else +# define NPY_INT32_IS_INT 0 +#endif + +// Backported NumPy 2 API (can be removed if numpy 2 is required) +#if NPY_ABI_VERSION < 0x02000000 +# define PyDataType_ELSIZE(descr) ((descr)->elsize) +# define PyDataType_C_METADATA(descr) ((descr)->c_metadata) +# define PyDataType_FIELDS(descr) ((descr)->fields) +#endif + +namespace arrow { +namespace py { + +inline int import_numpy() { +#ifdef NUMPY_IMPORT_ARRAY + import_array1(-1); + import_umath1(-1); +#endif + + return 0; +} + +// See above about the missing Numpy integer type numbers +inline int fix_numpy_type_num(int type_num) { +#if !NPY_INT32_IS_INT && NPY_BITSOF_INT == 32 + if (type_num == NPY_INT) return NPY_INT32; + if (type_num == NPY_UINT) return NPY_UINT32; +#endif +#if !NPY_INT64_IS_LONG_LONG && NPY_BITSOF_LONGLONG == 64 + if (type_num == NPY_LONGLONG) return NPY_INT64; + if (type_num == NPY_ULONGLONG) return NPY_UINT64; +#endif + return type_num; +} + +} // namespace py +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h new file mode 100644 index 0000000000000000000000000000000000000000..b6cd093e5542008cf173f43de311e40c418e7c8d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Converting from pandas memory representation to Arrow data structures + +#pragma once + +#include "arrow/python/platform.h" + +#include + +#include "arrow/compute/api.h" +#include "arrow/python/visibility.h" + +namespace arrow { + +class Array; +class ChunkedArray; +class DataType; +class MemoryPool; +class Status; + +namespace py { + +/// Convert NumPy arrays to Arrow. If target data type is not known, pass a +/// type with null +/// +/// \param[in] pool Memory pool for any memory allocations +/// \param[in] ao an ndarray with the array data +/// \param[in] mo an ndarray with a null mask (True is null), optional +/// \param[in] from_pandas If true, use pandas's null sentinels to determine +/// whether values are null +/// \param[in] type a specific type to cast to, may be null +/// \param[in] cast_options casting options +/// \param[out] out a ChunkedArray, to accommodate chunked output +ARROW_PYTHON_EXPORT +Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas, + const std::shared_ptr& type, + const compute::CastOptions& cast_options, + std::shared_ptr* out); + +/// Safely convert NumPy arrays to Arrow. If target data type is not known, +/// pass a type with null. 
+/// +/// \param[in] pool Memory pool for any memory allocations +/// \param[in] ao an ndarray with the array data +/// \param[in] mo an ndarray with a null mask (True is null), optional +/// \param[in] from_pandas If true, use pandas's null sentinels to determine +/// whether values are null +/// \param[in] type a specific type to cast to, may be null +/// \param[out] out a ChunkedArray, to accommodate chunked output +ARROW_PYTHON_EXPORT +Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas, + const std::shared_ptr& type, + std::shared_ptr* out); + +} // namespace py +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h new file mode 100644 index 0000000000000000000000000000000000000000..7a107c89f0bdcd9189900293f2a537a2fe851778 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include "arrow/python/common.h" +#include "arrow/python/visibility.h" +#include "arrow/util/macros.h" +#include "parquet/encryption/crypto_factory.h" +#include "parquet/encryption/kms_client.h" +#include "parquet/encryption/kms_client_factory.h" + +#if defined(_WIN32) || defined(__CYGWIN__) // Windows +# if defined(_MSC_VER) +# pragma warning(disable : 4251) +# else +# pragma GCC diagnostic ignored "-Wattributes" +# endif + +# ifdef ARROW_PYTHON_STATIC +# define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT +# elif defined(ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORTING) +# define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __declspec(dllexport) +# else +# define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __declspec(dllimport) +# endif + +#else // Not Windows +# ifndef ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT +# define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __attribute__((visibility("default"))) +# endif +#endif // Non-Windows + +namespace arrow { +namespace py { +namespace parquet { +namespace encryption { + +/// \brief A table of function pointers for calling from C++ into +/// Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientVtable { + public: + std::function + wrap_key; + std::function + unwrap_key; +}; + +/// \brief A helper for KmsClient implementation in Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClient + : public ::parquet::encryption::KmsClient { + public: + PyKmsClient(PyObject* handler, PyKmsClientVtable vtable); + ~PyKmsClient() override; + + std::string WrapKey(const std::string& key_bytes, + const std::string& master_key_identifier) override; + + std::string UnwrapKey(const std::string& wrapped_key, + const std::string& master_key_identifier) override; + + private: + OwnedRefNoGIL handler_; + PyKmsClientVtable vtable_; +}; + +/// \brief A table of function pointers for calling from C++ into +/// Python. 
+class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientFactoryVtable { + public: + std::function* out)> + create_kms_client; +}; + +/// \brief A helper for KmsClientFactory implementation in Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientFactory + : public ::parquet::encryption::KmsClientFactory { + public: + PyKmsClientFactory(PyObject* handler, PyKmsClientFactoryVtable vtable); + ~PyKmsClientFactory() override; + + std::shared_ptr<::parquet::encryption::KmsClient> CreateKmsClient( + const ::parquet::encryption::KmsConnectionConfig& kms_connection_config) override; + + private: + OwnedRefNoGIL handler_; + PyKmsClientFactoryVtable vtable_; +}; + +/// \brief A CryptoFactory that returns Results instead of throwing exceptions. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyCryptoFactory + : public ::parquet::encryption::CryptoFactory { + public: + arrow::Result> + SafeGetFileEncryptionProperties( + const ::parquet::encryption::KmsConnectionConfig& kms_connection_config, + const ::parquet::encryption::EncryptionConfiguration& encryption_config); + + /// The returned FileDecryptionProperties object will use the cache inside this + /// CryptoFactory object, so please keep this + /// CryptoFactory object alive along with the returned + /// FileDecryptionProperties object. 
+ arrow::Result> + SafeGetFileDecryptionProperties( + const ::parquet::encryption::KmsConnectionConfig& kms_connection_config, + const ::parquet::encryption::DecryptionConfiguration& decryption_config); +}; + +} // namespace encryption +} // namespace parquet +} // namespace py +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h new file mode 100644 index 0000000000000000000000000000000000000000..113035500c0053dbb9dde5a99216aec1aefd1140 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/python/platform.h" + +#include + +#include "arrow/python/visibility.h" + +#include "arrow/sparse_tensor.h" + +// Work around ARROW-2317 (C linkage warning from Cython) +extern "C++" { + +namespace arrow { + +class Array; +class Buffer; +class DataType; +class Field; +class RecordBatch; +class Schema; +class Status; +class Table; +class Tensor; + +namespace py { + +// Returns 0 on success, -1 on error. 
+ARROW_PYTHON_EXPORT int import_pyarrow(); + +#define DECLARE_WRAP_FUNCTIONS(FUNC_SUFFIX, TYPE_NAME) \ + ARROW_PYTHON_EXPORT bool is_##FUNC_SUFFIX(PyObject*); \ + ARROW_PYTHON_EXPORT Result> unwrap_##FUNC_SUFFIX( \ + PyObject*); \ + ARROW_PYTHON_EXPORT PyObject* wrap_##FUNC_SUFFIX(const std::shared_ptr&); + +DECLARE_WRAP_FUNCTIONS(buffer, Buffer) + +DECLARE_WRAP_FUNCTIONS(data_type, DataType) +DECLARE_WRAP_FUNCTIONS(field, Field) +DECLARE_WRAP_FUNCTIONS(schema, Schema) + +DECLARE_WRAP_FUNCTIONS(scalar, Scalar) + +DECLARE_WRAP_FUNCTIONS(array, Array) +DECLARE_WRAP_FUNCTIONS(chunked_array, ChunkedArray) + +DECLARE_WRAP_FUNCTIONS(sparse_coo_tensor, SparseCOOTensor) +DECLARE_WRAP_FUNCTIONS(sparse_csc_matrix, SparseCSCMatrix) +DECLARE_WRAP_FUNCTIONS(sparse_csf_tensor, SparseCSFTensor) +DECLARE_WRAP_FUNCTIONS(sparse_csr_matrix, SparseCSRMatrix) +DECLARE_WRAP_FUNCTIONS(tensor, Tensor) + +DECLARE_WRAP_FUNCTIONS(batch, RecordBatch) +DECLARE_WRAP_FUNCTIONS(table, Table) + +#undef DECLARE_WRAP_FUNCTIONS + +namespace internal { + +// If status is ok, return 0. +// If status is not ok, set Python error indicator and return -1. +ARROW_PYTHON_EXPORT int check_status(const Status& status); + +// Convert status to a Python exception object. Status must not be ok. +ARROW_PYTHON_EXPORT PyObject* convert_status(const Status& status); + +} // namespace internal +} // namespace py +} // namespace arrow + +} // extern "C++" diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h new file mode 100644 index 0000000000000000000000000000000000000000..e509593c254468a62216e0e4a7ea073ad9a3f1d4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h @@ -0,0 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// For backward compatibility. +#include "arrow/python/lib.h" diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h new file mode 100644 index 0000000000000000000000000000000000000000..d167996ba8da6796ac62da0fa0186419a3211930 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Functions for converting between CPython built-in data structures and Arrow +// data structures + +#pragma once + +#include "arrow/python/platform.h" + +#include +#include + +#include "arrow/python/visibility.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" + +#include "arrow/python/common.h" + +namespace arrow { + +class Array; +class Status; + +namespace py { + +struct PyConversionOptions { + PyConversionOptions() = default; + + PyConversionOptions(const std::shared_ptr& type, int64_t size, + MemoryPool* pool, bool from_pandas) + : type(type), size(size), from_pandas(from_pandas) {} + + // Set to null if to be inferred + std::shared_ptr type; + + // Default is -1, which indicates the size should the same as the input sequence + int64_t size = -1; + + bool from_pandas = false; + + /// Used to maintain backwards compatibility for + /// timezone bugs (see ARROW-9528). Should be removed + /// after Arrow 2.0 release. + bool ignore_timezone = false; + + bool strict = false; +}; + +/// \brief Convert sequence (list, generator, NumPy array with dtype object) of +/// Python objects. +/// \param[in] obj the sequence to convert +/// \param[in] mask a NumPy array of true/false values to indicate whether +/// values in the sequence are null (true) or not null (false). 
This parameter +/// may be null +/// \param[in] options various conversion options +/// \param[in] pool MemoryPool to use for allocations +/// \return Result ChunkedArray +ARROW_PYTHON_EXPORT +Result> ConvertPySequence( + PyObject* obj, PyObject* mask, PyConversionOptions options, + MemoryPool* pool = default_memory_pool()); + +} // namespace py + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/type_traits.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/type_traits.h new file mode 100644 index 0000000000000000000000000000000000000000..a941577f765583e3ac54ea163452342b5c07f309 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/type_traits.h @@ -0,0 +1,350 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Internal header + +#pragma once + +#include "arrow/python/platform.h" + +#include +#include + +#include "arrow/python/numpy_interop.h" + +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/logging.h" + +namespace arrow { +namespace py { + +static constexpr int64_t kPandasTimestampNull = std::numeric_limits::min(); +constexpr int64_t kNanosecondsInDay = 86400000000000LL; + +namespace internal { + +// +// Type traits for Numpy -> Arrow equivalence +// +template +struct npy_traits {}; + +template <> +struct npy_traits { + typedef uint8_t value_type; + using TypeClass = BooleanType; + using BuilderClass = BooleanBuilder; + + static constexpr bool supports_nulls = false; + static inline bool isnull(uint8_t v) { return false; } +}; + +#define NPY_INT_DECL(TYPE, CapType, T) \ + template <> \ + struct npy_traits { \ + typedef T value_type; \ + using TypeClass = CapType##Type; \ + using BuilderClass = CapType##Builder; \ + \ + static constexpr bool supports_nulls = false; \ + static inline bool isnull(T v) { return false; } \ + }; + +NPY_INT_DECL(INT8, Int8, int8_t); +NPY_INT_DECL(INT16, Int16, int16_t); +NPY_INT_DECL(INT32, Int32, int32_t); +NPY_INT_DECL(INT64, Int64, int64_t); + +NPY_INT_DECL(UINT8, UInt8, uint8_t); +NPY_INT_DECL(UINT16, UInt16, uint16_t); +NPY_INT_DECL(UINT32, UInt32, uint32_t); +NPY_INT_DECL(UINT64, UInt64, uint64_t); + +#if !NPY_INT32_IS_INT && NPY_BITSOF_INT == 32 +NPY_INT_DECL(INT, Int32, int32_t); +NPY_INT_DECL(UINT, UInt32, uint32_t); +#endif +#if !NPY_INT64_IS_LONG_LONG && NPY_BITSOF_LONGLONG == 64 +NPY_INT_DECL(LONGLONG, Int64, int64_t); +NPY_INT_DECL(ULONGLONG, UInt64, uint64_t); +#endif + +template <> +struct npy_traits { + typedef npy_half value_type; + using TypeClass = HalfFloatType; + using BuilderClass = HalfFloatBuilder; + + static constexpr npy_half na_sentinel = NPY_HALF_NAN; + + static constexpr bool supports_nulls = true; + + static inline bool isnull(npy_half v) { return v == NPY_HALF_NAN; } +}; + +template <> 
+struct npy_traits { + typedef float value_type; + using TypeClass = FloatType; + using BuilderClass = FloatBuilder; + + // We need to use quiet_NaN here instead of the NAN macro as on Windows + // the NAN macro leads to "division-by-zero" compile-time error with clang. + static constexpr float na_sentinel = std::numeric_limits::quiet_NaN(); + + static constexpr bool supports_nulls = true; + + static inline bool isnull(float v) { return v != v; } +}; + +template <> +struct npy_traits { + typedef double value_type; + using TypeClass = DoubleType; + using BuilderClass = DoubleBuilder; + + static constexpr double na_sentinel = std::numeric_limits::quiet_NaN(); + + static constexpr bool supports_nulls = true; + + static inline bool isnull(double v) { return v != v; } +}; + +template <> +struct npy_traits { + typedef int64_t value_type; + using TypeClass = TimestampType; + using BuilderClass = TimestampBuilder; + + static constexpr bool supports_nulls = true; + + static inline bool isnull(int64_t v) { + // NaT = -2**63 + // = -0x8000000000000000 + // = -9223372036854775808; + // = std::numeric_limits::min() + return v == std::numeric_limits::min(); + } +}; + +template <> +struct npy_traits { + typedef int64_t value_type; + using TypeClass = DurationType; + using BuilderClass = DurationBuilder; + + static constexpr bool supports_nulls = true; + + static inline bool isnull(int64_t v) { + // NaT = -2**63 = std::numeric_limits::min() + return v == std::numeric_limits::min(); + } +}; + +template <> +struct npy_traits { + typedef PyObject* value_type; + static constexpr bool supports_nulls = true; + + static inline bool isnull(PyObject* v) { return v == Py_None; } +}; + +// +// Type traits for Arrow -> Numpy equivalence +// Note *supports_nulls* means the equivalent Numpy type support nulls +// +template +struct arrow_traits {}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_BOOL; + static constexpr bool supports_nulls = false; + typedef typename 
npy_traits::value_type T; +}; + +#define INT_DECL(TYPE) \ + template <> \ + struct arrow_traits { \ + static constexpr int npy_type = NPY_##TYPE; \ + static constexpr bool supports_nulls = false; \ + static constexpr double na_value = std::numeric_limits::quiet_NaN(); \ + typedef typename npy_traits::value_type T; \ + }; + +INT_DECL(INT8); +INT_DECL(INT16); +INT_DECL(INT32); +INT_DECL(INT64); +INT_DECL(UINT8); +INT_DECL(UINT16); +INT_DECL(UINT32); +INT_DECL(UINT64); + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_FLOAT16; + static constexpr bool supports_nulls = true; + static constexpr uint16_t na_value = NPY_HALF_NAN; + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_FLOAT32; + static constexpr bool supports_nulls = true; + static constexpr float na_value = std::numeric_limits::quiet_NaN(); + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_FLOAT64; + static constexpr bool supports_nulls = true; + static constexpr double na_value = std::numeric_limits::quiet_NaN(); + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_DATETIME; + static constexpr int64_t npy_shift = 1; + + static constexpr bool supports_nulls = true; + static constexpr int64_t na_value = kPandasTimestampNull; + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_TIMEDELTA; + static constexpr int64_t npy_shift = 1; + + static constexpr bool supports_nulls = true; + static constexpr int64_t na_value = kPandasTimestampNull; + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + // Data stores as FR_D day unit + static constexpr int npy_type = NPY_DATETIME; + static constexpr int64_t npy_shift = 1; + + static constexpr bool supports_nulls = true; + 
typedef typename npy_traits::value_type T; + + static constexpr int64_t na_value = kPandasTimestampNull; + static inline bool isnull(int64_t v) { return npy_traits::isnull(v); } +}; + +template <> +struct arrow_traits { + // Data stores as FR_D day unit + static constexpr int npy_type = NPY_DATETIME; + + // There are 1000 * 60 * 60 * 24 = 86400000ms in a day + static constexpr int64_t npy_shift = 86400000; + + static constexpr bool supports_nulls = true; + typedef typename npy_traits::value_type T; + + static constexpr int64_t na_value = kPandasTimestampNull; + static inline bool isnull(int64_t v) { return npy_traits::isnull(v); } +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_OBJECT; + static constexpr bool supports_nulls = true; + static constexpr int64_t na_value = kPandasTimestampNull; + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_OBJECT; + static constexpr bool supports_nulls = true; + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_OBJECT; + static constexpr bool supports_nulls = true; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_OBJECT; + static constexpr bool supports_nulls = true; +}; + +static inline NPY_DATETIMEUNIT NumPyFrequency(TimeUnit::type unit) { + switch (unit) { + case TimestampType::Unit::SECOND: + return NPY_FR_s; + case TimestampType::Unit::MILLI: + return NPY_FR_ms; + break; + case TimestampType::Unit::MICRO: + return NPY_FR_us; + default: + // NANO + return NPY_FR_ns; + } +} + +static inline int NumPyTypeSize(int npy_type) { + npy_type = fix_numpy_type_num(npy_type); + + switch (npy_type) { + case NPY_BOOL: + case NPY_INT8: + case NPY_UINT8: + return 1; + case NPY_INT16: + case NPY_UINT16: + return 2; + case NPY_INT32: + case NPY_UINT32: + return 4; + case NPY_INT64: + case NPY_UINT64: + return 8; + case NPY_FLOAT16: + 
return 2; + case NPY_FLOAT32: + return 4; + case NPY_FLOAT64: + return 8; + case NPY_DATETIME: + return 8; + case NPY_OBJECT: + return sizeof(void*); + default: + ARROW_CHECK(false) << "unhandled numpy type"; + break; + } + return -1; +} + +} // namespace internal +} // namespace py +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/udf.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/udf.h new file mode 100644 index 0000000000000000000000000000000000000000..d8c4e430e53d49a8fe7d237ffe7ba8feae5e452f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/udf.h @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/compute/exec.h" +#include "arrow/compute/function.h" +#include "arrow/compute/registry.h" +#include "arrow/python/platform.h" +#include "arrow/record_batch.h" +#include "arrow/util/iterator.h" + +#include "arrow/python/common.h" +#include "arrow/python/pyarrow.h" +#include "arrow/python/visibility.h" + +namespace arrow { + +namespace py { + +// TODO: TODO(ARROW-16041): UDF Options are not exposed to the Python +// users. 
This feature will be included when extending to provide advanced +// options for the users. +struct ARROW_PYTHON_EXPORT UdfOptions { + std::string func_name; + compute::Arity arity; + compute::FunctionDoc func_doc; + std::vector> input_types; + std::shared_ptr output_type; +}; + +/// \brief A context passed as the first argument of UDF functions. +struct ARROW_PYTHON_EXPORT UdfContext { + MemoryPool* pool; + int64_t batch_length; +}; + +using UdfWrapperCallback = std::function; + +/// \brief register a Scalar user-defined-function from Python +Status ARROW_PYTHON_EXPORT RegisterScalarFunction( + PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options, + compute::FunctionRegistry* registry = NULLPTR); + +/// \brief register a Table user-defined-function from Python +Status ARROW_PYTHON_EXPORT RegisterTabularFunction( + PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options, + compute::FunctionRegistry* registry = NULLPTR); + +/// \brief register a Aggregate user-defined-function from Python +Status ARROW_PYTHON_EXPORT RegisterAggregateFunction( + PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options, + compute::FunctionRegistry* registry = NULLPTR); + +/// \brief register a Vector user-defined-function from Python +Status ARROW_PYTHON_EXPORT RegisterVectorFunction( + PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options, + compute::FunctionRegistry* registry = NULLPTR); + +Result> ARROW_PYTHON_EXPORT +CallTabularFunction(const std::string& func_name, const std::vector& args, + compute::FunctionRegistry* registry = NULLPTR); + +} // namespace py + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/vendored/pythoncapi_compat.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/vendored/pythoncapi_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..4baa7b34a93500e0d0d120a60332fba1ed5091fe 
--- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/vendored/pythoncapi_compat.h @@ -0,0 +1,1519 @@ +// Header file providing new C API functions to old Python versions. +// +// File distributed under the Zero Clause BSD (0BSD) license. +// Copyright Contributors to the pythoncapi_compat project. +// +// Homepage: +// https://github.com/python/pythoncapi_compat +// +// Latest version: +// https://raw.githubusercontent.com/python/pythoncapi_compat/master/pythoncapi_compat.h +// +// Vendored from git revision: +// 39e2663e6acc0b68d5dd75bdaad0af33152552ae +// https://raw.githubusercontent.com/python/pythoncapi-compat/39e2663e6acc0b68d5dd75bdaad0af33152552ae/pythoncapi_compat.h +// +// SPDX-License-Identifier: 0BSD + +/* clang-format off */ + +#ifndef PYTHONCAPI_COMPAT +#define PYTHONCAPI_COMPAT + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +// Python 3.11.0b4 added PyFrame_Back() to Python.h +#if PY_VERSION_HEX < 0x030b00B4 && !defined(PYPY_VERSION) +# include "frameobject.h" // PyFrameObject, PyFrame_GetBack() +#endif + + +#ifndef _Py_CAST +# define _Py_CAST(type, expr) ((type)(expr)) +#endif + +// Static inline functions should use _Py_NULL rather than using directly NULL +// to prevent C++ compiler warnings. On C23 and newer and on C++11 and newer, +// _Py_NULL is defined as nullptr. +#if (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L) \ + || (defined(__cplusplus) && __cplusplus >= 201103) +# define _Py_NULL nullptr +#else +# define _Py_NULL NULL +#endif + +// Cast argument to PyObject* type. 
+#ifndef _PyObject_CAST +# define _PyObject_CAST(op) _Py_CAST(PyObject*, op) +#endif + + +// bpo-42262 added Py_NewRef() to Python 3.10.0a3 +#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_NewRef) +static inline PyObject* _Py_NewRef(PyObject *obj) +{ + Py_INCREF(obj); + return obj; +} +#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj)) +#endif + + +// bpo-42262 added Py_XNewRef() to Python 3.10.0a3 +#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_XNewRef) +static inline PyObject* _Py_XNewRef(PyObject *obj) +{ + Py_XINCREF(obj); + return obj; +} +#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj)) +#endif + + +// bpo-39573 added Py_SET_REFCNT() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_REFCNT) +static inline void _Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt) +{ + ob->ob_refcnt = refcnt; +} +#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT(_PyObject_CAST(ob), refcnt) +#endif + + +// Py_SETREF() and Py_XSETREF() were added to Python 3.5.2. +// It is excluded from the limited C API. +#if (PY_VERSION_HEX < 0x03050200 && !defined(Py_SETREF)) && !defined(Py_LIMITED_API) +#define Py_SETREF(dst, src) \ + do { \ + PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \ + PyObject *_tmp_dst = (*_tmp_dst_ptr); \ + *_tmp_dst_ptr = _PyObject_CAST(src); \ + Py_DECREF(_tmp_dst); \ + } while (0) + +#define Py_XSETREF(dst, src) \ + do { \ + PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \ + PyObject *_tmp_dst = (*_tmp_dst_ptr); \ + *_tmp_dst_ptr = _PyObject_CAST(src); \ + Py_XDECREF(_tmp_dst); \ + } while (0) +#endif + + +// bpo-43753 added Py_Is(), Py_IsNone(), Py_IsTrue() and Py_IsFalse() +// to Python 3.10.0b1. 
+#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_Is) +# define Py_Is(x, y) ((x) == (y)) +#endif +#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_IsNone) +# define Py_IsNone(x) Py_Is(x, Py_None) +#endif +#if (PY_VERSION_HEX < 0x030A00B1 || defined(PYPY_VERSION)) && !defined(Py_IsTrue) +# define Py_IsTrue(x) Py_Is(x, Py_True) +#endif +#if (PY_VERSION_HEX < 0x030A00B1 || defined(PYPY_VERSION)) && !defined(Py_IsFalse) +# define Py_IsFalse(x) Py_Is(x, Py_False) +#endif + + +// bpo-39573 added Py_SET_TYPE() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE) +static inline void _Py_SET_TYPE(PyObject *ob, PyTypeObject *type) +{ + ob->ob_type = type; +} +#define Py_SET_TYPE(ob, type) _Py_SET_TYPE(_PyObject_CAST(ob), type) +#endif + + +// bpo-39573 added Py_SET_SIZE() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_SIZE) +static inline void _Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size) +{ + ob->ob_size = size; +} +#define Py_SET_SIZE(ob, size) _Py_SET_SIZE((PyVarObject*)(ob), size) +#endif + + +// bpo-40421 added PyFrame_GetCode() to Python 3.9.0b1 +#if PY_VERSION_HEX < 0x030900B1 || defined(PYPY_VERSION) +static inline PyCodeObject* PyFrame_GetCode(PyFrameObject *frame) +{ + assert(frame != _Py_NULL); + assert(frame->f_code != _Py_NULL); + return _Py_CAST(PyCodeObject*, Py_NewRef(frame->f_code)); +} +#endif + +static inline PyCodeObject* _PyFrame_GetCodeBorrow(PyFrameObject *frame) +{ + PyCodeObject *code = PyFrame_GetCode(frame); + Py_DECREF(code); + return code; +} + + +// bpo-40421 added PyFrame_GetBack() to Python 3.9.0b1 +#if PY_VERSION_HEX < 0x030900B1 && !defined(PYPY_VERSION) +static inline PyFrameObject* PyFrame_GetBack(PyFrameObject *frame) +{ + assert(frame != _Py_NULL); + return _Py_CAST(PyFrameObject*, Py_XNewRef(frame->f_back)); +} +#endif + +#if !defined(PYPY_VERSION) +static inline PyFrameObject* _PyFrame_GetBackBorrow(PyFrameObject *frame) +{ + PyFrameObject *back = PyFrame_GetBack(frame); + Py_XDECREF(back); + 
return back; +} +#endif + + +// bpo-40421 added PyFrame_GetLocals() to Python 3.11.0a7 +#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION) +static inline PyObject* PyFrame_GetLocals(PyFrameObject *frame) +{ +#if PY_VERSION_HEX >= 0x030400B1 + if (PyFrame_FastToLocalsWithError(frame) < 0) { + return NULL; + } +#else + PyFrame_FastToLocals(frame); +#endif + return Py_NewRef(frame->f_locals); +} +#endif + + +// bpo-40421 added PyFrame_GetGlobals() to Python 3.11.0a7 +#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION) +static inline PyObject* PyFrame_GetGlobals(PyFrameObject *frame) +{ + return Py_NewRef(frame->f_globals); +} +#endif + + +// bpo-40421 added PyFrame_GetBuiltins() to Python 3.11.0a7 +#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION) +static inline PyObject* PyFrame_GetBuiltins(PyFrameObject *frame) +{ + return Py_NewRef(frame->f_builtins); +} +#endif + + +// bpo-40421 added PyFrame_GetLasti() to Python 3.11.0b1 +#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION) +static inline int PyFrame_GetLasti(PyFrameObject *frame) +{ +#if PY_VERSION_HEX >= 0x030A00A7 + // bpo-27129: Since Python 3.10.0a7, f_lasti is an instruction offset, + // not a bytes offset anymore. Python uses 16-bit "wordcode" (2 bytes) + // instructions. 
+ if (frame->f_lasti < 0) { + return -1; + } + return frame->f_lasti * 2; +#else + return frame->f_lasti; +#endif +} +#endif + + +// gh-91248 added PyFrame_GetVar() to Python 3.12.0a2 +#if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION) +static inline PyObject* PyFrame_GetVar(PyFrameObject *frame, PyObject *name) +{ + PyObject *locals, *value; + + locals = PyFrame_GetLocals(frame); + if (locals == NULL) { + return NULL; + } +#if PY_VERSION_HEX >= 0x03000000 + value = PyDict_GetItemWithError(locals, name); +#else + value = _PyDict_GetItemWithError(locals, name); +#endif + Py_DECREF(locals); + + if (value == NULL) { + if (PyErr_Occurred()) { + return NULL; + } +#if PY_VERSION_HEX >= 0x03000000 + PyErr_Format(PyExc_NameError, "variable %R does not exist", name); +#else + PyErr_SetString(PyExc_NameError, "variable does not exist"); +#endif + return NULL; + } + return Py_NewRef(value); +} +#endif + + +// gh-91248 added PyFrame_GetVarString() to Python 3.12.0a2 +#if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION) +static inline PyObject* +PyFrame_GetVarString(PyFrameObject *frame, const char *name) +{ + PyObject *name_obj, *value; +#if PY_VERSION_HEX >= 0x03000000 + name_obj = PyUnicode_FromString(name); +#else + name_obj = PyString_FromString(name); +#endif + if (name_obj == NULL) { + return NULL; + } + value = PyFrame_GetVar(frame, name_obj); + Py_DECREF(name_obj); + return value; +} +#endif + + +// bpo-39947 added PyThreadState_GetInterpreter() to Python 3.9.0a5 +#if PY_VERSION_HEX < 0x030900A5 || defined(PYPY_VERSION) +static inline PyInterpreterState * +PyThreadState_GetInterpreter(PyThreadState *tstate) +{ + assert(tstate != _Py_NULL); + return tstate->interp; +} +#endif + + +// bpo-40429 added PyThreadState_GetFrame() to Python 3.9.0b1 +#if PY_VERSION_HEX < 0x030900B1 && !defined(PYPY_VERSION) +static inline PyFrameObject* PyThreadState_GetFrame(PyThreadState *tstate) +{ + assert(tstate != _Py_NULL); + return _Py_CAST(PyFrameObject *, 
Py_XNewRef(tstate->frame)); +} +#endif + +#if !defined(PYPY_VERSION) +static inline PyFrameObject* +_PyThreadState_GetFrameBorrow(PyThreadState *tstate) +{ + PyFrameObject *frame = PyThreadState_GetFrame(tstate); + Py_XDECREF(frame); + return frame; +} +#endif + + +// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a5 +#if PY_VERSION_HEX < 0x030900A5 || defined(PYPY_VERSION) +static inline PyInterpreterState* PyInterpreterState_Get(void) +{ + PyThreadState *tstate; + PyInterpreterState *interp; + + tstate = PyThreadState_GET(); + if (tstate == _Py_NULL) { + Py_FatalError("GIL released (tstate is NULL)"); + } + interp = tstate->interp; + if (interp == _Py_NULL) { + Py_FatalError("no current interpreter"); + } + return interp; +} +#endif + + +// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a6 +#if 0x030700A1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030900A6 && !defined(PYPY_VERSION) +static inline uint64_t PyThreadState_GetID(PyThreadState *tstate) +{ + assert(tstate != _Py_NULL); + return tstate->id; +} +#endif + +// bpo-43760 added PyThreadState_EnterTracing() to Python 3.11.0a2 +#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION) +static inline void PyThreadState_EnterTracing(PyThreadState *tstate) +{ + tstate->tracing++; +#if PY_VERSION_HEX >= 0x030A00A1 + tstate->cframe->use_tracing = 0; +#else + tstate->use_tracing = 0; +#endif +} +#endif + +// bpo-43760 added PyThreadState_LeaveTracing() to Python 3.11.0a2 +#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION) +static inline void PyThreadState_LeaveTracing(PyThreadState *tstate) +{ + int use_tracing = (tstate->c_tracefunc != _Py_NULL + || tstate->c_profilefunc != _Py_NULL); + tstate->tracing--; +#if PY_VERSION_HEX >= 0x030A00A1 + tstate->cframe->use_tracing = use_tracing; +#else + tstate->use_tracing = use_tracing; +#endif +} +#endif + + +// bpo-37194 added PyObject_CallNoArgs() to Python 3.9.0a1 +// PyObject_CallNoArgs() added to PyPy 3.9.16-v7.3.11 +#if 
!defined(PyObject_CallNoArgs) && PY_VERSION_HEX < 0x030900A1 +static inline PyObject* PyObject_CallNoArgs(PyObject *func) +{ + return PyObject_CallFunctionObjArgs(func, NULL); +} +#endif + + +// bpo-39245 made PyObject_CallOneArg() public (previously called +// _PyObject_CallOneArg) in Python 3.9.0a4 +// PyObject_CallOneArg() added to PyPy 3.9.16-v7.3.11 +#if !defined(PyObject_CallOneArg) && PY_VERSION_HEX < 0x030900A4 +static inline PyObject* PyObject_CallOneArg(PyObject *func, PyObject *arg) +{ + return PyObject_CallFunctionObjArgs(func, arg, NULL); +} +#endif + + +// bpo-1635741 added PyModule_AddObjectRef() to Python 3.10.0a3 +#if PY_VERSION_HEX < 0x030A00A3 +static inline int +PyModule_AddObjectRef(PyObject *module, const char *name, PyObject *value) +{ + int res; + + if (!value && !PyErr_Occurred()) { + // PyModule_AddObject() raises TypeError in this case + PyErr_SetString(PyExc_SystemError, + "PyModule_AddObjectRef() must be called " + "with an exception raised if value is NULL"); + return -1; + } + + Py_XINCREF(value); + res = PyModule_AddObject(module, name, value); + if (res < 0) { + Py_XDECREF(value); + } + return res; +} +#endif + + +// bpo-40024 added PyModule_AddType() to Python 3.9.0a5 +#if PY_VERSION_HEX < 0x030900A5 +static inline int PyModule_AddType(PyObject *module, PyTypeObject *type) +{ + const char *name, *dot; + + if (PyType_Ready(type) < 0) { + return -1; + } + + // inline _PyType_Name() + name = type->tp_name; + assert(name != _Py_NULL); + dot = strrchr(name, '.'); + if (dot != _Py_NULL) { + name = dot + 1; + } + + return PyModule_AddObjectRef(module, name, _PyObject_CAST(type)); +} +#endif + + +// bpo-40241 added PyObject_GC_IsTracked() to Python 3.9.0a6. +// bpo-4688 added _PyObject_GC_IS_TRACKED() to Python 2.7.0a2. 
+#if PY_VERSION_HEX < 0x030900A6 && !defined(PYPY_VERSION) +static inline int PyObject_GC_IsTracked(PyObject* obj) +{ + return (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj)); +} +#endif + +// bpo-40241 added PyObject_GC_IsFinalized() to Python 3.9.0a6. +// bpo-18112 added _PyGCHead_FINALIZED() to Python 3.4.0 final. +#if PY_VERSION_HEX < 0x030900A6 && PY_VERSION_HEX >= 0x030400F0 && !defined(PYPY_VERSION) +static inline int PyObject_GC_IsFinalized(PyObject *obj) +{ + PyGC_Head *gc = _Py_CAST(PyGC_Head*, obj) - 1; + return (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED(gc)); +} +#endif + + +// bpo-39573 added Py_IS_TYPE() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_IS_TYPE) +static inline int _Py_IS_TYPE(PyObject *ob, PyTypeObject *type) { + return Py_TYPE(ob) == type; +} +#define Py_IS_TYPE(ob, type) _Py_IS_TYPE(_PyObject_CAST(ob), type) +#endif + + +// bpo-46906 added PyFloat_Pack2() and PyFloat_Unpack2() to Python 3.11a7. +// bpo-11734 added _PyFloat_Pack2() and _PyFloat_Unpack2() to Python 3.6.0b1. +// Python 3.11a2 moved _PyFloat_Pack2() and _PyFloat_Unpack2() to the internal +// C API: Python 3.11a2-3.11a6 versions are not supported. +#if 0x030600B1 <= PY_VERSION_HEX && PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION) +static inline int PyFloat_Pack2(double x, char *p, int le) +{ return _PyFloat_Pack2(x, (unsigned char*)p, le); } + +static inline double PyFloat_Unpack2(const char *p, int le) +{ return _PyFloat_Unpack2((const unsigned char *)p, le); } +#endif + + +// bpo-46906 added PyFloat_Pack4(), PyFloat_Pack8(), PyFloat_Unpack4() and +// PyFloat_Unpack8() to Python 3.11a7. +// Python 3.11a2 moved _PyFloat_Pack4(), _PyFloat_Pack8(), _PyFloat_Unpack4() +// and _PyFloat_Unpack8() to the internal C API: Python 3.11a2-3.11a6 versions +// are not supported. 
+#if PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION) +static inline int PyFloat_Pack4(double x, char *p, int le) +{ return _PyFloat_Pack4(x, (unsigned char*)p, le); } + +static inline int PyFloat_Pack8(double x, char *p, int le) +{ return _PyFloat_Pack8(x, (unsigned char*)p, le); } + +static inline double PyFloat_Unpack4(const char *p, int le) +{ return _PyFloat_Unpack4((const unsigned char *)p, le); } + +static inline double PyFloat_Unpack8(const char *p, int le) +{ return _PyFloat_Unpack8((const unsigned char *)p, le); } +#endif + + +// gh-92154 added PyCode_GetCode() to Python 3.11.0b1 +#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION) +static inline PyObject* PyCode_GetCode(PyCodeObject *code) +{ + return Py_NewRef(code->co_code); +} +#endif + + +// gh-95008 added PyCode_GetVarnames() to Python 3.11.0rc1 +#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION) +static inline PyObject* PyCode_GetVarnames(PyCodeObject *code) +{ + return Py_NewRef(code->co_varnames); +} +#endif + +// gh-95008 added PyCode_GetFreevars() to Python 3.11.0rc1 +#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION) +static inline PyObject* PyCode_GetFreevars(PyCodeObject *code) +{ + return Py_NewRef(code->co_freevars); +} +#endif + +// gh-95008 added PyCode_GetCellvars() to Python 3.11.0rc1 +#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION) +static inline PyObject* PyCode_GetCellvars(PyCodeObject *code) +{ + return Py_NewRef(code->co_cellvars); +} +#endif + + +// Py_UNUSED() was added to Python 3.4.0b2. 
+#if PY_VERSION_HEX < 0x030400B2 && !defined(Py_UNUSED) +# if defined(__GNUC__) || defined(__clang__) +# define Py_UNUSED(name) _unused_ ## name __attribute__((unused)) +# else +# define Py_UNUSED(name) _unused_ ## name +# endif +#endif + + +// gh-105922 added PyImport_AddModuleRef() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A0 +static inline PyObject* PyImport_AddModuleRef(const char *name) +{ + return Py_XNewRef(PyImport_AddModule(name)); +} +#endif + + +// gh-105927 added PyWeakref_GetRef() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D0000 +static inline int PyWeakref_GetRef(PyObject *ref, PyObject **pobj) +{ + PyObject *obj; + if (ref != NULL && !PyWeakref_Check(ref)) { + *pobj = NULL; + PyErr_SetString(PyExc_TypeError, "expected a weakref"); + return -1; + } + obj = PyWeakref_GetObject(ref); + if (obj == NULL) { + // SystemError if ref is NULL + *pobj = NULL; + return -1; + } + if (obj == Py_None) { + *pobj = NULL; + return 0; + } + *pobj = Py_NewRef(obj); + return (*pobj != NULL); +} +#endif + + +// bpo-36974 added PY_VECTORCALL_ARGUMENTS_OFFSET to Python 3.8b1 +#ifndef PY_VECTORCALL_ARGUMENTS_OFFSET +# define PY_VECTORCALL_ARGUMENTS_OFFSET (_Py_CAST(size_t, 1) << (8 * sizeof(size_t) - 1)) +#endif + +// bpo-36974 added PyVectorcall_NARGS() to Python 3.8b1 +#if PY_VERSION_HEX < 0x030800B1 +static inline Py_ssize_t PyVectorcall_NARGS(size_t n) +{ + return n & ~PY_VECTORCALL_ARGUMENTS_OFFSET; +} +#endif + + +// gh-105922 added PyObject_Vectorcall() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 +static inline PyObject* +PyObject_Vectorcall(PyObject *callable, PyObject *const *args, + size_t nargsf, PyObject *kwnames) +{ +#if PY_VERSION_HEX >= 0x030800B1 && !defined(PYPY_VERSION) + // bpo-36974 added _PyObject_Vectorcall() to Python 3.8.0b1 + return _PyObject_Vectorcall(callable, args, nargsf, kwnames); +#else + PyObject *posargs = NULL, *kwargs = NULL; + PyObject *res; + Py_ssize_t nposargs, nkwargs, i; + + if (nargsf != 0 && args == NULL) { + 
PyErr_BadInternalCall(); + goto error; + } + if (kwnames != NULL && !PyTuple_Check(kwnames)) { + PyErr_BadInternalCall(); + goto error; + } + + nposargs = (Py_ssize_t)PyVectorcall_NARGS(nargsf); + if (kwnames) { + nkwargs = PyTuple_GET_SIZE(kwnames); + } + else { + nkwargs = 0; + } + + posargs = PyTuple_New(nposargs); + if (posargs == NULL) { + goto error; + } + if (nposargs) { + for (i=0; i < nposargs; i++) { + PyTuple_SET_ITEM(posargs, i, Py_NewRef(*args)); + args++; + } + } + + if (nkwargs) { + kwargs = PyDict_New(); + if (kwargs == NULL) { + goto error; + } + + for (i = 0; i < nkwargs; i++) { + PyObject *key = PyTuple_GET_ITEM(kwnames, i); + PyObject *value = *args; + args++; + if (PyDict_SetItem(kwargs, key, value) < 0) { + goto error; + } + } + } + else { + kwargs = NULL; + } + + res = PyObject_Call(callable, posargs, kwargs); + Py_DECREF(posargs); + Py_XDECREF(kwargs); + return res; + +error: + Py_DECREF(posargs); + Py_XDECREF(kwargs); + return NULL; +#endif +} +#endif + + +// gh-106521 added PyObject_GetOptionalAttr() and +// PyObject_GetOptionalAttrString() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A1 +static inline int +PyObject_GetOptionalAttr(PyObject *obj, PyObject *attr_name, PyObject **result) +{ + // bpo-32571 added _PyObject_LookupAttr() to Python 3.7.0b1 +#if PY_VERSION_HEX >= 0x030700B1 && !defined(PYPY_VERSION) + return _PyObject_LookupAttr(obj, attr_name, result); +#else + *result = PyObject_GetAttr(obj, attr_name); + if (*result != NULL) { + return 1; + } + if (!PyErr_Occurred()) { + return 0; + } + if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + return 0; + } + return -1; +#endif +} + +static inline int +PyObject_GetOptionalAttrString(PyObject *obj, const char *attr_name, PyObject **result) +{ + PyObject *name_obj; + int rc; +#if PY_VERSION_HEX >= 0x03000000 + name_obj = PyUnicode_FromString(attr_name); +#else + name_obj = PyString_FromString(attr_name); +#endif + if (name_obj == NULL) { + *result = NULL; + 
return -1; + } + rc = PyObject_GetOptionalAttr(obj, name_obj, result); + Py_DECREF(name_obj); + return rc; +} +#endif + + +// gh-106307 added PyObject_GetOptionalAttr() and +// PyMapping_GetOptionalItemString() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A1 +static inline int +PyMapping_GetOptionalItem(PyObject *obj, PyObject *key, PyObject **result) +{ + *result = PyObject_GetItem(obj, key); + if (*result) { + return 1; + } + if (!PyErr_ExceptionMatches(PyExc_KeyError)) { + return -1; + } + PyErr_Clear(); + return 0; +} + +static inline int +PyMapping_GetOptionalItemString(PyObject *obj, const char *key, PyObject **result) +{ + PyObject *key_obj; + int rc; +#if PY_VERSION_HEX >= 0x03000000 + key_obj = PyUnicode_FromString(key); +#else + key_obj = PyString_FromString(key); +#endif + if (key_obj == NULL) { + *result = NULL; + return -1; + } + rc = PyMapping_GetOptionalItem(obj, key_obj, result); + Py_DECREF(key_obj); + return rc; +} +#endif + +// gh-108511 added PyMapping_HasKeyWithError() and +// PyMapping_HasKeyStringWithError() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A1 +static inline int +PyMapping_HasKeyWithError(PyObject *obj, PyObject *key) +{ + PyObject *res; + int rc = PyMapping_GetOptionalItem(obj, key, &res); + Py_XDECREF(res); + return rc; +} + +static inline int +PyMapping_HasKeyStringWithError(PyObject *obj, const char *key) +{ + PyObject *res; + int rc = PyMapping_GetOptionalItemString(obj, key, &res); + Py_XDECREF(res); + return rc; +} +#endif + + +// gh-108511 added PyObject_HasAttrWithError() and +// PyObject_HasAttrStringWithError() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A1 +static inline int +PyObject_HasAttrWithError(PyObject *obj, PyObject *attr) +{ + PyObject *res; + int rc = PyObject_GetOptionalAttr(obj, attr, &res); + Py_XDECREF(res); + return rc; +} + +static inline int +PyObject_HasAttrStringWithError(PyObject *obj, const char *attr) +{ + PyObject *res; + int rc = PyObject_GetOptionalAttrString(obj, attr, &res); + 
Py_XDECREF(res); + return rc; +} +#endif + + +// gh-106004 added PyDict_GetItemRef() and PyDict_GetItemStringRef() +// to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A1 +static inline int +PyDict_GetItemRef(PyObject *mp, PyObject *key, PyObject **result) +{ +#if PY_VERSION_HEX >= 0x03000000 + PyObject *item = PyDict_GetItemWithError(mp, key); +#else + PyObject *item = _PyDict_GetItemWithError(mp, key); +#endif + if (item != NULL) { + *result = Py_NewRef(item); + return 1; // found + } + if (!PyErr_Occurred()) { + *result = NULL; + return 0; // not found + } + *result = NULL; + return -1; +} + +static inline int +PyDict_GetItemStringRef(PyObject *mp, const char *key, PyObject **result) +{ + int res; +#if PY_VERSION_HEX >= 0x03000000 + PyObject *key_obj = PyUnicode_FromString(key); +#else + PyObject *key_obj = PyString_FromString(key); +#endif + if (key_obj == NULL) { + *result = NULL; + return -1; + } + res = PyDict_GetItemRef(mp, key_obj, result); + Py_DECREF(key_obj); + return res; +} +#endif + + +// gh-106307 added PyModule_Add() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A1 +static inline int +PyModule_Add(PyObject *mod, const char *name, PyObject *value) +{ + int res = PyModule_AddObjectRef(mod, name, value); + Py_XDECREF(value); + return res; +} +#endif + + +// gh-108014 added Py_IsFinalizing() to Python 3.13.0a1 +// bpo-1856 added _Py_Finalizing to Python 3.2.1b1. +// _Py_IsFinalizing() was added to PyPy 7.3.0. +#if (0x030201B1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030D00A1) \ + && (!defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM >= 0x7030000) +static inline int Py_IsFinalizing(void) +{ +#if PY_VERSION_HEX >= 0x030700A1 + // _Py_IsFinalizing() was added to Python 3.7.0a1. 
+ return _Py_IsFinalizing(); +#else + return (_Py_Finalizing != NULL); +#endif +} +#endif + + +// gh-108323 added PyDict_ContainsString() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A1 +static inline int PyDict_ContainsString(PyObject *op, const char *key) +{ + PyObject *key_obj = PyUnicode_FromString(key); + if (key_obj == NULL) { + return -1; + } + int res = PyDict_Contains(op, key_obj); + Py_DECREF(key_obj); + return res; +} +#endif + + +// gh-108445 added PyLong_AsInt() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A1 +static inline int PyLong_AsInt(PyObject *obj) +{ +#ifdef PYPY_VERSION + long value = PyLong_AsLong(obj); + if (value == -1 && PyErr_Occurred()) { + return -1; + } + if (value < (long)INT_MIN || (long)INT_MAX < value) { + PyErr_SetString(PyExc_OverflowError, + "Python int too large to convert to C int"); + return -1; + } + return (int)value; +#else + return _PyLong_AsInt(obj); +#endif +} +#endif + + +// gh-107073 added PyObject_VisitManagedDict() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A1 +static inline int +PyObject_VisitManagedDict(PyObject *obj, visitproc visit, void *arg) +{ + PyObject **dict = _PyObject_GetDictPtr(obj); + if (*dict == NULL) { + return -1; + } + Py_VISIT(*dict); + return 0; +} + +static inline void +PyObject_ClearManagedDict(PyObject *obj) +{ + PyObject **dict = _PyObject_GetDictPtr(obj); + if (*dict == NULL) { + return; + } + Py_CLEAR(*dict); +} +#endif + +// gh-108867 added PyThreadState_GetUnchecked() to Python 3.13.0a1 +// Python 3.5.2 added _PyThreadState_UncheckedGet(). 
+#if PY_VERSION_HEX >= 0x03050200 && PY_VERSION_HEX < 0x030D00A1 +static inline PyThreadState* +PyThreadState_GetUnchecked(void) +{ + return _PyThreadState_UncheckedGet(); +} +#endif + +// gh-110289 added PyUnicode_EqualToUTF8() and PyUnicode_EqualToUTF8AndSize() +// to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A1 +static inline int +PyUnicode_EqualToUTF8AndSize(PyObject *unicode, const char *str, Py_ssize_t str_len) +{ + Py_ssize_t len; + const void *utf8; + PyObject *exc_type, *exc_value, *exc_tb; + int res; + + // API cannot report errors so save/restore the exception + PyErr_Fetch(&exc_type, &exc_value, &exc_tb); + + // Python 3.3.0a1 added PyUnicode_AsUTF8AndSize() +#if PY_VERSION_HEX >= 0x030300A1 + if (PyUnicode_IS_ASCII(unicode)) { + utf8 = PyUnicode_DATA(unicode); + len = PyUnicode_GET_LENGTH(unicode); + } + else { + utf8 = PyUnicode_AsUTF8AndSize(unicode, &len); + if (utf8 == NULL) { + // Memory allocation failure. The API cannot report error, + // so ignore the exception and return 0. + res = 0; + goto done; + } + } + + if (len != str_len) { + res = 0; + goto done; + } + res = (memcmp(utf8, str, (size_t)len) == 0); +#else + PyObject *bytes = PyUnicode_AsUTF8String(unicode); + if (bytes == NULL) { + // Memory allocation failure. The API cannot report error, + // so ignore the exception and return 0. 
+ res = 0; + goto done; + } + +#if PY_VERSION_HEX >= 0x03000000 + len = PyBytes_GET_SIZE(bytes); + utf8 = PyBytes_AS_STRING(bytes); +#else + len = PyString_GET_SIZE(bytes); + utf8 = PyString_AS_STRING(bytes); +#endif + if (len != str_len) { + Py_DECREF(bytes); + res = 0; + goto done; + } + + res = (memcmp(utf8, str, (size_t)len) == 0); + Py_DECREF(bytes); +#endif + +done: + PyErr_Restore(exc_type, exc_value, exc_tb); + return res; +} + +static inline int +PyUnicode_EqualToUTF8(PyObject *unicode, const char *str) +{ + return PyUnicode_EqualToUTF8AndSize(unicode, str, (Py_ssize_t)strlen(str)); +} +#endif + + +// gh-111138 added PyList_Extend() and PyList_Clear() to Python 3.13.0a2 +#if PY_VERSION_HEX < 0x030D00A2 +static inline int +PyList_Extend(PyObject *list, PyObject *iterable) +{ + return PyList_SetSlice(list, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, iterable); +} + +static inline int +PyList_Clear(PyObject *list) +{ + return PyList_SetSlice(list, 0, PY_SSIZE_T_MAX, NULL); +} +#endif + +// gh-111262 added PyDict_Pop() and PyDict_PopString() to Python 3.13.0a2 +#if PY_VERSION_HEX < 0x030D00A2 +static inline int +PyDict_Pop(PyObject *dict, PyObject *key, PyObject **result) +{ + PyObject *value; + + if (!PyDict_Check(dict)) { + PyErr_BadInternalCall(); + if (result) { + *result = NULL; + } + return -1; + } + + // bpo-16991 added _PyDict_Pop() to Python 3.5.0b2. + // Python 3.6.0b3 changed _PyDict_Pop() first argument type to PyObject*. + // Python 3.13.0a1 removed _PyDict_Pop(). 
+#if defined(PYPY_VERSION) || PY_VERSION_HEX < 0x030500b2 || PY_VERSION_HEX >= 0x030D0000 + value = PyObject_CallMethod(dict, "pop", "O", key); +#elif PY_VERSION_HEX < 0x030600b3 + value = _PyDict_Pop(_Py_CAST(PyDictObject*, dict), key, NULL); +#else + value = _PyDict_Pop(dict, key, NULL); +#endif + if (value == NULL) { + if (result) { + *result = NULL; + } + if (PyErr_Occurred() && !PyErr_ExceptionMatches(PyExc_KeyError)) { + return -1; + } + PyErr_Clear(); + return 0; + } + if (result) { + *result = value; + } + else { + Py_DECREF(value); + } + return 1; +} + +static inline int +PyDict_PopString(PyObject *dict, const char *key, PyObject **result) +{ + PyObject *key_obj = PyUnicode_FromString(key); + if (key_obj == NULL) { + if (result != NULL) { + *result = NULL; + } + return -1; + } + + int res = PyDict_Pop(dict, key_obj, result); + Py_DECREF(key_obj); + return res; +} +#endif + + +#if PY_VERSION_HEX < 0x030200A4 +// Python 3.2.0a4 added Py_hash_t type +typedef Py_ssize_t Py_hash_t; +#endif + + +// gh-111545 added Py_HashPointer() to Python 3.13.0a3 +#if PY_VERSION_HEX < 0x030D00A3 +static inline Py_hash_t Py_HashPointer(const void *ptr) +{ +#if PY_VERSION_HEX >= 0x030900A4 && !defined(PYPY_VERSION) + return _Py_HashPointer(ptr); +#else + return _Py_HashPointer(_Py_CAST(void*, ptr)); +#endif +} +#endif + + +// Python 3.13a4 added a PyTime API. +// Use the private API added to Python 3.5. 
+#if PY_VERSION_HEX < 0x030D00A4 && PY_VERSION_HEX >= 0x03050000 +typedef _PyTime_t PyTime_t; +#define PyTime_MIN _PyTime_MIN +#define PyTime_MAX _PyTime_MAX + +static inline double PyTime_AsSecondsDouble(PyTime_t t) +{ return _PyTime_AsSecondsDouble(t); } + +static inline int PyTime_Monotonic(PyTime_t *result) +{ return _PyTime_GetMonotonicClockWithInfo(result, NULL); } + +static inline int PyTime_Time(PyTime_t *result) +{ return _PyTime_GetSystemClockWithInfo(result, NULL); } + +static inline int PyTime_PerfCounter(PyTime_t *result) +{ +#if PY_VERSION_HEX >= 0x03070000 && !defined(PYPY_VERSION) + return _PyTime_GetPerfCounterWithInfo(result, NULL); +#elif PY_VERSION_HEX >= 0x03070000 + // Call time.perf_counter_ns() and convert Python int object to PyTime_t. + // Cache time.perf_counter_ns() function for best performance. + static PyObject *func = NULL; + if (func == NULL) { + PyObject *mod = PyImport_ImportModule("time"); + if (mod == NULL) { + return -1; + } + + func = PyObject_GetAttrString(mod, "perf_counter_ns"); + Py_DECREF(mod); + if (func == NULL) { + return -1; + } + } + + PyObject *res = PyObject_CallNoArgs(func); + if (res == NULL) { + return -1; + } + long long value = PyLong_AsLongLong(res); + Py_DECREF(res); + + if (value == -1 && PyErr_Occurred()) { + return -1; + } + + Py_BUILD_ASSERT(sizeof(value) >= sizeof(PyTime_t)); + *result = (PyTime_t)value; + return 0; +#else + // Call time.perf_counter() and convert C double to PyTime_t. + // Cache time.perf_counter() function for best performance. 
+ static PyObject *func = NULL; + if (func == NULL) { + PyObject *mod = PyImport_ImportModule("time"); + if (mod == NULL) { + return -1; + } + + func = PyObject_GetAttrString(mod, "perf_counter"); + Py_DECREF(mod); + if (func == NULL) { + return -1; + } + } + + PyObject *res = PyObject_CallNoArgs(func); + if (res == NULL) { + return -1; + } + double d = PyFloat_AsDouble(res); + Py_DECREF(res); + + if (d == -1.0 && PyErr_Occurred()) { + return -1; + } + + // Avoid floor() to avoid having to link to libm + *result = (PyTime_t)(d * 1e9); + return 0; +#endif +} + +#endif + +// gh-111389 added hash constants to Python 3.13.0a5. These constants were +// added first as private macros to Python 3.4.0b1 and PyPy 7.3.9. +#if (!defined(PyHASH_BITS) \ + && ((!defined(PYPY_VERSION) && PY_VERSION_HEX >= 0x030400B1) \ + || (defined(PYPY_VERSION) && PY_VERSION_HEX >= 0x03070000 \ + && PYPY_VERSION_NUM >= 0x07090000))) +# define PyHASH_BITS _PyHASH_BITS +# define PyHASH_MODULUS _PyHASH_MODULUS +# define PyHASH_INF _PyHASH_INF +# define PyHASH_IMAG _PyHASH_IMAG +#endif + + +// gh-111545 added Py_GetConstant() and Py_GetConstantBorrowed() +// to Python 3.13.0a6 +#if PY_VERSION_HEX < 0x030D00A6 && !defined(Py_CONSTANT_NONE) + +#define Py_CONSTANT_NONE 0 +#define Py_CONSTANT_FALSE 1 +#define Py_CONSTANT_TRUE 2 +#define Py_CONSTANT_ELLIPSIS 3 +#define Py_CONSTANT_NOT_IMPLEMENTED 4 +#define Py_CONSTANT_ZERO 5 +#define Py_CONSTANT_ONE 6 +#define Py_CONSTANT_EMPTY_STR 7 +#define Py_CONSTANT_EMPTY_BYTES 8 +#define Py_CONSTANT_EMPTY_TUPLE 9 + +static inline PyObject* Py_GetConstant(unsigned int constant_id) +{ + static PyObject* constants[Py_CONSTANT_EMPTY_TUPLE + 1] = {NULL}; + + if (constants[Py_CONSTANT_NONE] == NULL) { + constants[Py_CONSTANT_NONE] = Py_None; + constants[Py_CONSTANT_FALSE] = Py_False; + constants[Py_CONSTANT_TRUE] = Py_True; + constants[Py_CONSTANT_ELLIPSIS] = Py_Ellipsis; + constants[Py_CONSTANT_NOT_IMPLEMENTED] = Py_NotImplemented; + + constants[Py_CONSTANT_ZERO] = 
PyLong_FromLong(0); + if (constants[Py_CONSTANT_ZERO] == NULL) { + goto fatal_error; + } + + constants[Py_CONSTANT_ONE] = PyLong_FromLong(1); + if (constants[Py_CONSTANT_ONE] == NULL) { + goto fatal_error; + } + + constants[Py_CONSTANT_EMPTY_STR] = PyUnicode_FromStringAndSize("", 0); + if (constants[Py_CONSTANT_EMPTY_STR] == NULL) { + goto fatal_error; + } + + constants[Py_CONSTANT_EMPTY_BYTES] = PyBytes_FromStringAndSize("", 0); + if (constants[Py_CONSTANT_EMPTY_BYTES] == NULL) { + goto fatal_error; + } + + constants[Py_CONSTANT_EMPTY_TUPLE] = PyTuple_New(0); + if (constants[Py_CONSTANT_EMPTY_TUPLE] == NULL) { + goto fatal_error; + } + // goto dance to avoid compiler warnings about Py_FatalError() + goto init_done; + +fatal_error: + // This case should never happen + Py_FatalError("Py_GetConstant() failed to get constants"); + } + +init_done: + if (constant_id <= Py_CONSTANT_EMPTY_TUPLE) { + return Py_NewRef(constants[constant_id]); + } + else { + PyErr_BadInternalCall(); + return NULL; + } +} + +static inline PyObject* Py_GetConstantBorrowed(unsigned int constant_id) +{ + PyObject *obj = Py_GetConstant(constant_id); + Py_XDECREF(obj); + return obj; +} +#endif + + +// gh-114329 added PyList_GetItemRef() to Python 3.13.0a4 +#if PY_VERSION_HEX < 0x030D00A4 +static inline PyObject * +PyList_GetItemRef(PyObject *op, Py_ssize_t index) +{ + PyObject *item = PyList_GetItem(op, index); + Py_XINCREF(item); + return item; +} +#endif + + +// gh-114329 added PyList_GetItemRef() to Python 3.13.0a4 +#if PY_VERSION_HEX < 0x030D00A4 +static inline int +PyDict_SetDefaultRef(PyObject *d, PyObject *key, PyObject *default_value, + PyObject **result) +{ + PyObject *value; + if (PyDict_GetItemRef(d, key, &value) < 0) { + // get error + if (result) { + *result = NULL; + } + return -1; + } + if (value != NULL) { + // present + if (result) { + *result = value; + } + else { + Py_DECREF(value); + } + return 1; + } + + // missing: set the item + if (PyDict_SetItem(d, key, default_value) < 0) 
{ + // set error + if (result) { + *result = NULL; + } + return -1; + } + if (result) { + *result = Py_NewRef(default_value); + } + return 0; +} +#endif + +#if PY_VERSION_HEX < 0x030E0000 && PY_VERSION_HEX >= 0x03060000 && !defined(PYPY_VERSION) +typedef struct PyUnicodeWriter PyUnicodeWriter; + +static inline void PyUnicodeWriter_Discard(PyUnicodeWriter *writer) +{ + _PyUnicodeWriter_Dealloc((_PyUnicodeWriter*)writer); + PyMem_Free(writer); +} + +static inline PyUnicodeWriter* PyUnicodeWriter_Create(Py_ssize_t length) +{ + if (length < 0) { + PyErr_SetString(PyExc_ValueError, + "length must be positive"); + return NULL; + } + + const size_t size = sizeof(_PyUnicodeWriter); + PyUnicodeWriter *pub_writer = (PyUnicodeWriter *)PyMem_Malloc(size); + if (pub_writer == _Py_NULL) { + PyErr_NoMemory(); + return _Py_NULL; + } + _PyUnicodeWriter *writer = (_PyUnicodeWriter *)pub_writer; + + _PyUnicodeWriter_Init(writer); + if (_PyUnicodeWriter_Prepare(writer, length, 127) < 0) { + PyUnicodeWriter_Discard(pub_writer); + return NULL; + } + writer->overallocate = 1; + return pub_writer; +} + +static inline PyObject* PyUnicodeWriter_Finish(PyUnicodeWriter *writer) +{ + PyObject *str = _PyUnicodeWriter_Finish((_PyUnicodeWriter*)writer); + assert(((_PyUnicodeWriter*)writer)->buffer == NULL); + PyMem_Free(writer); + return str; +} + +static inline int +PyUnicodeWriter_WriteChar(PyUnicodeWriter *writer, Py_UCS4 ch) +{ + if (ch > 0x10ffff) { + PyErr_SetString(PyExc_ValueError, + "character must be in range(0x110000)"); + return -1; + } + + return _PyUnicodeWriter_WriteChar((_PyUnicodeWriter*)writer, ch); +} + +static inline int +PyUnicodeWriter_WriteStr(PyUnicodeWriter *writer, PyObject *obj) +{ + PyObject *str = PyObject_Str(obj); + if (str == NULL) { + return -1; + } + + int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str); + Py_DECREF(str); + return res; +} + +static inline int +PyUnicodeWriter_WriteRepr(PyUnicodeWriter *writer, PyObject *obj) +{ + PyObject *str = 
PyObject_Repr(obj); + if (str == NULL) { + return -1; + } + + int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str); + Py_DECREF(str); + return res; +} + +static inline int +PyUnicodeWriter_WriteUTF8(PyUnicodeWriter *writer, + const char *str, Py_ssize_t size) +{ + if (size < 0) { + size = (Py_ssize_t)strlen(str); + } + + PyObject *str_obj = PyUnicode_FromStringAndSize(str, size); + if (str_obj == _Py_NULL) { + return -1; + } + + int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str_obj); + Py_DECREF(str_obj); + return res; +} + +static inline int +PyUnicodeWriter_WriteWideChar(PyUnicodeWriter *writer, + const wchar_t *str, Py_ssize_t size) +{ + if (size < 0) { + size = (Py_ssize_t)wcslen(str); + } + + PyObject *str_obj = PyUnicode_FromWideChar(str, size); + if (str_obj == _Py_NULL) { + return -1; + } + + int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str_obj); + Py_DECREF(str_obj); + return res; +} + +static inline int +PyUnicodeWriter_WriteSubstring(PyUnicodeWriter *writer, PyObject *str, + Py_ssize_t start, Py_ssize_t end) +{ + if (!PyUnicode_Check(str)) { + PyErr_Format(PyExc_TypeError, "expect str, not %T", str); + return -1; + } + if (start < 0 || start > end) { + PyErr_Format(PyExc_ValueError, "invalid start argument"); + return -1; + } + if (end > PyUnicode_GET_LENGTH(str)) { + PyErr_Format(PyExc_ValueError, "invalid end argument"); + return -1; + } + + return _PyUnicodeWriter_WriteSubstring((_PyUnicodeWriter*)writer, str, + start, end); +} + +static inline int +PyUnicodeWriter_Format(PyUnicodeWriter *writer, const char *format, ...) 
+{ + va_list vargs; + va_start(vargs, format); + PyObject *str = PyUnicode_FromFormatV(format, vargs); + va_end(vargs); + if (str == _Py_NULL) { + return -1; + } + + int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str); + Py_DECREF(str); + return res; +} +#endif // PY_VERSION_HEX < 0x030E0000 + +// gh-116560 added PyLong_GetSign() to Python 3.14.0a0 +#if PY_VERSION_HEX < 0x030E00A0 +static inline int PyLong_GetSign(PyObject *obj, int *sign) +{ + if (!PyLong_Check(obj)) { + PyErr_Format(PyExc_TypeError, "expect int, got %s", Py_TYPE(obj)->tp_name); + return -1; + } + + *sign = _PyLong_Sign(obj); + return 0; +} +#endif + + +#ifdef __cplusplus +} +#endif +#endif // PYTHONCAPI_COMPAT diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h new file mode 100644 index 0000000000000000000000000000000000000000..2a0e6ba709d974daebf81cf9e6cdb7aa8b947cc8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/result.h" + +namespace arrow { + +template +Status MaybeTransform(InputIterator first, InputIterator last, OutputIterator out, + UnaryOperation unary_op) { + for (; first != last; ++first, (void)++out) { + ARROW_ASSIGN_OR_RAISE(*out, unary_op(*first)); + } + return Status::OK(); +} + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h new file mode 100644 index 0000000000000000000000000000000000000000..71920e49f4aa2b1d92312b4aabaffafe35d323c7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h @@ -0,0 +1,221 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/memory_pool.h" +#include "arrow/type_fwd.h" +#include "arrow/util/bit_util.h" + +namespace arrow { +namespace internal { + +struct BitmapWordAlignParams { + int64_t leading_bits; + int64_t trailing_bits; + int64_t trailing_bit_offset; + const uint8_t* aligned_start; + int64_t aligned_bits; + int64_t aligned_words; +}; + +// Compute parameters for accessing a bitmap using aligned word instructions. 
+// The returned parameters describe: +// - a leading area of size `leading_bits` before the aligned words +// - a word-aligned area of size `aligned_bits` +// - a trailing area of size `trailing_bits` after the aligned words +template +inline BitmapWordAlignParams BitmapWordAlign(const uint8_t* data, int64_t bit_offset, + int64_t length) { + static_assert(bit_util::IsPowerOf2(ALIGN_IN_BYTES), + "ALIGN_IN_BYTES should be a positive power of two"); + constexpr uint64_t ALIGN_IN_BITS = ALIGN_IN_BYTES * 8; + + BitmapWordAlignParams p; + + // Compute a "bit address" that we can align up to ALIGN_IN_BITS. + // We don't care about losing the upper bits since we are only interested in the + // difference between both addresses. + const uint64_t bit_addr = + reinterpret_cast(data) * 8 + static_cast(bit_offset); + const uint64_t aligned_bit_addr = bit_util::RoundUpToPowerOf2(bit_addr, ALIGN_IN_BITS); + + p.leading_bits = std::min(length, aligned_bit_addr - bit_addr); + p.aligned_words = (length - p.leading_bits) / ALIGN_IN_BITS; + p.aligned_bits = p.aligned_words * ALIGN_IN_BITS; + p.trailing_bits = length - p.leading_bits - p.aligned_bits; + p.trailing_bit_offset = bit_offset + p.leading_bits + p.aligned_bits; + + p.aligned_start = data + (bit_offset + p.leading_bits) / 8; + return p; +} +} // namespace internal + +namespace util { + +// Functions to check if the provided Arrow object is aligned by the specified alignment + +/// \brief Special alignment value to use data type-specific alignment +/// +/// If this is passed as the `alignment` in one of the CheckAlignment or EnsureAlignment +/// functions, then the function will ensure each buffer is suitably aligned +/// for the data type of the array. For example, given an int32 buffer the values +/// buffer's address must be a multiple of 4. Given a large_string buffer the offsets +/// buffer's address must be a multiple of 8. 
+constexpr int64_t kValueAlignment = -3; + +/// \brief Calculate if the buffer's address is a multiple of `alignment` +/// +/// If `alignment` is less than or equal to 0 then this method will always return true +/// \param buffer the buffer to check +/// \param alignment the alignment (in bytes) to check for +ARROW_EXPORT bool CheckAlignment(const Buffer& buffer, int64_t alignment); +/// \brief Calculate if all buffers in the array data are aligned +/// +/// This will also check the buffers in the dictionary and any children +/// \param array the array data to check +/// \param alignment the alignment (in bytes) to check for +ARROW_EXPORT bool CheckAlignment(const ArrayData& array, int64_t alignment); +/// \brief Calculate if all buffers in the array are aligned +/// +/// This will also check the buffers in the dictionary and any children +/// \param array the array to check +/// \param alignment the alignment (in bytes) to check for +ARROW_EXPORT bool CheckAlignment(const Array& array, int64_t alignment); + +// Following functions require an additional boolean vector which stores the +// alignment check bits of the constituent objects. +// For example, needs_alignment vector for a ChunkedArray will contain the +// check bits of the constituent Arrays. +// The boolean vector check was introduced to minimize the repetitive checks +// of the constituent objects during the EnsureAlignment function where certain +// objects can be ignored for further checking if we already know that they are +// completely aligned. + +/// \brief Calculate which (if any) chunks in a chunked array are unaligned +/// \param array the array to check +/// \param alignment the alignment (in bytes) to check for +/// \param needs_alignment an output vector that will store the results of the check +/// it must be set to a valid vector. Extra elements will be added to the end +/// of the vector for each chunk that is checked. `true` will be stored if +/// the chunk is unaligned. 
+/// \param offset the index of the chunk to start checking +/// \return true if all chunks (starting at `offset`) are aligned, false otherwise +ARROW_EXPORT bool CheckAlignment(const ChunkedArray& array, int64_t alignment, + std::vector* needs_alignment, int offset = 0); + +/// \brief calculate which (if any) columns in a record batch are unaligned +/// \param batch the batch to check +/// \param alignment the alignment (in bytes) to check for +/// \param needs_alignment an output vector that will store the results of the +/// check. It must be set to a valid vector. Extra elements will be added +/// to the end of the vector for each column that is checked. `true` will be +/// stored if the column is unaligned. +ARROW_EXPORT bool CheckAlignment(const RecordBatch& batch, int64_t alignment, + std::vector* needs_alignment); + +/// \brief calculate which (if any) columns in a table are unaligned +/// \param table the table to check +/// \param alignment the alignment (in bytes) to check for +/// \param needs_alignment an output vector that will store the results of the +/// check. It must be set to a valid vector. Extra elements will be added +/// to the end of the vector for each column that is checked. `true` will be +/// stored if the column is unaligned. +ARROW_EXPORT bool CheckAlignment(const Table& table, int64_t alignment, + std::vector* needs_alignment); + +/// \brief return a buffer that has the given alignment and the same data as the input +/// buffer +/// +/// If the input buffer is already aligned then this method will return the input buffer +/// If the input buffer is not already aligned then this method will allocate a new +/// buffer. The alignment of the new buffer will have at least +/// max(kDefaultBufferAlignment, alignment) bytes of alignment. 
+/// +/// \param buffer the buffer to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate a new buffer if the +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment( + std::shared_ptr buffer, int64_t alignment, MemoryPool* memory_pool); + +/// \brief return an array data where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. +/// +/// \param array_data the array data to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment( + std::shared_ptr array_data, int64_t alignment, MemoryPool* memory_pool); + +/// \brief return an array where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. +/// +/// \param array the array to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment(std::shared_ptr array, + int64_t alignment, + MemoryPool* memory_pool); + +/// \brief return a chunked array where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. 
+/// +/// \param array the chunked array to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment( + std::shared_ptr array, int64_t alignment, MemoryPool* memory_pool); + +/// \brief return a record batch where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. +/// +/// \param batch the batch to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment( + std::shared_ptr batch, int64_t alignment, MemoryPool* memory_pool); + +/// \brief return a table where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. +/// +/// \param table the table to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment(std::shared_ptr
table, + int64_t alignment, + MemoryPool* memory_pool); + +} // namespace util +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h new file mode 100644 index 0000000000000000000000000000000000000000..fd66298d1a9d61ee9276eeb2f162cd0fc628caea --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h @@ -0,0 +1,2058 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/util/async_generator_fwd.h" +#include "arrow/util/async_util.h" +#include "arrow/util/functional.h" +#include "arrow/util/future.h" +#include "arrow/util/io_util.h" +#include "arrow/util/iterator.h" +#include "arrow/util/mutex.h" +#include "arrow/util/queue.h" +#include "arrow/util/thread_pool.h" + +namespace arrow { + +// The methods in this file create, modify, and utilize AsyncGenerator which is an +// iterator of futures. 
This allows an asynchronous source (like file input) to be run +// through a pipeline in the same way that iterators can be used to create pipelined +// workflows. +// +// In order to support pipeline parallelism we introduce the concept of asynchronous +// reentrancy. This is different than synchronous reentrancy. With synchronous code a +// function is reentrant if the function can be called again while a previous call to that +// function is still running. Unless otherwise specified none of these generators are +// synchronously reentrant. Care should be taken to avoid calling them in such a way (and +// the utilities Visit/Collect/Await take care to do this). +// +// Asynchronous reentrancy on the other hand means the function is called again before the +// future returned by the function is marked finished (but after the call to get the +// future returns). Some of these generators are async-reentrant while others (e.g. +// those that depend on ordered processing like decompression) are not. Read the MakeXYZ +// function comments to determine which generators support async reentrancy. +// +// Note: Generators that are not asynchronously reentrant can still support readahead +// (\see MakeSerialReadaheadGenerator). +// +// Readahead operators, and some other operators, may introduce queueing. Any operators +// that introduce buffering should detail the amount of buffering they introduce in their +// MakeXYZ function comments. +// +// A generator should always be fully consumed before it is destroyed. +// A generator should not mark a future complete with an error status or a terminal value +// until all outstanding futures have completed. Generators that spawn multiple +// concurrent futures may need to hold onto an error while other concurrent futures wrap +// up. +template +struct IterationTraits> { + /// \brief by default when iterating through a sequence of AsyncGenerator, + /// an empty function indicates the end of iteration. 
+ static AsyncGenerator End() { return AsyncGenerator(); } + + static bool IsEnd(const AsyncGenerator& val) { return !val; } +}; + +template +Future AsyncGeneratorEnd() { + return Future::MakeFinished(IterationTraits::End()); +} + +/// returning a future that completes when all have been visited +template +Future<> VisitAsyncGenerator(AsyncGenerator generator, Visitor visitor) { + struct LoopBody { + struct Callback { + Result> operator()(const T& next) { + if (IsIterationEnd(next)) { + return Break(); + } else { + auto visited = visitor(next); + if (visited.ok()) { + return Continue(); + } else { + return visited; + } + } + } + + Visitor visitor; + }; + + Future> operator()() { + Callback callback{visitor}; + auto next = generator(); + return next.Then(std::move(callback)); + } + + AsyncGenerator generator; + Visitor visitor; + }; + + return Loop(LoopBody{std::move(generator), std::move(visitor)}); +} + +/// \brief Wait for an async generator to complete, discarding results. +template +Future<> DiscardAllFromAsyncGenerator(AsyncGenerator generator) { + std::function visitor = [](const T&) { return Status::OK(); }; + return VisitAsyncGenerator(generator, visitor); +} + +/// \brief Collect the results of an async generator into a vector +template +Future> CollectAsyncGenerator(AsyncGenerator generator) { + auto vec = std::make_shared>(); + auto loop_body = [generator = std::move(generator), + vec = std::move(vec)]() -> Future>> { + auto next = generator(); + return next.Then([vec](const T& result) -> Result>> { + if (IsIterationEnd(result)) { + return Break(*vec); + } else { + vec->push_back(result); + return Continue(); + } + }); + }; + return Loop(std::move(loop_body)); +} + +/// \see MakeMappedGenerator +template +class MappingGenerator { + public: + MappingGenerator(AsyncGenerator source, std::function(const T&)> map) + : state_(std::make_shared(std::move(source), std::move(map))) {} + + Future operator()() { + auto future = Future::Make(); + bool 
should_trigger; + { + auto guard = state_->mutex.Lock(); + if (state_->finished) { + return AsyncGeneratorEnd(); + } + should_trigger = state_->waiting_jobs.empty(); + state_->waiting_jobs.push_back(future); + } + if (should_trigger) { + state_->source().AddCallback(Callback{state_}); + } + return future; + } + + private: + struct State { + State(AsyncGenerator source, std::function(const T&)> map) + : source(std::move(source)), + map(std::move(map)), + waiting_jobs(), + mutex(), + finished(false) {} + + void Purge() { + // This might be called by an original callback (if the source iterator fails or + // ends) or by a mapped callback (if the map function fails or ends prematurely). + // Either way it should only be called once and after finished is set so there is no + // need to guard access to `waiting_jobs`. + while (!waiting_jobs.empty()) { + waiting_jobs.front().MarkFinished(IterationTraits::End()); + waiting_jobs.pop_front(); + } + } + + AsyncGenerator source; + std::function(const T&)> map; + std::deque> waiting_jobs; + util::Mutex mutex; + bool finished; + }; + + struct Callback; + + struct MappedCallback { + void operator()(const Result& maybe_next) { + bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next); + bool should_purge = false; + if (end) { + { + auto guard = state->mutex.Lock(); + should_purge = !state->finished; + state->finished = true; + } + } + sink.MarkFinished(maybe_next); + if (should_purge) { + state->Purge(); + } + } + std::shared_ptr state; + Future sink; + }; + + struct Callback { + void operator()(const Result& maybe_next) { + Future sink; + bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next); + bool should_purge = false; + bool should_trigger; + { + auto guard = state->mutex.Lock(); + // A MappedCallback may have purged or be purging the queue; + // we shouldn't do anything here. 
+ if (state->finished) return; + if (end) { + should_purge = !state->finished; + state->finished = true; + } + sink = state->waiting_jobs.front(); + state->waiting_jobs.pop_front(); + should_trigger = !end && !state->waiting_jobs.empty(); + } + if (should_purge) { + state->Purge(); + } + if (should_trigger) { + state->source().AddCallback(Callback{state}); + } + if (maybe_next.ok()) { + const T& val = maybe_next.ValueUnsafe(); + if (IsIterationEnd(val)) { + sink.MarkFinished(IterationTraits::End()); + } else { + Future mapped_fut = state->map(val); + mapped_fut.AddCallback(MappedCallback{std::move(state), std::move(sink)}); + } + } else { + sink.MarkFinished(maybe_next.status()); + } + } + + std::shared_ptr state; + }; + + std::shared_ptr state_; +}; + +/// \brief Create a generator that will apply the map function to each element of +/// source. The map function is not called on the end token. +/// +/// Note: This function makes a copy of `map` for each item +/// Note: Errors returned from the `map` function will be propagated +/// +/// If the source generator is async-reentrant then this generator will be also +template , + typename V = typename EnsureFuture::type::ValueType> +AsyncGenerator MakeMappedGenerator(AsyncGenerator source_generator, MapFn map) { + auto map_callback = [map = std::move(map)](const T& val) mutable -> Future { + return ToFuture(map(val)); + }; + return MappingGenerator(std::move(source_generator), std::move(map_callback)); +} + +/// \brief Create a generator that will apply the map function to +/// each element of source. The map function is not called on the end +/// token. The result of the map function should be another +/// generator; all these generators will then be flattened to produce +/// a single stream of items. 
+/// +/// Note: This function makes a copy of `map` for each item +/// Note: Errors returned from the `map` function will be propagated +/// +/// If the source generator is async-reentrant then this generator will be also +template , + typename V = typename EnsureFuture::type::ValueType> +AsyncGenerator MakeFlatMappedGenerator(AsyncGenerator source_generator, MapFn map) { + return MakeConcatenatedGenerator( + MakeMappedGenerator(std::move(source_generator), std::move(map))); +} + +/// \see MakeSequencingGenerator +template +class SequencingGenerator { + public: + SequencingGenerator(AsyncGenerator source, ComesAfter compare, IsNext is_next, + T initial_value) + : state_(std::make_shared(std::move(source), std::move(compare), + std::move(is_next), std::move(initial_value))) {} + + Future operator()() { + { + auto guard = state_->mutex.Lock(); + // We can send a result immediately if the top of the queue is either an + // error or the next item + if (!state_->queue.empty() && + (!state_->queue.top().ok() || + state_->is_next(state_->previous_value, *state_->queue.top()))) { + auto result = std::move(state_->queue.top()); + if (result.ok()) { + state_->previous_value = *result; + } + state_->queue.pop(); + return Future::MakeFinished(result); + } + if (state_->finished) { + return AsyncGeneratorEnd(); + } + // The next item is not in the queue so we will need to wait + auto new_waiting_fut = Future::Make(); + state_->waiting_future = new_waiting_fut; + guard.Unlock(); + state_->source().AddCallback(Callback{state_}); + return new_waiting_fut; + } + } + + private: + struct WrappedComesAfter { + bool operator()(const Result& left, const Result& right) { + if (!left.ok() || !right.ok()) { + // Should never happen + return false; + } + return compare(*left, *right); + } + ComesAfter compare; + }; + + struct State { + State(AsyncGenerator source, ComesAfter compare, IsNext is_next, T initial_value) + : source(std::move(source)), + is_next(std::move(is_next)), + 
previous_value(std::move(initial_value)), + waiting_future(), + queue(WrappedComesAfter{compare}), + finished(false), + mutex() {} + + AsyncGenerator source; + IsNext is_next; + T previous_value; + Future waiting_future; + std::priority_queue, std::vector>, WrappedComesAfter> queue; + bool finished; + util::Mutex mutex; + }; + + class Callback { + public: + explicit Callback(std::shared_ptr state) : state_(std::move(state)) {} + + void operator()(const Result result) { + Future to_deliver; + bool finished; + { + auto guard = state_->mutex.Lock(); + bool ready_to_deliver = false; + if (!result.ok()) { + // Clear any cached results + while (!state_->queue.empty()) { + state_->queue.pop(); + } + ready_to_deliver = true; + state_->finished = true; + } else if (IsIterationEnd(result.ValueUnsafe())) { + ready_to_deliver = state_->queue.empty(); + state_->finished = true; + } else { + ready_to_deliver = state_->is_next(state_->previous_value, *result); + } + + if (ready_to_deliver && state_->waiting_future.is_valid()) { + to_deliver = state_->waiting_future; + if (result.ok()) { + state_->previous_value = *result; + } + } else { + state_->queue.push(result); + } + // Capture state_->finished so we can access it outside the mutex + finished = state_->finished; + } + // Must deliver result outside of the mutex + if (to_deliver.is_valid()) { + to_deliver.MarkFinished(result); + } else { + // Otherwise, if we didn't get the next item (or a terminal item), we + // need to keep looking + if (!finished) { + state_->source().AddCallback(Callback{state_}); + } + } + } + + private: + const std::shared_ptr state_; + }; + + const std::shared_ptr state_; +}; + +/// \brief Buffer an AsyncGenerator to return values in sequence order ComesAfter +/// and IsNext determine the sequence order. 
+/// +/// ComesAfter should be a BinaryPredicate that only returns true if a comes after b +/// +/// IsNext should be a BinaryPredicate that returns true, given `a` and `b`, only if +/// `b` follows immediately after `a`. It should return true given `initial_value` and +/// `b` if `b` is the first item in the sequence. +/// +/// This operator will queue unboundedly while waiting for the next item. It is intended +/// for jittery sources that might scatter an ordered sequence. It is NOT intended to +/// sort. Using it to try and sort could result in excessive RAM usage. This generator +/// will queue up to N blocks where N is the max "out of order"ness of the source. +/// +/// For example, if the source is 1,6,2,5,4,3 it will queue 3 blocks because 3 is 3 +/// blocks beyond where it belongs. +/// +/// This generator is not async-reentrant but it consists only of a simple log(n) +/// insertion into a priority queue. +template +AsyncGenerator MakeSequencingGenerator(AsyncGenerator source_generator, + ComesAfter compare, IsNext is_next, + T initial_value) { + return SequencingGenerator( + std::move(source_generator), std::move(compare), std::move(is_next), + std::move(initial_value)); +} + +/// \see MakeTransformedGenerator +template +class TransformingGenerator { + // The transforming generator state will be referenced as an async generator but will + // also be referenced via callback to various futures. If the async generator owner + // moves it around we need the state to be consistent for future callbacks. 
+ struct TransformingGeneratorState + : std::enable_shared_from_this { + TransformingGeneratorState(AsyncGenerator generator, Transformer transformer) + : generator_(std::move(generator)), + transformer_(std::move(transformer)), + last_value_(), + finished_() {} + + Future operator()() { + while (true) { + auto maybe_next_result = Pump(); + if (!maybe_next_result.ok()) { + return Future::MakeFinished(maybe_next_result.status()); + } + auto maybe_next = std::move(maybe_next_result).ValueUnsafe(); + if (maybe_next.has_value()) { + return Future::MakeFinished(*std::move(maybe_next)); + } + + auto next_fut = generator_(); + // If finished already, process results immediately inside the loop to avoid + // stack overflow + if (next_fut.is_finished()) { + auto next_result = next_fut.result(); + if (next_result.ok()) { + last_value_ = *next_result; + } else { + return Future::MakeFinished(next_result.status()); + } + // Otherwise, if not finished immediately, add callback to process results + } else { + auto self = this->shared_from_this(); + return next_fut.Then([self](const T& next_result) { + self->last_value_ = next_result; + return (*self)(); + }); + } + } + } + + // See comment on TransformingIterator::Pump + Result> Pump() { + if (!finished_ && last_value_.has_value()) { + ARROW_ASSIGN_OR_RAISE(TransformFlow next, transformer_(*last_value_)); + if (next.ReadyForNext()) { + if (IsIterationEnd(*last_value_)) { + finished_ = true; + } + last_value_.reset(); + } + if (next.Finished()) { + finished_ = true; + } + if (next.HasValue()) { + return next.Value(); + } + } + if (finished_) { + return IterationTraits::End(); + } + return std::nullopt; + } + + AsyncGenerator generator_; + Transformer transformer_; + std::optional last_value_; + bool finished_; + }; + + public: + explicit TransformingGenerator(AsyncGenerator generator, + Transformer transformer) + : state_(std::make_shared(std::move(generator), + std::move(transformer))) {} + + Future operator()() { return 
(*state_)(); } + + protected: + std::shared_ptr state_; +}; + +/// \brief Transform an async generator using a transformer function returning a new +/// AsyncGenerator +/// +/// The transform function here behaves exactly the same as the transform function in +/// MakeTransformedIterator and you can safely use the same transform function to +/// transform both synchronous and asynchronous streams. +/// +/// This generator is not async-reentrant +/// +/// This generator may queue up to 1 instance of T but will not delay +template +AsyncGenerator MakeTransformedGenerator(AsyncGenerator generator, + Transformer transformer) { + return TransformingGenerator(generator, transformer); +} + +/// \see MakeSerialReadaheadGenerator +template +class SerialReadaheadGenerator { + public: + SerialReadaheadGenerator(AsyncGenerator source_generator, int max_readahead) + : state_(std::make_shared(std::move(source_generator), max_readahead)) {} + + Future operator()() { + if (state_->first_) { + // Lazy generator, need to wait for the first ask to prime the pump + state_->first_ = false; + auto next = state_->source_(); + return next.Then(Callback{state_}, ErrCallback{state_}); + } + + // This generator is not async-reentrant. 
We won't be called until the last + // future finished so we know there is something in the queue + auto finished = state_->finished_.load(); + if (finished && state_->readahead_queue_.IsEmpty()) { + return AsyncGeneratorEnd(); + } + + std::shared_ptr> next; + if (!state_->readahead_queue_.Read(next)) { + return Status::UnknownError("Could not read from readahead_queue"); + } + + auto last_available = state_->spaces_available_.fetch_add(1); + if (last_available == 0 && !finished) { + // Reader idled out, we need to restart it + ARROW_RETURN_NOT_OK(state_->Pump(state_)); + } + return *next; + } + + private: + struct State { + State(AsyncGenerator source, int max_readahead) + : first_(true), + source_(std::move(source)), + finished_(false), + // There is one extra "space" for the in-flight request + spaces_available_(max_readahead + 1), + // The SPSC queue has size-1 "usable" slots so we need to overallocate 1 + readahead_queue_(max_readahead + 1) {} + + Status Pump(const std::shared_ptr& self) { + // Can't do readahead_queue.write(source().Then(...)) because then the + // callback might run immediately and add itself to the queue before this gets added + // to the queue messing up the order. + auto next_slot = std::make_shared>(); + auto written = readahead_queue_.Write(next_slot); + if (!written) { + return Status::UnknownError("Could not write to readahead_queue"); + } + // If this Pump is being called from a callback it is possible for the source to + // poll and read from the queue between the Write and this spot where we fill the + // value in. However, it is not possible for the future to read this value we are + // writing. That is because this callback (the callback for future X) must be + // finished before future X is marked complete and this source is not pulled + // reentrantly so it will not poll for future X+1 until this callback has completed. 
+ *next_slot = source_().Then(Callback{self}, ErrCallback{self}); + return Status::OK(); + } + + // Only accessed by the consumer end + bool first_; + // Accessed by both threads + AsyncGenerator source_; + std::atomic finished_; + // The queue has a size but it is not atomic. We keep track of how many spaces are + // left in the queue here so we know if we've just written the last value and we need + // to stop reading ahead or if we've just read from a full queue and we need to + // restart reading ahead + std::atomic spaces_available_; + // Needs to be a queue of shared_ptr and not Future because we set the value of the + // future after we add it to the queue + util::SpscQueue>> readahead_queue_; + }; + + struct Callback { + Result operator()(const T& next) { + if (IsIterationEnd(next)) { + state_->finished_.store(true); + return next; + } + auto last_available = state_->spaces_available_.fetch_sub(1); + if (last_available > 1) { + ARROW_RETURN_NOT_OK(state_->Pump(state_)); + } + return next; + } + + std::shared_ptr state_; + }; + + struct ErrCallback { + Result operator()(const Status& st) { + state_->finished_.store(true); + return st; + } + + std::shared_ptr state_; + }; + + std::shared_ptr state_; +}; + +/// \see MakeFromFuture +template +class FutureFirstGenerator { + public: + explicit FutureFirstGenerator(Future> future) + : state_(std::make_shared(std::move(future))) {} + + Future operator()() { + if (state_->source_) { + return state_->source_(); + } else { + auto state = state_; + return state_->future_.Then([state](const AsyncGenerator& source) { + state->source_ = source; + return state->source_(); + }); + } + } + + private: + struct State { + explicit State(Future> future) : future_(future), source_() {} + + Future> future_; + AsyncGenerator source_; + }; + + std::shared_ptr state_; +}; + +/// \brief Transform a Future> into an AsyncGenerator +/// that waits for the future to complete as part of the first item. 
+/// +/// This generator is not async-reentrant (even if the generator yielded by future is) +/// +/// This generator does not queue +template +AsyncGenerator MakeFromFuture(Future> future) { + return FutureFirstGenerator(std::move(future)); +} + +/// \brief Create a generator that will pull from the source into a queue. Unlike +/// MakeReadaheadGenerator this will not pull reentrantly from the source. +/// +/// The source generator does not need to be async-reentrant +/// +/// This generator is not async-reentrant (even if the source is) +/// +/// This generator may queue up to max_readahead additional instances of T +template +AsyncGenerator MakeSerialReadaheadGenerator(AsyncGenerator source_generator, + int max_readahead) { + return SerialReadaheadGenerator(std::move(source_generator), max_readahead); +} + +/// \brief Create a generator that immediately pulls from the source +/// +/// Typical generators do not pull from their source until they themselves +/// are pulled. This generator does not follow that convention and will call +/// generator() once before it returns. The returned generator will otherwise +/// mirror the source. +/// +/// This generator forwards async-reentrant pressure to the source +/// This generator buffers one item (the first result) until it is delivered. 
+template +AsyncGenerator MakeAutoStartingGenerator(AsyncGenerator generator) { + struct AutostartGenerator { + Future operator()() { + if (first_future->is_valid()) { + Future result = *first_future; + *first_future = Future(); + return result; + } + return source(); + } + + std::shared_ptr> first_future; + AsyncGenerator source; + }; + + std::shared_ptr> first_future = std::make_shared>(generator()); + return AutostartGenerator{std::move(first_future), std::move(generator)}; +} + +/// \see MakeReadaheadGenerator +template +class ReadaheadGenerator { + public: + ReadaheadGenerator(AsyncGenerator source_generator, int max_readahead) + : state_(std::make_shared(std::move(source_generator), max_readahead)) {} + + Future AddMarkFinishedContinuation(Future fut) { + auto state = state_; + return fut.Then( + [state](const T& result) -> Future { + state->MarkFinishedIfDone(result); + if (state->finished.load()) { + if (state->num_running.fetch_sub(1) == 1) { + state->final_future.MarkFinished(); + } + } else { + state->num_running.fetch_sub(1); + } + return result; + }, + [state](const Status& err) -> Future { + // If there is an error we need to make sure all running + // tasks finish before we return the error. 
+ state->finished.store(true); + if (state->num_running.fetch_sub(1) == 1) { + state->final_future.MarkFinished(); + } + return state->final_future.Then([err]() -> Result { return err; }); + }); + } + + Future operator()() { + if (state_->readahead_queue.empty()) { + // This is the first request, let's pump the underlying queue + state_->num_running.store(state_->max_readahead); + for (int i = 0; i < state_->max_readahead; i++) { + auto next = state_->source_generator(); + auto next_after_check = AddMarkFinishedContinuation(std::move(next)); + state_->readahead_queue.push(std::move(next_after_check)); + } + } + // Pop one and add one + auto result = state_->readahead_queue.front(); + state_->readahead_queue.pop(); + if (state_->finished.load()) { + state_->readahead_queue.push(AsyncGeneratorEnd()); + } else { + state_->num_running.fetch_add(1); + auto back_of_queue = state_->source_generator(); + auto back_of_queue_after_check = + AddMarkFinishedContinuation(std::move(back_of_queue)); + state_->readahead_queue.push(std::move(back_of_queue_after_check)); + } + return result; + } + + private: + struct State { + State(AsyncGenerator source_generator, int max_readahead) + : source_generator(std::move(source_generator)), max_readahead(max_readahead) {} + + void MarkFinishedIfDone(const T& next_result) { + if (IsIterationEnd(next_result)) { + finished.store(true); + } + } + + AsyncGenerator source_generator; + int max_readahead; + Future<> final_future = Future<>::Make(); + std::atomic num_running{0}; + std::atomic finished{false}; + std::queue> readahead_queue; + }; + + std::shared_ptr state_; +}; + +/// \brief A generator where the producer pushes items on a queue. +/// +/// No back-pressure is applied, so this generator is mostly useful when +/// producing the values is neither CPU- nor memory-expensive (e.g. fetching +/// filesystem metadata). +/// +/// This generator is not async-reentrant. 
+template +class PushGenerator { + struct State { + State() {} + + util::Mutex mutex; + std::deque> result_q; + std::optional> consumer_fut; + bool finished = false; + }; + + public: + /// Producer API for PushGenerator + class Producer { + public: + explicit Producer(const std::shared_ptr& state) : weak_state_(state) {} + + /// \brief Push a value on the queue + /// + /// True is returned if the value was pushed, false if the generator is + /// already closed or destroyed. If the latter, it is recommended to stop + /// producing any further values. + bool Push(Result result) { + auto state = weak_state_.lock(); + if (!state) { + // Generator was destroyed + return false; + } + auto lock = state->mutex.Lock(); + if (state->finished) { + // Closed early + return false; + } + if (state->consumer_fut.has_value()) { + auto fut = std::move(state->consumer_fut.value()); + state->consumer_fut.reset(); + lock.Unlock(); // unlock before potentially invoking a callback + fut.MarkFinished(std::move(result)); + } else { + state->result_q.push_back(std::move(result)); + } + return true; + } + + /// \brief Tell the consumer we have finished producing + /// + /// It is allowed to call this and later call Push() again ("early close"). + /// In this case, calls to Push() after the queue is closed are silently + /// ignored. This can help implementing non-trivial cancellation cases. + /// + /// True is returned on success, false if the generator is already closed + /// or destroyed. 
+ bool Close() { + auto state = weak_state_.lock(); + if (!state) { + // Generator was destroyed + return false; + } + auto lock = state->mutex.Lock(); + if (state->finished) { + // Already closed + return false; + } + state->finished = true; + if (state->consumer_fut.has_value()) { + auto fut = std::move(state->consumer_fut.value()); + state->consumer_fut.reset(); + lock.Unlock(); // unlock before potentially invoking a callback + fut.MarkFinished(IterationTraits::End()); + } + return true; + } + + /// Return whether the generator was closed or destroyed. + bool is_closed() const { + auto state = weak_state_.lock(); + if (!state) { + // Generator was destroyed + return true; + } + auto lock = state->mutex.Lock(); + return state->finished; + } + + private: + const std::weak_ptr weak_state_; + }; + + PushGenerator() : state_(std::make_shared()) {} + + /// Read an item from the queue + Future operator()() const { + auto lock = state_->mutex.Lock(); + assert(!state_->consumer_fut.has_value()); // Non-reentrant + if (!state_->result_q.empty()) { + auto fut = Future::MakeFinished(std::move(state_->result_q.front())); + state_->result_q.pop_front(); + return fut; + } + if (state_->finished) { + return AsyncGeneratorEnd(); + } + auto fut = Future::Make(); + state_->consumer_fut = fut; + return fut; + } + + /// \brief Return producer-side interface + /// + /// The returned object must be used by the producer to push values on the queue. + /// Only a single Producer object should be instantiated. + Producer producer() { return Producer{state_}; } + + private: + const std::shared_ptr state_; +}; + +/// \brief Create a generator that pulls reentrantly from a source +/// This generator will pull reentrantly from a source, ensuring that max_readahead +/// requests are active at any given time. +/// +/// The source generator must be async-reentrant +/// +/// This generator itself is async-reentrant. 
+/// +/// This generator may queue up to max_readahead instances of T +template +AsyncGenerator MakeReadaheadGenerator(AsyncGenerator source_generator, + int max_readahead) { + return ReadaheadGenerator(std::move(source_generator), max_readahead); +} + +/// \brief Creates a generator that will yield finished futures from a vector +/// +/// This generator is async-reentrant +template +AsyncGenerator MakeVectorGenerator(std::vector vec) { + struct State { + explicit State(std::vector vec_) : vec(std::move(vec_)), vec_idx(0) {} + + std::vector vec; + std::atomic vec_idx; + }; + + auto state = std::make_shared(std::move(vec)); + return [state]() { + auto idx = state->vec_idx.fetch_add(1); + if (idx >= state->vec.size()) { + // Eagerly return memory + state->vec.clear(); + return AsyncGeneratorEnd(); + } + return Future::MakeFinished(state->vec[idx]); + }; +} + +/// \see MakeMergedGenerator +template +class MergedGenerator { + // Note, the implementation of this class is quite complex at the moment (PRs to + // simplify are always welcome) + // + // Terminology is borrowed from rxjs. This is a pull based implementation of the + // mergeAll operator. The "outer subscription" refers to the async + // generator that the caller provided when creating this. The outer subscription + // yields generators. + // + // Each of these generators is then subscribed to (up to max_subscriptions) and these + // are referred to as "inner subscriptions". + // + // As soon as we start we try and establish `max_subscriptions` inner subscriptions. For + // each inner subscription we will cache up to 1 value. This means we may have more + // values than we have been asked for. In our example, if a caller asks for one record + // batch we will start scanning `max_subscriptions` different files. For each file we + // will only queue up to 1 batch (so a separate readahead is needed on the file if batch + // readahead is desired). 
+ // + // If the caller is slow we may accumulate ready-to-deliver items. These are stored + // in `delivered_jobs`. + // + // If the caller is very quick we may accumulate requests. These are stored in + // `waiting_jobs`. + // + // It may be helpful to consider an example, in the scanner the outer subscription + // is some kind of asynchronous directory listing. The inner subscription is + // then a scan on a file yielded by the directory listing. + // + // An "outstanding" request is when we have polled either the inner or outer + // subscription but that future hasn't completed yet. + // + // There are three possible "events" that can happen. + // * A caller could request the next future + // * An outer callback occurs when the next subscription is ready (e.g. the directory + // listing has produced a new file) + // * An inner callback occurs when one of the inner subscriptions emits a value (e.g. + // a file scan emits a record batch) + // + // Any time an event happens the logic is broken into two phases. First, we grab the + // lock and modify the shared state. While doing this we figure out what callbacks we + // will need to execute. Then, we give up the lock and execute these callbacks. It is + // important to execute these callbacks without the lock to avoid deadlock. 
+ public: + explicit MergedGenerator(AsyncGenerator> source, + int max_subscriptions) + : state_(std::make_shared(std::move(source), max_subscriptions)) {} + + Future operator()() { + // A caller has requested a future + Future waiting_future; + std::shared_ptr delivered_job; + bool mark_generator_complete = false; + { + auto guard = state_->mutex.Lock(); + if (!state_->delivered_jobs.empty()) { + // If we have a job sitting around we can deliver it + delivered_job = std::move(state_->delivered_jobs.front()); + state_->delivered_jobs.pop_front(); + if (state_->IsCompleteUnlocked(guard)) { + // It's possible this waiting job was the only thing left to handle and + // we have now completed the generator. + mark_generator_complete = true; + } else { + // Since we had a job sitting around we also had an inner subscription + // that had paused. We are going to restart this inner subscription and + // so there will be a new outstanding request. + state_->outstanding_requests++; + } + } else if (state_->broken || + (!state_->first && state_->num_running_subscriptions == 0)) { + // If we are broken or exhausted then prepare a terminal item but + // we won't complete it until we've finished. + Result end_res = IterationEnd(); + if (!state_->final_error.ok()) { + end_res = state_->final_error; + state_->final_error = Status::OK(); + } + return state_->all_finished.Then([end_res]() -> Result { return end_res; }); + } else { + // Otherwise we just queue the request and it will be completed when one of the + // ongoing inner subscriptions delivers a result + waiting_future = Future::Make(); + state_->waiting_jobs.push_back(std::make_shared>(waiting_future)); + } + if (state_->first) { + // On the first request we are going to try and immediately fill our queue + // of subscriptions. We assume we are going to be able to start them all. 
+ state_->outstanding_requests += + static_cast(state_->active_subscriptions.size()); + state_->num_running_subscriptions += + static_cast(state_->active_subscriptions.size()); + } + } + // If we grabbed a finished item from the delivered_jobs queue then we may need + // to mark the generator finished or issue a request for a new item to fill in + // the spot we just vacated. Notice that we issue that request to the same + // subscription that delivered it (deliverer). + if (delivered_job) { + if (mark_generator_complete) { + state_->all_finished.MarkFinished(); + } else { + delivered_job->deliverer().AddCallback( + InnerCallback(state_, delivered_job->index)); + } + return std::move(delivered_job->value); + } + // On the first call we try and fill up our subscriptions. It's possible the outer + // generator only has a few items and we can't fill up to what we were hoping. In + // that case we have to bail early. + if (state_->first) { + state_->first = false; + mark_generator_complete = false; + for (int i = 0; i < static_cast(state_->active_subscriptions.size()); i++) { + state_->PullSource().AddCallback( + OuterCallback{state_, static_cast(i)}); + // If we have to bail early then we need to update the shared state again so + // we need to reacquire the lock. + auto guard = state_->mutex.Lock(); + if (state_->source_exhausted) { + int excess_requests = + static_cast(state_->active_subscriptions.size()) - i - 1; + state_->outstanding_requests -= excess_requests; + state_->num_running_subscriptions -= excess_requests; + if (excess_requests > 0) { + // It's possible that we are completing the generator by reducing the number + // of outstanding requests (e.g. 
this happens when the outer subscription and + // all inner subscriptions are synchronous) + mark_generator_complete = state_->IsCompleteUnlocked(guard); + } + break; + } + } + if (mark_generator_complete) { + state_->MarkFinishedAndPurge(); + } + } + return waiting_future; + } + + private: + struct DeliveredJob { + explicit DeliveredJob(AsyncGenerator deliverer_, Result value_, + std::size_t index_) + : deliverer(deliverer_), value(std::move(value_)), index(index_) {} + + // The generator that delivered this result, we will request another item + // from this generator once the result is delivered + AsyncGenerator deliverer; + // The result we received from the generator + Result value; + // The index of the generator (in active_subscriptions) that delivered this + // result. This is used if we need to replace a finished generator. + std::size_t index; + }; + + struct State { + State(AsyncGenerator> source, int max_subscriptions) + : source(std::move(source)), + active_subscriptions(max_subscriptions), + delivered_jobs(), + waiting_jobs(), + mutex(), + first(true), + broken(false), + source_exhausted(false), + outstanding_requests(0), + num_running_subscriptions(0), + final_error(Status::OK()) {} + + Future> PullSource() { + // Need to guard access to source() so we don't pull sync-reentrantly which + // is never valid. + auto lock = mutex.Lock(); + return source(); + } + + void SignalErrorUnlocked(const util::Mutex::Guard& guard) { + broken = true; + // Empty any results that have arrived but not asked for. 
+ while (!delivered_jobs.empty()) { + delivered_jobs.pop_front(); + } + } + + // This function is called outside the mutex but it will only ever be + // called once + void MarkFinishedAndPurge() { + all_finished.MarkFinished(); + while (!waiting_jobs.empty()) { + waiting_jobs.front()->MarkFinished(IterationEnd()); + waiting_jobs.pop_front(); + } + } + + // This is called outside the mutex but it is only ever called + // once and Future<>::AddCallback is thread-safe + void MarkFinalError(const Status& err, Future maybe_sink) { + if (maybe_sink.is_valid()) { + // Someone is waiting for this error so lets mark it complete when + // all the work is done + all_finished.AddCallback([maybe_sink, err](const Status& status) mutable { + maybe_sink.MarkFinished(err); + }); + } else { + // No one is waiting for this error right now so it will be delivered + // next. + final_error = err; + } + } + + bool IsCompleteUnlocked(const util::Mutex::Guard& guard) { + return outstanding_requests == 0 && + (broken || (source_exhausted && num_running_subscriptions == 0 && + delivered_jobs.empty())); + } + + bool MarkTaskFinishedUnlocked(const util::Mutex::Guard& guard) { + --outstanding_requests; + return IsCompleteUnlocked(guard); + } + + // The outer generator. Each item we pull from this will be its own generator + // and become an inner subscription + AsyncGenerator> source; + // active_subscriptions and delivered_jobs will be bounded by max_subscriptions + std::vector> active_subscriptions; + // Results delivered by the inner subscriptions that weren't yet asked for by the + // caller + std::deque> delivered_jobs; + // waiting_jobs is unbounded, reentrant pulls (e.g. AddReadahead) will provide the + // backpressure + std::deque>> waiting_jobs; + // A future that will be marked complete when the terminal item has arrived and all + // outstanding futures have completed. It is used to hold off emission of an error + // until all outstanding work is done. 
+ Future<> all_finished = Future<>::Make(); + util::Mutex mutex; + // A flag cleared when the caller firsts asks for a future. Used to start polling. + bool first; + // A flag set when an error arrives, prevents us from issuing new requests. + bool broken; + // A flag set when the outer subscription has been exhausted. Prevents us from + // pulling it further (even though it would be generally harmless) and lets us know we + // are finishing up. + bool source_exhausted; + // The number of futures that we have requested from either the outer or inner + // subscriptions that have not yet completed. We cannot mark all_finished until this + // reaches 0. This will never be greater than max_subscriptions + int outstanding_requests; + // The number of running subscriptions. We ramp this up to `max_subscriptions` as + // soon as the first item is requested and then it stays at that level (each exhausted + // inner subscription is replaced by a new inner subscription) until the outer + // subscription is exhausted at which point this descends to 0 (and source_exhausted) + // is then set to true. + int num_running_subscriptions; + // If an error arrives, and the caller hasn't asked for that item, we store the error + // here. It is analagous to delivered_jobs but for errors instead of finished + // results. + Status final_error; + }; + + struct InnerCallback { + InnerCallback(std::shared_ptr state, std::size_t index, bool recursive = false) + : state(std::move(state)), index(index), recursive(recursive) {} + + void operator()(const Result& maybe_next_ref) { + // An item has been delivered by one of the inner subscriptions + Future next_fut; + const Result* maybe_next = &maybe_next_ref; + + // When an item is delivered (and the caller has asked for it) we grab the + // next item from the inner subscription. To avoid this behavior leading to an + // infinite loop (this can happen if the caller's callback asks for the next item) + // we use a while loop. 
+ while (true) { + Future sink; + bool sub_finished = maybe_next->ok() && IsIterationEnd(**maybe_next); + bool pull_next_sub = false; + bool was_broken = false; + bool should_mark_gen_complete = false; + bool should_mark_final_error = false; + { + auto guard = state->mutex.Lock(); + if (state->broken) { + // We've errored out previously so ignore the result. If anyone was waiting + // for this they will get IterationEnd when we purge + was_broken = true; + } else { + if (!sub_finished) { + // There is a result to deliver. Either we can deliver it now or we will + // queue it up + if (state->waiting_jobs.empty()) { + state->delivered_jobs.push_back(std::make_shared( + state->active_subscriptions[index], *maybe_next, index)); + } else { + sink = std::move(*state->waiting_jobs.front()); + state->waiting_jobs.pop_front(); + } + } + + // If this is the first error then we transition the state to a broken state + if (!maybe_next->ok()) { + should_mark_final_error = true; + state->SignalErrorUnlocked(guard); + } + } + + // If we finished this inner subscription then we need to grab a new inner + // subscription to take its spot. If we can't (because we're broken or + // exhausted) then we aren't going to be starting any new futures and so + // the number of running subscriptions drops. + pull_next_sub = sub_finished && !state->source_exhausted && !was_broken; + if (sub_finished && !pull_next_sub) { + state->num_running_subscriptions--; + } + // There are three situations we won't pull again. If an error occurred or we + // are already finished or if no one was waiting for our result and so we queued + // it up. We will decrement outstanding_requests and possibly mark the + // generator completed. 
+ if (state->broken || (!sink.is_valid() && !sub_finished) || + (sub_finished && state->source_exhausted)) { + if (state->MarkTaskFinishedUnlocked(guard)) { + should_mark_gen_complete = true; + } + } + } + + // Now we have given up the lock and we can take all the actions we decided we + // need to take. + if (should_mark_final_error) { + state->MarkFinalError(maybe_next->status(), std::move(sink)); + } + + if (should_mark_gen_complete) { + state->MarkFinishedAndPurge(); + } + + // An error occurred elsewhere so there is no need to mark any future + // finished (will happen during the purge) or pull from anything + if (was_broken) { + return; + } + + if (pull_next_sub) { + if (recursive) { + was_empty = true; + return; + } + // We pulled an end token so we need to start a new subscription + // in our spot + state->PullSource().AddCallback(OuterCallback{state, index}); + } else if (sink.is_valid()) { + // We pulled a valid result and there was someone waiting for it + // so lets fetch the next result from our subscription + sink.MarkFinished(*maybe_next); + next_fut = state->active_subscriptions[index](); + if (next_fut.TryAddCallback([this]() { return InnerCallback(state, index); })) { + return; + } + // Already completed. Avoid very deep recursion by looping + // here instead of relying on the callback. + maybe_next = &next_fut.result(); + continue; + } + // else: We pulled a valid result but no one was waiting for it so + // we can just stop. 
+ return; + } + } + std::shared_ptr state; + std::size_t index; + bool recursive; + bool was_empty = false; + }; + + struct OuterCallback { + void operator()(const Result>& initial_maybe_next) { + Result> maybe_next = initial_maybe_next; + while (true) { + // We have been given a new inner subscription + bool should_continue = false; + bool should_mark_gen_complete = false; + bool should_deliver_error = false; + bool source_exhausted = maybe_next.ok() && IsIterationEnd(*maybe_next); + Future error_sink; + { + auto guard = state->mutex.Lock(); + if (!maybe_next.ok() || source_exhausted || state->broken) { + // If here then we will not pull any more from the outer source + if (!state->broken && !maybe_next.ok()) { + state->SignalErrorUnlocked(guard); + // If here then we are the first error so we need to deliver it + should_deliver_error = true; + if (!state->waiting_jobs.empty()) { + error_sink = std::move(*state->waiting_jobs.front()); + state->waiting_jobs.pop_front(); + } + } + if (source_exhausted) { + state->source_exhausted = true; + state->num_running_subscriptions--; + } + if (state->MarkTaskFinishedUnlocked(guard)) { + should_mark_gen_complete = true; + } + } else { + state->active_subscriptions[index] = *maybe_next; + should_continue = true; + } + } + if (should_deliver_error) { + state->MarkFinalError(maybe_next.status(), std::move(error_sink)); + } + if (should_mark_gen_complete) { + state->MarkFinishedAndPurge(); + } + if (should_continue) { + // There is a possibility that a large sequence of immediately available inner + // callbacks could lead to a stack overflow. To avoid this we need to + // synchronously loop through inner/outer callbacks until we either find an + // unfinished future or we find an actual item to deliver. 
+ Future next_item = (*maybe_next)(); + if (!next_item.TryAddCallback([this] { return InnerCallback(state, index); })) { + // By setting recursive to true we signal to the inner callback that, if it is + // empty, instead of adding a new outer callback, it should just immediately + // return, flagging was_empty so that we know we need to check the next + // subscription. + InnerCallback immediate_inner(state, index, /*recursive=*/true); + immediate_inner(next_item.result()); + if (immediate_inner.was_empty) { + Future> next_source = state->PullSource(); + if (next_source.TryAddCallback([this] { + return OuterCallback{state, index}; + })) { + // We hit an unfinished future so we can stop looping + return; + } + // The current subscription was immediately and synchronously empty + // and we were able to synchronously pull the next subscription so we + // can keep looping. + maybe_next = next_source.result(); + continue; + } + } + } + return; + } + } + std::shared_ptr state; + std::size_t index; + }; + + std::shared_ptr state_; +}; + +/// \brief Create a generator that takes in a stream of generators and pulls from up to +/// max_subscriptions at a time +/// +/// Note: This may deliver items out of sequence. For example, items from the third +/// AsyncGenerator generated by the source may be emitted before some items from the first +/// AsyncGenerator generated by the source. +/// +/// This generator will pull from source async-reentrantly unless max_subscriptions is 1 +/// This generator will not pull from the individual subscriptions reentrantly. Add +/// readahead to the individual subscriptions if that is desired. 
+/// This generator is async-reentrant +/// +/// This generator may queue up to max_subscriptions instances of T +template +AsyncGenerator MakeMergedGenerator(AsyncGenerator> source, + int max_subscriptions) { + return MergedGenerator(std::move(source), max_subscriptions); +} + +template +Result> MakeSequencedMergedGenerator( + AsyncGenerator> source, int max_subscriptions) { + if (max_subscriptions < 0) { + return Status::Invalid("max_subscriptions must be a positive integer"); + } + if (max_subscriptions == 1) { + return Status::Invalid("Use MakeConcatenatedGenerator if max_subscriptions is 1"); + } + AsyncGenerator> autostarting_source = MakeMappedGenerator( + std::move(source), + [](const AsyncGenerator& sub) { return MakeAutoStartingGenerator(sub); }); + AsyncGenerator> sub_readahead = + MakeSerialReadaheadGenerator(std::move(autostarting_source), max_subscriptions - 1); + return MakeConcatenatedGenerator(std::move(sub_readahead)); +} + +/// \brief Create a generator that takes in a stream of generators and pulls from each +/// one in sequence. +/// +/// This generator is async-reentrant but will never pull from source reentrantly and +/// will never pull from any subscription reentrantly. 
+/// +/// This generator may queue 1 instance of T +/// +/// TODO: Could potentially make a bespoke implementation instead of MergedGenerator that +/// forwards async-reentrant requests instead of buffering them (which is what +/// MergedGenerator does) +template +AsyncGenerator MakeConcatenatedGenerator(AsyncGenerator> source) { + return MergedGenerator(std::move(source), 1); +} + +template +struct Enumerated { + T value; + int index; + bool last; +}; + +template +struct IterationTraits> { + static Enumerated End() { return Enumerated{IterationEnd(), -1, false}; } + static bool IsEnd(const Enumerated& val) { return val.index < 0; } +}; + +/// \see MakeEnumeratedGenerator +template +class EnumeratingGenerator { + public: + EnumeratingGenerator(AsyncGenerator source, T initial_value) + : state_(std::make_shared(std::move(source), std::move(initial_value))) {} + + Future> operator()() { + if (state_->finished) { + return AsyncGeneratorEnd>(); + } else { + auto state = state_; + return state->source().Then([state](const T& next) { + auto finished = IsIterationEnd(next); + auto prev = Enumerated{state->prev_value, state->prev_index, finished}; + state->prev_value = next; + state->prev_index++; + state->finished = finished; + return prev; + }); + } + } + + private: + struct State { + State(AsyncGenerator source, T initial_value) + : source(std::move(source)), prev_value(std::move(initial_value)), prev_index(0) { + finished = IsIterationEnd(prev_value); + } + + AsyncGenerator source; + T prev_value; + int prev_index; + bool finished; + }; + + std::shared_ptr state_; +}; + +/// Wrap items from a source generator with positional information +/// +/// When used with MakeMergedGenerator and MakeSequencingGenerator this allows items to be +/// processed in a "first-available" fashion and later resequenced which can reduce the +/// impact of sources with erratic performance (e.g. a filesystem where some items may +/// take longer to read than others). 
+/// +/// TODO(ARROW-12371) Would require this generator be async-reentrant +/// +/// \see MakeSequencingGenerator for an example of putting items back in order +/// +/// This generator is not async-reentrant +/// +/// This generator buffers one item (so it knows which item is the last item) +template +AsyncGenerator> MakeEnumeratedGenerator(AsyncGenerator source) { + return FutureFirstGenerator>( + source().Then([source](const T& initial_value) -> AsyncGenerator> { + return EnumeratingGenerator(std::move(source), initial_value); + })); +} + +/// \see MakeTransferredGenerator +template +class TransferringGenerator { + public: + explicit TransferringGenerator(AsyncGenerator source, internal::Executor* executor) + : source_(std::move(source)), executor_(executor) {} + + Future operator()() { return executor_->Transfer(source_()); } + + private: + AsyncGenerator source_; + internal::Executor* executor_; +}; + +/// \brief Transfer a future to an underlying executor. +/// +/// Continuations run on the returned future will be run on the given executor +/// if they cannot be run synchronously. +/// +/// This is often needed to move computation off I/O threads or other external +/// completion sources and back on to the CPU executor so the I/O thread can +/// stay busy and focused on I/O +/// +/// Keep in mind that continuations called on an already completed future will +/// always be run synchronously and so no transfer will happen in that case. 
+/// +/// This generator is async reentrant if the source is +/// +/// This generator will not queue +template +AsyncGenerator MakeTransferredGenerator(AsyncGenerator source, + internal::Executor* executor) { + return TransferringGenerator(std::move(source), executor); +} + +/// \see MakeBackgroundGenerator +template +class BackgroundGenerator { + public: + explicit BackgroundGenerator(Iterator it, internal::Executor* io_executor, int max_q, + int q_restart) + : state_(std::make_shared(io_executor, std::move(it), max_q, q_restart)), + cleanup_(std::make_shared(state_.get())) {} + + Future operator()() { + auto guard = state_->mutex.Lock(); + Future waiting_future; + if (state_->queue.empty()) { + if (state_->finished) { + return AsyncGeneratorEnd(); + } else { + waiting_future = Future::Make(); + state_->waiting_future = waiting_future; + } + } else { + auto next = Future::MakeFinished(std::move(state_->queue.front())); + state_->queue.pop(); + if (state_->NeedsRestart()) { + return state_->RestartTask(state_, std::move(guard), std::move(next)); + } + return next; + } + // This should only trigger the very first time this method is called + if (state_->NeedsRestart()) { + return state_->RestartTask(state_, std::move(guard), std::move(waiting_future)); + } + return waiting_future; + } + + protected: + static constexpr uint64_t kUnlikelyThreadId{std::numeric_limits::max()}; + + struct State { + State(internal::Executor* io_executor, Iterator it, int max_q, int q_restart) + : io_executor(io_executor), + max_q(max_q), + q_restart(q_restart), + it(std::move(it)), + reading(false), + finished(false), + should_shutdown(false) {} + + void ClearQueue() { + while (!queue.empty()) { + queue.pop(); + } + } + + bool TaskIsRunning() const { return task_finished.is_valid(); } + + bool NeedsRestart() const { + return !finished && !reading && static_cast(queue.size()) <= q_restart; + } + + void DoRestartTask(std::shared_ptr state, util::Mutex::Guard guard) { + // If we get here we 
are actually going to start a new task so let's create a + // task_finished future for it + state->task_finished = Future<>::Make(); + state->reading = true; + auto spawn_status = io_executor->Spawn( + [state]() { BackgroundGenerator::WorkerTask(std::move(state)); }); + if (!spawn_status.ok()) { + // If we can't spawn a new task then send an error to the consumer (either via a + // waiting future or the queue) and mark ourselves finished + state->finished = true; + state->task_finished = Future<>(); + if (waiting_future.has_value()) { + auto to_deliver = std::move(waiting_future.value()); + waiting_future.reset(); + guard.Unlock(); + to_deliver.MarkFinished(spawn_status); + } else { + ClearQueue(); + queue.push(spawn_status); + } + } + } + + Future RestartTask(std::shared_ptr state, util::Mutex::Guard guard, + Future next) { + if (TaskIsRunning()) { + // If the task is still cleaning up we need to wait for it to finish before + // restarting. We also want to block the consumer until we've restarted the + // reader to avoid multiple restarts + return task_finished.Then([state, next]() { + // This may appear dangerous (recursive mutex) but we should be guaranteed the + // outer guard has been released by this point. We know... 
+ // * task_finished is not already finished (it would be invalid in that case) + // * task_finished will not be marked complete until we've given up the mutex + auto guard_ = state->mutex.Lock(); + state->DoRestartTask(state, std::move(guard_)); + return next; + }); + } + // Otherwise we can restart immediately + DoRestartTask(std::move(state), std::move(guard)); + return next; + } + + internal::Executor* io_executor; + const int max_q; + const int q_restart; + Iterator it; + std::atomic worker_thread_id{kUnlikelyThreadId}; + + // If true, the task is actively pumping items from the queue and does not need a + // restart + bool reading; + // Set to true when a terminal item arrives + bool finished; + // Signal to the background task to end early because consumers have given up on it + bool should_shutdown; + // If the queue is empty, the consumer will create a waiting future and wait for it + std::queue> queue; + std::optional> waiting_future; + // Every background task is given a future to complete when it is entirely finished + // processing and ready for the next task to start or for State to be destroyed + Future<> task_finished; + util::Mutex mutex; + }; + + // Cleanup task that will be run when all consumer references to the generator are lost + struct Cleanup { + explicit Cleanup(State* state) : state(state) {} + ~Cleanup() { + /// TODO: Once ARROW-13109 is available then we can be force consumers to spawn and + /// there is no need to perform this check. 
+ /// + /// It's a deadlock if we enter cleanup from + /// the worker thread but it can happen if the consumer doesn't transfer away + assert(state->worker_thread_id.load() != ::arrow::internal::GetThreadId()); + Future<> finish_fut; + { + auto lock = state->mutex.Lock(); + if (!state->TaskIsRunning()) { + return; + } + // Signal the current task to stop and wait for it to finish + state->should_shutdown = true; + finish_fut = state->task_finished; + } + // Using future as a condition variable here + Status st = finish_fut.status(); + ARROW_UNUSED(st); + } + State* state; + }; + + static void WorkerTask(std::shared_ptr state) { + state->worker_thread_id.store(::arrow::internal::GetThreadId()); + // We need to capture the state to read while outside the mutex + bool reading = true; + while (reading) { + auto next = state->it.Next(); + // Need to capture state->waiting_future inside the mutex to mark finished outside + Future waiting_future; + { + auto guard = state->mutex.Lock(); + + if (state->should_shutdown) { + state->finished = true; + break; + } + + if (!next.ok() || IsIterationEnd(*next)) { + // Terminal item. Mark finished to true, send this last item, and quit + state->finished = true; + if (!next.ok()) { + state->ClearQueue(); + } + } + // At this point we are going to send an item. Either we will add it to the + // queue or deliver it to a waiting future. + if (state->waiting_future.has_value()) { + waiting_future = std::move(state->waiting_future.value()); + state->waiting_future.reset(); + } else { + state->queue.push(std::move(next)); + // We just filled up the queue so it is time to quit. We may need to notify + // a cleanup task so we transition to Quitting + if (static_cast(state->queue.size()) >= state->max_q) { + state->reading = false; + } + } + reading = state->reading && !state->finished; + } + // This should happen outside the mutex. 
Presumably there is a + // transferring generator on the other end that will quickly transfer any + // callbacks off of this thread so we can continue looping. Still, best not to + // rely on that + if (waiting_future.is_valid()) { + waiting_future.MarkFinished(next); + } + } + // Once we've sent our last item we can notify any waiters that we are done and so + // either state can be cleaned up or a new background task can be started + Future<> task_finished; + { + auto guard = state->mutex.Lock(); + // After we give up the mutex state can be safely deleted. We will no longer + // reference it. We can safely transition to idle now. + task_finished = state->task_finished; + state->task_finished = Future<>(); + state->worker_thread_id.store(kUnlikelyThreadId); + } + task_finished.MarkFinished(); + } + + std::shared_ptr state_; + // state_ is held by both the generator and the background thread so it won't be cleaned + // up when all consumer references are relinquished. cleanup_ is only held by the + // generator so it will be destructed when the last consumer reference is gone. We use + // this to cleanup / stop the background generator in case the consuming end stops + // listening (e.g. due to a downstream error) + std::shared_ptr cleanup_; +}; + +constexpr int kDefaultBackgroundMaxQ = 32; +constexpr int kDefaultBackgroundQRestart = 16; + +/// \brief Create an AsyncGenerator by iterating over an Iterator on a background +/// thread +/// +/// The parameter max_q and q_restart control queue size and background thread task +/// management. If the background task is fast you typically don't want it creating a +/// thread task for every item. Instead the background thread will run until it fills +/// up a readahead queue. +/// +/// Once the queue has filled up the background thread task will terminate (allowing other +/// I/O tasks to use the thread). Once the queue has been drained enough (specified by +/// q_restart) then the background thread task will be restarted. 
If q_restart is too low +/// then you may exhaust the queue waiting for the background thread task to start running +/// again. If it is too high then it will be constantly stopping and restarting the +/// background queue task +/// +/// The "background thread" is a logical thread and will run as tasks on the io_executor. +/// This thread may stop and start when the queue fills up but there will only be one +/// active background thread task at any given time. You MUST transfer away from this +/// background generator. Otherwise there could be a race condition if a callback on the +/// background thread deletes the last consumer reference to the background generator. You +/// can transfer onto the same executor as the background thread, it is only necessary to +/// create a new thread task, not to switch executors. +/// +/// This generator is not async-reentrant +/// +/// This generator will queue up to max_q blocks +template +static Result> MakeBackgroundGenerator( + Iterator iterator, internal::Executor* io_executor, + int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart) { + if (max_q < q_restart) { + return Status::Invalid("max_q must be >= q_restart"); + } + return BackgroundGenerator(std::move(iterator), io_executor, max_q, q_restart); +} + +/// \brief Create an AsyncGenerator by iterating over an Iterator synchronously +/// +/// This should only be used if you know the source iterator does not involve any +/// I/O (or other blocking calls). Otherwise a CPU thread will be blocked and, depending +/// on the complexity of the iterator, it may lead to deadlock. +/// +/// If you are not certain if there will be I/O then it is better to use +/// MakeBackgroundGenerator. If helpful you can think of this as the AsyncGenerator +/// equivalent of Future::MakeFinished +/// +/// It is impossible to call this in an async-reentrant manner since the returned +/// future will be completed by the time it is polled. 
+/// +/// This generator does not queue +template +static Result> MakeBlockingGenerator( + std::shared_ptr> iterator) { + return [it = std::move(iterator)]() mutable -> Future { + return Future::MakeFinished(it->Next()); + }; +} + +template +static Result> MakeBlockingGenerator(Iterator iterator) { + return MakeBlockingGenerator(std::make_shared>(std::move(iterator))); +} + +/// \see MakeGeneratorIterator +template +class GeneratorIterator { + public: + explicit GeneratorIterator(AsyncGenerator source) : source_(std::move(source)) {} + + Result Next() { return source_().result(); } + + private: + AsyncGenerator source_; +}; + +/// \brief Convert an AsyncGenerator to an Iterator which blocks until each future +/// is finished +template +Iterator MakeGeneratorIterator(AsyncGenerator source) { + return Iterator(GeneratorIterator(std::move(source))); +} + +/// \brief Add readahead to an iterator using a background thread. +/// +/// Under the hood this is converting the iterator to a generator using +/// MakeBackgroundGenerator, adding readahead to the converted generator with +/// MakeReadaheadGenerator, and then converting back to an iterator using +/// MakeGeneratorIterator. +template +Result> MakeReadaheadIterator(Iterator it, int readahead_queue_size) { + ARROW_ASSIGN_OR_RAISE(auto io_executor, internal::ThreadPool::Make(1)); + auto max_q = readahead_queue_size; + auto q_restart = std::max(1, max_q / 2); + ARROW_ASSIGN_OR_RAISE( + auto background_generator, + MakeBackgroundGenerator(std::move(it), io_executor.get(), max_q, q_restart)); + // Capture io_executor to keep it alive as long as owned_bg_generator is still + // referenced + AsyncGenerator owned_bg_generator = [io_executor, background_generator]() { + return background_generator(); + }; + return MakeGeneratorIterator(std::move(owned_bg_generator)); +} + +/// \brief Make a generator that returns a single pre-generated future +/// +/// This generator is async-reentrant. 
+template +std::function()> MakeSingleFutureGenerator(Future future) { + assert(future.is_valid()); + auto state = std::make_shared>(std::move(future)); + return [state]() -> Future { + auto fut = std::move(*state); + if (fut.is_valid()) { + return fut; + } else { + return AsyncGeneratorEnd(); + } + }; +} + +/// \brief Make a generator that immediately ends. +/// +/// This generator is async-reentrant. +template +std::function()> MakeEmptyGenerator() { + return []() -> Future { return AsyncGeneratorEnd(); }; +} + +/// \brief Make a generator that always fails with a given error +/// +/// This generator is async-reentrant. +template +AsyncGenerator MakeFailingGenerator(Status st) { + assert(!st.ok()); + auto state = std::make_shared(std::move(st)); + return [state]() -> Future { + auto st = std::move(*state); + if (!st.ok()) { + return st; + } else { + return AsyncGeneratorEnd(); + } + }; +} + +/// \brief Make a generator that always fails with a given error +/// +/// This overload allows inferring the return type from the argument. +template +AsyncGenerator MakeFailingGenerator(const Result& result) { + return MakeFailingGenerator(result.status()); +} + +/// \brief Prepend initial_values onto a generator +/// +/// This generator is async-reentrant but will buffer requests and will not +/// pull from following_values async-reentrantly. 
+template +AsyncGenerator MakeGeneratorStartsWith(std::vector initial_values, + AsyncGenerator following_values) { + auto initial_values_vec_gen = MakeVectorGenerator(std::move(initial_values)); + auto gen_gen = MakeVectorGenerator>( + {std::move(initial_values_vec_gen), std::move(following_values)}); + return MakeConcatenatedGenerator(std::move(gen_gen)); +} + +template +struct CancellableGenerator { + Future operator()() { + if (stop_token.IsStopRequested()) { + return stop_token.Poll(); + } + return source(); + } + + AsyncGenerator source; + StopToken stop_token; +}; + +/// \brief Allow an async generator to be cancelled +/// +/// This generator is async-reentrant +template +AsyncGenerator MakeCancellable(AsyncGenerator source, StopToken stop_token) { + return CancellableGenerator{std::move(source), std::move(stop_token)}; +} + +template +class DefaultIfEmptyGenerator { + public: + DefaultIfEmptyGenerator(AsyncGenerator source, T or_value) + : state_(std::make_shared(std::move(source), std::move(or_value))) {} + + Future operator()() { + if (state_->first) { + state_->first = false; + struct { + T or_value; + + Result operator()(const T& value) { + if (IterationTraits::IsEnd(value)) { + return std::move(or_value); + } + return value; + } + } Continuation; + Continuation.or_value = std::move(state_->or_value); + return state_->source().Then(std::move(Continuation)); + } + return state_->source(); + } + + private: + struct State { + AsyncGenerator source; + T or_value; + bool first; + State(AsyncGenerator source_, T or_value_) + : source(std::move(source_)), or_value(std::move(or_value_)), first(true) {} + }; + std::shared_ptr state_; +}; + +/// \brief If the generator is empty, return the given value, else +/// forward the values from the generator. +/// +/// This generator is async-reentrant. 
+template +AsyncGenerator MakeDefaultIfEmptyGenerator(AsyncGenerator source, T or_value) { + return DefaultIfEmptyGenerator(std::move(source), std::move(or_value)); +} +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..f3c5bf9ef6f52b0a0737348c2a5bdc524e62c251 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include "arrow/type_fwd.h" + +namespace arrow { + +template +using AsyncGenerator = std::function()>; + +template +class MappingGenerator; + +template +class SequencingGenerator; + +template +class TransformingGenerator; + +template +class SerialReadaheadGenerator; + +template +class ReadaheadGenerator; + +template +class PushGenerator; + +template +class MergedGenerator; + +template +struct Enumerated; + +template +class EnumeratingGenerator; + +template +class TransferringGenerator; + +template +class BackgroundGenerator; + +template +class GeneratorIterator; + +template +struct CancellableGenerator; + +template +class DefaultIfEmptyGenerator; + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h new file mode 100644 index 0000000000000000000000000000000000000000..5b80e19d896b746ccc4318bb2f8ce250c7892e66 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +ARROW_EXPORT +std::string base64_encode(std::string_view s); + +ARROW_EXPORT +std::string base64_decode(std::string_view s); + +} // namespace util +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h new file mode 100644 index 0000000000000000000000000000000000000000..eb079e2c548abee644a1d87669a2a11d52196985 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h @@ -0,0 +1,115 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/type.h" +#include "arrow/util/span.h" + +namespace arrow::util { + +inline BinaryViewType::c_type ToInlineBinaryView(const void* data, int32_t size) { + assert(size <= BinaryViewType::kInlineSize); + // Small string: inlined. 
Bytes beyond size are zeroed + BinaryViewType::c_type out; + out.inlined = {size, {}}; + memcpy(&out.inlined.data, data, size); + return out; +} + +inline BinaryViewType::c_type ToInlineBinaryView(std::string_view v) { + assert(v.size() <= BinaryViewType::kInlineSize); + return ToInlineBinaryView(v.data(), static_cast(v.size())); +} + +inline BinaryViewType::c_type ToNonInlineBinaryView(const void* data, int32_t size, + int32_t buffer_index, + int32_t offset) { + // Large string: store index/offset. + BinaryViewType::c_type out; + out.ref = {size, {}, buffer_index, offset}; + memcpy(&out.ref.prefix, data, sizeof(out.ref.prefix)); + return out; +} + +inline BinaryViewType::c_type ToBinaryView(const void* data, int32_t size, + int32_t buffer_index, int32_t offset) { + if (size <= BinaryViewType::kInlineSize) { + return ToInlineBinaryView(data, size); + } + return ToNonInlineBinaryView(data, size, buffer_index, offset); +} + +inline BinaryViewType::c_type ToBinaryView(std::string_view v, int32_t buffer_index, + int32_t offset) { + return ToBinaryView(v.data(), static_cast(v.size()), buffer_index, offset); +} + +template +std::string_view FromBinaryView(const BinaryViewType::c_type& v, + const BufferPtr* data_buffers) { + auto* data = v.is_inline() ? 
v.inlined.data.data() + : data_buffers[v.ref.buffer_index]->data() + v.ref.offset; + return {reinterpret_cast(data), static_cast(v.size())}; +} +template +std::string_view FromBinaryView(BinaryViewType::c_type&&, const BufferPtr*) = delete; + +template +bool EqualBinaryView(BinaryViewType::c_type l, BinaryViewType::c_type r, + const BufferPtr* l_buffers, const BufferPtr* r_buffers) { + int64_t l_size_and_prefix, r_size_and_prefix; + memcpy(&l_size_and_prefix, &l, sizeof(l_size_and_prefix)); + memcpy(&r_size_and_prefix, &r, sizeof(r_size_and_prefix)); + + if (l_size_and_prefix != r_size_and_prefix) return false; + + if (l.is_inline()) { + // The columnar spec mandates that the inlined part be zero-padded, so we can compare + // a word at a time regardless of the exact size. + int64_t l_inlined, r_inlined; + memcpy(&l_inlined, l.inline_data() + BinaryViewType::kPrefixSize, sizeof(l_inlined)); + memcpy(&r_inlined, r.inline_data() + BinaryViewType::kPrefixSize, sizeof(r_inlined)); + return l_inlined == r_inlined; + } + + // Sizes are equal and this is not inline, therefore both are out + // of line and have kPrefixSize first in common. + const uint8_t* l_data = l_buffers[l.ref.buffer_index]->data() + l.ref.offset; + const uint8_t* r_data = r_buffers[r.ref.buffer_index]->data() + r.ref.offset; + return memcmp(l_data + BinaryViewType::kPrefixSize, + r_data + BinaryViewType::kPrefixSize, + l.size() - BinaryViewType::kPrefixSize) == 0; +} + +/// \brief Compute the total size of a list of binary views including null +/// views. +/// +/// This is useful when calculating the necessary memory to store all the string +/// data from the views. 
+inline int64_t SumOfBinaryViewSizes(const BinaryViewType::c_type* views, int64_t length) { + int64_t total = 0; + for (int64_t i = 0; i < length; ++i) { + total += views[i].size(); + } + return total; +} + +} // namespace arrow::util diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h new file mode 100644 index 0000000000000000000000000000000000000000..a436a50b86fe14f84699cba679f6cac882514c19 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h @@ -0,0 +1,515 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_reader.h" +#include "arrow/util/endian.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +struct BitRun { + int64_t length; + // Whether bits are set at this point. 
+ bool set; + + std::string ToString() const { + return std::string("{Length: ") + std::to_string(length) + + ", set=" + std::to_string(set) + "}"; + } +}; + +inline bool operator==(const BitRun& lhs, const BitRun& rhs) { + return lhs.length == rhs.length && lhs.set == rhs.set; +} + +inline bool operator!=(const BitRun& lhs, const BitRun& rhs) { + return lhs.length != rhs.length || lhs.set != rhs.set; +} + +class BitRunReaderLinear { + public: + BitRunReaderLinear(const uint8_t* bitmap, int64_t start_offset, int64_t length) + : reader_(bitmap, start_offset, length) {} + + BitRun NextRun() { + BitRun rl = {/*length=*/0, reader_.IsSet()}; + // Advance while the values are equal and not at the end of list. + while (reader_.position() < reader_.length() && reader_.IsSet() == rl.set) { + rl.length++; + reader_.Next(); + } + return rl; + } + + private: + BitmapReader reader_; +}; + +#if ARROW_LITTLE_ENDIAN +/// A convenience class for counting the number of contiguous set/unset bits +/// in a bitmap. +class ARROW_EXPORT BitRunReader { + public: + /// \brief Constructs new BitRunReader. + /// + /// \param[in] bitmap source data + /// \param[in] start_offset bit offset into the source data + /// \param[in] length number of bits to copy + BitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length); + + /// Returns a new BitRun containing the number of contiguous + /// bits with the same value. length == 0 indicates the + /// end of the bitmap. + BitRun NextRun() { + if (ARROW_PREDICT_FALSE(position_ >= length_)) { + return {/*length=*/0, false}; + } + // This implementation relies on a efficient implementations of + // CountTrailingZeros and assumes that runs are more often then + // not. The logic is to incrementally find the next bit change + // from the current position. This is done by zeroing all + // bits in word_ up to position_ and using the TrailingZeroCount + // to find the index of the next set bit. 
+ + // The runs alternate on each call, so flip the bit. + current_run_bit_set_ = !current_run_bit_set_; + + int64_t start_position = position_; + int64_t start_bit_offset = start_position & 63; + // Invert the word for proper use of CountTrailingZeros and + // clear bits so CountTrailingZeros can do it magic. + word_ = ~word_ & ~bit_util::LeastSignificantBitMask(start_bit_offset); + + // Go forward until the next change from unset to set. + int64_t new_bits = bit_util::CountTrailingZeros(word_) - start_bit_offset; + position_ += new_bits; + + if (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) && + ARROW_PREDICT_TRUE(position_ < length_)) { + // Continue extending position while we can advance an entire word. + // (updates position_ accordingly). + AdvanceUntilChange(); + } + + return {/*length=*/position_ - start_position, current_run_bit_set_}; + } + + private: + void AdvanceUntilChange() { + int64_t new_bits = 0; + do { + // Advance the position of the bitmap for loading. + bitmap_ += sizeof(uint64_t); + LoadNextWord(); + new_bits = bit_util::CountTrailingZeros(word_); + // Continue calculating run length. + position_ += new_bits; + } while (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) && + ARROW_PREDICT_TRUE(position_ < length_) && new_bits > 0); + } + + void LoadNextWord() { return LoadWord(length_ - position_); } + + // Helper method for Loading the next word. + void LoadWord(int64_t bits_remaining) { + word_ = 0; + // we need at least an extra byte in this case. + if (ARROW_PREDICT_TRUE(bits_remaining >= 64)) { + std::memcpy(&word_, bitmap_, 8); + } else { + int64_t bytes_to_load = bit_util::BytesForBits(bits_remaining); + auto word_ptr = reinterpret_cast(&word_); + std::memcpy(word_ptr, bitmap_, bytes_to_load); + // Ensure stoppage at last bit in bitmap by reversing the next higher + // order bit. + bit_util::SetBitTo(word_ptr, bits_remaining, + !bit_util::GetBit(word_ptr, bits_remaining - 1)); + } + + // Two cases: + // 1. 
For unset, CountTrailingZeros works naturally so we don't + // invert the word. + // 2. Otherwise invert so we can use CountTrailingZeros. + if (current_run_bit_set_) { + word_ = ~word_; + } + } + const uint8_t* bitmap_; + int64_t position_; + int64_t length_; + uint64_t word_; + bool current_run_bit_set_; +}; +#else +using BitRunReader = BitRunReaderLinear; +#endif + +struct SetBitRun { + int64_t position; + int64_t length; + + bool AtEnd() const { return length == 0; } + + std::string ToString() const { + return std::string("{pos=") + std::to_string(position) + + ", len=" + std::to_string(length) + "}"; + } + + bool operator==(const SetBitRun& other) const { + return position == other.position && length == other.length; + } + bool operator!=(const SetBitRun& other) const { + return position != other.position || length != other.length; + } +}; + +template +class BaseSetBitRunReader { + public: + /// \brief Constructs new SetBitRunReader. + /// + /// \param[in] bitmap source data + /// \param[in] start_offset bit offset into the source data + /// \param[in] length number of bits to copy + ARROW_NOINLINE + BaseSetBitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length) + : bitmap_(util::MakeNonNull(bitmap)), + length_(length), + remaining_(length_), + current_word_(0), + current_num_bits_(0) { + if (Reverse) { + bitmap_ += (start_offset + length) / 8; + const int8_t end_bit_offset = static_cast((start_offset + length) % 8); + if (length > 0 && end_bit_offset) { + // Get LSBs from last byte + ++bitmap_; + current_num_bits_ = + std::min(static_cast(length), static_cast(end_bit_offset)); + current_word_ = LoadPartialWord(8 - end_bit_offset, current_num_bits_); + } + } else { + bitmap_ += start_offset / 8; + const int8_t bit_offset = static_cast(start_offset % 8); + if (length > 0 && bit_offset) { + // Get MSBs from first byte + current_num_bits_ = + std::min(static_cast(length), static_cast(8 - bit_offset)); + current_word_ = 
LoadPartialWord(bit_offset, current_num_bits_); + } + } + } + + ARROW_NOINLINE + SetBitRun NextRun() { + int64_t pos = 0; + int64_t len = 0; + if (current_num_bits_) { + const auto run = FindCurrentRun(); + assert(remaining_ >= 0); + if (run.length && current_num_bits_) { + // The run ends in current_word_ + return AdjustRun(run); + } + pos = run.position; + len = run.length; + } + if (!len) { + // We didn't get any ones in current_word_, so we can skip any zeros + // in the following words + SkipNextZeros(); + if (remaining_ == 0) { + return {0, 0}; + } + assert(current_num_bits_); + pos = position(); + } else if (!current_num_bits_) { + if (ARROW_PREDICT_TRUE(remaining_ >= 64)) { + current_word_ = LoadFullWord(); + current_num_bits_ = 64; + } else if (remaining_ > 0) { + current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_); + current_num_bits_ = static_cast(remaining_); + } else { + // No bits remaining, perhaps we found a run? + return AdjustRun({pos, len}); + } + // If current word starts with a zero, we got a full run + if (!(current_word_ & kFirstBit)) { + return AdjustRun({pos, len}); + } + } + // Current word should now start with a set bit + len += CountNextOnes(); + return AdjustRun({pos, len}); + } + + protected: + int64_t position() const { + if (Reverse) { + return remaining_; + } else { + return length_ - remaining_; + } + } + + SetBitRun AdjustRun(SetBitRun run) { + if (Reverse) { + assert(run.position >= run.length); + run.position -= run.length; + } + return run; + } + + uint64_t LoadFullWord() { + uint64_t word; + if (Reverse) { + bitmap_ -= 8; + } + memcpy(&word, bitmap_, 8); + if (!Reverse) { + bitmap_ += 8; + } + return bit_util::ToLittleEndian(word); + } + + uint64_t LoadPartialWord(int8_t bit_offset, int64_t num_bits) { + assert(num_bits > 0); + uint64_t word = 0; + const int64_t num_bytes = bit_util::BytesForBits(num_bits); + if (Reverse) { + // Read in the most significant bytes of the word + bitmap_ -= num_bytes; + 
memcpy(reinterpret_cast(&word) + 8 - num_bytes, bitmap_, num_bytes); + // XXX MostSignificantBitmask + return (bit_util::ToLittleEndian(word) << bit_offset) & + ~bit_util::LeastSignificantBitMask(64 - num_bits); + } else { + memcpy(&word, bitmap_, num_bytes); + bitmap_ += num_bytes; + return (bit_util::ToLittleEndian(word) >> bit_offset) & + bit_util::LeastSignificantBitMask(num_bits); + } + } + + void SkipNextZeros() { + assert(current_num_bits_ == 0); + while (ARROW_PREDICT_TRUE(remaining_ >= 64)) { + current_word_ = LoadFullWord(); + const auto num_zeros = CountFirstZeros(current_word_); + if (num_zeros < 64) { + // Run of zeros ends here + current_word_ = ConsumeBits(current_word_, num_zeros); + current_num_bits_ = 64 - num_zeros; + remaining_ -= num_zeros; + assert(remaining_ >= 0); + assert(current_num_bits_ >= 0); + return; + } + remaining_ -= 64; + } + // Run of zeros continues in last bitmap word + if (remaining_ > 0) { + current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_); + current_num_bits_ = static_cast(remaining_); + const auto num_zeros = + std::min(current_num_bits_, CountFirstZeros(current_word_)); + current_word_ = ConsumeBits(current_word_, num_zeros); + current_num_bits_ -= num_zeros; + remaining_ -= num_zeros; + assert(remaining_ >= 0); + assert(current_num_bits_ >= 0); + } + } + + int64_t CountNextOnes() { + assert(current_word_ & kFirstBit); + + int64_t len; + if (~current_word_) { + const auto num_ones = CountFirstZeros(~current_word_); + assert(num_ones <= current_num_bits_); + assert(num_ones <= remaining_); + remaining_ -= num_ones; + current_word_ = ConsumeBits(current_word_, num_ones); + current_num_bits_ -= num_ones; + if (current_num_bits_) { + // Run of ones ends here + return num_ones; + } + len = num_ones; + } else { + // current_word_ is all ones + remaining_ -= 64; + current_num_bits_ = 0; + len = 64; + } + + while (ARROW_PREDICT_TRUE(remaining_ >= 64)) { + current_word_ = LoadFullWord(); + const auto num_ones = 
CountFirstZeros(~current_word_); + len += num_ones; + remaining_ -= num_ones; + if (num_ones < 64) { + // Run of ones ends here + current_word_ = ConsumeBits(current_word_, num_ones); + current_num_bits_ = 64 - num_ones; + return len; + } + } + // Run of ones continues in last bitmap word + if (remaining_ > 0) { + current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_); + current_num_bits_ = static_cast(remaining_); + const auto num_ones = CountFirstZeros(~current_word_); + assert(num_ones <= current_num_bits_); + assert(num_ones <= remaining_); + current_word_ = ConsumeBits(current_word_, num_ones); + current_num_bits_ -= num_ones; + remaining_ -= num_ones; + len += num_ones; + } + return len; + } + + SetBitRun FindCurrentRun() { + // Skip any pending zeros + const auto num_zeros = CountFirstZeros(current_word_); + if (num_zeros >= current_num_bits_) { + remaining_ -= current_num_bits_; + current_word_ = 0; + current_num_bits_ = 0; + return {0, 0}; + } + assert(num_zeros <= remaining_); + current_word_ = ConsumeBits(current_word_, num_zeros); + current_num_bits_ -= num_zeros; + remaining_ -= num_zeros; + const int64_t pos = position(); + // Count any ones + const auto num_ones = CountFirstZeros(~current_word_); + assert(num_ones <= current_num_bits_); + assert(num_ones <= remaining_); + current_word_ = ConsumeBits(current_word_, num_ones); + current_num_bits_ -= num_ones; + remaining_ -= num_ones; + return {pos, num_ones}; + } + + inline int CountFirstZeros(uint64_t word); + inline uint64_t ConsumeBits(uint64_t word, int32_t num_bits); + + const uint8_t* bitmap_; + const int64_t length_; + int64_t remaining_; + uint64_t current_word_; + int32_t current_num_bits_; + + static constexpr uint64_t kFirstBit = Reverse ? 
0x8000000000000000ULL : 1; +}; + +template <> +inline int BaseSetBitRunReader::CountFirstZeros(uint64_t word) { + return bit_util::CountTrailingZeros(word); +} + +template <> +inline int BaseSetBitRunReader::CountFirstZeros(uint64_t word) { + return bit_util::CountLeadingZeros(word); +} + +template <> +inline uint64_t BaseSetBitRunReader::ConsumeBits(uint64_t word, int32_t num_bits) { + return word >> num_bits; +} + +template <> +inline uint64_t BaseSetBitRunReader::ConsumeBits(uint64_t word, int32_t num_bits) { + return word << num_bits; +} + +using SetBitRunReader = BaseSetBitRunReader; +using ReverseSetBitRunReader = BaseSetBitRunReader; + +// Functional-style bit run visitors. + +// XXX: Try to make this function small so the compiler can inline and optimize +// the `visit` function, which is normally a hot loop with vectorizable code. +// - don't inline SetBitRunReader constructor, it doesn't hurt performance +// - un-inline NextRun hurts 'many null' cases a bit, but improves normal cases +template +inline Status VisitSetBitRuns(const uint8_t* bitmap, int64_t offset, int64_t length, + Visit&& visit) { + if (bitmap == NULLPTR) { + // Assuming all set (as in a null bitmap) + return visit(static_cast(0), static_cast(length)); + } + SetBitRunReader reader(bitmap, offset, length); + while (true) { + const auto run = reader.NextRun(); + if (run.length == 0) { + break; + } + ARROW_RETURN_NOT_OK(visit(run.position, run.length)); + } + return Status::OK(); +} + +template +inline void VisitSetBitRunsVoid(const uint8_t* bitmap, int64_t offset, int64_t length, + Visit&& visit) { + if (bitmap == NULLPTR) { + // Assuming all set (as in a null bitmap) + visit(static_cast(0), static_cast(length)); + return; + } + SetBitRunReader reader(bitmap, offset, length); + while (true) { + const auto run = reader.NextRun(); + if (run.length == 0) { + break; + } + visit(run.position, run.length); + } +} + +template +inline Status VisitSetBitRuns(const std::shared_ptr& bitmap, int64_t 
offset, + int64_t length, Visit&& visit) { + return VisitSetBitRuns(bitmap ? bitmap->data() : NULLPTR, offset, length, + std::forward(visit)); +} + +template +inline void VisitSetBitRunsVoid(const std::shared_ptr& bitmap, int64_t offset, + int64_t length, Visit&& visit) { + VisitSetBitRunsVoid(bitmap ? bitmap->data() : NULLPTR, offset, length, + std::forward(visit)); +} + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h new file mode 100644 index 0000000000000000000000000000000000000000..e7eb3f833ea8a2d044af48664c0f067ed18e1ca9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h @@ -0,0 +1,369 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#if defined(_MSC_VER) +# if defined(_M_AMD64) || defined(_M_X64) +# include // IWYU pragma: keep +# endif + +# pragma intrinsic(_BitScanReverse) +# pragma intrinsic(_BitScanForward) +# define ARROW_POPCOUNT64 __popcnt64 +# define ARROW_POPCOUNT32 __popcnt +#else +# define ARROW_POPCOUNT64 __builtin_popcountll +# define ARROW_POPCOUNT32 __builtin_popcount +#endif + +#include +#include + +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace detail { + +template +typename std::make_unsigned::type as_unsigned(Integer x) { + return static_cast::type>(x); +} + +} // namespace detail + +namespace bit_util { + +// The number of set bits in a given unsigned byte value, pre-computed +// +// Generated with the following Python code +// output = 'static constexpr uint8_t kBytePopcount[] = {{{0}}};' +// popcounts = [str(bin(i).count('1')) for i in range(0, 256)] +// print(output.format(', '.join(popcounts))) +static constexpr uint8_t kBytePopcount[] = { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, + 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, + 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, + 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, + 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, + 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, + 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, + 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, + 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8}; + +static inline uint64_t PopCount(uint64_t bitmap) { return ARROW_POPCOUNT64(bitmap); } +static inline uint32_t PopCount(uint32_t bitmap) { return ARROW_POPCOUNT32(bitmap); } + +// +// 
// Bit-related computations on integer values
//

// Returns the ceil of value/divisor
constexpr int64_t CeilDiv(int64_t value, int64_t divisor) {
  return (value == 0) ? 0 : 1 + (value - 1) / divisor;
}

// Return the number of bytes needed to fit the given number of bits
constexpr int64_t BytesForBits(int64_t bits) {
  // This formula avoids integer overflow on very large `bits`
  return (bits >> 3) + ((bits & 7) != 0);
}

constexpr bool IsPowerOf2(int64_t value) {
  return value > 0 && (value & (value - 1)) == 0;
}

constexpr bool IsPowerOf2(uint64_t value) {
  return value > 0 && (value & (value - 1)) == 0;
}

// Returns the smallest power of two that contains v. If v is already a
// power of two, it is returned as is.
static inline int64_t NextPower2(int64_t n) {
  // Taken from
  // http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
  n--;
  n |= n >> 1;
  n |= n >> 2;
  n |= n >> 4;
  n |= n >> 8;
  n |= n >> 16;
  n |= n >> 32;
  n++;
  return n;
}

constexpr bool IsMultipleOf64(int64_t n) { return (n & 63) == 0; }

constexpr bool IsMultipleOf8(int64_t n) { return (n & 7) == 0; }

// Returns a mask for the bit_index lower order bits.
// Only valid for bit_index in the range [0, 64).
constexpr uint64_t LeastSignificantBitMask(int64_t bit_index) {
  // NOTE(review): the <uint64_t> argument of this cast was lost during text
  // extraction; without it the shift is performed on a plain int and
  // overflows for bit_index >= 31. Restored here.
  return (static_cast<uint64_t>(1) << bit_index) - 1;
}

// Returns 'value' rounded up to the nearest multiple of 'factor'
constexpr int64_t RoundUp(int64_t value, int64_t factor) {
  return CeilDiv(value, factor) * factor;
}

// Returns 'value' rounded down to the nearest multiple of 'factor'
constexpr int64_t RoundDown(int64_t value, int64_t factor) {
  return (value / factor) * factor;
}

// Returns 'value' rounded up to the nearest multiple of 'factor' when factor
// is a power of two.
// The result is undefined on overflow, i.e. if `value > 2**64 - factor`,
// since we cannot return the correct result which would be 2**64.
+constexpr int64_t RoundUpToPowerOf2(int64_t value, int64_t factor) { + // DCHECK(value >= 0); + // DCHECK(IsPowerOf2(factor)); + return (value + (factor - 1)) & ~(factor - 1); +} + +constexpr uint64_t RoundUpToPowerOf2(uint64_t value, uint64_t factor) { + // DCHECK(IsPowerOf2(factor)); + return (value + (factor - 1)) & ~(factor - 1); +} + +constexpr int64_t RoundUpToMultipleOf8(int64_t num) { return RoundUpToPowerOf2(num, 8); } + +constexpr int64_t RoundUpToMultipleOf64(int64_t num) { + return RoundUpToPowerOf2(num, 64); +} + +// Returns the number of bytes covering a sliced bitmap. Find the length +// rounded to cover full bytes on both extremities. +// +// The following example represents a slice (offset=10, length=9) +// +// 0 8 16 24 +// |-------|-------|------| +// [ ] (slice) +// [ ] (same slice aligned to bytes bounds, length=16) +// +// The covering bytes is the length (in bytes) of this new aligned slice. +constexpr int64_t CoveringBytes(int64_t offset, int64_t length) { + return (bit_util::RoundUp(length + offset, 8) - bit_util::RoundDown(offset, 8)) / 8; +} + +// Returns the 'num_bits' least-significant bits of 'v'. +static inline uint64_t TrailingBits(uint64_t v, int num_bits) { + if (ARROW_PREDICT_FALSE(num_bits == 0)) return 0; + if (ARROW_PREDICT_FALSE(num_bits >= 64)) return v; + int n = 64 - num_bits; + return (v << n) >> n; +} + +/// \brief Count the number of leading zeros in an unsigned integer. 
static inline int CountLeadingZeros(uint32_t value) {
#if defined(__clang__) || defined(__GNUC__)
  if (value == 0) return 32;
  return static_cast<int>(__builtin_clz(value));
#elif defined(_MSC_VER)
  unsigned long index;                                               // NOLINT
  if (_BitScanReverse(&index, static_cast<unsigned long>(value))) {  // NOLINT
    return 31 - static_cast<int>(index);
  } else {
    return 32;
  }
#else
  // Portable fallback: shift right until the value is exhausted.
  int bitpos = 0;
  while (value != 0) {
    value >>= 1;
    ++bitpos;
  }
  return 32 - bitpos;
#endif
}

static inline int CountLeadingZeros(uint64_t value) {
#if defined(__clang__) || defined(__GNUC__)
  if (value == 0) return 64;
  return static_cast<int>(__builtin_clzll(value));
#elif defined(_MSC_VER)
  unsigned long index;                     // NOLINT
  if (_BitScanReverse64(&index, value)) {  // NOLINT
    return 63 - static_cast<int>(index);
  } else {
    return 64;
  }
#else
  int bitpos = 0;
  while (value != 0) {
    value >>= 1;
    ++bitpos;
  }
  return 64 - bitpos;
#endif
}

static inline int CountTrailingZeros(uint32_t value) {
#if defined(__clang__) || defined(__GNUC__)
  if (value == 0) return 32;
  return static_cast<int>(__builtin_ctzl(value));
#elif defined(_MSC_VER)
  unsigned long index;  // NOLINT
  if (_BitScanForward(&index, value)) {
    return static_cast<int>(index);
  } else {
    return 32;
  }
#else
  int bitpos = 0;
  if (value) {
    // BUG FIX: the loop condition was `value & 1 == 0`, which parses as
    // `value & (1 == 0)` == 0 and therefore never iterates, always returning
    // 0. Parenthesized so the low bit is actually tested.
    while ((value & 1) == 0) {
      value >>= 1;
      ++bitpos;
    }
  } else {
    bitpos = 32;
  }
  return bitpos;
#endif
}

static inline int CountTrailingZeros(uint64_t value) {
#if defined(__clang__) || defined(__GNUC__)
  if (value == 0) return 64;
  return static_cast<int>(__builtin_ctzll(value));
#elif defined(_MSC_VER)
  unsigned long index;  // NOLINT
  if (_BitScanForward64(&index, value)) {
    return static_cast<int>(index);
  } else {
    return 64;
  }
#else
  int bitpos = 0;
  if (value) {
    // BUG FIX: same operator-precedence bug as the 32-bit overload above.
    while ((value & 1) == 0) {
      value >>= 1;
      ++bitpos;
    }
  } else {
    bitpos = 64;
  }
  return bitpos;
#endif
}

// Returns the minimum number of bits needed to represent an unsigned value
inline int NumRequiredBits(uint64_t x) { return 64 - CountLeadingZeros(x); } + +// Returns ceil(log2(x)). +static inline int Log2(uint64_t x) { + // DCHECK_GT(x, 0); + return NumRequiredBits(x - 1); +} + +// +// Utilities for reading and writing individual bits by their index +// in a memory area. +// + +// Bitmask selecting the k-th bit in a byte +static constexpr uint8_t kBitmask[] = {1, 2, 4, 8, 16, 32, 64, 128}; + +// the bitwise complement version of kBitmask +static constexpr uint8_t kFlippedBitmask[] = {254, 253, 251, 247, 239, 223, 191, 127}; + +// Bitmask selecting the (k - 1) preceding bits in a byte +static constexpr uint8_t kPrecedingBitmask[] = {0, 1, 3, 7, 15, 31, 63, 127}; +static constexpr uint8_t kPrecedingWrappingBitmask[] = {255, 1, 3, 7, 15, 31, 63, 127}; + +// the bitwise complement version of kPrecedingBitmask +static constexpr uint8_t kTrailingBitmask[] = {255, 254, 252, 248, 240, 224, 192, 128}; + +static constexpr bool GetBit(const uint8_t* bits, uint64_t i) { + return (bits[i >> 3] >> (i & 0x07)) & 1; +} + +// Gets the i-th bit from a byte. Should only be used with i <= 7. 
+static constexpr bool GetBitFromByte(uint8_t byte, uint8_t i) { + return byte & kBitmask[i]; +} + +static inline void ClearBit(uint8_t* bits, int64_t i) { + bits[i / 8] &= kFlippedBitmask[i % 8]; +} + +static inline void SetBit(uint8_t* bits, int64_t i) { bits[i / 8] |= kBitmask[i % 8]; } + +static inline void SetBitTo(uint8_t* bits, int64_t i, bool bit_is_set) { + // https://graphics.stanford.edu/~seander/bithacks.html + // "Conditionally set or clear bits without branching" + // NOTE: this seems to confuse Valgrind as it reads from potentially + // uninitialized memory + bits[i / 8] ^= static_cast(-static_cast(bit_is_set) ^ bits[i / 8]) & + kBitmask[i % 8]; +} + +/// \brief set or clear a range of bits quickly +ARROW_EXPORT +void SetBitsTo(uint8_t* bits, int64_t start_offset, int64_t length, bool bits_are_set); + +/// \brief Sets all bits in the bitmap to true +ARROW_EXPORT +void SetBitmap(uint8_t* data, int64_t offset, int64_t length); + +/// \brief Clears all bits in the bitmap (set to false) +ARROW_EXPORT +void ClearBitmap(uint8_t* data, int64_t offset, int64_t length); + +/// Returns a mask with lower i bits set to 1. If i >= sizeof(Word)*8, all-ones will be +/// returned +/// ex: +/// ref: https://stackoverflow.com/a/59523400 +template +constexpr Word PrecedingWordBitmask(unsigned int const i) { + return static_cast(static_cast(i < sizeof(Word) * 8) + << (i & (sizeof(Word) * 8 - 1))) - + 1; +} +static_assert(PrecedingWordBitmask(0) == 0x00, ""); +static_assert(PrecedingWordBitmask(4) == 0x0f, ""); +static_assert(PrecedingWordBitmask(8) == 0xff, ""); +static_assert(PrecedingWordBitmask(8) == 0x00ff, ""); + +/// \brief Create a word with low `n` bits from `low` and high `sizeof(Word)-n` bits +/// from `high`. +/// Word ret +/// for (i = 0; i < sizeof(Word)*8; i++){ +/// ret[i]= i < n ? 
low[i]: high[i]; +/// } +template +constexpr Word SpliceWord(int n, Word low, Word high) { + return (high & ~PrecedingWordBitmask(n)) | (low & PrecedingWordBitmask(n)); +} + +/// \brief Pack integers into a bitmap in batches of 8 +template +void PackBits(const uint32_t* values, uint8_t* out) { + for (int i = 0; i < batch_size / 8; ++i) { + *out++ = static_cast(values[0] | values[1] << 1 | values[2] << 2 | + values[3] << 3 | values[4] << 4 | values[5] << 5 | + values[6] << 6 | values[7] << 7); + values += 8; + } +} + +} // namespace bit_util +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h new file mode 100644 index 0000000000000000000000000000000000000000..4750e697fc7972e8ad57766ffd1134cf3e99fd14 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h @@ -0,0 +1,466 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_ops.h" +#include "arrow/util/bitmap_reader.h" +#include "arrow/util/bitmap_writer.h" +#include "arrow/util/compare.h" +#include "arrow/util/endian.h" +#include "arrow/util/functional.h" +#include "arrow/util/span.h" +#include "arrow/util/string_builder.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class BooleanArray; + +namespace internal { + +class ARROW_EXPORT Bitmap : public util::ToStringOstreamable, + public util::EqualityComparable { + public: + Bitmap() = default; + + Bitmap(const std::shared_ptr& buffer, int64_t offset, int64_t length) + : data_(buffer->data()), offset_(offset), length_(length) { + if (buffer->is_mutable()) { + mutable_data_ = buffer->mutable_data(); + } + } + + Bitmap(const void* data, int64_t offset, int64_t length) + : data_(reinterpret_cast(data)), offset_(offset), length_(length) {} + + Bitmap(void* data, int64_t offset, int64_t length) + : data_(reinterpret_cast(data)), + mutable_data_(reinterpret_cast(data)), + offset_(offset), + length_(length) {} + + Bitmap Slice(int64_t offset) const { + if (mutable_data_ != NULLPTR) { + return {mutable_data_, offset_ + offset, length_ - offset}; + } else { + return {data_, offset_ + offset, length_ - offset}; + } + } + + Bitmap Slice(int64_t offset, int64_t length) const { + if (mutable_data_ != NULLPTR) { + return {mutable_data_, offset_ + offset, length}; + } else { + return {data_, offset_ + offset, length}; + } + } + + std::string ToString() const; + + bool Equals(const Bitmap& other) const; + + std::string Diff(const Bitmap& other) const; + + bool GetBit(int64_t i) const { return bit_util::GetBit(data_, i + offset_); } + + bool operator[](int64_t i) const { return GetBit(i); } + + void SetBitTo(int64_t i, bool v) const { + bit_util::SetBitTo(mutable_data_, i + offset_, 
v); + } + + void SetBitsTo(bool v) { bit_util::SetBitsTo(mutable_data_, offset_, length_, v); } + + void CopyFrom(const Bitmap& other); + void CopyFromInverted(const Bitmap& other); + + /// \brief Visit bits from each bitmap as bitset + /// + /// All bitmaps must have identical length. + template + static void VisitBits(const Bitmap (&bitmaps)[N], Visitor&& visitor) { + int64_t bit_length = BitLength(bitmaps, N); + std::bitset bits; + for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) { + for (size_t i = 0; i < N; ++i) { + bits[i] = bitmaps[i].GetBit(bit_i); + } + visitor(bits); + } + } + + /// \brief Visit bits from each bitmap as bitset + /// + /// All bitmaps must have identical length. + template + static void VisitBits(const std::array& bitmaps, Visitor&& visitor) { + int64_t bit_length = BitLength(bitmaps); + std::bitset bits; + for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) { + for (size_t i = 0; i < N; ++i) { + bits[i] = bitmaps[i].GetBit(bit_i); + } + visitor(bits); + } + } + + /// \brief Visit words of bits from each bitmap as array + /// + /// All bitmaps must have identical length. The first bit in a visited bitmap + /// may be offset within the first visited word, but words will otherwise contain + /// densely packed bits loaded from the bitmap. That offset within the first word is + /// returned. + /// + /// TODO(bkietz) allow for early termination + // NOTE: this function is efficient on 3+ sufficiently large bitmaps. + // It also has a large prolog / epilog overhead and should be used + // carefully in other cases. + // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid + // and BitmapUInt64Reader. 
+ template >::type::value_type> + static int64_t VisitWords(const Bitmap (&bitmaps_arg)[N], Visitor&& visitor) { + constexpr int64_t kBitWidth = sizeof(Word) * 8; + + // local, mutable variables which will be sliced/decremented to represent consumption: + Bitmap bitmaps[N]; + int64_t offsets[N]; + int64_t bit_length = BitLength(bitmaps_arg, N); + util::span words[N]; + for (size_t i = 0; i < N; ++i) { + bitmaps[i] = bitmaps_arg[i]; + offsets[i] = bitmaps[i].template word_offset(); + assert(offsets[i] >= 0 && offsets[i] < kBitWidth); + words[i] = bitmaps[i].template words(); + } + + auto consume = [&](int64_t consumed_bits) { + for (size_t i = 0; i < N; ++i) { + bitmaps[i] = bitmaps[i].Slice(consumed_bits, bit_length - consumed_bits); + offsets[i] = bitmaps[i].template word_offset(); + assert(offsets[i] >= 0 && offsets[i] < kBitWidth); + words[i] = bitmaps[i].template words(); + } + bit_length -= consumed_bits; + }; + + std::array visited_words; + visited_words.fill(0); + + if (bit_length <= kBitWidth * 2) { + // bitmaps fit into one or two words so don't bother with optimization + while (bit_length > 0) { + auto leading_bits = std::min(bit_length, kBitWidth); + SafeLoadWords(bitmaps, 0, leading_bits, false, &visited_words); + visitor(visited_words); + consume(leading_bits); + } + return 0; + } + + int64_t max_offset = *std::max_element(offsets, offsets + N); + int64_t min_offset = *std::min_element(offsets, offsets + N); + if (max_offset > 0) { + // consume leading bits + auto leading_bits = kBitWidth - min_offset; + SafeLoadWords(bitmaps, 0, leading_bits, true, &visited_words); + visitor(visited_words); + consume(leading_bits); + } + assert(*std::min_element(offsets, offsets + N) == 0); + + int64_t whole_word_count = bit_length / kBitWidth; + assert(whole_word_count >= 1); + + if (min_offset == max_offset) { + // all offsets were identical, all leading bits have been consumed + assert( + std::all_of(offsets, offsets + N, [](int64_t offset) { return offset == 0; 
})); + + for (int64_t word_i = 0; word_i < whole_word_count; ++word_i) { + for (size_t i = 0; i < N; ++i) { + visited_words[i] = words[i][word_i]; + } + visitor(visited_words); + } + consume(whole_word_count * kBitWidth); + } else { + // leading bits from potentially incomplete words have been consumed + + // word_i such that words[i][word_i] and words[i][word_i + 1] are lie entirely + // within the bitmap for all i + for (int64_t word_i = 0; word_i < whole_word_count - 1; ++word_i) { + for (size_t i = 0; i < N; ++i) { + if (offsets[i] == 0) { + visited_words[i] = words[i][word_i]; + } else { + auto words0 = bit_util::ToLittleEndian(words[i][word_i]); + auto words1 = bit_util::ToLittleEndian(words[i][word_i + 1]); + visited_words[i] = bit_util::FromLittleEndian( + (words0 >> offsets[i]) | (words1 << (kBitWidth - offsets[i]))); + } + } + visitor(visited_words); + } + consume((whole_word_count - 1) * kBitWidth); + + SafeLoadWords(bitmaps, 0, kBitWidth, false, &visited_words); + + visitor(visited_words); + consume(kBitWidth); + } + + // load remaining bits + if (bit_length > 0) { + SafeLoadWords(bitmaps, 0, bit_length, false, &visited_words); + visitor(visited_words); + } + + return min_offset; + } + + template >::type::value_type> + static void RunVisitWordsAndWriteLoop(int64_t bit_length, + std::array& readers, + std::array& writers, + Visitor&& visitor) { + constexpr int64_t kBitWidth = sizeof(Word) * 8; + + std::array visited_words; + std::array output_words; + + // every reader will have same number of words, since they are same length'ed + // TODO($JIRA) this will be inefficient in some cases. 
When there are offsets beyond + // Word boundary, every Word would have to be created from 2 adjoining Words + auto n_words = readers[0].words(); + bit_length -= n_words * kBitWidth; + while (n_words--) { + // first collect all words to visited_words array + for (size_t i = 0; i < N; i++) { + visited_words[i] = readers[i].NextWord(); + } + visitor(visited_words, &output_words); + for (size_t i = 0; i < M; i++) { + writers[i].PutNextWord(output_words[i]); + } + } + + // every reader will have same number of trailing bytes, because of the above reason + // tailing portion could be more than one word! (ref: BitmapWordReader constructor) + // remaining full/ partial words to write + + if (bit_length) { + // convert the word visitor lambda to a byte_visitor + auto byte_visitor = [&](const std::array& in, + std::array* out) { + std::array in_words; + std::array out_words; + std::copy(in.begin(), in.end(), in_words.begin()); + visitor(in_words, &out_words); + for (size_t i = 0; i < M; i++) { + out->at(i) = static_cast(out_words[i]); + } + }; + + std::array visited_bytes; + std::array output_bytes; + int n_bytes = readers[0].trailing_bytes(); + while (n_bytes--) { + visited_bytes.fill(0); + output_bytes.fill(0); + int valid_bits; + for (size_t i = 0; i < N; i++) { + visited_bytes[i] = readers[i].NextTrailingByte(valid_bits); + } + byte_visitor(visited_bytes, &output_bytes); + for (size_t i = 0; i < M; i++) { + writers[i].PutNextTrailingByte(output_bytes[i], valid_bits); + } + } + } + } + + /// \brief Visit words of bits from each input bitmap as array and collects + /// outputs to an array, to be written into the output bitmaps accordingly. + /// + /// All bitmaps must have identical length. The first bit in a visited bitmap + /// may be offset within the first visited word, but words will otherwise contain + /// densely packed bits loaded from the bitmap. That offset within the first word is + /// returned. 
+ /// Visitor is expected to have the following signature + /// [](const std::array& in_words, std::array* out_words){...} + /// + // NOTE: this function is efficient on 3+ sufficiently large bitmaps. + // It also has a large prolog / epilog overhead and should be used + // carefully in other cases. + // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid + // and BitmapUInt64Reader. + template >::type::value_type> + static void VisitWordsAndWrite(const std::array& bitmaps_arg, + std::array* out_bitmaps_arg, + Visitor&& visitor) { + int64_t bit_length = BitLength(bitmaps_arg); + assert(bit_length == BitLength(*out_bitmaps_arg)); + + // if both input and output bitmaps have no byte offset, then use special template + if (std::all_of(bitmaps_arg.begin(), bitmaps_arg.end(), + [](const Bitmap& b) { return b.offset_ % 8 == 0; }) && + std::all_of(out_bitmaps_arg->begin(), out_bitmaps_arg->end(), + [](const Bitmap& b) { return b.offset_ % 8 == 0; })) { + std::array, N> readers; + for (size_t i = 0; i < N; ++i) { + const Bitmap& in_bitmap = bitmaps_arg[i]; + readers[i] = BitmapWordReader( + in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_); + } + + std::array, M> writers; + for (size_t i = 0; i < M; ++i) { + const Bitmap& out_bitmap = out_bitmaps_arg->at(i); + writers[i] = BitmapWordWriter( + out_bitmap.mutable_data_, out_bitmap.offset_, out_bitmap.length_); + } + + RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor); + } else { + std::array, N> readers; + for (size_t i = 0; i < N; ++i) { + const Bitmap& in_bitmap = bitmaps_arg[i]; + readers[i] = + BitmapWordReader(in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_); + } + + std::array, M> writers; + for (size_t i = 0; i < M; ++i) { + const Bitmap& out_bitmap = out_bitmaps_arg->at(i); + writers[i] = BitmapWordWriter(out_bitmap.mutable_data_, out_bitmap.offset_, + out_bitmap.length_); + } + + RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor); + } + } + + const 
uint8_t* data() const { return data_; } + uint8_t* mutable_data() { return mutable_data_; } + + /// offset of first bit relative to buffer().data() + int64_t offset() const { return offset_; } + + /// number of bits in this Bitmap + int64_t length() const { return length_; } + + /// span of all bytes which contain any bit in this Bitmap + util::span bytes() const { + auto byte_offset = offset_ / 8; + auto byte_count = bit_util::CeilDiv(offset_ + length_, 8) - byte_offset; + return {data_ + byte_offset, static_cast(byte_count)}; + } + + private: + /// span of all Words which contain any bit in this Bitmap + /// + /// For example, given Word=uint16_t and a bitmap spanning bits [20, 36) + /// words() would span bits [16, 48). + /// + /// 0 16 32 48 64 + /// |-------|-------|------|------| (buffer) + /// [ ] (bitmap) + /// |-------|------| (returned words) + /// + /// \warning The words may contain bytes which lie outside the buffer or are + /// uninitialized. + template + util::span words() const { + auto bytes_addr = reinterpret_cast(bytes().data()); + auto words_addr = bytes_addr - bytes_addr % sizeof(Word); + auto word_byte_count = + bit_util::RoundUpToPowerOf2(static_cast(bytes_addr + bytes().size()), + static_cast(sizeof(Word))) - + words_addr; + return {reinterpret_cast(words_addr), + static_cast(word_byte_count / sizeof(Word))}; + } + + /// offset of first bit relative to words().data() + template + int64_t word_offset() const { + return offset_ + 8 * (reinterpret_cast(data_) - + reinterpret_cast(words().data())); + } + + /// load words from bitmaps bitwise + template + static void SafeLoadWords(const Bitmap (&bitmaps)[N], int64_t offset, + int64_t out_length, bool set_trailing_bits, + std::array* out) { + out->fill(0); + + int64_t out_offset = set_trailing_bits ? 
sizeof(Word) * 8 - out_length : 0; + + Bitmap slices[N], out_bitmaps[N]; + for (size_t i = 0; i < N; ++i) { + slices[i] = bitmaps[i].Slice(offset, out_length); + out_bitmaps[i] = Bitmap(&out->at(i), out_offset, out_length); + } + + int64_t bit_i = 0; + Bitmap::VisitBits(slices, [&](std::bitset bits) { + for (size_t i = 0; i < N; ++i) { + out_bitmaps[i].SetBitTo(bit_i, bits[i]); + } + ++bit_i; + }); + } + + /// assert bitmaps have identical length and return that length + static int64_t BitLength(const Bitmap* bitmaps, size_t N); + + template + static int64_t BitLength(const std::array& bitmaps) { + for (size_t i = 1; i < N; ++i) { + assert(bitmaps[i].length() == bitmaps[0].length()); + } + return bitmaps[0].length(); + } + + const uint8_t* data_ = NULLPTR; + uint8_t* mutable_data_ = NULLPTR; + int64_t offset_ = 0, length_ = 0; +}; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h new file mode 100644 index 0000000000000000000000000000000000000000..5bd2ad44140834487b02d5899d3515e7b7eafefc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief Generate Bitmap with all position to `value` except for one found +/// at `straggler_pos`. +ARROW_EXPORT +Result> BitmapAllButOne(MemoryPool* pool, int64_t length, + int64_t straggler_pos, bool value = true); + +/// \brief Convert vector of bytes to bitmap buffer +ARROW_EXPORT +Result> BytesToBits(const std::vector&, + MemoryPool* pool = default_memory_pool()); + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h new file mode 100644 index 0000000000000000000000000000000000000000..52a1e228e01f1d6c3c37a5e2d49d843f0a4573f9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include "arrow/buffer.h" +#include "arrow/memory_pool.h" +#include "arrow/result.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +// A std::generate() like function to write sequential bits into a bitmap area. +// Bits preceding the bitmap area are preserved, bits following the bitmap +// area may be clobbered. + +template +void GenerateBits(uint8_t* bitmap, int64_t start_offset, int64_t length, Generator&& g) { + if (length == 0) { + return; + } + uint8_t* cur = bitmap + start_offset / 8; + uint8_t bit_mask = bit_util::kBitmask[start_offset % 8]; + uint8_t current_byte = *cur & bit_util::kPrecedingBitmask[start_offset % 8]; + + for (int64_t index = 0; index < length; ++index) { + const bool bit = g(); + current_byte = bit ? (current_byte | bit_mask) : current_byte; + bit_mask = static_cast(bit_mask << 1); + if (bit_mask == 0) { + bit_mask = 1; + *cur++ = current_byte; + current_byte = 0; + } + } + if (bit_mask != 1) { + *cur++ = current_byte; + } +} + +// Like GenerateBits(), but unrolls its main loop for higher performance. 
+ +template +void GenerateBitsUnrolled(uint8_t* bitmap, int64_t start_offset, int64_t length, + Generator&& g) { + static_assert(std::is_same()()), bool>::value, + "Functor passed to GenerateBitsUnrolled must return bool"); + + if (length == 0) { + return; + } + uint8_t current_byte; + uint8_t* cur = bitmap + start_offset / 8; + const uint64_t start_bit_offset = start_offset % 8; + uint8_t bit_mask = bit_util::kBitmask[start_bit_offset]; + int64_t remaining = length; + + if (bit_mask != 0x01) { + current_byte = *cur & bit_util::kPrecedingBitmask[start_bit_offset]; + while (bit_mask != 0 && remaining > 0) { + current_byte |= g() * bit_mask; + bit_mask = static_cast(bit_mask << 1); + --remaining; + } + *cur++ = current_byte; + } + + int64_t remaining_bytes = remaining / 8; + uint8_t out_results[8]; + while (remaining_bytes-- > 0) { + for (int i = 0; i < 8; ++i) { + out_results[i] = g(); + } + *cur++ = static_cast(out_results[0] | out_results[1] << 1 | + out_results[2] << 2 | out_results[3] << 3 | + out_results[4] << 4 | out_results[5] << 5 | + out_results[6] << 6 | out_results[7] << 7); + } + + int64_t remaining_bits = remaining % 8; + if (remaining_bits) { + current_byte = 0; + bit_mask = 0x01; + while (remaining_bits-- > 0) { + current_byte |= g() * bit_mask; + bit_mask = static_cast(bit_mask << 1); + } + *cur++ = current_byte; + } +} + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h new file mode 100644 index 0000000000000000000000000000000000000000..c29589013e4b7863705e1de4cf8c69293451eb8b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h @@ -0,0 +1,88 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_reader.h" + +namespace arrow { +namespace internal { + +// A function that visits each bit in a bitmap and calls a visitor function with a +// boolean representation of that bit. This is intended to be analogous to +// GenerateBits. +template +void VisitBits(const uint8_t* bitmap, int64_t start_offset, int64_t length, + Visitor&& visit) { + BitmapReader reader(bitmap, start_offset, length); + for (int64_t index = 0; index < length; ++index) { + visit(reader.IsSet()); + reader.Next(); + } +} + +// Like VisitBits(), but unrolls its main loop for better performance. +template +void VisitBitsUnrolled(const uint8_t* bitmap, int64_t start_offset, int64_t length, + Visitor&& visit) { + if (length == 0) { + return; + } + + // Start by visiting any bits preceding the first full byte. + int64_t num_bits_before_full_bytes = + bit_util::RoundUpToMultipleOf8(start_offset) - start_offset; + // Truncate num_bits_before_full_bytes if it is greater than length. 
+ if (num_bits_before_full_bytes > length) { + num_bits_before_full_bytes = length; + } + // Use the non loop-unrolled VisitBits since we don't want to add branches + VisitBits(bitmap, start_offset, num_bits_before_full_bytes, visit); + + // Shift the start pointer to the first full byte and compute the + // number of full bytes to be read. + const uint8_t* first_full_byte = bitmap + bit_util::CeilDiv(start_offset, 8); + const int64_t num_full_bytes = (length - num_bits_before_full_bytes) / 8; + + // Iterate over each full byte of the input bitmap and call the visitor in + // a loop-unrolled manner. + for (int64_t byte_index = 0; byte_index < num_full_bytes; ++byte_index) { + // Get the current bit-packed byte value from the bitmap. + const uint8_t byte = *(first_full_byte + byte_index); + + // Execute the visitor function on each bit of the current byte. + visit(bit_util::GetBitFromByte(byte, 0)); + visit(bit_util::GetBitFromByte(byte, 1)); + visit(bit_util::GetBitFromByte(byte, 2)); + visit(bit_util::GetBitFromByte(byte, 3)); + visit(bit_util::GetBitFromByte(byte, 4)); + visit(bit_util::GetBitFromByte(byte, 5)); + visit(bit_util::GetBitFromByte(byte, 6)); + visit(bit_util::GetBitFromByte(byte, 7)); + } + + // Write any leftover bits in the last byte. + const int64_t num_bits_after_full_bytes = (length - num_bits_before_full_bytes) % 8; + VisitBits(first_full_byte + num_full_bytes, 0, num_bits_after_full_bytes, + visit); +} + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h new file mode 100644 index 0000000000000000000000000000000000000000..9b334b3605eeee020a2e717b64f530c5ba82bdcd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/memory_pool.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/compare.h" +#include "arrow/util/functional.h" +#include "arrow/util/macros.h" +#include "arrow/util/string_builder.h" +#include "arrow/util/type_traits.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief Store a stack of bitsets efficiently. The top bitset may be +/// accessed and its bits may be modified, but it may not be resized. 
+class BitsetStack { + public: + using reference = typename std::vector::reference; + + /// \brief push a bitset onto the stack + /// \param size number of bits in the next bitset + /// \param value initial value for bits in the pushed bitset + void Push(int size, bool value) { + offsets_.push_back(bit_count()); + bits_.resize(bit_count() + size, value); + } + + /// \brief number of bits in the bitset at the top of the stack + int TopSize() const { + if (offsets_.size() == 0) return 0; + return bit_count() - offsets_.back(); + } + + /// \brief pop a bitset off the stack + void Pop() { + bits_.resize(offsets_.back()); + offsets_.pop_back(); + } + + /// \brief get the value of a bit in the top bitset + /// \param i index of the bit to access + bool operator[](int i) const { return bits_[offsets_.back() + i]; } + + /// \brief get a mutable reference to a bit in the top bitset + /// \param i index of the bit to access + reference operator[](int i) { return bits_[offsets_.back() + i]; } + + private: + int bit_count() const { return static_cast(bits_.size()); } + std::vector bits_; + std::vector offsets_; +}; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h new file mode 100644 index 0000000000000000000000000000000000000000..dd85c1638c7bfcd9cfd4034fb80ce775aaa92ce9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/endian.h" +#include "arrow/util/visibility.h" + +#include + +namespace arrow { +namespace internal { + +ARROW_EXPORT +int unpack32(const uint32_t* in, uint32_t* out, int batch_size, int num_bits); +ARROW_EXPORT +int unpack64(const uint8_t* in, uint64_t* out, int batch_size, int num_bits); + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h new file mode 100644 index 0000000000000000000000000000000000000000..96723f803e0c1a64ef753ab6a51d8f2bd8c173d1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +namespace arrow { +namespace internal { + +int unpack32_avx512(const uint32_t* in, uint32_t* out, int batch_size, int num_bits); + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h new file mode 100644 index 0000000000000000000000000000000000000000..9d02cd568acbc9661f763259e1d4ed134f609e4d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +namespace arrow { +namespace internal { + +int unpack32_neon(const uint32_t* in, uint32_t* out, int batch_size, int num_bits); + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h new file mode 100644 index 0000000000000000000000000000000000000000..214c7551b6c76bc95a7d71eb8b8c31bd96d4b838 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h @@ -0,0 +1,88 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/type_fwd.h" + +namespace arrow { + +namespace util { + +/// \brief The sum of bytes in each buffer referenced by the array +/// +/// Note: An array may only reference a portion of a buffer. +/// This method will overestimate in this case and return the +/// byte size of the entire buffer. +/// Note: If a buffer is referenced multiple times then it will +/// only be counted once. 
+ARROW_EXPORT int64_t TotalBufferSize(const ArrayData& array_data); +/// \brief The sum of bytes in each buffer referenced by the array +/// \see TotalBufferSize(const ArrayData& array_data) for details +ARROW_EXPORT int64_t TotalBufferSize(const Array& array); +/// \brief The sum of bytes in each buffer referenced by the array +/// \see TotalBufferSize(const ArrayData& array_data) for details +ARROW_EXPORT int64_t TotalBufferSize(const ChunkedArray& chunked_array); +/// \brief The sum of bytes in each buffer referenced by the batch +/// \see TotalBufferSize(const ArrayData& array_data) for details +ARROW_EXPORT int64_t TotalBufferSize(const RecordBatch& record_batch); +/// \brief The sum of bytes in each buffer referenced by the table +/// \see TotalBufferSize(const ArrayData& array_data) for details +ARROW_EXPORT int64_t TotalBufferSize(const Table& table); + +/// \brief Calculate the buffer ranges referenced by the array +/// +/// These ranges will take into account array offsets +/// +/// The ranges may contain duplicates +/// +/// Dictionary arrays will ignore the offset of their containing array +/// +/// The return value will be a struct array corresponding to the schema: +/// schema({field("start", uint64()), field("offset", uint64()), field("length", +/// uint64())) +ARROW_EXPORT Result> ReferencedRanges(const ArrayData& array_data); + +/// \brief Returns the sum of bytes from all buffer ranges referenced +/// +/// Unlike TotalBufferSize this method will account for array +/// offsets. +/// +/// If buffers are shared between arrays then the shared +/// portion will be counted multiple times. +/// +/// Dictionary arrays will always be counted in their entirety +/// even if the array only references a portion of the dictionary. 
+ARROW_EXPORT Result ReferencedBufferSize(const ArrayData& array_data); +/// \brief Returns the sum of bytes from all buffer ranges referenced +/// \see ReferencedBufferSize(const ArrayData& array_data) for details +ARROW_EXPORT Result ReferencedBufferSize(const Array& array_data); +/// \brief Returns the sum of bytes from all buffer ranges referenced +/// \see ReferencedBufferSize(const ArrayData& array_data) for details +ARROW_EXPORT Result ReferencedBufferSize(const ChunkedArray& array_data); +/// \brief Returns the sum of bytes from all buffer ranges referenced +/// \see ReferencedBufferSize(const ArrayData& array_data) for details +ARROW_EXPORT Result ReferencedBufferSize(const RecordBatch& array_data); +/// \brief Returns the sum of bytes from all buffer ranges referenced +/// \see ReferencedBufferSize(const ArrayData& array_data) for details +ARROW_EXPORT Result ReferencedBufferSize(const Table& array_data); + +} // namespace util + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h new file mode 100644 index 0000000000000000000000000000000000000000..f7bf4d5e12d02d349c3a0e0fce43f6be5ef4d585 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h @@ -0,0 +1,241 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +constexpr int kUseDefaultCompressionLevel = std::numeric_limits::min(); + +/// \brief Streaming compressor interface +/// +class ARROW_EXPORT Compressor { + public: + virtual ~Compressor() = default; + + struct CompressResult { + int64_t bytes_read; + int64_t bytes_written; + }; + struct FlushResult { + int64_t bytes_written; + bool should_retry; + }; + struct EndResult { + int64_t bytes_written; + bool should_retry; + }; + + /// \brief Compress some input. + /// + /// If bytes_read is 0 on return, then a larger output buffer should be supplied. + virtual Result Compress(int64_t input_len, const uint8_t* input, + int64_t output_len, uint8_t* output) = 0; + + /// \brief Flush part of the compressed output. + /// + /// If should_retry is true on return, Flush() should be called again + /// with a larger buffer. + virtual Result Flush(int64_t output_len, uint8_t* output) = 0; + + /// \brief End compressing, doing whatever is necessary to end the stream. + /// + /// If should_retry is true on return, End() should be called again + /// with a larger buffer. Otherwise, the Compressor should not be used anymore. + /// + /// End() implies Flush(). + virtual Result End(int64_t output_len, uint8_t* output) = 0; + + // XXX add methods for buffer size heuristics? 
+}; + +/// \brief Streaming decompressor interface +/// +class ARROW_EXPORT Decompressor { + public: + virtual ~Decompressor() = default; + + struct DecompressResult { + // XXX is need_more_output necessary? (Brotli?) + int64_t bytes_read; + int64_t bytes_written; + bool need_more_output; + }; + + /// \brief Decompress some input. + /// + /// If need_more_output is true on return, a larger output buffer needs + /// to be supplied. + virtual Result Decompress(int64_t input_len, const uint8_t* input, + int64_t output_len, uint8_t* output) = 0; + + /// \brief Return whether the compressed stream is finished. + /// + /// This is a heuristic. If true is returned, then it is guaranteed + /// that the stream is finished. If false is returned, however, it may + /// simply be that the underlying library isn't able to provide the information. + virtual bool IsFinished() = 0; + + /// \brief Reinitialize decompressor, making it ready for a new compressed stream. + virtual Status Reset() = 0; + + // XXX add methods for buffer size heuristics? 
+}; + +/// \brief Compression codec options +class ARROW_EXPORT CodecOptions { + public: + explicit CodecOptions(int compression_level = kUseDefaultCompressionLevel) + : compression_level(compression_level) {} + + virtual ~CodecOptions() = default; + + int compression_level; +}; + +// ---------------------------------------------------------------------- +// GZip codec options implementation + +enum class GZipFormat { + ZLIB, + DEFLATE, + GZIP, +}; + +class ARROW_EXPORT GZipCodecOptions : public CodecOptions { + public: + GZipFormat gzip_format = GZipFormat::GZIP; + std::optional window_bits; +}; + +// ---------------------------------------------------------------------- +// brotli codec options implementation + +class ARROW_EXPORT BrotliCodecOptions : public CodecOptions { + public: + std::optional window_bits; +}; + +/// \brief Compression codec +class ARROW_EXPORT Codec { + public: + virtual ~Codec() = default; + + /// \brief Return special value to indicate that a codec implementation + /// should use its default compression level + static int UseDefaultCompressionLevel(); + + /// \brief Return a string name for compression type + static const std::string& GetCodecAsString(Compression::type t); + + /// \brief Return compression type for name (all lower case) + static Result GetCompressionType(const std::string& name); + + /// \brief Create a codec for the given compression algorithm with CodecOptions + static Result> Create( + Compression::type codec, const CodecOptions& codec_options = CodecOptions{}); + + /// \brief Create a codec for the given compression algorithm + static Result> Create(Compression::type codec, + int compression_level); + + /// \brief Return true if support for indicated codec has been enabled + static bool IsAvailable(Compression::type codec); + + /// \brief Return true if indicated codec supports setting a compression level + static bool SupportsCompressionLevel(Compression::type codec); + + /// \brief Return the smallest supported 
compression level for the codec + /// Note: This function creates a temporary Codec instance + static Result MinimumCompressionLevel(Compression::type codec); + + /// \brief Return the largest supported compression level for the codec + /// Note: This function creates a temporary Codec instance + static Result MaximumCompressionLevel(Compression::type codec); + + /// \brief Return the default compression level + /// Note: This function creates a temporary Codec instance + static Result DefaultCompressionLevel(Compression::type codec); + + /// \brief Return the smallest supported compression level + virtual int minimum_compression_level() const = 0; + + /// \brief Return the largest supported compression level + virtual int maximum_compression_level() const = 0; + + /// \brief Return the default compression level + virtual int default_compression_level() const = 0; + + /// \brief One-shot decompression function + /// + /// output_buffer_len must be correct and therefore be obtained in advance. + /// The actual decompressed length is returned. + /// + /// \note One-shot decompression is not always compatible with streaming + /// compression. Depending on the codec (e.g. LZ4), different formats may + /// be used. + virtual Result Decompress(int64_t input_len, const uint8_t* input, + int64_t output_buffer_len, + uint8_t* output_buffer) = 0; + + /// \brief One-shot compression function + /// + /// output_buffer_len must first have been computed using MaxCompressedLen(). + /// The actual compressed length is returned. + /// + /// \note One-shot compression is not always compatible with streaming + /// decompression. Depending on the codec (e.g. LZ4), different formats may + /// be used. 
+ virtual Result Compress(int64_t input_len, const uint8_t* input, + int64_t output_buffer_len, uint8_t* output_buffer) = 0; + + virtual int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) = 0; + + /// \brief Create a streaming compressor instance + virtual Result> MakeCompressor() = 0; + + /// \brief Create a streaming compressor instance + virtual Result> MakeDecompressor() = 0; + + /// \brief This Codec's compression type + virtual Compression::type compression_type() const = 0; + + /// \brief The name of this Codec's compression type + const std::string& name() const { return GetCodecAsString(compression_type()); } + + /// \brief This Codec's compression level, if applicable + virtual int compression_level() const { return UseDefaultCompressionLevel(); } + + private: + /// \brief Initializes the codec's resources. + virtual Status Init(); +}; + +} // namespace util +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h new file mode 100644 index 0000000000000000000000000000000000000000..c23d6ccd9886e4539d52d537abb85da1dcc93385 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h @@ -0,0 +1,411 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/chunked_array.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/checked_cast.h" +#include "arrow/visit_type_inline.h" + +namespace arrow { +namespace internal { + +template class ConverterTrait> +static Result> MakeConverter( + std::shared_ptr type, typename BaseConverter::OptionsType options, + MemoryPool* pool); + +template +class Converter { + public: + using Self = Converter; + using InputType = Input; + using OptionsType = Options; + + virtual ~Converter() = default; + + Status Construct(std::shared_ptr type, OptionsType options, + MemoryPool* pool) { + type_ = std::move(type); + options_ = std::move(options); + return Init(pool); + } + + virtual Status Append(InputType value) { return Status::NotImplemented("Append"); } + + virtual Status Extend(InputType values, int64_t size, int64_t offset = 0) { + return Status::NotImplemented("Extend"); + } + + virtual Status ExtendMasked(InputType values, InputType mask, int64_t size, + int64_t offset = 0) { + return Status::NotImplemented("ExtendMasked"); + } + + const std::shared_ptr& builder() const { return builder_; } + + const std::shared_ptr& type() const { return type_; } + + OptionsType options() const { return options_; } + + bool may_overflow() const { return may_overflow_; } + + bool rewind_on_overflow() const { return rewind_on_overflow_; } + + virtual Status Reserve(int64_t additional_capacity) { + return builder_->Reserve(additional_capacity); + 
} + + Status AppendNull() { return builder_->AppendNull(); } + + virtual Result> ToArray() { return builder_->Finish(); } + + virtual Result> ToArray(int64_t length) { + ARROW_ASSIGN_OR_RAISE(auto arr, this->ToArray()); + return arr->Slice(0, length); + } + + virtual Result> ToChunkedArray() { + ARROW_ASSIGN_OR_RAISE(auto array, ToArray()); + std::vector> chunks = {std::move(array)}; + return std::make_shared(chunks); + } + + protected: + virtual Status Init(MemoryPool* pool) { return Status::OK(); } + + std::shared_ptr type_; + std::shared_ptr builder_; + OptionsType options_; + bool may_overflow_ = false; + bool rewind_on_overflow_ = false; +}; + +template +class PrimitiveConverter : public BaseConverter { + public: + using BuilderType = typename TypeTraits::BuilderType; + + protected: + Status Init(MemoryPool* pool) override { + this->builder_ = std::make_shared(this->type_, pool); + // Narrow variable-sized binary types may overflow + this->may_overflow_ = is_binary_like(this->type_->id()); + primitive_type_ = checked_cast(this->type_.get()); + primitive_builder_ = checked_cast(this->builder_.get()); + return Status::OK(); + } + + const ArrowType* primitive_type_; + BuilderType* primitive_builder_; +}; + +template class ConverterTrait> +class ListConverter : public BaseConverter { + public: + using BuilderType = typename TypeTraits::BuilderType; + using ConverterType = typename ConverterTrait::type; + + protected: + Status Init(MemoryPool* pool) override { + list_type_ = checked_cast(this->type_.get()); + ARROW_ASSIGN_OR_RAISE(value_converter_, + (MakeConverter( + list_type_->value_type(), this->options_, pool))); + this->builder_ = + std::make_shared(pool, value_converter_->builder(), this->type_); + list_builder_ = checked_cast(this->builder_.get()); + // Narrow list types may overflow + this->may_overflow_ = this->rewind_on_overflow_ = + sizeof(typename ArrowType::offset_type) < sizeof(int64_t); + return Status::OK(); + } + + const ArrowType* list_type_; + 
BuilderType* list_builder_; + std::unique_ptr value_converter_; +}; + +template class ConverterTrait> +class StructConverter : public BaseConverter { + public: + using ConverterType = typename ConverterTrait::type; + + Status Reserve(int64_t additional_capacity) override { + ARROW_RETURN_NOT_OK(this->builder_->Reserve(additional_capacity)); + for (const auto& child : children_) { + ARROW_RETURN_NOT_OK(child->Reserve(additional_capacity)); + } + return Status::OK(); + } + + protected: + Status Init(MemoryPool* pool) override { + std::unique_ptr child_converter; + std::vector> child_builders; + + struct_type_ = checked_cast(this->type_.get()); + for (const auto& field : struct_type_->fields()) { + ARROW_ASSIGN_OR_RAISE(child_converter, + (MakeConverter( + field->type(), this->options_, pool))); + this->may_overflow_ |= child_converter->may_overflow(); + this->rewind_on_overflow_ = this->may_overflow_; + child_builders.push_back(child_converter->builder()); + children_.push_back(std::move(child_converter)); + } + + this->builder_ = + std::make_shared(this->type_, pool, std::move(child_builders)); + struct_builder_ = checked_cast(this->builder_.get()); + + return Status::OK(); + } + + const StructType* struct_type_; + StructBuilder* struct_builder_; + std::vector> children_; +}; + +template +class DictionaryConverter : public BaseConverter { + public: + using BuilderType = DictionaryBuilder; + + protected: + Status Init(MemoryPool* pool) override { + std::unique_ptr builder; + ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, this->type_, NULLPTR, &builder)); + this->builder_ = std::move(builder); + this->may_overflow_ = false; + dict_type_ = checked_cast(this->type_.get()); + value_type_ = checked_cast(dict_type_->value_type().get()); + value_builder_ = checked_cast(this->builder_.get()); + return Status::OK(); + } + + const DictionaryType* dict_type_; + const ValueType* value_type_; + BuilderType* value_builder_; +}; + +template class ConverterTrait> +struct 
MakeConverterImpl { + template ::type> + Status Visit(const T&) { + out.reset(new ConverterType()); + return out->Construct(std::move(type), std::move(options), pool); + } + + Status Visit(const DictionaryType& t) { + switch (t.value_type()->id()) { +#define DICTIONARY_CASE(TYPE) \ + case TYPE::type_id: \ + out = std::make_unique< \ + typename ConverterTrait::template dictionary_type>(); \ + break; + DICTIONARY_CASE(BooleanType); + DICTIONARY_CASE(Int8Type); + DICTIONARY_CASE(Int16Type); + DICTIONARY_CASE(Int32Type); + DICTIONARY_CASE(Int64Type); + DICTIONARY_CASE(UInt8Type); + DICTIONARY_CASE(UInt16Type); + DICTIONARY_CASE(UInt32Type); + DICTIONARY_CASE(UInt64Type); + DICTIONARY_CASE(FloatType); + DICTIONARY_CASE(DoubleType); + DICTIONARY_CASE(BinaryType); + DICTIONARY_CASE(StringType); + DICTIONARY_CASE(FixedSizeBinaryType); +#undef DICTIONARY_CASE + default: + return Status::NotImplemented("DictionaryArray converter for type ", t.ToString(), + " not implemented"); + } + return out->Construct(std::move(type), std::move(options), pool); + } + + Status Visit(const DataType& t) { return Status::NotImplemented(t.name()); } + + std::shared_ptr type; + typename BaseConverter::OptionsType options; + MemoryPool* pool; + std::unique_ptr out; +}; + +template class ConverterTrait> +static Result> MakeConverter( + std::shared_ptr type, typename BaseConverter::OptionsType options, + MemoryPool* pool) { + MakeConverterImpl visitor{ + std::move(type), std::move(options), pool, NULLPTR}; + ARROW_RETURN_NOT_OK(VisitTypeInline(*visitor.type, &visitor)); + return std::move(visitor.out); +} + +template +class Chunker { + public: + using InputType = typename Converter::InputType; + + explicit Chunker(std::unique_ptr converter) + : converter_(std::move(converter)) {} + + Status Reserve(int64_t additional_capacity) { + ARROW_RETURN_NOT_OK(converter_->Reserve(additional_capacity)); + reserved_ += additional_capacity; + return Status::OK(); + } + + Status AppendNull() { + auto status = 
converter_->AppendNull(); + if (ARROW_PREDICT_FALSE(status.IsCapacityError())) { + if (converter_->builder()->length() == 0) { + // Builder length == 0 means the individual element is too large to append. + // In this case, no need to try again. + return status; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + return converter_->AppendNull(); + } + ++length_; + return status; + } + + Status Append(InputType value) { + auto status = converter_->Append(value); + if (ARROW_PREDICT_FALSE(status.IsCapacityError())) { + if (converter_->builder()->length() == 0) { + return status; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + return Append(value); + } + ++length_; + return status; + } + + Status Extend(InputType values, int64_t size, int64_t offset = 0) { + while (offset < size) { + auto length_before = converter_->builder()->length(); + auto status = converter_->Extend(values, size, offset); + auto length_after = converter_->builder()->length(); + auto num_converted = length_after - length_before; + + offset += num_converted; + length_ += num_converted; + + if (status.IsCapacityError()) { + if (converter_->builder()->length() == 0) { + // Builder length == 0 means the individual element is too large to append. + // In this case, no need to try again. + return status; + } else if (converter_->rewind_on_overflow()) { + // The list-like and binary-like conversion paths may raise a capacity error, + // we need to handle them differently. While the binary-like converters check + // the capacity before append/extend the list-like converters just check after + // append/extend. Thus depending on the implementation semantics we may need + // to rewind (slice) the output chunk by one. 
+ length_ -= 1; + offset -= 1; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + } else if (!status.ok()) { + return status; + } + } + return Status::OK(); + } + + Status ExtendMasked(InputType values, InputType mask, int64_t size, + int64_t offset = 0) { + while (offset < size) { + auto length_before = converter_->builder()->length(); + auto status = converter_->ExtendMasked(values, mask, size, offset); + auto length_after = converter_->builder()->length(); + auto num_converted = length_after - length_before; + + offset += num_converted; + length_ += num_converted; + + if (status.IsCapacityError()) { + if (converter_->builder()->length() == 0) { + // Builder length == 0 means the individual element is too large to append. + // In this case, no need to try again. + return status; + } else if (converter_->rewind_on_overflow()) { + // The list-like and binary-like conversion paths may raise a capacity error, + // we need to handle them differently. While the binary-like converters check + // the capacity before append/extend the list-like converters just check after + // append/extend. Thus depending on the implementation semantics we may need + // to rewind (slice) the output chunk by one. + length_ -= 1; + offset -= 1; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + } else if (!status.ok()) { + return status; + } + } + return Status::OK(); + } + + Status FinishChunk() { + ARROW_ASSIGN_OR_RAISE(auto chunk, converter_->ToArray(length_)); + chunks_.push_back(chunk); + // Reserve space for the remaining items. + // Besides being an optimization, it is also required if the converter's + // implementation relies on unsafe builder methods in converter->Append(). 
+ auto remaining = reserved_ - length_; + Reset(); + return Reserve(remaining); + } + + Result> ToChunkedArray() { + ARROW_RETURN_NOT_OK(FinishChunk()); + return std::make_shared(chunks_); + } + + protected: + void Reset() { + converter_->builder()->Reset(); + length_ = 0; + reserved_ = 0; + } + + int64_t length_ = 0; + int64_t reserved_ = 0; + std::unique_ptr converter_; + std::vector> chunks_; +}; + +template +static Result>> MakeChunker(std::unique_ptr converter) { + return std::make_unique>(std::move(converter)); +} + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h new file mode 100644 index 0000000000000000000000000000000000000000..a3c13cc3bea4d6be639b521051021f7cb1c07f14 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#ifndef ARROW_COUNTING_SEMAPHORE_H +#define ARROW_COUNTING_SEMAPHORE_H + +#include + +#include "arrow/status.h" + +namespace arrow { +namespace util { + +/// \brief Simple mutex-based counting semaphore with timeout +class ARROW_EXPORT CountingSemaphore { + public: + /// \brief Create an instance with initial_avail starting permits + /// + /// \param[in] initial_avail The semaphore will start with this many permits available + /// \param[in] timeout_seconds A timeout to be applied to all operations. Operations + /// will return Status::Invalid if this timeout elapses + explicit CountingSemaphore(uint32_t initial_avail = 0, double timeout_seconds = 10); + ~CountingSemaphore(); + /// \brief Block until num_permits permits are available + Status Acquire(uint32_t num_permits); + /// \brief Make num_permits permits available + Status Release(uint32_t num_permits); + /// \brief Wait until num_waiters are waiting on permits + /// + /// This method is non-standard but useful in unit tests to ensure sequencing + Status WaitForWaiters(uint32_t num_waiters); + /// \brief Immediately time out any waiters + /// + /// This method will return Status::OK only if there were no waiters to time out. + /// Once closed any operation on this instance will return an invalid status. + Status Close(); + + private: + class Impl; + std::unique_ptr impl_; +}; + +} // namespace util +} // namespace arrow + +#endif // ARROW_COUNTING_SEMAPHORE_H diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h new file mode 100644 index 0000000000000000000000000000000000000000..949719b97ed84da6277139a70e22203706ed6055 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h @@ -0,0 +1,114 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// From Apache Impala (incubating) as of 2016-01-29. Pared down to a minimal +// set of functions needed for Apache Arrow / Apache parquet-cpp + +#pragma once + +#include +#include +#include + +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// CpuInfo is an interface to query for cpu information at runtime. The caller can +/// ask for the sizes of the caches and what hardware features are supported. 
+/// On Linux, this information is pulled from a couple of sys files (/proc/cpuinfo and +/// /sys/devices) +class ARROW_EXPORT CpuInfo { + public: + ~CpuInfo(); + + /// x86 features + static constexpr int64_t SSSE3 = (1LL << 0); + static constexpr int64_t SSE4_1 = (1LL << 1); + static constexpr int64_t SSE4_2 = (1LL << 2); + static constexpr int64_t POPCNT = (1LL << 3); + static constexpr int64_t AVX = (1LL << 4); + static constexpr int64_t AVX2 = (1LL << 5); + static constexpr int64_t AVX512F = (1LL << 6); + static constexpr int64_t AVX512CD = (1LL << 7); + static constexpr int64_t AVX512VL = (1LL << 8); + static constexpr int64_t AVX512DQ = (1LL << 9); + static constexpr int64_t AVX512BW = (1LL << 10); + static constexpr int64_t AVX512 = AVX512F | AVX512CD | AVX512VL | AVX512DQ | AVX512BW; + static constexpr int64_t BMI1 = (1LL << 11); + static constexpr int64_t BMI2 = (1LL << 12); + + /// Arm features + static constexpr int64_t ASIMD = (1LL << 32); + + /// Cache enums for L1 (data), L2 and L3 + enum class CacheLevel { L1 = 0, L2, L3, Last = L3 }; + + /// CPU vendors + enum class Vendor { Unknown, Intel, AMD }; + + static const CpuInfo* GetInstance(); + + /// Returns all the flags for this cpu + int64_t hardware_flags() const; + + /// Returns the number of cores (including hyper-threaded) on this machine. + int num_cores() const; + + /// Returns the vendor of the cpu. + Vendor vendor() const; + + /// Returns the model name of the cpu (e.g. Intel i7-2600) + const std::string& model_name() const; + + /// Returns the size of the cache in KB at this cache level + int64_t CacheSize(CacheLevel level) const; + + /// \brief Returns whether or not the given feature is enabled. + /// + /// IsSupported() is true iff IsDetected() is also true and the feature + /// wasn't disabled by the user (for example by setting the ARROW_USER_SIMD_LEVEL + /// environment variable). 
+ bool IsSupported(int64_t flags) const; + + /// Returns whether or not the given feature is available on the CPU. + bool IsDetected(int64_t flags) const; + + /// Determine if the CPU meets the minimum CPU requirements and if not, issue an error + /// and terminate. + void VerifyCpuRequirements() const; + + /// Toggle a hardware feature on and off. It is not valid to turn on a feature + /// that the underlying hardware cannot support. This is useful for testing. + void EnableFeature(int64_t flag, bool enable); + + bool HasEfficientBmi2() const { + // BMI2 (pext, pdep) is only efficient on Intel X86 processors. + return vendor() == Vendor::Intel && IsSupported(BMI2); + } + + private: + CpuInfo(); + + struct Impl; + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h new file mode 100644 index 0000000000000000000000000000000000000000..640dc9aec157c9234b30fcd997451c96bd87cd85 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h @@ -0,0 +1,535 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/basic_decimal.h" + +namespace arrow { + +class Decimal64; + +/// Represents a signed 32-bit decimal value in two's complement. +/// Calulations wrap around and overflow is ignored. +/// The max decimal precision that can be safely represented is +/// 9 significant digits. +/// +/// The implementation is split into two parts : +/// +/// 1. BasicDecimal32 +/// - can be safely compiled to IR without references to libstdc++ +/// 2. Decimal32 +/// - has additional functionality on top of BasicDecimal32 to deal with +/// strings and streams +class ARROW_EXPORT Decimal32 : public BasicDecimal32 { + public: + /// \cond FALSE + // (need to avoid a duplicate definition in sphinx) + using BasicDecimal32::BasicDecimal32; + /// \endcond + + /// \brief constructor creates a Decimal32 from a BasicDecimal32 + constexpr Decimal32(const BasicDecimal32& value) noexcept // NOLINT runtime/explicit + : BasicDecimal32(value) {} + + /// \brief Parse the number from a base 10 string representation + explicit Decimal32(const std::string& value); + + /// \brief Empty constructor creates a Decimal32 with a value of 0 + /// this is required for some older compilers + constexpr Decimal32() noexcept : BasicDecimal32() {} + + /// \brief Divide this number by right and return the result. + /// + /// This operation is not destructive. + /// The answer rounds to zero. 
Signs work like: + /// 21 / 5 -> 4, 1 + /// -21 / 5 -> -4, -1 + /// 21 / -5 -> -4, 1 + /// -21 / -5 -> 4, -1 + /// \param[in] divisor the number to divide by + /// \return the pair of the quotient and the remainder + Result> Divide(const Decimal32& divisor) const { + std::pair result; + auto dstatus = BasicDecimal32::Divide(divisor, &result.first, &result.second); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return result; + } + + /// \brief Convert the Decimal32 value to a base 10 decimal string with the given scale + std::string ToString(int32_t scale) const; + + /// \brief Convert the value to an integer string + std::string ToIntegerString() const; + + /// \brief Cast this value to an int64_t + explicit operator int64_t() const; + + explicit operator Decimal64() const; + + /// \brief Convert a decimal string to a Decimal value, optionally including + /// precision and scale if they're passed in and not null. + static Status FromString(std::string_view s, Decimal32* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const std::string& s, Decimal32* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const char* s, Decimal32* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Result FromString(std::string_view s); + static Result FromString(const std::string& s); + static Result FromString(const char* s); + + static Result FromReal(double real, int32_t precision, int32_t scale); + static Result FromReal(float real, int32_t precision, int32_t scale); + + /// \brief Convert from a big-endian byte representation. 
The length must be + /// between 1 and 4 + /// \return error statis if the length is an invalid value + static Result FromBigEndian(const uint8_t* data, int32_t length); + + /// \brief Convert Decimal32 from one scale to another + Result Rescale(int32_t original_scale, int32_t new_scale) const { + Decimal32 out; + auto dstatus = BasicDecimal32::Rescale(original_scale, new_scale, &out); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return out; + } + + /// \brief Convert to a signed integer + template > + Result ToInteger() const { + return static_cast(value_); + } + + /// \brief Convert to a signed integer + template > + Status ToInteger(T* out) const { + return ToInteger().Value(out); + } + + /// \brief Convert to a floating-point number (scaled) + float ToFloat(int32_t scale) const; + /// \brief Convert to a floating-point number (scaled) + double ToDouble(int32_t scale) const; + + /// \brief Convert to a floating-point number (scaled) + template >> + T ToReal(int32_t scale) const { + static_assert(std::is_same_v || std::is_same_v, + "Unexpected floating-point type"); + if constexpr (std::is_same_v) { + return ToFloat(scale); + } else { + return ToDouble(scale); + } + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Decimal32& decimal); + + private: + /// Converts internal error code to Status + Status ToArrowStatus(DecimalStatus dstatus) const; +}; + +class ARROW_EXPORT Decimal64 : public BasicDecimal64 { + public: + /// \cond FALSE + // (need to avoid a duplicate definition in sphinx) + using BasicDecimal64::BasicDecimal64; + /// \endcond + + /// \brief constructor creates a Decimal64 from a BasicDecimal64 + constexpr Decimal64(const BasicDecimal64& value) noexcept // NOLINT runtime/explicit + : BasicDecimal64(value) {} + + explicit Decimal64(const BasicDecimal32& value) noexcept + : BasicDecimal64(static_cast(value.value())) {} + + /// \brief Parse the number from a base 10 string representation + explicit Decimal64(const 
std::string& value); + + /// \brief Empty constructor creates a Decimal64 with a value of 0 + /// this is required for some older compilers + constexpr Decimal64() noexcept : BasicDecimal64() {} + + /// \brief Divide this number by right and return the result. + /// + /// This operation is not destructive. + /// The answer rounds to zero. Signs work like: + /// 21 / 5 -> 4, 1 + /// -21 / 5 -> -4, -1 + /// 21 / -5 -> -4, 1 + /// -21 / -5 -> 4, -1 + /// \param[in] divisor the number to divide by + /// \return the pair of the quotient and the remainder + Result> Divide(const Decimal64& divisor) const { + std::pair result; + auto dstatus = BasicDecimal64::Divide(divisor, &result.first, &result.second); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return result; + } + + /// \brief Convert the Decimal64 value to a base 10 decimal string with the given scale + std::string ToString(int32_t scale) const; + + /// \brief Convert the value to an integer string + std::string ToIntegerString() const; + + /// \brief Cast this value to an int64_t + explicit operator int64_t() const; + + /// \brief Convert a decimal string to a Decimal value, optionally including + /// precision and scale if they're passed in and not null. + static Status FromString(std::string_view s, Decimal64* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const std::string& s, Decimal64* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const char* s, Decimal64* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Result FromString(std::string_view s); + static Result FromString(const std::string& s); + static Result FromString(const char* s); + + static Result FromReal(double real, int32_t precision, int32_t scale); + static Result FromReal(float real, int32_t precision, int32_t scale); + + /// \brief Convert from a big-endian byte representation. 
The length must be + /// between 1 and 4 + /// \return error statis if the length is an invalid value + static Result FromBigEndian(const uint8_t* data, int32_t length); + + /// \brief Convert Decimal64 from one scale to another + Result Rescale(int32_t original_scale, int32_t new_scale) const { + Decimal64 out; + auto dstatus = BasicDecimal64::Rescale(original_scale, new_scale, &out); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return out; + } + + /// \brief Convert to a signed integer + template > + Result ToInteger() const { + return static_cast(value_); + } + + /// \brief Convert to a signed integer + template > + Status ToInteger(T* out) const { + return ToInteger().Value(out); + } + + /// \brief Convert to a floating-point number (scaled) + float ToFloat(int32_t scale) const; + /// \brief Convert to a floating-point number (scaled) + double ToDouble(int32_t scale) const; + + /// \brief Convert to a floating-point number (scaled) + template >> + T ToReal(int32_t scale) const { + static_assert(std::is_same_v || std::is_same_v, + "Unexpected floating-point type"); + if constexpr (std::is_same_v) { + return ToFloat(scale); + } else { + return ToDouble(scale); + } + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Decimal64& decimal); + + private: + /// Converts internal error code to Status + Status ToArrowStatus(DecimalStatus dstatus) const; +}; + +/// Represents a signed 128-bit integer in two's complement. +/// Calculations wrap around and overflow is ignored. +/// The max decimal precision that can be safely represented is +/// 38 significant digits. +/// +/// For a discussion of the algorithms, look at Knuth's volume 2, +/// Semi-numerical Algorithms section 4.3.1. +/// +/// Adapted from the Apache ORC C++ implementation +/// +/// The implementation is split into two parts : +/// +/// 1. BasicDecimal128 +/// - can be safely compiled to IR without references to libstdc++. +/// 2. 
Decimal128 +/// - has additional functionality on top of BasicDecimal128 to deal with +/// strings and streams. +class ARROW_EXPORT Decimal128 : public BasicDecimal128 { + public: + /// \cond FALSE + // (need to avoid a duplicate definition in Sphinx) + using BasicDecimal128::BasicDecimal128; + /// \endcond + + /// \brief constructor creates a Decimal128 from a BasicDecimal128. + constexpr Decimal128(const BasicDecimal128& value) noexcept // NOLINT runtime/explicit + : BasicDecimal128(value) {} + + /// \brief Parse the number from a base 10 string representation. + explicit Decimal128(const std::string& value); + + /// \brief Empty constructor creates a Decimal128 with a value of 0. + // This is required on some older compilers. + constexpr Decimal128() noexcept : BasicDecimal128() {} + + /// Divide this number by right and return the result. + /// + /// This operation is not destructive. + /// The answer rounds to zero. Signs work like: + /// 21 / 5 -> 4, 1 + /// -21 / 5 -> -4, -1 + /// 21 / -5 -> -4, 1 + /// -21 / -5 -> 4, -1 + /// \param[in] divisor the number to divide by + /// \return the pair of the quotient and the remainder + Result> Divide(const Decimal128& divisor) const { + std::pair result; + auto dstatus = BasicDecimal128::Divide(divisor, &result.first, &result.second); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return result; + } + + /// \brief Convert the Decimal128 value to a base 10 decimal string with the given + /// scale. + std::string ToString(int32_t scale) const; + + /// \brief Convert the value to an integer string + std::string ToIntegerString() const; + + /// \brief Cast this value to an int64_t. + explicit operator int64_t() const; + + /// \brief Convert a decimal string to a Decimal128 value, optionally including + /// precision and scale if they're passed in and not null. 
+ static Status FromString(std::string_view s, Decimal128* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const std::string& s, Decimal128* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const char* s, Decimal128* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Result FromString(std::string_view s); + static Result FromString(const std::string& s); + static Result FromString(const char* s); + + static Result FromReal(double real, int32_t precision, int32_t scale); + static Result FromReal(float real, int32_t precision, int32_t scale); + + /// \brief Convert from a big-endian byte representation. The length must be + /// between 1 and 16. + /// \return error status if the length is an invalid value + static Result FromBigEndian(const uint8_t* data, int32_t length); + + /// \brief Convert Decimal128 from one scale to another + Result Rescale(int32_t original_scale, int32_t new_scale) const { + Decimal128 out; + auto dstatus = BasicDecimal128::Rescale(original_scale, new_scale, &out); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return out; + } + + /// \brief Convert to a signed integer + template > + Result ToInteger() const { + constexpr auto min_value = std::numeric_limits::min(); + constexpr auto max_value = std::numeric_limits::max(); + const auto& self = *this; + if (self < min_value || self > max_value) { + return Status::Invalid("Invalid cast from Decimal128 to ", sizeof(T), + " byte integer"); + } + return static_cast(low_bits()); + } + + /// \brief Convert to a signed integer + template > + Status ToInteger(T* out) const { + return ToInteger().Value(out); + } + + /// \brief Convert to a floating-point number (scaled) + float ToFloat(int32_t scale) const; + /// \brief Convert to a floating-point number (scaled) + double ToDouble(int32_t scale) const; + + /// \brief Convert to a floating-point number (scaled) + template >> + T ToReal(int32_t scale) const { + 
static_assert(std::is_same_v || std::is_same_v, + "Unexpected floating-point type"); + if constexpr (std::is_same_v) { + return ToFloat(scale); + } else { + return ToDouble(scale); + } + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Decimal128& decimal); + + private: + /// Converts internal error code to Status + Status ToArrowStatus(DecimalStatus dstatus) const; +}; + +/// Represents a signed 256-bit integer in two's complement. +/// The max decimal precision that can be safely represented is +/// 76 significant digits. +/// +/// The implementation is split into two parts : +/// +/// 1. BasicDecimal256 +/// - can be safely compiled to IR without references to libstdc++. +/// 2. Decimal256 +/// - (TODO) has additional functionality on top of BasicDecimal256 to deal with +/// strings and streams. +class ARROW_EXPORT Decimal256 : public BasicDecimal256 { + public: + /// \cond FALSE + // (need to avoid a duplicate definition in Sphinx) + using BasicDecimal256::BasicDecimal256; + /// \endcond + + /// \brief constructor creates a Decimal256 from a BasicDecimal256. + constexpr Decimal256(const BasicDecimal256& value) noexcept // NOLINT(runtime/explicit) + : BasicDecimal256(value) {} + + /// \brief Parse the number from a base 10 string representation. + explicit Decimal256(const std::string& value); + + /// \brief Empty constructor creates a Decimal256 with a value of 0. + // This is required on some older compilers. + constexpr Decimal256() noexcept : BasicDecimal256() {} + + /// \brief Convert the Decimal256 value to a base 10 decimal string with the given + /// scale. + std::string ToString(int32_t scale) const; + + /// \brief Convert the value to an integer string + std::string ToIntegerString() const; + + /// \brief Convert a decimal string to a Decimal256 value, optionally including + /// precision and scale if they're passed in and not null. 
+ static Status FromString(std::string_view s, Decimal256* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const std::string& s, Decimal256* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const char* s, Decimal256* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Result FromString(std::string_view s); + static Result FromString(const std::string& s); + static Result FromString(const char* s); + + /// \brief Convert Decimal256 from one scale to another + Result Rescale(int32_t original_scale, int32_t new_scale) const { + Decimal256 out; + auto dstatus = BasicDecimal256::Rescale(original_scale, new_scale, &out); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return out; + } + + /// Divide this number by right and return the result. + /// + /// This operation is not destructive. + /// The answer rounds to zero. Signs work like: + /// 21 / 5 -> 4, 1 + /// -21 / 5 -> -4, -1 + /// 21 / -5 -> -4, 1 + /// -21 / -5 -> 4, -1 + /// \param[in] divisor the number to divide by + /// \return the pair of the quotient and the remainder + Result> Divide(const Decimal256& divisor) const { + std::pair result; + auto dstatus = BasicDecimal256::Divide(divisor, &result.first, &result.second); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return result; + } + + /// \brief Convert from a big-endian byte representation. The length must be + /// between 1 and 32. + /// \return error status if the length is an invalid value + static Result FromBigEndian(const uint8_t* data, int32_t length); + + static Result FromReal(double real, int32_t precision, int32_t scale); + static Result FromReal(float real, int32_t precision, int32_t scale); + + /// \brief Convert to a floating-point number (scaled). + /// May return infinity in case of overflow. 
+ float ToFloat(int32_t scale) const; + /// \brief Convert to a floating-point number (scaled) + double ToDouble(int32_t scale) const; + + /// \brief Convert to a floating-point number (scaled) + template >> + T ToReal(int32_t scale) const { + static_assert(std::is_same_v || std::is_same_v, + "Unexpected floating-point type"); + if constexpr (std::is_same_v) { + return ToFloat(scale); + } else { + return ToDouble(scale); + } + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Decimal256& decimal); + + private: + /// Converts internal error code to Status + Status ToArrowStatus(DecimalStatus dstatus) const; +}; + +/// For an integer type, return the max number of decimal digits +/// (=minimal decimal precision) it can represent. +inline Result MaxDecimalDigitsForInteger(Type::type type_id) { + switch (type_id) { + case Type::INT8: + case Type::UINT8: + return 3; + case Type::INT16: + case Type::UINT16: + return 5; + case Type::INT32: + case Type::UINT32: + return 10; + case Type::INT64: + return 19; + case Type::UINT64: + return 20; + default: + break; + } + return Status::Invalid("Not an integer type: ", type_id); +} + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fae9293f9e79891dcd85b536d697291289804ce5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h @@ -0,0 +1,115 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/status.h" +#include "arrow/util/cpu_info.h" + +namespace arrow { +namespace internal { + +enum class DispatchLevel : int { + // These dispatch levels, corresponding to instruction set features, + // are sorted in increasing order of preference. + NONE = 0, + SSE4_2, + AVX2, + AVX512, + NEON, + MAX +}; + +/* + A facility for dynamic dispatch according to available DispatchLevel. + + Typical use: + + static void my_function_default(...); + static void my_function_avx2(...); + + struct MyDynamicFunction { + using FunctionType = decltype(&my_function_default); + + static std::vector> implementations() { + return { + { DispatchLevel::NONE, my_function_default } + #if defined(ARROW_HAVE_RUNTIME_AVX2) + , { DispatchLevel::AVX2, my_function_avx2 } + #endif + }; + } + }; + + void my_function(...) 
{ + static DynamicDispatch dispatch; + return dispatch.func(...); + } +*/ +template +class DynamicDispatch { + protected: + using FunctionType = typename DynamicFunction::FunctionType; + using Implementation = std::pair; + + public: + DynamicDispatch() { Resolve(DynamicFunction::implementations()); } + + FunctionType func = {}; + + protected: + // Use the Implementation with the highest DispatchLevel + void Resolve(const std::vector& implementations) { + Implementation cur{DispatchLevel::NONE, {}}; + + for (const auto& impl : implementations) { + if (impl.first >= cur.first && IsSupported(impl.first)) { + // Higher (or same) level than current + cur = impl; + } + } + + if (!cur.second) { + Status::Invalid("No appropriate implementation found").Abort(); + } + func = cur.second; + } + + private: + bool IsSupported(DispatchLevel level) const { + static const auto cpu_info = arrow::internal::CpuInfo::GetInstance(); + + switch (level) { + case DispatchLevel::NONE: + return true; + case DispatchLevel::SSE4_2: + return cpu_info->IsSupported(CpuInfo::SSE4_2); + case DispatchLevel::AVX2: + return cpu_info->IsSupported(CpuInfo::AVX2); + case DispatchLevel::AVX512: + return cpu_info->IsSupported(CpuInfo::AVX512); + default: + return false; + } + } +}; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/float16.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/float16.h new file mode 100644 index 0000000000000000000000000000000000000000..0a432fee2cd315d23bd35e0907e327efc7f419ca --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/float16.h @@ -0,0 +1,209 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/util/endian.h" +#include "arrow/util/macros.h" +#include "arrow/util/ubsan.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +/// \brief Class representing an IEEE half-precision float, encoded as a `uint16_t` +/// +/// The exact format is as follows (from LSB to MSB): +/// - bits 0-10: mantissa +/// - bits 10-15: exponent +/// - bit 15: sign +/// +class ARROW_EXPORT Float16 { + public: + Float16() = default; + explicit Float16(float f) : Float16(FromFloat(f)) {} + explicit Float16(double d) : Float16(FromDouble(d)) {} + template >* = NULLPTR> + explicit Float16(T v) : Float16(static_cast(v)) {} + + /// \brief Create a `Float16` from its exact binary representation + constexpr static Float16 FromBits(uint16_t bits) { return Float16{bits, bool{}}; } + /// \brief Create a `Float16` from a 32-bit float (may lose precision) + static Float16 FromFloat(float f); + /// \brief Create a `Float16` from a 64-bit float (may lose precision) + static Float16 FromDouble(double d); + + /// \brief Read a `Float16` from memory in native-endian byte order + static Float16 FromBytes(const uint8_t* src) { + return FromBits(SafeLoadAs(src)); + } + + /// \brief Read a `Float16` from memory in little-endian byte order + static Float16 FromLittleEndian(const uint8_t* src) { + return 
FromBits(::arrow::bit_util::FromLittleEndian(SafeLoadAs(src))); + } + + /// \brief Read a `Float16` from memory in big-endian byte order + static Float16 FromBigEndian(const uint8_t* src) { + return FromBits(::arrow::bit_util::FromBigEndian(SafeLoadAs(src))); + } + + /// \brief Return the value's binary representation as a `uint16_t` + constexpr uint16_t bits() const { return bits_; } + + /// \brief Return true if the value is negative (sign bit is set) + constexpr bool signbit() const { return (bits_ & 0x8000) != 0; } + + /// \brief Return true if the value is NaN + constexpr bool is_nan() const { return (bits_ & 0x7fff) > 0x7c00; } + /// \brief Return true if the value is positive/negative infinity + constexpr bool is_infinity() const { return (bits_ & 0x7fff) == 0x7c00; } + /// \brief Return true if the value is finite and not NaN + constexpr bool is_finite() const { return (bits_ & 0x7c00) != 0x7c00; } + /// \brief Return true if the value is positive/negative zero + constexpr bool is_zero() const { return (bits_ & 0x7fff) == 0; } + + /// \brief Convert to a 32-bit float + float ToFloat() const; + /// \brief Convert to a 64-bit float + double ToDouble() const; + + explicit operator float() const { return ToFloat(); } + explicit operator double() const { return ToDouble(); } + + /// \brief Copy the value's bytes in native-endian byte order + void ToBytes(uint8_t* dest) const { std::memcpy(dest, &bits_, sizeof(bits_)); } + /// \brief Return the value's bytes in native-endian byte order + constexpr std::array ToBytes() const { +#if ARROW_LITTLE_ENDIAN + return ToLittleEndian(); +#else + return ToBigEndian(); +#endif + } + + /// \brief Copy the value's bytes in little-endian byte order + void ToLittleEndian(uint8_t* dest) const { + const auto bytes = ToLittleEndian(); + std::memcpy(dest, bytes.data(), bytes.size()); + } + /// \brief Return the value's bytes in little-endian byte order + constexpr std::array ToLittleEndian() const { +#if ARROW_LITTLE_ENDIAN + return 
{uint8_t(bits_ & 0xff), uint8_t(bits_ >> 8)}; +#else + return {uint8_t(bits_ >> 8), uint8_t(bits_ & 0xff)}; +#endif + } + + /// \brief Copy the value's bytes in big-endian byte order + void ToBigEndian(uint8_t* dest) const { + const auto bytes = ToBigEndian(); + std::memcpy(dest, bytes.data(), bytes.size()); + } + /// \brief Return the value's bytes in big-endian byte order + constexpr std::array ToBigEndian() const { +#if ARROW_LITTLE_ENDIAN + return {uint8_t(bits_ >> 8), uint8_t(bits_ & 0xff)}; +#else + return {uint8_t(bits_ & 0xff), uint8_t(bits_ >> 8)}; +#endif + } + + constexpr Float16 operator-() const { return FromBits(bits_ ^ 0x8000); } + constexpr Float16 operator+() const { return FromBits(bits_); } + + friend constexpr bool operator==(Float16 lhs, Float16 rhs) { + if (lhs.is_nan() || rhs.is_nan()) return false; + return Float16::CompareEq(lhs, rhs); + } + friend constexpr bool operator!=(Float16 lhs, Float16 rhs) { return !(lhs == rhs); } + + friend constexpr bool operator<(Float16 lhs, Float16 rhs) { + if (lhs.is_nan() || rhs.is_nan()) return false; + return Float16::CompareLt(lhs, rhs); + } + friend constexpr bool operator>(Float16 lhs, Float16 rhs) { return rhs < lhs; } + + friend constexpr bool operator<=(Float16 lhs, Float16 rhs) { + if (lhs.is_nan() || rhs.is_nan()) return false; + return !Float16::CompareLt(rhs, lhs); + } + friend constexpr bool operator>=(Float16 lhs, Float16 rhs) { return rhs <= lhs; } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os, Float16 arg); + + protected: + uint16_t bits_; + + private: + constexpr Float16(uint16_t bits, bool) : bits_(bits) {} + + // Comparison helpers that assume neither operand is NaN + static constexpr bool CompareEq(Float16 lhs, Float16 rhs) { + return (lhs.bits() == rhs.bits()) || (lhs.is_zero() && rhs.is_zero()); + } + static constexpr bool CompareLt(Float16 lhs, Float16 rhs) { + if (lhs.signbit()) { + if (rhs.signbit()) { + // Both are negative + return lhs.bits() > 
rhs.bits(); + } else { + // Handle +/-0 + return !lhs.is_zero() || rhs.bits() != 0; + } + } else if (rhs.signbit()) { + return false; + } else { + // Both are positive + return lhs.bits() < rhs.bits(); + } + } +}; + +static_assert(std::is_trivial_v); + +} // namespace util +} // namespace arrow + +// TODO: Not complete +template <> +class std::numeric_limits { + using T = arrow::util::Float16; + + public: + static constexpr bool is_specialized = true; + static constexpr bool is_signed = true; + static constexpr bool has_infinity = true; + static constexpr bool has_quiet_NaN = true; + + static constexpr T min() { return T::FromBits(0b0000010000000000); } + static constexpr T max() { return T::FromBits(0b0111101111111111); } + static constexpr T lowest() { return -max(); } + + static constexpr T infinity() { return T::FromBits(0b0111110000000000); } + + static constexpr T quiet_NaN() { return T::FromBits(0b0111111111111111); } +}; diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/future.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/future.h new file mode 100644 index 0000000000000000000000000000000000000000..0aa2842703712d0245f47c2b0e1885067a4f8f90 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/future.h @@ -0,0 +1,882 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/type_traits.h" +#include "arrow/util/config.h" +#include "arrow/util/functional.h" +#include "arrow/util/macros.h" +#include "arrow/util/tracing.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +template +struct EnsureFuture; + +namespace detail { + +template +struct is_future : std::false_type {}; + +template +struct is_future> : std::true_type {}; + +template +struct result_of; + +template +struct result_of()(std::declval()...))>> { + using type = decltype(std::declval()(std::declval()...)); +}; + +template +using result_of_t = typename result_of::type; + +// Helper to find the synchronous counterpart for a Future +template +struct SyncType { + using type = Result; +}; + +template <> +struct SyncType { + using type = Status; +}; + +template +using first_arg_is_status = + std::is_same>::type, + Status>; + +template > +using if_has_no_args = typename std::conditional::type; + +/// Creates a callback that can be added to a future to mark a `dest` future finished +template +struct MarkNextFinished {}; + +/// If the source and dest are both empty we can pass on the status +template +struct MarkNextFinished { + void operator()(const Status& status) && { next.MarkFinished(status); } + Dest next; +}; + +/// If the source is not empty but the dest is then we can take the +/// status out of the result +template 
+struct MarkNextFinished { + void operator()(const Result& res) && { + next.MarkFinished(internal::Empty::ToResult(res.status())); + } + Dest next; +}; + +/// If neither are empty we pass on the result +template +struct MarkNextFinished { + void operator()(const Result& res) && { + next.MarkFinished(res); + } + Dest next; +}; + +/// Helper that contains information about how to apply a continuation +struct ContinueFuture { + template + struct ForReturnImpl; + + template + using ForReturn = typename ForReturnImpl::type; + + template + using ForSignature = ForReturn>; + + // If the callback returns void then we return Future<> that always finishes OK. + template , + typename NextFuture = ForReturn> + typename std::enable_if::value>::type operator()( + NextFuture next, ContinueFunc&& f, Args&&... a) const { + std::forward(f)(std::forward(a)...); + next.MarkFinished(); + } + + /// If the callback returns a non-future then we return Future + /// and mark the future finished with the callback result. It will get promoted + /// to Result as part of MarkFinished if it isn't already. + /// + /// If the callback returns Status and we return Future<> then also send the callback + /// result as-is to the destination future. + template , + typename NextFuture = ForReturn> + typename std::enable_if< + !std::is_void::value && !is_future::value && + (!NextFuture::is_empty || std::is_same::value)>::type + operator()(NextFuture next, ContinueFunc&& f, Args&&... a) const { + next.MarkFinished(std::forward(f)(std::forward(a)...)); + } + + /// If the callback returns a Result and the next future is Future<> then we mark + /// the future finished with the callback result. + /// + /// It may seem odd that the next future is Future<> when the callback returns a + /// result but this can occur if the OnFailure callback returns a result while the + /// OnSuccess callback is void/Status (e.g. 
you would get this calling the one-arg + /// version of Then with an OnSuccess callback that returns void) + template , + typename NextFuture = ForReturn> + typename std::enable_if::value && + !is_future::value && NextFuture::is_empty && + !std::is_same::value>::type + operator()(NextFuture next, ContinueFunc&& f, Args&&... a) const { + next.MarkFinished(std::forward(f)(std::forward(a)...).status()); + } + + /// If the callback returns a Future then we return Future. We create a new + /// future and add a callback to the future given to us by the user that forwards the + /// result to the future we just created + template , + typename NextFuture = ForReturn> + typename std::enable_if::value>::type operator()( + NextFuture next, ContinueFunc&& f, Args&&... a) const { + ContinueResult signal_to_complete_next = + std::forward(f)(std::forward(a)...); + MarkNextFinished callback{std::move(next)}; + signal_to_complete_next.AddCallback(std::move(callback)); + } + + /// Helpers to conditionally ignore arguments to ContinueFunc + template + void IgnoringArgsIf(std::true_type, NextFuture&& next, ContinueFunc&& f, + Args&&...) const { + operator()(std::forward(next), std::forward(f)); + } + template + void IgnoringArgsIf(std::false_type, NextFuture&& next, ContinueFunc&& f, + Args&&... 
a) const { + operator()(std::forward(next), std::forward(f), + std::forward(a)...); + } +}; + +/// Helper struct which tells us what kind of Future gets returned from `Then` based on +/// the return type of the OnSuccess callback +template <> +struct ContinueFuture::ForReturnImpl { + using type = Future<>; +}; + +template <> +struct ContinueFuture::ForReturnImpl { + using type = Future<>; +}; + +template +struct ContinueFuture::ForReturnImpl { + using type = Future; +}; + +template +struct ContinueFuture::ForReturnImpl> { + using type = Future; +}; + +template +struct ContinueFuture::ForReturnImpl> { + using type = Future; +}; + +} // namespace detail + +/// A Future's execution or completion status +enum class FutureState : int8_t { PENDING, SUCCESS, FAILURE }; + +inline bool IsFutureFinished(FutureState state) { return state != FutureState::PENDING; } + +/// \brief Describe whether the callback should be scheduled or run synchronously +enum class ShouldSchedule { + /// Always run the callback synchronously (the default) + Never = 0, + /// Schedule a new task only if the future is not finished when the + /// callback is added + IfUnfinished = 1, + /// Always schedule the callback as a new task + Always = 2, + /// Schedule a new task only if it would run on an executor other than + /// the specified executor. + IfDifferentExecutor = 3, +}; + +/// \brief Options that control how a continuation is run +struct CallbackOptions { + /// Describe whether the callback should be run synchronously or scheduled + ShouldSchedule should_schedule = ShouldSchedule::Never; + /// If the callback is scheduled then this is the executor it should be scheduled + /// on. 
If this is NULL then should_schedule must be Never + internal::Executor* executor = NULLPTR; + + static CallbackOptions Defaults() { return {}; } +}; + +// Untyped private implementation +class ARROW_EXPORT FutureImpl : public std::enable_shared_from_this { + public: + FutureImpl(); + virtual ~FutureImpl() = default; + + FutureState state() { return state_.load(); } + + static std::unique_ptr Make(); + static std::unique_ptr MakeFinished(FutureState state); + +#ifdef ARROW_WITH_OPENTELEMETRY + void SetSpan(util::tracing::Span* span) { span_ = span; } +#endif + + // Future API + void MarkFinished(); + void MarkFailed(); + void Wait(); + bool Wait(double seconds); + template + Result* CastResult() const { + return static_cast*>(result_.get()); + } + + using Callback = internal::FnOnce; + void AddCallback(Callback callback, CallbackOptions opts); + bool TryAddCallback(const std::function& callback_factory, + CallbackOptions opts); + + std::atomic state_{FutureState::PENDING}; + + // Type erased storage for arbitrary results + // XXX small objects could be stored inline instead of boxed in a pointer + using Storage = std::unique_ptr; + Storage result_{NULLPTR, NULLPTR}; + + struct CallbackRecord { + Callback callback; + CallbackOptions options; + }; + std::vector callbacks_; +#ifdef ARROW_WITH_OPENTELEMETRY + util::tracing::Span* span_ = NULLPTR; +#endif +}; + +// --------------------------------------------------------------------- +// Public API + +/// \brief EXPERIMENTAL A std::future-like class with more functionality. +/// +/// A Future represents the results of a past or future computation. +/// The Future API has two sides: a producer side and a consumer side. +/// +/// The producer API allows creating a Future and setting its result or +/// status, possibly after running a computation function. +/// +/// The consumer API allows querying a Future's current state, wait for it +/// to complete, and composing futures with callbacks. 
+template +class [[nodiscard]] Future { + public: + using ValueType = T; + using SyncType = typename detail::SyncType::type; + static constexpr bool is_empty = std::is_same::value; + // The default constructor creates an invalid Future. Use Future::Make() + // for a valid Future. This constructor is mostly for the convenience + // of being able to presize a vector of Futures. + Future() = default; + +#ifdef ARROW_WITH_OPENTELEMETRY + void SetSpan(util::tracing::Span* span) { impl_->SetSpan(span); } +#endif + + // Consumer API + + bool is_valid() const { return impl_ != NULLPTR; } + + /// \brief Return the Future's current state + /// + /// A return value of PENDING is only indicative, as the Future can complete + /// concurrently. A return value of FAILURE or SUCCESS is definitive, though. + FutureState state() const { + CheckValid(); + return impl_->state(); + } + + /// \brief Whether the Future is finished + /// + /// A false return value is only indicative, as the Future can complete + /// concurrently. A true return value is definitive, though. + bool is_finished() const { + CheckValid(); + return IsFutureFinished(impl_->state()); + } + + /// \brief Wait for the Future to complete and return its Result + const Result& result() const& { + Wait(); + return *GetResult(); + } + + /// \brief Returns an rvalue to the result. This method is potentially unsafe + /// + /// The future is not the unique owner of the result, copies of a future will + /// also point to the same result. You must make sure that no other copies + /// of the future exist. Attempts to add callbacks after you move the result + /// will result in undefined behavior. + Result&& MoveResult() { + Wait(); + return std::move(*GetResult()); + } + + /// \brief Wait for the Future to complete and return its Status + const Status& status() const { return result().status(); } + + /// \brief Future is convertible to Future<>, which views only the + /// Status of the original. 
Marking the returned Future Finished is not supported. + explicit operator Future<>() const { + Future<> status_future; + status_future.impl_ = impl_; + return status_future; + } + + /// \brief Wait for the Future to complete + void Wait() const { + CheckValid(); + impl_->Wait(); + } + + /// \brief Wait for the Future to complete, or for the timeout to expire + /// + /// `true` is returned if the Future completed, `false` if the timeout expired. + /// Note a `false` value is only indicative, as the Future can complete + /// concurrently. + bool Wait(double seconds) const { + CheckValid(); + return impl_->Wait(seconds); + } + + // Producer API + + /// \brief Producer API: mark Future finished + /// + /// The Future's result is set to `res`. + void MarkFinished(Result res) { DoMarkFinished(std::move(res)); } + + /// \brief Mark a Future<> completed with the provided Status. + template ::value>::type> + void MarkFinished(Status s = Status::OK()) { + return DoMarkFinished(E::ToResult(std::move(s))); + } + + /// \brief Producer API: instantiate a valid Future + /// + /// The Future's state is initialized with PENDING. If you are creating a future with + /// this method you must ensure that future is eventually completed (with success or + /// failure). Creating a future, returning it, and never completing the future can lead + /// to memory leaks (for example, see Loop). + static Future Make() { + Future fut; + fut.impl_ = FutureImpl::Make(); + return fut; + } + + /// \brief Producer API: instantiate a finished Future + static Future MakeFinished(Result res) { + Future fut; + fut.InitializeFromResult(std::move(res)); + return fut; + } + + /// \brief Make a finished Future<> with the provided Status. 
+ template ::value>::type> + static Future<> MakeFinished(Status s = Status::OK()) { + return MakeFinished(E::ToResult(std::move(s))); + } + + struct WrapResultOnComplete { + template + struct Callback { + void operator()(const FutureImpl& impl) && { + std::move(on_complete)(*impl.CastResult()); + } + OnComplete on_complete; + }; + }; + + struct WrapStatusyOnComplete { + template + struct Callback { + static_assert(std::is_same::value, + "Only callbacks for Future<> should accept Status and not Result"); + + void operator()(const FutureImpl& impl) && { + std::move(on_complete)(impl.CastResult()->status()); + } + OnComplete on_complete; + }; + }; + + template + using WrapOnComplete = typename std::conditional< + detail::first_arg_is_status::value, WrapStatusyOnComplete, + WrapResultOnComplete>::type::template Callback; + + /// \brief Consumer API: Register a callback to run when this future completes + /// + /// The callback should receive the result of the future (const Result&) + /// For a void or statusy future this should be (const Status&) + /// + /// There is no guarantee to the order in which callbacks will run. In + /// particular, callbacks added while the future is being marked complete + /// may be executed immediately, ahead of, or even the same time as, other + /// callbacks that have been previously added. + /// + /// WARNING: callbacks may hold arbitrary references, including cyclic references. + /// Since callbacks will only be destroyed after they are invoked, this can lead to + /// memory leaks if a Future is never marked finished (abandoned): + /// + /// { + /// auto fut = Future<>::Make(); + /// fut.AddCallback([fut]() {}); + /// } + /// + /// In this example `fut` falls out of scope but is not destroyed because it holds a + /// cyclic reference to itself through the callback. 
+ template > + void AddCallback(OnComplete on_complete, + CallbackOptions opts = CallbackOptions::Defaults()) const { + // We know impl_ will not be dangling when invoking callbacks because at least one + // thread will be waiting for MarkFinished to return. Thus it's safe to keep a + // weak reference to impl_ here + impl_->AddCallback(Callback{std::move(on_complete)}, opts); + } + + /// \brief Overload of AddCallback that will return false instead of running + /// synchronously + /// + /// This overload will guarantee the callback is never run synchronously. If the future + /// is already finished then it will simply return false. This can be useful to avoid + /// stack overflow in a situation where you have recursive Futures. For an example + /// see the Loop function + /// + /// Takes in a callback factory function to allow moving callbacks (the factory function + /// will only be called if the callback can successfully be added) + /// + /// Returns true if a callback was actually added and false if the callback failed + /// to add because the future was marked complete. 
+ template , + typename Callback = WrapOnComplete> + bool TryAddCallback(CallbackFactory callback_factory, + CallbackOptions opts = CallbackOptions::Defaults()) const { + return impl_->TryAddCallback([&]() { return Callback{callback_factory()}; }, opts); + } + + template + struct ThenOnComplete { + static constexpr bool has_no_args = + internal::call_traits::argument_count::value == 0; + + using ContinuedFuture = detail::ContinueFuture::ForSignature< + detail::if_has_no_args>; + + static_assert( + std::is_same, + ContinuedFuture>::value, + "OnSuccess and OnFailure must continue with the same future type"); + + struct DummyOnSuccess { + void operator()(const T&); + }; + using OnSuccessArg = typename std::decay>>::type; + + static_assert( + !std::is_same::type>::value, + "OnSuccess' argument should not be a Result"); + + void operator()(const Result& result) && { + detail::ContinueFuture continue_future; + if (ARROW_PREDICT_TRUE(result.ok())) { + // move on_failure to a(n immediately destroyed) temporary to free its resources + ARROW_UNUSED(OnFailure(std::move(on_failure))); + continue_future.IgnoringArgsIf( + detail::if_has_no_args{}, + std::move(next), std::move(on_success), result.ValueOrDie()); + } else { + ARROW_UNUSED(OnSuccess(std::move(on_success))); + continue_future(std::move(next), std::move(on_failure), result.status()); + } + } + + OnSuccess on_success; + OnFailure on_failure; + ContinuedFuture next; + }; + + template + struct PassthruOnFailure { + using ContinuedFuture = detail::ContinueFuture::ForSignature< + detail::if_has_no_args>; + + Result operator()(const Status& s) { return s; } + }; + + /// \brief Consumer API: Register a continuation to run when this future completes + /// + /// The continuation will run in the same thread that called MarkFinished (whatever + /// callback is registered with this function will run before MarkFinished returns). 
+ /// Avoid long-running callbacks in favor of submitting a task to an Executor and + /// returning the future. + /// + /// Two callbacks are supported: + /// - OnSuccess, called with the result (const ValueType&) on successful completion. + /// for an empty future this will be called with nothing () + /// - OnFailure, called with the error (const Status&) on failed completion. + /// This callback is optional and defaults to a passthru of any errors. + /// + /// Then() returns a Future whose ValueType is derived from the return type of the + /// callbacks. If a callback returns: + /// - void, a Future<> will be returned which will completes successfully as soon + /// as the callback runs. + /// - Status, a Future<> will be returned which will complete with the returned Status + /// as soon as the callback runs. + /// - V or Result, a Future will be returned which will complete with the result + /// of invoking the callback as soon as the callback runs. + /// - Future, a Future will be returned which will be marked complete when the + /// future returned by the callback completes (and will complete with the same + /// result). + /// + /// The continued Future type must be the same for both callbacks. + /// + /// Note that OnFailure can swallow errors, allowing continued Futures to successfully + /// complete even if this Future fails. + /// + /// If this future is already completed then the callback will be run immediately + /// and the returned future may already be marked complete. + /// + /// See AddCallback for general considerations when writing callbacks. 
+ template , + typename OnComplete = ThenOnComplete, + typename ContinuedFuture = typename OnComplete::ContinuedFuture> + ContinuedFuture Then(OnSuccess on_success, OnFailure on_failure = {}, + CallbackOptions options = CallbackOptions::Defaults()) const { + auto next = ContinuedFuture::Make(); + AddCallback(OnComplete{std::forward(on_success), + std::forward(on_failure), next}, + options); + return next; + } + + /// \brief Implicit constructor to create a finished future from a value + Future(ValueType val) : Future() { // NOLINT runtime/explicit + impl_ = FutureImpl::MakeFinished(FutureState::SUCCESS); + SetResult(std::move(val)); + } + + /// \brief Implicit constructor to create a future from a Result, enabling use + /// of macros like ARROW_ASSIGN_OR_RAISE. + Future(Result res) : Future() { // NOLINT runtime/explicit + if (ARROW_PREDICT_TRUE(res.ok())) { + impl_ = FutureImpl::MakeFinished(FutureState::SUCCESS); + } else { + impl_ = FutureImpl::MakeFinished(FutureState::FAILURE); + } + SetResult(std::move(res)); + } + + /// \brief Implicit constructor to create a future from a Status, enabling use + /// of macros like ARROW_RETURN_NOT_OK. 
+ Future(Status s) // NOLINT runtime/explicit + : Future(Result(std::move(s))) {} + + protected: + void InitializeFromResult(Result res) { + if (ARROW_PREDICT_TRUE(res.ok())) { + impl_ = FutureImpl::MakeFinished(FutureState::SUCCESS); + } else { + impl_ = FutureImpl::MakeFinished(FutureState::FAILURE); + } + SetResult(std::move(res)); + } + + void Initialize() { impl_ = FutureImpl::Make(); } + + Result* GetResult() const { return impl_->CastResult(); } + + void SetResult(Result res) { + impl_->result_ = {new Result(std::move(res)), + [](void* p) { delete static_cast*>(p); }}; + } + + void DoMarkFinished(Result res) { + SetResult(std::move(res)); + + if (ARROW_PREDICT_TRUE(GetResult()->ok())) { + impl_->MarkFinished(); + } else { + impl_->MarkFailed(); + } + } + + void CheckValid() const { +#ifndef NDEBUG + if (!is_valid()) { + Status::Invalid("Invalid Future (default-initialized?)").Abort(); + } +#endif + } + + explicit Future(std::shared_ptr impl) : impl_(std::move(impl)) {} + + std::shared_ptr impl_; + + friend struct detail::ContinueFuture; + + template + friend class Future; + friend class WeakFuture; + + FRIEND_TEST(FutureRefTest, ChainRemoved); + FRIEND_TEST(FutureRefTest, TailRemoved); + FRIEND_TEST(FutureRefTest, HeadRemoved); +}; + +template +typename Future::SyncType FutureToSync(const Future& fut) { + return fut.result(); +} + +template <> +inline typename Future::SyncType FutureToSync( + const Future& fut) { + return fut.status(); +} + +template <> +inline Future<>::Future(Status s) : Future(internal::Empty::ToResult(std::move(s))) {} + +template +class WeakFuture { + public: + explicit WeakFuture(const Future& future) : impl_(future.impl_) {} + + Future get() { return Future{impl_.lock()}; } + + private: + std::weak_ptr impl_; +}; + +/// \defgroup future-utilities Functions for working with Futures +/// @{ + +/// If a Result holds an error instead of a Future, construct a finished Future +/// holding that error. 
+template +static Future DeferNotOk(Result> maybe_future) { + if (ARROW_PREDICT_FALSE(!maybe_future.ok())) { + return Future::MakeFinished(std::move(maybe_future).status()); + } + return std::move(maybe_future).MoveValueUnsafe(); +} + +/// \brief Create a Future which completes when all of `futures` complete. +/// +/// The future's result is a vector of the results of `futures`. +/// Note that this future will never be marked "failed"; failed results +/// will be stored in the result vector alongside successful results. +template +Future>> All(std::vector> futures) { + struct State { + explicit State(std::vector> f) + : futures(std::move(f)), n_remaining(futures.size()) {} + + std::vector> futures; + std::atomic n_remaining; + }; + + if (futures.size() == 0) { + return {std::vector>{}}; + } + + auto state = std::make_shared(std::move(futures)); + + auto out = Future>>::Make(); + for (const Future& future : state->futures) { + future.AddCallback([state, out](const Result&) mutable { + if (state->n_remaining.fetch_sub(1) != 1) return; + + std::vector> results(state->futures.size()); + for (size_t i = 0; i < results.size(); ++i) { + results[i] = state->futures[i].result(); + } + out.MarkFinished(std::move(results)); + }); + } + return out; +} + +/// \brief Create a Future which completes when all of `futures` complete. +/// +/// The future will be marked complete if all `futures` complete +/// successfully. Otherwise, it will be marked failed with the status of +/// the first failing future. +ARROW_EXPORT +Future<> AllComplete(const std::vector>& futures); + +/// \brief Create a Future which completes when all of `futures` complete. +/// +/// The future will finish with an ok status if all `futures` finish with +/// an ok status. Otherwise, it will be marked failed with the status of +/// one of the failing futures. +/// +/// Unlike AllComplete this Future will not complete immediately when a +/// failure occurs. It will wait until all futures have finished. 
+ARROW_EXPORT +Future<> AllFinished(const std::vector>& futures); + +/// @} + +struct Continue { + template + operator std::optional() && { // NOLINT explicit + return {}; + } +}; + +template +std::optional Break(T break_value = {}) { + return std::optional{std::move(break_value)}; +} + +template +using ControlFlow = std::optional; + +/// \brief Loop through an asynchronous sequence +/// +/// \param[in] iterate A generator of Future>. On completion +/// of each yielded future the resulting ControlFlow will be examined. A Break will +/// terminate the loop, while a Continue will re-invoke `iterate`. +/// +/// \return A future which will complete when a Future returned by iterate completes with +/// a Break +template ::ValueType, + typename BreakValueType = typename Control::value_type> +Future Loop(Iterate iterate) { + struct Callback { + bool CheckForTermination(const Result& control_res) { + if (!control_res.ok()) { + break_fut.MarkFinished(control_res.status()); + return true; + } + if (control_res->has_value()) { + break_fut.MarkFinished(**control_res); + return true; + } + return false; + } + + void operator()(const Result& maybe_control) && { + if (CheckForTermination(maybe_control)) return; + + auto control_fut = iterate(); + while (true) { + if (control_fut.TryAddCallback([this]() { return *this; })) { + // Adding a callback succeeded; control_fut was not finished + // and we must wait to CheckForTermination. + return; + } + // Adding a callback failed; control_fut was finished and we + // can CheckForTermination immediately. This also avoids recursion and potential + // stack overflow. + if (CheckForTermination(control_fut.result())) return; + + control_fut = iterate(); + } + } + + Iterate iterate; + + // If the future returned by control_fut is never completed then we will be hanging on + // to break_fut forever even if the listener has given up listening on it. 
Instead we + // rely on the fact that a producer (the caller of Future<>::Make) is always + // responsible for completing the futures they create. + // TODO: Could avoid this kind of situation with "future abandonment" similar to mesos + Future break_fut; + }; + + auto break_fut = Future::Make(); + auto control_fut = iterate(); + control_fut.AddCallback(Callback{std::move(iterate), break_fut}); + + return break_fut; +} + +inline Future<> ToFuture(Status status) { + return Future<>::MakeFinished(std::move(status)); +} + +template +Future ToFuture(T value) { + return Future::MakeFinished(std::move(value)); +} + +template +Future ToFuture(Result maybe_value) { + return Future::MakeFinished(std::move(maybe_value)); +} + +template +Future ToFuture(Future fut) { + return fut; +} + +template +struct EnsureFuture { + using type = decltype(ToFuture(std::declval())); +}; + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/hash_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/hash_util.h new file mode 100644 index 0000000000000000000000000000000000000000..7b3de2208935fa8c7c8afbc83ba9982f4907491d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/hash_util.h @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +namespace arrow { +namespace internal { + +// ---------------------------------------------------------------------- +// BEGIN Hash utilities from Boost + +namespace detail { + +#if defined(_MSC_VER) +# define ARROW_HASH_ROTL32(x, r) _rotl(x, r) +#else +# define ARROW_HASH_ROTL32(x, r) (x << r) | (x >> (32 - r)) +#endif + +template +inline void hash_combine_impl(SizeT& seed, SizeT value) { + seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2); +} + +inline void hash_combine_impl(uint32_t& h1, uint32_t k1) { + const uint32_t c1 = 0xcc9e2d51; + const uint32_t c2 = 0x1b873593; + + k1 *= c1; + k1 = ARROW_HASH_ROTL32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = ARROW_HASH_ROTL32(h1, 13); + h1 = h1 * 5 + 0xe6546b64; +} + +#undef ARROW_HASH_ROTL32 + +} // namespace detail + +template +inline void hash_combine(std::size_t& seed, T const& v) { + std::hash hasher; + return ::arrow::internal::detail::hash_combine_impl(seed, hasher(v)); +} + +// END Hash utilities from Boost +// ---------------------------------------------------------------------- + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h new file mode 100644 index 0000000000000000000000000000000000000000..4ead1a7283d81fc60fd59716b60b280ccdae92d2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h @@ -0,0 +1,944 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Private header, not to be exported + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array/builder_binary.h" +#include "arrow/buffer_builder.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/type_traits.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_builders.h" +#include "arrow/util/endian.h" +#include "arrow/util/logging.h" +#include "arrow/util/macros.h" +#include "arrow/util/ubsan.h" + +#define XXH_INLINE_ALL + +#include "arrow/vendored/xxhash.h" // IWYU pragma: keep + +namespace arrow { +namespace internal { + +// XXX would it help to have a 32-bit hash value on large datasets? +typedef uint64_t hash_t; + +// Notes about the choice of a hash function. +// - XXH3 is extremely fast on most data sizes, from small to huge; +// faster even than HW CRC-based hashing schemes +// - our custom hash function for tiny values (< 16 bytes) is still +// significantly faster (~30%), at least on this machine and compiler + +template +inline hash_t ComputeStringHash(const void* data, int64_t length); + +/// \brief A hash function for bitmaps that can handle offsets and lengths in +/// terms of number of bits. The hash only depends on the bits actually hashed. +/// +/// It's the caller's responsibility to ensure that bits_offset + num_bits are +/// readable from the bitmap. 
+/// +/// \pre bits_offset >= 0 +/// \pre num_bits >= 0 +/// \pre (bits_offset + num_bits + 7) / 8 <= readable length in bytes from bitmap +/// +/// \param bitmap The pointer to the bitmap. +/// \param seed The seed for the hash function (useful when chaining hash functions). +/// \param bits_offset The offset in bits relative to the start of the bitmap. +/// \param num_bits The number of bits after the offset to be hashed. +ARROW_EXPORT hash_t ComputeBitmapHash(const uint8_t* bitmap, hash_t seed, + int64_t bits_offset, int64_t num_bits); + +template +struct ScalarHelperBase { + static bool CompareScalars(Scalar u, Scalar v) { return u == v; } + + static hash_t ComputeHash(const Scalar& value) { + // Generic hash computation for scalars. Simply apply the string hash + // to the bit representation of the value. + + // XXX in the case of FP values, we'd like equal values to have the same hash, + // even if they have different bit representations... + return ComputeStringHash(&value, sizeof(value)); + } +}; + +template +struct ScalarHelper : public ScalarHelperBase {}; + +template +struct ScalarHelper::value>> + : public ScalarHelperBase { + // ScalarHelper specialization for integers + + static hash_t ComputeHash(const Scalar& value) { + // Faster hash computation for integers. + + // Two of xxhash's prime multipliers (which are chosen for their + // bit dispersion properties) + static constexpr uint64_t multipliers[] = {11400714785074694791ULL, + 14029467366897019727ULL}; + + // Multiplying by the prime number mixes the low bits into the high bits, + // then byte-swapping (which is a single CPU instruction) allows the + // combined high and low bits to participate in the initial hash table index. 
+ auto h = static_cast(value); + return bit_util::ByteSwap(multipliers[AlgNum] * h); + } +}; + +template +struct ScalarHelper::value>> + : public ScalarHelperBase { + // ScalarHelper specialization for std::string_view + + static hash_t ComputeHash(std::string_view value) { + return ComputeStringHash(value.data(), static_cast(value.size())); + } +}; + +template +struct ScalarHelper::value>> + : public ScalarHelperBase { + // ScalarHelper specialization for reals + + static bool CompareScalars(Scalar u, Scalar v) { + if (std::isnan(u)) { + // XXX should we do a bit-precise comparison? + return std::isnan(v); + } + return u == v; + } +}; + +template +hash_t ComputeStringHash(const void* data, int64_t length) { + if (ARROW_PREDICT_TRUE(length <= 16)) { + // Specialize for small hash strings, as they are quite common as + // hash table keys. Even XXH3 isn't quite as fast. + auto p = reinterpret_cast(data); + auto n = static_cast(length); + if (n <= 8) { + if (n <= 3) { + if (n == 0) { + return 1U; + } + uint32_t x = (n << 24) ^ (p[0] << 16) ^ (p[n / 2] << 8) ^ p[n - 1]; + return ScalarHelper::ComputeHash(x); + } + // 4 <= length <= 8 + // We can read the string as two overlapping 32-bit ints, apply + // different hash functions to each of them in parallel, then XOR + // the results + uint32_t x, y; + hash_t hx, hy; + x = util::SafeLoadAs(p + n - 4); + y = util::SafeLoadAs(p); + hx = ScalarHelper::ComputeHash(x); + hy = ScalarHelper::ComputeHash(y); + return n ^ hx ^ hy; + } + // 8 <= length <= 16 + // Apply the same principle as above + uint64_t x, y; + hash_t hx, hy; + x = util::SafeLoadAs(p + n - 8); + y = util::SafeLoadAs(p); + hx = ScalarHelper::ComputeHash(x); + hy = ScalarHelper::ComputeHash(y); + return n ^ hx ^ hy; + } + +#if XXH3_SECRET_SIZE_MIN != 136 +# error XXH3_SECRET_SIZE_MIN changed, please fix kXxh3Secrets +#endif + + // XXH3_64bits_withSeed generates a secret based on the seed, which is too slow. + // Instead, we use hard-coded random secrets. 
To maximize cache efficiency, + // they reuse the same memory area. + static constexpr unsigned char kXxh3Secrets[XXH3_SECRET_SIZE_MIN + 1] = { + 0xe7, 0x8b, 0x13, 0xf9, 0xfc, 0xb5, 0x8e, 0xef, 0x81, 0x48, 0x2c, 0xbf, 0xf9, 0x9f, + 0xc1, 0x1e, 0x43, 0x6d, 0xbf, 0xa6, 0x6d, 0xb5, 0x72, 0xbc, 0x97, 0xd8, 0x61, 0x24, + 0x0f, 0x12, 0xe3, 0x05, 0x21, 0xf7, 0x5c, 0x66, 0x67, 0xa5, 0x65, 0x03, 0x96, 0x26, + 0x69, 0xd8, 0x29, 0x20, 0xf8, 0xc7, 0xb0, 0x3d, 0xdd, 0x7d, 0x18, 0xa0, 0x60, 0x75, + 0x92, 0xa4, 0xce, 0xba, 0xc0, 0x77, 0xf4, 0xac, 0xb7, 0x03, 0x53, 0xf0, 0x98, 0xce, + 0xe6, 0x2b, 0x20, 0xc7, 0x82, 0x91, 0xab, 0xbf, 0x68, 0x5c, 0x62, 0x4d, 0x33, 0xa3, + 0xe1, 0xb3, 0xff, 0x97, 0x54, 0x4c, 0x44, 0x34, 0xb5, 0xb9, 0x32, 0x4c, 0x75, 0x42, + 0x89, 0x53, 0x94, 0xd4, 0x9f, 0x2b, 0x76, 0x4d, 0x4e, 0xe6, 0xfa, 0x15, 0x3e, 0xc1, + 0xdb, 0x71, 0x4b, 0x2c, 0x94, 0xf5, 0xfc, 0x8c, 0x89, 0x4b, 0xfb, 0xc1, 0x82, 0xa5, + 0x6a, 0x53, 0xf9, 0x4a, 0xba, 0xce, 0x1f, 0xc0, 0x97, 0x1a, 0x87}; + + static_assert(AlgNum < 2, "AlgNum too large"); + static constexpr auto secret = kXxh3Secrets + AlgNum; + return XXH3_64bits_withSecret(data, static_cast(length), secret, + XXH3_SECRET_SIZE_MIN); +} + +// XXX add a HashEq struct with both hash and compare functions? 
+
+// ----------------------------------------------------------------------
+// An open-addressing insert-only hash table (no deletes)
+
+template <typename Payload>
+class HashTable {
+ public:
+  static constexpr hash_t kSentinel = 0ULL;
+  static constexpr int64_t kLoadFactor = 2UL;
+
+  struct Entry {
+    hash_t h;
+    Payload payload;
+
+    // An entry is valid if the hash is different from the sentinel value
+    operator bool() const { return h != kSentinel; }
+  };
+
+  HashTable(MemoryPool* pool, uint64_t capacity) : entries_builder_(pool) {
+    DCHECK_NE(pool, nullptr);
+    // Minimum of 32 elements
+    capacity = std::max<uint64_t>(capacity, 32UL);
+    // Power-of-two capacity lets the probe sequence use a cheap bitmask
+    // instead of a modulo.
+    capacity_ = bit_util::NextPower2(capacity);
+    capacity_mask_ = capacity_ - 1;
+    size_ = 0;
+
+    DCHECK_OK(UpsizeBuffer(capacity_));
+  }
+
+  // Lookup with non-linear probing
+  // cmp_func should have signature bool(const Payload*).
+  // Return a (Entry*, found) pair.
+  template <typename CmpFunc>
+  std::pair<Entry*, bool> Lookup(hash_t h, CmpFunc&& cmp_func) {
+    auto p = Lookup<DoCompare, CmpFunc>(h, entries_, capacity_mask_,
+                                        std::forward<CmpFunc>(cmp_func));
+    return {&entries_[p.first], p.second};
+  }
+
+  template <typename CmpFunc>
+  std::pair<const Entry*, bool> Lookup(hash_t h, CmpFunc&& cmp_func) const {
+    auto p = Lookup<DoCompare, CmpFunc>(h, entries_, capacity_mask_,
+                                        std::forward<CmpFunc>(cmp_func));
+    return {&entries_[p.first], p.second};
+  }
+
+  Status Insert(Entry* entry, hash_t h, const Payload& payload) {
+    // Ensure entry is empty before inserting
+    assert(!*entry);
+    entry->h = FixHash(h);
+    entry->payload = payload;
+    ++size_;
+
+    if (ARROW_PREDICT_FALSE(NeedUpsizing())) {
+      // Resize less frequently since it is expensive
+      return Upsize(capacity_ * kLoadFactor * 2);
+    }
+    return Status::OK();
+  }
+
+  uint64_t size() const { return size_; }
+
+  // Visit all non-empty entries in the table
+  // The visit_func should have signature void(const Entry*)
+  template <typename VisitFunc>
+  void VisitEntries(VisitFunc&& visit_func) const {
+    for (uint64_t i = 0; i < capacity_; i++) {
+      const auto& entry = entries_[i];
+      if (entry) {
+        visit_func(&entry);
+      }
+    }
+  }
+
+ protected:
+  // NoCompare is for when the value is known not to exist in the table
+  enum CompareKind { DoCompare, NoCompare };
+
+  // The workhorse lookup function
+  template <CompareKind CKind, typename CmpFunc>
+  std::pair<uint64_t, bool> Lookup(hash_t h, const Entry* entries, uint64_t size_mask,
+                                   CmpFunc&& cmp_func) const {
+    static constexpr uint8_t perturb_shift = 5;
+
+    uint64_t index, perturb;
+    const Entry* entry;
+
+    h = FixHash(h);
+    index = h & size_mask;
+    perturb = (h >> perturb_shift) + 1U;
+
+    while (true) {
+      entry = &entries[index];
+      if (CompareEntry<CKind, CmpFunc>(h, entry, std::forward<CmpFunc>(cmp_func))) {
+        // Found
+        return {index, true};
+      }
+      if (entry->h == kSentinel) {
+        // Empty slot
+        return {index, false};
+      }
+
+      // Perturbation logic inspired from CPython's set / dict object.
+      // The goal is that all 64 bits of the unmasked hash value eventually
+      // participate in the probing sequence, to minimize clustering.
+      index = (index + perturb) & size_mask;
+      perturb = (perturb >> perturb_shift) + 1U;
+    }
+  }
+
+  template <CompareKind CKind, typename CmpFunc>
+  bool CompareEntry(hash_t h, const Entry* entry, CmpFunc&& cmp_func) const {
+    if (CKind == NoCompare) {
+      return false;
+    } else {
+      return entry->h == h && cmp_func(&entry->payload);
+    }
+  }
+
+  bool NeedUpsizing() const {
+    // Keep the load factor <= 1/2
+    return size_ * kLoadFactor >= capacity_;
+  }
+
+  Status UpsizeBuffer(uint64_t capacity) {
+    RETURN_NOT_OK(entries_builder_.Resize(capacity));
+    entries_ = entries_builder_.mutable_data();
+    // All-zero bytes means every entry holds the kSentinel hash (empty).
+    memset(static_cast<void*>(entries_), 0, capacity * sizeof(Entry));
+
+    return Status::OK();
+  }
+
+  Status Upsize(uint64_t new_capacity) {
+    assert(new_capacity > capacity_);
+    uint64_t new_mask = new_capacity - 1;
+    assert((new_capacity & new_mask) == 0);  // it's a power of two
+
+    // Stash old entries and seal builder, effectively resetting the Buffer
+    const Entry* old_entries = entries_;
+    ARROW_ASSIGN_OR_RAISE(auto previous, entries_builder_.FinishWithLength(capacity_));
+    // Allocate new buffer
+    RETURN_NOT_OK(UpsizeBuffer(new_capacity));
+
+    for (uint64_t i = 0; i < capacity_; i++) {
+      const auto& entry = old_entries[i];
+      if (entry) {
+        // Dummy compare function will not be called
+        auto p = Lookup<NoCompare>(entry.h, entries_, new_mask,
+                                   [](const Payload*) { return false; });
+        // Lookup (and CompareEntry) ensure that an
+        // empty slots is always returned
+        assert(!p.second);
+        entries_[p.first] = entry;
+      }
+    }
+    capacity_ = new_capacity;
+    capacity_mask_ = new_mask;
+
+    return Status::OK();
+  }
+
+  // Hashes equal to the sentinel would read as "empty"; remap them.
+  hash_t FixHash(hash_t h) const { return (h == kSentinel) ? 42U : h; }
+
+  // The number of slots available in the hash table array.
+  uint64_t capacity_;
+  uint64_t capacity_mask_;
+  // The number of used slots in the hash table array.
+  uint64_t size_;
+
+  Entry* entries_;
+  TypedBufferBuilder<Entry> entries_builder_;
+};
+
+// XXX typedef memo_index_t int32_t ?
+
+constexpr int32_t kKeyNotFound = -1;
+
+// ----------------------------------------------------------------------
+// A base class for memoization table.
+
+class MemoTable {
+ public:
+  virtual ~MemoTable() = default;
+
+  virtual int32_t size() const = 0;
+};
+
+// ----------------------------------------------------------------------
+// A memoization table for memory-cheap scalar values.
+
+// The memoization table remembers and allows to look up the insertion
+// index for each key.
+ +template class HashTableTemplateType = HashTable> +class ScalarMemoTable : public MemoTable { + public: + explicit ScalarMemoTable(MemoryPool* pool, int64_t entries = 0) + : hash_table_(pool, static_cast(entries)) {} + + int32_t Get(const Scalar& value) const { + auto cmp_func = [value](const Payload* payload) -> bool { + return ScalarHelper::CompareScalars(payload->value, value); + }; + hash_t h = ComputeHash(value); + auto p = hash_table_.Lookup(h, cmp_func); + if (p.second) { + return p.first->payload.memo_index; + } else { + return kKeyNotFound; + } + } + + template + Status GetOrInsert(const Scalar& value, Func1&& on_found, Func2&& on_not_found, + int32_t* out_memo_index) { + auto cmp_func = [value](const Payload* payload) -> bool { + return ScalarHelper::CompareScalars(value, payload->value); + }; + hash_t h = ComputeHash(value); + auto p = hash_table_.Lookup(h, cmp_func); + int32_t memo_index; + if (p.second) { + memo_index = p.first->payload.memo_index; + on_found(memo_index); + } else { + memo_index = size(); + RETURN_NOT_OK(hash_table_.Insert(p.first, h, {value, memo_index})); + on_not_found(memo_index); + } + *out_memo_index = memo_index; + return Status::OK(); + } + + Status GetOrInsert(const Scalar& value, int32_t* out_memo_index) { + return GetOrInsert( + value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index); + } + + int32_t GetNull() const { return null_index_; } + + template + int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) { + int32_t memo_index = GetNull(); + if (memo_index != kKeyNotFound) { + on_found(memo_index); + } else { + null_index_ = memo_index = size(); + on_not_found(memo_index); + } + return memo_index; + } + + int32_t GetOrInsertNull() { + return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {}); + } + + // The number of entries in the memo table +1 if null was added. 
+ // (which is also 1 + the largest memo index) + int32_t size() const override { + return static_cast(hash_table_.size()) + (GetNull() != kKeyNotFound); + } + + // Copy values starting from index `start` into `out_data` + void CopyValues(int32_t start, Scalar* out_data) const { + hash_table_.VisitEntries([=](const HashTableEntry* entry) { + int32_t index = entry->payload.memo_index - start; + if (index >= 0) { + out_data[index] = entry->payload.value; + } + }); + // Zero-initialize the null entry + if (null_index_ != kKeyNotFound) { + int32_t index = null_index_ - start; + if (index >= 0) { + out_data[index] = Scalar{}; + } + } + } + + void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); } + + protected: + struct Payload { + Scalar value; + int32_t memo_index; + }; + + using HashTableType = HashTableTemplateType; + using HashTableEntry = typename HashTableType::Entry; + HashTableType hash_table_; + int32_t null_index_ = kKeyNotFound; + + hash_t ComputeHash(const Scalar& value) const { + return ScalarHelper::ComputeHash(value); + } + + public: + // defined here so that `HashTableType` is visible + // Merge entries from `other_table` into `this->hash_table_`. + Status MergeTable(const ScalarMemoTable& other_table) { + const HashTableType& other_hashtable = other_table.hash_table_; + + other_hashtable.VisitEntries([this](const HashTableEntry* other_entry) { + int32_t unused; + DCHECK_OK(this->GetOrInsert(other_entry->payload.value, &unused)); + }); + // TODO: ARROW-17074 - implement proper error handling + return Status::OK(); + } +}; + +// ---------------------------------------------------------------------- +// A memoization table for small scalar values, using direct indexing + +template +struct SmallScalarTraits {}; + +template <> +struct SmallScalarTraits { + static constexpr int32_t cardinality = 2; + + static uint32_t AsIndex(bool value) { return value ? 
1 : 0; } +}; + +template +struct SmallScalarTraits::value>> { + using Unsigned = typename std::make_unsigned::type; + + static constexpr int32_t cardinality = 1U + std::numeric_limits::max(); + + static uint32_t AsIndex(Scalar value) { return static_cast(value); } +}; + +template class HashTableTemplateType = HashTable> +class SmallScalarMemoTable : public MemoTable { + public: + explicit SmallScalarMemoTable(MemoryPool* pool, int64_t entries = 0) { + std::fill(value_to_index_, value_to_index_ + cardinality + 1, kKeyNotFound); + index_to_value_.reserve(cardinality); + } + + int32_t Get(const Scalar value) const { + auto value_index = AsIndex(value); + return value_to_index_[value_index]; + } + + template + Status GetOrInsert(const Scalar value, Func1&& on_found, Func2&& on_not_found, + int32_t* out_memo_index) { + auto value_index = AsIndex(value); + auto memo_index = value_to_index_[value_index]; + if (memo_index == kKeyNotFound) { + memo_index = static_cast(index_to_value_.size()); + index_to_value_.push_back(value); + value_to_index_[value_index] = memo_index; + DCHECK_LT(memo_index, cardinality + 1); + on_not_found(memo_index); + } else { + on_found(memo_index); + } + *out_memo_index = memo_index; + return Status::OK(); + } + + Status GetOrInsert(const Scalar value, int32_t* out_memo_index) { + return GetOrInsert( + value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index); + } + + int32_t GetNull() const { return value_to_index_[cardinality]; } + + template + int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) { + auto memo_index = GetNull(); + if (memo_index == kKeyNotFound) { + memo_index = value_to_index_[cardinality] = size(); + index_to_value_.push_back(0); + on_not_found(memo_index); + } else { + on_found(memo_index); + } + return memo_index; + } + + int32_t GetOrInsertNull() { + return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {}); + } + + // The number of entries in the memo table + // (which is also 1 + the largest memo index) + 
int32_t size() const override { return static_cast(index_to_value_.size()); } + + // Merge entries from `other_table` into `this`. + Status MergeTable(const SmallScalarMemoTable& other_table) { + for (const Scalar& other_val : other_table.index_to_value_) { + int32_t unused; + RETURN_NOT_OK(this->GetOrInsert(other_val, &unused)); + } + return Status::OK(); + } + + // Copy values starting from index `start` into `out_data` + void CopyValues(int32_t start, Scalar* out_data) const { + DCHECK_GE(start, 0); + DCHECK_LE(static_cast(start), index_to_value_.size()); + int64_t offset = start * static_cast(sizeof(Scalar)); + memcpy(out_data, index_to_value_.data() + offset, (size() - start) * sizeof(Scalar)); + } + + void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); } + + const std::vector& values() const { return index_to_value_; } + + protected: + static constexpr auto cardinality = SmallScalarTraits::cardinality; + static_assert(cardinality <= 256, "cardinality too large for direct-addressed table"); + + uint32_t AsIndex(Scalar value) const { + return SmallScalarTraits::AsIndex(value); + } + + // The last index is reserved for the null element. + int32_t value_to_index_[cardinality + 1]; + std::vector index_to_value_; +}; + +// ---------------------------------------------------------------------- +// A memoization table for variable-sized binary data. + +template +class BinaryMemoTable : public MemoTable { + public: + using builder_offset_type = typename BinaryBuilderT::offset_type; + explicit BinaryMemoTable(MemoryPool* pool, int64_t entries = 0, + int64_t values_size = -1) + : hash_table_(pool, static_cast(entries)), binary_builder_(pool) { + const int64_t data_size = (values_size < 0) ? 
entries * 4 : values_size; + DCHECK_OK(binary_builder_.Resize(entries)); + DCHECK_OK(binary_builder_.ReserveData(data_size)); + } + + int32_t Get(const void* data, builder_offset_type length) const { + hash_t h = ComputeStringHash<0>(data, length); + auto p = Lookup(h, data, length); + if (p.second) { + return p.first->payload.memo_index; + } else { + return kKeyNotFound; + } + } + + int32_t Get(std::string_view value) const { + return Get(value.data(), static_cast(value.length())); + } + + template + Status GetOrInsert(const void* data, builder_offset_type length, Func1&& on_found, + Func2&& on_not_found, int32_t* out_memo_index) { + hash_t h = ComputeStringHash<0>(data, length); + auto p = Lookup(h, data, length); + int32_t memo_index; + if (p.second) { + memo_index = p.first->payload.memo_index; + on_found(memo_index); + } else { + memo_index = size(); + // Insert string value + RETURN_NOT_OK(binary_builder_.Append(static_cast(data), length)); + // Insert hash entry + RETURN_NOT_OK( + hash_table_.Insert(const_cast(p.first), h, {memo_index})); + + on_not_found(memo_index); + } + *out_memo_index = memo_index; + return Status::OK(); + } + + template + Status GetOrInsert(std::string_view value, Func1&& on_found, Func2&& on_not_found, + int32_t* out_memo_index) { + return GetOrInsert(value.data(), static_cast(value.length()), + std::forward(on_found), std::forward(on_not_found), + out_memo_index); + } + + Status GetOrInsert(const void* data, builder_offset_type length, + int32_t* out_memo_index) { + return GetOrInsert( + data, length, [](int32_t i) {}, [](int32_t i) {}, out_memo_index); + } + + Status GetOrInsert(std::string_view value, int32_t* out_memo_index) { + return GetOrInsert(value.data(), static_cast(value.length()), + out_memo_index); + } + + int32_t GetNull() const { return null_index_; } + + template + int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) { + int32_t memo_index = GetNull(); + if (memo_index == kKeyNotFound) { + memo_index = 
null_index_ = size(); + DCHECK_OK(binary_builder_.AppendNull()); + on_not_found(memo_index); + } else { + on_found(memo_index); + } + return memo_index; + } + + int32_t GetOrInsertNull() { + return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {}); + } + + // The number of entries in the memo table + // (which is also 1 + the largest memo index) + int32_t size() const override { + return static_cast(hash_table_.size() + (GetNull() != kKeyNotFound)); + } + + int64_t values_size() const { return binary_builder_.value_data_length(); } + + // Copy (n + 1) offsets starting from index `start` into `out_data` + template + void CopyOffsets(int32_t start, Offset* out_data) const { + DCHECK_LE(start, size()); + + const builder_offset_type* offsets = binary_builder_.offsets_data(); + const builder_offset_type delta = + start < binary_builder_.length() ? offsets[start] : 0; + for (int32_t i = start; i < size(); ++i) { + const builder_offset_type adjusted_offset = offsets[i] - delta; + Offset cast_offset = static_cast(adjusted_offset); + assert(static_cast(cast_offset) == + adjusted_offset); // avoid truncation + *out_data++ = cast_offset; + } + + // Copy last value since BinaryBuilder only materializes it on in Finish() + *out_data = static_cast(binary_builder_.value_data_length() - delta); + } + + template + void CopyOffsets(Offset* out_data) const { + CopyOffsets(0, out_data); + } + + // Copy values starting from index `start` into `out_data` + void CopyValues(int32_t start, uint8_t* out_data) const { + CopyValues(start, -1, out_data); + } + + // Same as above, but check output size in debug mode + void CopyValues(int32_t start, int64_t out_size, uint8_t* out_data) const { + DCHECK_LE(start, size()); + + // The absolute byte offset of `start` value in the binary buffer. 
+ const builder_offset_type offset = binary_builder_.offset(start); + const auto length = binary_builder_.value_data_length() - static_cast(offset); + + if (out_size != -1) { + assert(static_cast(length) <= out_size); + } + + auto view = binary_builder_.GetView(start); + memcpy(out_data, view.data(), length); + } + + void CopyValues(uint8_t* out_data) const { CopyValues(0, -1, out_data); } + + void CopyValues(int64_t out_size, uint8_t* out_data) const { + CopyValues(0, out_size, out_data); + } + + void CopyFixedWidthValues(int32_t start, int32_t width_size, int64_t out_size, + uint8_t* out_data) const { + // This method exists to cope with the fact that the BinaryMemoTable does + // not know the fixed width when inserting the null value. The data + // buffer hold a zero length string for the null value (if found). + // + // Thus, the method will properly inject an empty value of the proper width + // in the output buffer. + // + if (start >= size()) { + return; + } + + int32_t null_index = GetNull(); + if (null_index < start) { + // Nothing to skip, proceed as usual. + CopyValues(start, out_size, out_data); + return; + } + + builder_offset_type left_offset = binary_builder_.offset(start); + + // Ensure that the data length is exactly missing width_size bytes to fit + // in the expected output (n_values * width_size). +#ifndef NDEBUG + int64_t data_length = values_size() - static_cast(left_offset); + assert(data_length + width_size == out_size); + ARROW_UNUSED(data_length); +#endif + + auto in_data = binary_builder_.value_data() + left_offset; + // The null use 0-length in the data, slice the data in 2 and skip by + // width_size in out_data. 
[part_1][width_size][part_2] + auto null_data_offset = binary_builder_.offset(null_index); + auto left_size = null_data_offset - left_offset; + if (left_size > 0) { + memcpy(out_data, in_data + left_offset, left_size); + } + // Zero-initialize the null entry + memset(out_data + left_size, 0, width_size); + + auto right_size = values_size() - static_cast(null_data_offset); + if (right_size > 0) { + // skip the null fixed size value. + auto out_offset = left_size + width_size; + assert(out_data + out_offset + right_size == out_data + out_size); + memcpy(out_data + out_offset, in_data + null_data_offset, right_size); + } + } + + // Visit the stored values in insertion order. + // The visitor function should have the signature `void(std::string_view)` + // or `void(const std::string_view&)`. + template + void VisitValues(int32_t start, VisitFunc&& visit) const { + for (int32_t i = start; i < size(); ++i) { + visit(binary_builder_.GetView(i)); + } + } + + protected: + struct Payload { + int32_t memo_index; + }; + + using HashTableType = HashTable; + using HashTableEntry = typename HashTable::Entry; + HashTableType hash_table_; + BinaryBuilderT binary_builder_; + + int32_t null_index_ = kKeyNotFound; + + std::pair Lookup(hash_t h, const void* data, + builder_offset_type length) const { + auto cmp_func = [&](const Payload* payload) { + std::string_view lhs = binary_builder_.GetView(payload->memo_index); + std::string_view rhs(static_cast(data), length); + return lhs == rhs; + }; + return hash_table_.Lookup(h, cmp_func); + } + + public: + Status MergeTable(const BinaryMemoTable& other_table) { + other_table.VisitValues(0, [this](std::string_view other_value) { + int32_t unused; + DCHECK_OK(this->GetOrInsert(other_value, &unused)); + }); + return Status::OK(); + } +}; + +template +struct HashTraits {}; + +template <> +struct HashTraits { + using MemoTableType = SmallScalarMemoTable; +}; + +template +struct HashTraits> { + using c_type = typename T::c_type; + using 
MemoTableType = SmallScalarMemoTable; +}; + +template +struct HashTraits::value && !is_8bit_int::value>> { + using c_type = typename T::c_type; + using MemoTableType = ScalarMemoTable; +}; + +template +struct HashTraits::value && + !std::is_base_of::value>> { + using MemoTableType = BinaryMemoTable; +}; + +template +struct HashTraits> { + using MemoTableType = BinaryMemoTable; +}; + +template +struct HashTraits::value>> { + using MemoTableType = BinaryMemoTable; +}; + +template +static inline Status ComputeNullBitmap(MemoryPool* pool, const MemoTableType& memo_table, + int64_t start_offset, int64_t* null_count, + std::shared_ptr* null_bitmap) { + int64_t dict_length = static_cast(memo_table.size()) - start_offset; + int64_t null_index = memo_table.GetNull(); + + *null_count = 0; + *null_bitmap = nullptr; + + if (null_index != kKeyNotFound && null_index >= start_offset) { + null_index -= start_offset; + *null_count = 1; + ARROW_ASSIGN_OR_RAISE(*null_bitmap, + internal::BitmapAllButOne(pool, dict_length, null_index)); + } + + return Status::OK(); +} + +struct StringViewHash { + // std::hash compatible hasher for use with std::unordered_* + // (the std::hash specialization provided by nonstd constructs std::string + // temporaries then invokes std::hash against those) + hash_t operator()(std::string_view value) const { + return ComputeStringHash<0>(value.data(), static_cast(value.size())); + } +}; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h new file mode 100644 index 0000000000000000000000000000000000000000..59a2ac7109a3c08b4cd265f88b7ca0ecffe5ae9d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h @@ -0,0 +1,137 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/status.h" + +#include "arrow/util/visibility.h" + +namespace arrow { + +class DataType; +struct ArraySpan; +struct Scalar; + +namespace internal { + +ARROW_EXPORT +uint8_t DetectUIntWidth(const uint64_t* values, int64_t length, uint8_t min_width = 1); + +ARROW_EXPORT +uint8_t DetectUIntWidth(const uint64_t* values, const uint8_t* valid_bytes, + int64_t length, uint8_t min_width = 1); + +ARROW_EXPORT +uint8_t DetectIntWidth(const int64_t* values, int64_t length, uint8_t min_width = 1); + +ARROW_EXPORT +uint8_t DetectIntWidth(const int64_t* values, const uint8_t* valid_bytes, int64_t length, + uint8_t min_width = 1); + +ARROW_EXPORT +void DowncastInts(const int64_t* source, int8_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastInts(const int64_t* source, int16_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastInts(const int64_t* source, int32_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastInts(const int64_t* source, int64_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastUInts(const uint64_t* source, uint8_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastUInts(const uint64_t* source, uint16_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastUInts(const uint64_t* source, uint32_t* 
dest, int64_t length); + +ARROW_EXPORT +void DowncastUInts(const uint64_t* source, uint64_t* dest, int64_t length); + +ARROW_EXPORT +void UpcastInts(const int32_t* source, int64_t* dest, int64_t length); + +template +inline typename std::enable_if<(sizeof(InputInt) >= sizeof(OutputInt))>::type CastInts( + const InputInt* source, OutputInt* dest, int64_t length) { + DowncastInts(source, dest, length); +} + +template +inline typename std::enable_if<(sizeof(InputInt) < sizeof(OutputInt))>::type CastInts( + const InputInt* source, OutputInt* dest, int64_t length) { + UpcastInts(source, dest, length); +} + +template +ARROW_EXPORT void TransposeInts(const InputInt* source, OutputInt* dest, int64_t length, + const int32_t* transpose_map); + +ARROW_EXPORT +Status TransposeInts(const DataType& src_type, const DataType& dest_type, + const uint8_t* src, uint8_t* dest, int64_t src_offset, + int64_t dest_offset, int64_t length, const int32_t* transpose_map); + +/// \brief Do vectorized boundschecking of integer-type array indices. The +/// indices must be nonnegative and strictly less than the passed upper +/// limit (which is usually the length of an array that is being indexed-into). +ARROW_EXPORT +Status CheckIndexBounds(const ArraySpan& values, uint64_t upper_limit); + +/// \brief Boundscheck integer values to determine if they are all between the +/// passed upper and lower limits (inclusive). Upper and lower bounds must be +/// the same type as the data and are not currently casted. +ARROW_EXPORT +Status CheckIntegersInRange(const ArraySpan& values, const Scalar& bound_lower, + const Scalar& bound_upper); + +/// \brief Use CheckIntegersInRange to determine whether the passed integers +/// can fit safely in the passed integer type. This helps quickly determine if +/// integer narrowing (e.g. int64->int32) is safe to do. 
+ARROW_EXPORT +Status IntegersCanFit(const ArraySpan& values, const DataType& target_type); + +/// \brief Convenience for boundschecking a single Scalar value +ARROW_EXPORT +Status IntegersCanFit(const Scalar& value, const DataType& target_type); + +/// Upcast an integer to the largest possible width (currently 64 bits) + +template +typename std::enable_if< + std::is_integral::value && std::is_signed::value, int64_t>::type +UpcastInt(Integer v) { + return v; +} + +template +typename std::enable_if< + std::is_integral::value && std::is_unsigned::value, uint64_t>::type +UpcastInt(Integer v) { + return v; +} + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h new file mode 100644 index 0000000000000000000000000000000000000000..892641d4bc52f61e5aa88b48f7d6651862747c4a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h @@ -0,0 +1,452 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#ifndef _WIN32 +# define ARROW_HAVE_SIGACTION 1 +#endif + +#include +#include +#include +#include +#include +#include + +#if ARROW_HAVE_SIGACTION +# include // Needed for struct sigaction +#endif + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/windows_fixup.h" + +namespace arrow::internal { + +// NOTE: 8-bit path strings on Windows are encoded using UTF-8. +// Using MBCS would fail encoding some paths. + +#if defined(_WIN32) +using NativePathString = std::wstring; +#else +using NativePathString = std::string; +#endif + +class ARROW_EXPORT PlatformFilename { + public: + struct Impl; + + ~PlatformFilename(); + PlatformFilename(); + PlatformFilename(const PlatformFilename&); + PlatformFilename(PlatformFilename&&); + PlatformFilename& operator=(const PlatformFilename&); + PlatformFilename& operator=(PlatformFilename&&); + explicit PlatformFilename(NativePathString path); + explicit PlatformFilename(const NativePathString::value_type* path); + + const NativePathString& ToNative() const; + std::string ToString() const; + + PlatformFilename Parent() const; + Result Real() const; + + // These functions can fail for character encoding reasons. + static Result FromString(std::string_view file_name); + Result Join(std::string_view child_name) const; + + PlatformFilename Join(const PlatformFilename& child_name) const; + + bool operator==(const PlatformFilename& other) const; + bool operator!=(const PlatformFilename& other) const; + + // Made public to avoid the proliferation of friend declarations. + const Impl* impl() const { return impl_.get(); } + + private: + std::unique_ptr impl_; + + explicit PlatformFilename(Impl impl); +}; + +/// Create a directory if it doesn't exist. +/// +/// Return whether the directory was created. +ARROW_EXPORT +Result CreateDir(const PlatformFilename& dir_path); + +/// Create a directory and its parents if it doesn't exist. 
+/// +/// Return whether the directory was created. +ARROW_EXPORT +Result CreateDirTree(const PlatformFilename& dir_path); + +/// Delete a directory's contents (but not the directory itself) if it exists. +/// +/// Return whether the directory existed. +ARROW_EXPORT +Result DeleteDirContents(const PlatformFilename& dir_path, + bool allow_not_found = true); + +/// Delete a directory tree if it exists. +/// +/// Return whether the directory existed. +ARROW_EXPORT +Result DeleteDirTree(const PlatformFilename& dir_path, bool allow_not_found = true); + +// Non-recursively list the contents of the given directory. +// The returned names are the children's base names, not including dir_path. +ARROW_EXPORT +Result> ListDir(const PlatformFilename& dir_path); + +/// Delete a file if it exists. +/// +/// Return whether the file existed. +ARROW_EXPORT +Result DeleteFile(const PlatformFilename& file_path, bool allow_not_found = true); + +/// Return whether a file exists. +ARROW_EXPORT +Result FileExists(const PlatformFilename& path); + +// TODO expose this more publicly to make it available from io/file.h? +/// A RAII wrapper for a file descriptor. +/// +/// The underlying file descriptor is automatically closed on destruction. +/// Moving is supported with well-defined semantics. +/// Furthermore, closing is idempotent. +class ARROW_EXPORT FileDescriptor { + public: + FileDescriptor() = default; + explicit FileDescriptor(int fd) : fd_(fd) {} + FileDescriptor(FileDescriptor&&); + FileDescriptor& operator=(FileDescriptor&&); + + ~FileDescriptor(); + + Status Close(); + + /// May return -1 if closed or default-initialized + int fd() const { return fd_.load(); } + + /// Detach and return the underlying file descriptor + int Detach(); + + bool closed() const { return fd_.load() == -1; } + + protected: + static void CloseFromDestructor(int fd); + + std::atomic fd_{-1}; +}; + +/// Open a file for reading and return a file descriptor. 
+ARROW_EXPORT +Result FileOpenReadable(const PlatformFilename& file_name); + +/// Open a file for writing and return a file descriptor. +ARROW_EXPORT +Result FileOpenWritable(const PlatformFilename& file_name, + bool write_only = true, bool truncate = true, + bool append = false); + +/// Read from current file position. Return number of bytes read. +ARROW_EXPORT +Result FileRead(int fd, uint8_t* buffer, int64_t nbytes); +/// Read from given file position. Return number of bytes read. +ARROW_EXPORT +Result FileReadAt(int fd, uint8_t* buffer, int64_t position, int64_t nbytes); + +ARROW_EXPORT +Status FileWrite(int fd, const uint8_t* buffer, const int64_t nbytes); +ARROW_EXPORT +Status FileTruncate(int fd, const int64_t size); + +ARROW_EXPORT +Status FileSeek(int fd, int64_t pos); +ARROW_EXPORT +Status FileSeek(int fd, int64_t pos, int whence); +ARROW_EXPORT +Result FileTell(int fd); +ARROW_EXPORT +Result FileGetSize(int fd); + +ARROW_EXPORT +Status FileClose(int fd); + +struct Pipe { + FileDescriptor rfd; + FileDescriptor wfd; + + Status Close() { return rfd.Close() & wfd.Close(); } +}; + +ARROW_EXPORT +Result CreatePipe(); + +ARROW_EXPORT +Status SetPipeFileDescriptorNonBlocking(int fd); + +class ARROW_EXPORT SelfPipe { + public: + static Result> Make(bool signal_safe); + virtual ~SelfPipe(); + + /// \brief Wait for a wakeup. + /// + /// Status::Invalid is returned if the pipe has been shutdown. + /// Otherwise the next sent payload is returned. + virtual Result Wait() = 0; + + /// \brief Wake up the pipe by sending a payload. + /// + /// This method is async-signal-safe if `signal_safe` was set to true. + virtual void Send(uint64_t payload) = 0; + + /// \brief Wake up the pipe and shut it down. 
+ virtual Status Shutdown() = 0; +}; + +ARROW_EXPORT +int64_t GetPageSize(); + +struct MemoryRegion { + void* addr; + size_t size; +}; + +ARROW_EXPORT +Status MemoryMapRemap(void* addr, size_t old_size, size_t new_size, int fildes, + void** new_addr); +ARROW_EXPORT +Status MemoryAdviseWillNeed(const std::vector& regions); + +ARROW_EXPORT +Result GetEnvVar(const char* name); +ARROW_EXPORT +Result GetEnvVar(const std::string& name); +ARROW_EXPORT +Result GetEnvVarNative(const char* name); +ARROW_EXPORT +Result GetEnvVarNative(const std::string& name); + +ARROW_EXPORT +Status SetEnvVar(const char* name, const char* value); +ARROW_EXPORT +Status SetEnvVar(const std::string& name, const std::string& value); +ARROW_EXPORT +Status DelEnvVar(const char* name); +ARROW_EXPORT +Status DelEnvVar(const std::string& name); + +ARROW_EXPORT +std::string ErrnoMessage(int errnum); +#if _WIN32 +ARROW_EXPORT +std::string WinErrorMessage(int errnum); +#endif + +ARROW_EXPORT +std::shared_ptr StatusDetailFromErrno(int errnum); +ARROW_EXPORT +std::optional ErrnoFromStatusDetail(const StatusDetail& detail); +#if _WIN32 +ARROW_EXPORT +std::shared_ptr StatusDetailFromWinError(int errnum); +#endif +ARROW_EXPORT +std::shared_ptr StatusDetailFromSignal(int signum); + +template +Status StatusFromErrno(int errnum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromErrno(errnum), + std::forward(args)...); +} + +template +Status IOErrorFromErrno(int errnum, Args&&... args) { + return StatusFromErrno(errnum, StatusCode::IOError, std::forward(args)...); +} + +#if _WIN32 +template +Status StatusFromWinError(int errnum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromWinError(errnum), + std::forward(args)...); +} + +template +Status IOErrorFromWinError(int errnum, Args&&... 
args) { + return StatusFromWinError(errnum, StatusCode::IOError, std::forward(args)...); +} +#endif + +template +Status StatusFromSignal(int signum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromSignal(signum), + std::forward(args)...); +} + +template +Status CancelledFromSignal(int signum, Args&&... args) { + return StatusFromSignal(signum, StatusCode::Cancelled, std::forward(args)...); +} + +ARROW_EXPORT +int ErrnoFromStatus(const Status&); + +// Always returns 0 on non-Windows platforms (for Python). +ARROW_EXPORT +int WinErrorFromStatus(const Status&); + +ARROW_EXPORT +int SignalFromStatus(const Status&); + +class ARROW_EXPORT TemporaryDir { + public: + ~TemporaryDir(); + + /// '/'-terminated path to the temporary dir + const PlatformFilename& path() { return path_; } + + /// Create a temporary subdirectory in the system temporary dir, + /// named starting with `prefix`. + static Result> Make(const std::string& prefix); + + private: + PlatformFilename path_; + + explicit TemporaryDir(PlatformFilename&&); +}; + +class ARROW_EXPORT SignalHandler { + public: + using Callback = void (*)(int); + + SignalHandler(); + explicit SignalHandler(Callback cb); +#if ARROW_HAVE_SIGACTION + explicit SignalHandler(const struct sigaction& sa); +#endif + + Callback callback() const; +#if ARROW_HAVE_SIGACTION + const struct sigaction& action() const; +#endif + + protected: +#if ARROW_HAVE_SIGACTION + // Storing the full sigaction allows to restore the entire signal handling + // configuration. + struct sigaction sa_; +#else + Callback cb_; +#endif +}; + +/// \brief Return the current handler for the given signal number. +ARROW_EXPORT +Result GetSignalHandler(int signum); + +/// \brief Set a new handler for the given signal number. +/// +/// The old signal handler is returned. +ARROW_EXPORT +Result SetSignalHandler(int signum, const SignalHandler& handler); + +/// \brief Reinstate the signal handler +/// +/// For use in signal handlers. 
This is needed on platforms without sigaction() +/// such as Windows, as the default signal handler is restored there as +/// soon as a signal is raised. +ARROW_EXPORT +void ReinstateSignalHandler(int signum, SignalHandler::Callback handler); + +/// \brief Send a signal to the current process +/// +/// The thread which will receive the signal is unspecified. +ARROW_EXPORT +Status SendSignal(int signum); + +/// \brief Send a signal to the given thread +/// +/// This function isn't supported on Windows. +ARROW_EXPORT +Status SendSignalToThread(int signum, uint64_t thread_id); + +/// \brief Get an unpredictable random seed +/// +/// This function may be slightly costly, so should only be used to initialize +/// a PRNG, not to generate a large amount of random numbers. +/// It is better to use this function rather than std::random_device, unless +/// absolutely necessary (e.g. to generate a cryptographic secret). +ARROW_EXPORT +int64_t GetRandomSeed(); + +/// \brief Get the current thread id +/// +/// In addition to having the same properties as std::thread, the returned value +/// is a regular integer value, which is more convenient than an opaque type. +ARROW_EXPORT +uint64_t GetThreadId(); + +/// \brief Get the current memory used by the current process in bytes +/// +/// This function supports Windows, Linux, and Mac and will return 0 otherwise +ARROW_EXPORT +int64_t GetCurrentRSS(); + +/// \brief Get the total memory available to the system in bytes +/// +/// This function supports Windows, Linux, and Mac and will return 0 otherwise +ARROW_EXPORT +int64_t GetTotalMemoryBytes(); + +/// \brief Load a dynamic library +/// +/// This wraps dlopen() except on Windows, where LoadLibrary() is called. +/// These two platforms handle absolute paths consistently; relative paths +/// or the library's bare name may be handled but inconsistently. +/// +/// \return An opaque handle for the dynamic library, which can be used for +/// subsequent symbol lookup. 
Nullptr will never be returned; instead +/// an error will be raised. +ARROW_EXPORT Result LoadDynamicLibrary(const PlatformFilename& path); + +/// \brief Load a dynamic library +/// +/// An overload taking null terminated string. +ARROW_EXPORT Result LoadDynamicLibrary(const char* path); + +/// \brief Retrieve a symbol by name from a library handle. +/// +/// This wraps dlsym() except on Windows, where GetProcAddress() is called. +/// +/// \return The address associated with the named symbol. Nullptr will never be +/// returned; instead an error will be raised. +ARROW_EXPORT Result GetSymbol(void* handle, const char* name); + +template +Result GetSymbolAs(void* handle, const char* name) { + ARROW_ASSIGN_OR_RAISE(void* sym, GetSymbol(handle, name)); + return reinterpret_cast(sym); +} + +} // namespace arrow::internal diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..5025799b9a37254835c41d3e66751e6337c4eff6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h @@ -0,0 +1,575 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/compare.h" +#include "arrow/util/functional.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +template +class Iterator; + +template +struct IterationTraits { + /// \brief a reserved value which indicates the end of iteration. By + /// default this is NULLPTR since most iterators yield pointer types. + /// Specialize IterationTraits if different end semantics are required. + /// + /// Note: This should not be used to determine if a given value is a + /// terminal value. Use IsIterationEnd (which uses IsEnd) instead. This + /// is only for returning terminal values. + static T End() { return T(NULLPTR); } + + /// \brief Checks to see if the value is a terminal value. + /// A method is used here since T is not necessarily comparable in many + /// cases even though it has a distinct final value + static bool IsEnd(const T& val) { return val == End(); } +}; + +template +T IterationEnd() { + return IterationTraits::End(); +} + +template +bool IsIterationEnd(const T& val) { + return IterationTraits::IsEnd(val); +} + +template +struct IterationTraits> { + /// \brief by default when iterating through a sequence of optional, + /// nullopt indicates the end of iteration. + /// Specialize IterationTraits if different end semantics are required. + static std::optional End() { return std::nullopt; } + + /// \brief by default when iterating through a sequence of optional, + /// nullopt (!has_value()) indicates the end of iteration. + /// Specialize IterationTraits if different end semantics are required. 
+ static bool IsEnd(const std::optional& val) { return !val.has_value(); } + + // TODO(bkietz) The range-for loop over Iterator> yields + // Result> which is unnecessary (since only the unyielded end optional + // is nullopt. Add IterationTraits::GetRangeElement() to handle this case +}; + +/// \brief A generic Iterator that can return errors +template +class Iterator : public util::EqualityComparable> { + public: + /// \brief Iterator may be constructed from any type which has a member function + /// with signature Result Next(); + /// End of iterator is signalled by returning IteratorTraits::End(); + /// + /// The argument is moved or copied to the heap and kept in a unique_ptr. Only + /// its destructor and its Next method (which are stored in function pointers) are + /// referenced after construction. + /// + /// This approach is used to dodge MSVC linkage hell (ARROW-6244, ARROW-6558) when using + /// an abstract template base class: instead of being inlined as usual for a template + /// function the base's virtual destructor will be exported, leading to multiple + /// definition errors when linking to any other TU where the base is instantiated. + template + explicit Iterator(Wrapped has_next) + : ptr_(new Wrapped(std::move(has_next)), Delete), next_(Next) {} + + Iterator() : ptr_(NULLPTR, [](void*) {}) {} + + /// \brief Return the next element of the sequence, IterationTraits::End() when the + /// iteration is completed. + Result Next() { + if (ptr_) { + auto next_result = next_(ptr_.get()); + if (next_result.ok() && IsIterationEnd(next_result.ValueUnsafe())) { + ptr_.reset(NULLPTR); + } + return next_result; + } else { + return IterationTraits::End(); + } + } + + /// Pass each element of the sequence to a visitor. Will return any error status + /// returned by the visitor, terminating iteration. 
+ template + Status Visit(Visitor&& visitor) { + for (;;) { + ARROW_ASSIGN_OR_RAISE(auto value, Next()); + + if (IsIterationEnd(value)) break; + + ARROW_RETURN_NOT_OK(visitor(std::move(value))); + } + + return Status::OK(); + } + + /// Iterators will only compare equal if they are both null. + /// Equality comparability is required to make an Iterator of Iterators + /// (to check for the end condition). + bool Equals(const Iterator& other) const { return ptr_ == other.ptr_; } + + explicit operator bool() const { return ptr_ != NULLPTR; } + + class RangeIterator { + public: + RangeIterator() : value_(IterationTraits::End()) {} + + explicit RangeIterator(Iterator i) + : value_(IterationTraits::End()), + iterator_(std::make_shared(std::move(i))) { + Next(); + } + + bool operator!=(const RangeIterator& other) const { return value_ != other.value_; } + + RangeIterator& operator++() { + Next(); + return *this; + } + + Result operator*() { + ARROW_RETURN_NOT_OK(value_.status()); + + auto value = std::move(value_); + value_ = IterationTraits::End(); + return value; + } + + private: + void Next() { + if (!value_.ok()) { + value_ = IterationTraits::End(); + return; + } + value_ = iterator_->Next(); + } + + Result value_; + std::shared_ptr iterator_; + }; + + RangeIterator begin() { return RangeIterator(std::move(*this)); } + + RangeIterator end() { return RangeIterator(); } + + /// \brief Move every element of this iterator into a vector. + Result> ToVector() { + std::vector out; + for (auto maybe_element : *this) { + ARROW_ASSIGN_OR_RAISE(auto element, maybe_element); + out.push_back(std::move(element)); + } + return out; + } + + private: + /// Implementation of deleter for ptr_: Casts from void* to the wrapped type and + /// deletes that. + template + static void Delete(void* ptr) { + delete static_cast(ptr); + } + + /// Implementation of Next: Casts from void* to the wrapped type and invokes that + /// type's Next member function. 
+ template + static Result Next(void* ptr) { + return static_cast(ptr)->Next(); + } + + /// ptr_ is a unique_ptr to void with a custom deleter: a function pointer which first + /// casts from void* to a pointer to the wrapped type then deletes that. + std::unique_ptr ptr_; + + /// next_ is a function pointer which first casts from void* to a pointer to the wrapped + /// type then invokes its Next member function. + Result (*next_)(void*) = NULLPTR; +}; + +template +struct TransformFlow { + using YieldValueType = T; + + TransformFlow(YieldValueType value, bool ready_for_next) + : finished_(false), + ready_for_next_(ready_for_next), + yield_value_(std::move(value)) {} + TransformFlow(bool finished, bool ready_for_next) + : finished_(finished), ready_for_next_(ready_for_next), yield_value_() {} + + bool HasValue() const { return yield_value_.has_value(); } + bool Finished() const { return finished_; } + bool ReadyForNext() const { return ready_for_next_; } + T Value() const { return *yield_value_; } + + bool finished_ = false; + bool ready_for_next_ = false; + std::optional yield_value_; +}; + +struct TransformFinish { + template + operator TransformFlow() && { // NOLINT explicit + return TransformFlow(true, true); + } +}; + +struct TransformSkip { + template + operator TransformFlow() && { // NOLINT explicit + return TransformFlow(false, true); + } +}; + +template +TransformFlow TransformYield(T value = {}, bool ready_for_next = true) { + return TransformFlow(std::move(value), ready_for_next); +} + +template +using Transformer = std::function>(T)>; + +template +class TransformIterator { + public: + explicit TransformIterator(Iterator it, Transformer transformer) + : it_(std::move(it)), + transformer_(std::move(transformer)), + last_value_(), + finished_() {} + + Result Next() { + while (!finished_) { + ARROW_ASSIGN_OR_RAISE(std::optional next, Pump()); + if (next.has_value()) { + return std::move(*next); + } + ARROW_ASSIGN_OR_RAISE(last_value_, it_.Next()); + } + 
return IterationTraits::End(); + } + + private: + // Calls the transform function on the current value. Can return in several ways + // * If the next value is requested (e.g. skip) it will return an empty optional + // * If an invalid status is encountered that will be returned + // * If finished it will return IterationTraits::End() + // * If a value is returned by the transformer that will be returned + Result> Pump() { + if (!finished_ && last_value_.has_value()) { + auto next_res = transformer_(*last_value_); + if (!next_res.ok()) { + finished_ = true; + return next_res.status(); + } + auto next = *next_res; + if (next.ReadyForNext()) { + if (IsIterationEnd(*last_value_)) { + finished_ = true; + } + last_value_.reset(); + } + if (next.Finished()) { + finished_ = true; + } + if (next.HasValue()) { + return next.Value(); + } + } + if (finished_) { + return IterationTraits::End(); + } + return std::nullopt; + } + + Iterator it_; + Transformer transformer_; + std::optional last_value_; + bool finished_ = false; +}; + +/// \brief Transforms an iterator according to a transformer, returning a new Iterator. +/// +/// The transformer will be called on each element of the source iterator and for each +/// call it can yield a value, skip, or finish the iteration. When yielding a value the +/// transformer can choose to consume the source item (the default, ready_for_next = true) +/// or to keep it and it will be called again on the same value. +/// +/// This is essentially a more generic form of the map operation that can return 0, 1, or +/// many values for each of the source items. +/// +/// The transformer will be exposed to the end of the source sequence +/// (IterationTraits::End) in case it needs to return some penultimate item(s). +/// +/// Any invalid status returned by the transformer will be returned immediately. 
+template +Iterator MakeTransformedIterator(Iterator it, Transformer op) { + return Iterator(TransformIterator(std::move(it), std::move(op))); +} + +template +struct IterationTraits> { + // The end condition for an Iterator of Iterators is a default constructed (null) + // Iterator. + static Iterator End() { return Iterator(); } + static bool IsEnd(const Iterator& val) { return !val; } +}; + +template +class FunctionIterator { + public: + explicit FunctionIterator(Fn fn) : fn_(std::move(fn)) {} + + Result Next() { return fn_(); } + + private: + Fn fn_; +}; + +/// \brief Construct an Iterator which invokes a callable on Next() +template ::ValueType> +Iterator MakeFunctionIterator(Fn fn) { + return Iterator(FunctionIterator(std::move(fn))); +} + +template +Iterator MakeEmptyIterator() { + return MakeFunctionIterator([]() -> Result { return IterationTraits::End(); }); +} + +template +Iterator MakeErrorIterator(Status s) { + return MakeFunctionIterator([s]() -> Result { + ARROW_RETURN_NOT_OK(s); + return IterationTraits::End(); + }); +} + +/// \brief Simple iterator which yields the elements of a std::vector +template +class VectorIterator { + public: + explicit VectorIterator(std::vector v) : elements_(std::move(v)) {} + + Result Next() { + if (i_ == elements_.size()) { + return IterationTraits::End(); + } + return std::move(elements_[i_++]); + } + + private: + std::vector elements_; + size_t i_ = 0; +}; + +template +Iterator MakeVectorIterator(std::vector v) { + return Iterator(VectorIterator(std::move(v))); +} + +/// \brief Simple iterator which yields *pointers* to the elements of a std::vector. 
+/// This is provided to support T where IterationTraits::End is not specialized +template +class VectorPointingIterator { + public: + explicit VectorPointingIterator(std::vector v) : elements_(std::move(v)) {} + + Result Next() { + if (i_ == elements_.size()) { + return NULLPTR; + } + return &elements_[i_++]; + } + + private: + std::vector elements_; + size_t i_ = 0; +}; + +template +Iterator MakeVectorPointingIterator(std::vector v) { + return Iterator(VectorPointingIterator(std::move(v))); +} + +/// \brief MapIterator takes ownership of an iterator and a function to apply +/// on every element. The mapped function is not allowed to fail. +template +class MapIterator { + public: + explicit MapIterator(Fn map, Iterator it) + : map_(std::move(map)), it_(std::move(it)) {} + + Result Next() { + ARROW_ASSIGN_OR_RAISE(I i, it_.Next()); + + if (IsIterationEnd(i)) { + return IterationTraits::End(); + } + + return map_(std::move(i)); + } + + private: + Fn map_; + Iterator it_; +}; + +/// \brief MapIterator takes ownership of an iterator and a function to apply +/// on every element. The mapped function is not allowed to fail. +template , + typename To = internal::call_traits::return_type> +Iterator MakeMapIterator(Fn map, Iterator it) { + return Iterator(MapIterator(std::move(map), std::move(it))); +} + +/// \brief Like MapIterator, but where the function can fail. 
+template , + typename To = typename internal::call_traits::return_type::ValueType> +Iterator MakeMaybeMapIterator(Fn map, Iterator it) { + return Iterator(MapIterator(std::move(map), std::move(it))); +} + +struct FilterIterator { + enum Action { ACCEPT, REJECT }; + + template + static Result> Reject() { + return std::make_pair(IterationTraits::End(), REJECT); + } + + template + static Result> Accept(To out) { + return std::make_pair(std::move(out), ACCEPT); + } + + template + static Result> MaybeAccept(Result maybe_out) { + return std::move(maybe_out).Map(Accept); + } + + template + static Result> Error(Status s) { + return s; + } + + template + class Impl { + public: + explicit Impl(Fn filter, Iterator it) : filter_(filter), it_(std::move(it)) {} + + Result Next() { + To out = IterationTraits::End(); + Action action; + + for (;;) { + ARROW_ASSIGN_OR_RAISE(From i, it_.Next()); + + if (IsIterationEnd(i)) { + return IterationTraits::End(); + } + + ARROW_ASSIGN_OR_RAISE(std::tie(out, action), filter_(std::move(i))); + + if (action == ACCEPT) return out; + } + } + + private: + Fn filter_; + Iterator it_; + }; +}; + +/// \brief Like MapIterator, but where the function can fail or reject elements. +template < + typename Fn, typename From = typename internal::call_traits::argument_type<0, Fn>, + typename Ret = typename internal::call_traits::return_type::ValueType, + typename To = typename std::tuple_element<0, Ret>::type, + typename Enable = typename std::enable_if::type, FilterIterator::Action>::value>::type> +Iterator MakeFilterIterator(Fn filter, Iterator it) { + return Iterator( + FilterIterator::Impl(std::move(filter), std::move(it))); +} + +/// \brief FlattenIterator takes an iterator generating iterators and yields a +/// unified iterator that flattens/concatenates in a single stream. 
+template +class FlattenIterator { + public: + explicit FlattenIterator(Iterator> it) : parent_(std::move(it)) {} + + Result Next() { + if (IsIterationEnd(child_)) { + // Pop from parent's iterator. + ARROW_ASSIGN_OR_RAISE(child_, parent_.Next()); + + // Check if final iteration reached. + if (IsIterationEnd(child_)) { + return IterationTraits::End(); + } + + return Next(); + } + + // Pop from child_ and check for depletion. + ARROW_ASSIGN_OR_RAISE(T out, child_.Next()); + if (IsIterationEnd(out)) { + // Reset state such that we pop from parent on the recursive call + child_ = IterationTraits>::End(); + + return Next(); + } + + return out; + } + + private: + Iterator> parent_; + Iterator child_ = IterationTraits>::End(); +}; + +template +Iterator MakeFlattenIterator(Iterator> it) { + return Iterator(FlattenIterator(std::move(it))); +} + +template +Iterator MakeIteratorFromReader( + const std::shared_ptr& reader) { + return MakeFunctionIterator([reader] { return reader->Next(); }); +} + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/list_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/list_util.h new file mode 100644 index 0000000000000000000000000000000000000000..58deb8019d94155e4488af7e3047e599abb7197b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/list_util.h @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/array/data.h" +#include "arrow/result.h" + +namespace arrow { +namespace list_util { +namespace internal { + +/// \brief Calculate the smallest continuous range of values used by the +/// var-length list-like input (list, map and list-view types). +/// +/// \param input The input array such that is_var_length_list_like(input.type) +/// is true +/// \return A pair of (offset, length) describing the range +ARROW_EXPORT Result> RangeOfValuesUsed( + const ArraySpan& input); + +/// \brief Calculate the sum of the sizes of all valid lists or list-views +/// +/// This is usually the same as the length of the RangeOfValuesUsed() range, but +/// it can be: +/// - Smaller: when the child array contains many values that are not +/// referenced by the lists or list-views in the parent array +/// - Greater: when the list-views share child array ranges +/// +/// \param input The input array such that is_var_length_list_like(input.type) +/// is true +/// \return The sum of all list or list-view sizes +ARROW_EXPORT Result SumOfLogicalListSizes(const ArraySpan& input); + +} // namespace internal + +} // namespace list_util +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/logger.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/logger.h new file mode 100644 index 0000000000000000000000000000000000000000..7832f4a4c223270eb92a1912a22c2e1e81e90b90 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/logger.h @@ -0,0 +1,186 
@@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/logging.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +struct SourceLocation { + const char* file = ""; + int line = 0; +}; + +struct LogDetails { + ArrowLogLevel severity = ArrowLogLevel::ARROW_INFO; + std::chrono::system_clock::time_point timestamp = std::chrono::system_clock::now(); + SourceLocation source_location{}; + std::string_view message = ""; +}; + +/// \brief A base interface for custom loggers. +/// +/// Loggers can be added to the LoggerRegistry for global access or directly provided to +/// certain logging utilities. 
+class Logger { + public: + virtual ~Logger() = default; + + virtual void Log(const LogDetails& details) = 0; + + virtual bool Flush(std::chrono::microseconds timeout) { return true; } + bool Flush() { return this->Flush(std::chrono::microseconds::max()); } + + virtual bool is_enabled() const { return true; } + + virtual ArrowLogLevel severity_threshold() const { return ArrowLogLevel::ARROW_TRACE; } +}; + +/// \brief Creates a simple logger that redirects output to std::cerr +ARROW_EXPORT std::shared_ptr MakeOStreamLogger(ArrowLogLevel severity_threshold); +/// \brief Creates a simple logger that redirects output to the provided ostream +ARROW_EXPORT std::shared_ptr MakeOStreamLogger(ArrowLogLevel severity_threshold, + std::ostream& sink); + +class ARROW_EXPORT LoggerRegistry { + public: + /// \brief Add a logger to the registry with the associated name + /// + /// Returns Invalid if a logger with the provided name already exists. Users should call + /// `UnregisterLogger` first if they wish to overwrite it. + static Status RegisterLogger(std::string_view name, std::shared_ptr logger); + + /// \brief Remove a logger from the registry + static void UnregisterLogger(std::string_view name); + + /// \brief Return the logger associated with the provided name + /// + /// If `name` is empty, the default logger is returned. 
If `name` doesn't match any of + /// the registered loggers then a non-null noop logger is returned + static std::shared_ptr GetLogger(std::string_view name = ""); + + /// \brief Return the default logger + static std::shared_ptr GetDefaultLogger(); + /// \brief Set the default logger + static void SetDefaultLogger(std::shared_ptr logger); +}; + +/// \brief Represents a single log record to be emitted by an underlying logger +class ARROW_EXPORT LogMessage { + public: + /// \brief Construct a LogMessage with the provided underlying logger + LogMessage(ArrowLogLevel severity, std::shared_ptr logger, + SourceLocation source_location = {}); + /// \brief Construct a LogMessage with the provided logger name, which will be used to + /// find an underlying logger in the registry + LogMessage(ArrowLogLevel severity, std::string_view logger_name, + SourceLocation source_location = {}); + + std::ostream& Stream(); + + // Convenience method - mainly for use in ARROW_LOG_* macros. This prevents unnecessary + // argument evaluation when log statements are stripped in certain builds + template + LogMessage& Append(Args&&... args) { + if constexpr (sizeof...(Args) > 0) { + if (CheckIsEnabled()) { + (Stream() << ... << args); + } + } + return *this; + } + + private: + bool CheckIsEnabled(); + + class Impl; + std::shared_ptr impl_; +}; + +} // namespace util +} // namespace arrow + +// For the following macros, log statements with a lower severity than +// `ARROW_MINIMUM_LOG_LEVEL` will be stripped from the build +#ifndef ARROW_MINIMUM_LOG_LEVEL +# define ARROW_MINIMUM_LOG_LEVEL -1000 +#endif + +#define ARROW_LOGGER_INTERNAL(LOGGER, LEVEL) \ + (::arrow::util::LogMessage(::arrow::util::ArrowLogLevel::ARROW_##LEVEL, LOGGER, \ + ::arrow::util::SourceLocation{__FILE__, __LINE__})) + +static_assert(static_cast(::arrow::util::ArrowLogLevel::ARROW_TRACE) == -2); +#if ARROW_MINIMUM_LOG_LEVEL <= -2 +# define ARROW_LOGGER_TRACE(LOGGER, ...) 
\ + (ARROW_LOGGER_INTERNAL(LOGGER, TRACE).Append(__VA_ARGS__)) +#else +# define ARROW_LOGGER_TRACE(...) ARROW_UNUSED(0) +#endif + +static_assert(static_cast(::arrow::util::ArrowLogLevel::ARROW_DEBUG) == -1); +#if ARROW_MINIMUM_LOG_LEVEL <= -1 +# define ARROW_LOGGER_DEBUG(LOGGER, ...) \ + (ARROW_LOGGER_INTERNAL(LOGGER, DEBUG).Append(__VA_ARGS__)) +#else +# define ARROW_LOGGER_DEBUG(...) ARROW_UNUSED(0) +#endif + +static_assert(static_cast(::arrow::util::ArrowLogLevel::ARROW_INFO) == 0); +#if ARROW_MINIMUM_LOG_LEVEL <= 0 +# define ARROW_LOGGER_INFO(LOGGER, ...) \ + (ARROW_LOGGER_INTERNAL(LOGGER, INFO).Append(__VA_ARGS__)) +#else +# define ARROW_LOGGER_INFO(...) ARROW_UNUSED(0) +#endif + +static_assert(static_cast(::arrow::util::ArrowLogLevel::ARROW_WARNING) == 1); +#if ARROW_MINIMUM_LOG_LEVEL <= 1 +# define ARROW_LOGGER_WARNING(LOGGER, ...) \ + (ARROW_LOGGER_INTERNAL(LOGGER, WARNING).Append(__VA_ARGS__)) +#else +# define ARROW_LOGGER_WARNING(...) ARROW_UNUSED(0) +#endif + +static_assert(static_cast(::arrow::util::ArrowLogLevel::ARROW_ERROR) == 2); +#if ARROW_MINIMUM_LOG_LEVEL <= 2 +# define ARROW_LOGGER_ERROR(LOGGER, ...) \ + (ARROW_LOGGER_INTERNAL(LOGGER, ERROR).Append(__VA_ARGS__)) +#else +# define ARROW_LOGGER_ERROR(...) ARROW_UNUSED(0) +#endif + +static_assert(static_cast(::arrow::util::ArrowLogLevel::ARROW_FATAL) == 3); +#if ARROW_MINIMUM_LOG_LEVEL <= 3 +# define ARROW_LOGGER_FATAL(LOGGER, ...) \ + (ARROW_LOGGER_INTERNAL(LOGGER, FATAL).Append(__VA_ARGS__)) +#else +# define ARROW_LOGGER_FATAL(...) ARROW_UNUSED(0) +#endif + +#define ARROW_LOGGER_CALL(LOGGER, LEVEL, ...) 
ARROW_LOGGER_##LEVEL(LOGGER, __VA_ARGS__) diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/macros.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/macros.h new file mode 100644 index 0000000000000000000000000000000000000000..af29fd636b51a0f6bedff279a19e8fb4c112f77b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/macros.h @@ -0,0 +1,239 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#define ARROW_EXPAND(x) x +#define ARROW_STRINGIFY(x) #x +#define ARROW_CONCAT(x, y) x##y + +// From Google gutil +#ifndef ARROW_DISALLOW_COPY_AND_ASSIGN +# define ARROW_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + void operator=(const TypeName&) = delete +#endif + +#ifndef ARROW_DEFAULT_MOVE_AND_ASSIGN +# define ARROW_DEFAULT_MOVE_AND_ASSIGN(TypeName) \ + TypeName(TypeName&&) = default; \ + TypeName& operator=(TypeName&&) = default +#endif + +// With ARROW_PREDICT_FALSE, GCC and clang can be told that a certain branch is +// not likely to be taken (for instance, a CHECK failure), and use that information in +// static analysis. 
Giving the compiler this information can affect the generated code +// layout in the absence of better information (i.e. -fprofile-arcs). [1] explains how +// this feature can be used to improve code generation. It was written as a positive +// comment to a negative article about the use of these annotations. +// +// ARROW_COMPILER_ASSUME allows the compiler to assume that a given expression is +// true, without evaluating it, and to optimise based on this assumption [2]. If this +// condition is violated at runtime, the behavior is undefined. This can be useful to +// generate both faster and smaller code in compute kernels. +// +// IMPORTANT: Different optimisers are likely to react differently to this annotation! +// It should be used with care when we can prove by some means that the assumption +// is (1) guaranteed to always hold and (2) is useful for optimization [3]. If the +// assumption is pessimistic, it might even block the compiler from decisions that +// could lead to better code [4]. If you have a good intuition for what the compiler +// can do with assumptions [5], you can use this macro to guide it and end up with +// results you would only get with more complex code transformations. +// `clang -S -emit-llvm` can be used to check how the generated code changes with +// your specific use of this macro. +// +// [1] https://lobste.rs/s/uwgtkt/don_t_use_likely_unlikely_attributes#c_xi3wmc +// [2] "Portable assumptions" +// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p1774r4.pdf +// [3] "Assertions Are Pessimistic, Assumptions Are Optimistic" +// https://blog.regehr.org/archives/1096 +// [4] https://discourse.llvm.org/t/llvm-assume-blocks-optimization/71609 +// [5] J. Doerfert et al. 2019. "Performance Exploration Through Optimistic Static +// Program Annotations". 
https://github.com/jdoerfert/PETOSPA/blob/master/ISC19.pdf +#define ARROW_UNUSED(x) (void)(x) +#ifdef ARROW_WARN_DOCUMENTATION +# define ARROW_ARG_UNUSED(x) x +#else +# define ARROW_ARG_UNUSED(x) +#endif +#if defined(__GNUC__) // GCC and compatible compilers (clang, Intel ICC) +# define ARROW_NORETURN __attribute__((noreturn)) +# define ARROW_NOINLINE __attribute__((noinline)) +# define ARROW_FORCE_INLINE __attribute__((always_inline)) +# define ARROW_PREDICT_FALSE(x) (__builtin_expect(!!(x), 0)) +# define ARROW_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1)) +# define ARROW_RESTRICT __restrict +# if defined(__clang__) // clang-specific +# define ARROW_COMPILER_ASSUME(expr) __builtin_assume(expr) +# else // GCC-specific +# if __GNUC__ >= 13 +# define ARROW_COMPILER_ASSUME(expr) __attribute__((assume(expr))) +# else +// GCC does not have a built-in assume intrinsic before GCC 13, so we use an +// if statement and __builtin_unreachable() to achieve the same effect [2]. +// Unlike clang's __builtin_assume and C++23's [[assume(expr)]], using this +// on GCC won't warn about side-effects in the expression, so make sure expr +// is side-effect free when working with GCC versions before 13 (Jan-2024), +// otherwise clang/MSVC builds will fail in CI. 
+# define ARROW_COMPILER_ASSUME(expr) \ + if (expr) { \ + } else { \ + __builtin_unreachable(); \ + } +# endif // __GNUC__ >= 13 +# endif +#elif defined(_MSC_VER) // MSVC +# define ARROW_NORETURN __declspec(noreturn) +# define ARROW_NOINLINE __declspec(noinline) +# define ARROW_FORCE_INLINE __forceinline +# define ARROW_PREDICT_FALSE(x) (x) +# define ARROW_PREDICT_TRUE(x) (x) +# define ARROW_RESTRICT __restrict +# define ARROW_COMPILER_ASSUME(expr) __assume(expr) +#else +# define ARROW_NORETURN +# define ARROW_NOINLINE +# define ARROW_FORCE_INLINE +# define ARROW_PREDICT_FALSE(x) (x) +# define ARROW_PREDICT_TRUE(x) (x) +# define ARROW_RESTRICT +# define ARROW_COMPILER_ASSUME(expr) +#endif + +// ---------------------------------------------------------------------- +// C++/CLI support macros (see ARROW-1134) + +#ifndef NULLPTR + +# ifdef __cplusplus_cli +# define NULLPTR __nullptr +# else +# define NULLPTR nullptr +# endif + +#endif // ifndef NULLPTR + +// ---------------------------------------------------------------------- + +// clang-format off +// [[deprecated]] is only available in C++14, use this for the time being +// This macro takes an optional deprecation message +#ifdef __COVERITY__ +# define ARROW_DEPRECATED(...) +#else +# define ARROW_DEPRECATED(...) [[deprecated(__VA_ARGS__)]] +#endif + +#ifdef __COVERITY__ +# define ARROW_DEPRECATED_ENUM_VALUE(...) +#else +# define ARROW_DEPRECATED_ENUM_VALUE(...) 
[[deprecated(__VA_ARGS__)]] +#endif + +// clang-format on + +// Macros to disable deprecation warnings + +#ifdef __clang__ +# define ARROW_SUPPRESS_DEPRECATION_WARNING \ + _Pragma("clang diagnostic push"); \ + _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") +# define ARROW_UNSUPPRESS_DEPRECATION_WARNING _Pragma("clang diagnostic pop") +#elif defined(__GNUC__) +# define ARROW_SUPPRESS_DEPRECATION_WARNING \ + _Pragma("GCC diagnostic push"); \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +# define ARROW_UNSUPPRESS_DEPRECATION_WARNING _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +# define ARROW_SUPPRESS_DEPRECATION_WARNING \ + __pragma(warning(push)) __pragma(warning(disable : 4996)) +# define ARROW_UNSUPPRESS_DEPRECATION_WARNING __pragma(warning(pop)) +#else +# define ARROW_SUPPRESS_DEPRECATION_WARNING +# define ARROW_UNSUPPRESS_DEPRECATION_WARNING +#endif + +// ---------------------------------------------------------------------- + +// macros to disable padding +// these macros are portable across different compilers and platforms +//[https://github.com/google/flatbuffers/blob/master/include/flatbuffers/flatbuffers.h#L1355] +#if !defined(MANUALLY_ALIGNED_STRUCT) +# if defined(_MSC_VER) +# define MANUALLY_ALIGNED_STRUCT(alignment) \ + __pragma(pack(1)); \ + struct __declspec(align(alignment)) +# define STRUCT_END(name, size) \ + __pragma(pack()); \ + static_assert(sizeof(name) == size, "compiler breaks packing rules") +# elif defined(__GNUC__) || defined(__clang__) +# define MANUALLY_ALIGNED_STRUCT(alignment) \ + _Pragma("pack(1)") struct __attribute__((aligned(alignment))) +# define STRUCT_END(name, size) \ + _Pragma("pack()") static_assert(sizeof(name) == size, \ + "compiler breaks packing rules") +# else +# error Unknown compiler, please define structure alignment macros +# endif +#endif // !defined(MANUALLY_ALIGNED_STRUCT) + +// ---------------------------------------------------------------------- +// Convenience 
macro disabling a particular UBSan check in a function + +#if defined(__clang__) +# define ARROW_DISABLE_UBSAN(feature) __attribute__((no_sanitize(feature))) +#else +# define ARROW_DISABLE_UBSAN(feature) +#endif + +// ---------------------------------------------------------------------- +// Machine information + +#if INTPTR_MAX == INT64_MAX +# define ARROW_BITNESS 64 +#elif INTPTR_MAX == INT32_MAX +# define ARROW_BITNESS 32 +#else +# error Unexpected INTPTR_MAX +#endif + +// ---------------------------------------------------------------------- +// From googletest +// (also in parquet-cpp) + +// When you need to test the private or protected members of a class, +// use the FRIEND_TEST macro to declare your tests as friends of the +// class. For example: +// +// class MyClass { +// private: +// void MyMethod(); +// FRIEND_TEST(MyClassTest, MyMethod); +// }; +// +// class MyClassTest : public testing::Test { +// // ... +// }; +// +// TEST_F(MyClassTest, MyMethod) { +// // Can call MyClass::MyMethod() here. +// } + +#define FRIEND_TEST(test_case_name, test_name) \ + friend class test_case_name##_##test_name##_Test diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/map.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/map.h new file mode 100644 index 0000000000000000000000000000000000000000..5523909061d4c096b03c4853584ec9abc0f39a14 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/map.h @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/result.h" + +namespace arrow { +namespace internal { + +/// Helper providing single-lookup conditional insertion into std::map or +/// std::unordered_map. If `key` exists in the container, an iterator to that pair +/// will be returned. If `key` does not exist in the container, `gen(key)` will be +/// invoked and its return value inserted. +template +auto GetOrInsertGenerated(Map* map, typename Map::key_type key, Gen&& gen) + -> decltype(map->begin()->second = gen(map->begin()->first), map->begin()) { + decltype(gen(map->begin()->first)) placeholder{}; + + auto it_success = map->emplace(std::move(key), std::move(placeholder)); + if (it_success.second) { + // insertion of placeholder succeeded, overwrite it with gen() + const auto& inserted_key = it_success.first->first; + auto* value = &it_success.first->second; + *value = gen(inserted_key); + } + return it_success.first; +} + +template +auto GetOrInsertGenerated(Map* map, typename Map::key_type key, Gen&& gen) + -> Resultbegin()->second = gen(map->begin()->first).ValueOrDie(), + map->begin())> { + decltype(gen(map->begin()->first).ValueOrDie()) placeholder{}; + + auto it_success = map->emplace(std::move(key), std::move(placeholder)); + if (it_success.second) { + // insertion of placeholder succeeded, overwrite it with gen() + const auto& inserted_key = it_success.first->first; + auto* value = &it_success.first->second; + ARROW_ASSIGN_OR_RAISE(*value, gen(inserted_key)); + } + return it_success.first; +} + +} // namespace internal +} // namespace arrow 
diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/math_constants.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/math_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..3524f88e0ba9a5c2f4cd49079c2f3de90e5e9aaa --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/math_constants.h @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +// Not provided by default in MSVC, +// and _USE_MATH_DEFINES is not reliable with unity builds +#ifndef M_PI +# define M_PI 3.14159265358979323846 +#endif +#ifndef M_PI_2 +# define M_PI_2 1.57079632679489661923 +#endif +#ifndef M_PI_4 +# define M_PI_4 0.785398163397448309616 +#endif diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/memory.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/memory.h new file mode 100644 index 0000000000000000000000000000000000000000..4250d0694b7dd283aad6bbb159bd3e36328fe7ae --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/memory.h @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +// A helper function for doing memcpy with multiple threads. This is required +// to saturate the memory bandwidth of modern cpus. +void parallel_memcopy(uint8_t* dst, const uint8_t* src, int64_t nbytes, + uintptr_t block_size, int num_threads); + +// A helper function for checking if two wrapped objects implementing `Equals` +// are equal. 
+template +bool SharedPtrEquals(const std::shared_ptr& left, const std::shared_ptr& right) { + if (left == right) return true; + if (left == NULLPTR || right == NULLPTR) return false; + return left->Equals(*right); +} + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/mutex.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/mutex.h new file mode 100644 index 0000000000000000000000000000000000000000..ac63cf70cd9ae9c05189f89e2f96c4d216d09573 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/mutex.h @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +/// A wrapper around std::mutex since we can't use it directly in +/// public headers due to C++/CLI. +/// https://docs.microsoft.com/en-us/cpp/standard-library/mutex#remarks +class ARROW_EXPORT Mutex { + public: + Mutex(); + Mutex(Mutex&&) = default; + Mutex& operator=(Mutex&&) = default; + + /// A Guard is falsy if a lock could not be acquired. 
+ class ARROW_EXPORT Guard { + public: + Guard() : locked_(NULLPTR, [](Mutex* mutex) {}) {} + Guard(Guard&&) = default; + Guard& operator=(Guard&&) = default; + + explicit operator bool() const { return bool(locked_); } + + void Unlock() { locked_.reset(); } + + private: + explicit Guard(Mutex* locked); + + std::unique_ptr locked_; + friend Mutex; + }; + + Guard TryLock(); + Guard Lock(); + + private: + struct Impl; + std::unique_ptr impl_; +}; + +#ifndef _WIN32 +/// Return a pointer to a process-wide, process-specific Mutex that can be used +/// at any point in a child process. NULL is returned when called in the parent. +/// +/// The rule is to first check that getpid() corresponds to the parent process pid +/// and, if not, call this function to lock any after-fork reinitialization code. +/// Like this: +/// +/// std::atomic pid{getpid()}; +/// ... +/// if (pid.load() != getpid()) { +/// // In child process +/// auto lock = GlobalForkSafeMutex()->Lock(); +/// if (pid.load() != getpid()) { +/// // Reinitialize internal structures after fork +/// ... +/// pid.store(getpid()); +ARROW_EXPORT +Mutex* GlobalForkSafeMutex(); +#endif + +} // namespace util +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/pcg_random.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/pcg_random.h new file mode 100644 index 0000000000000000000000000000000000000000..768f2328200fb2635213358226cfdb3f9273c808 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/pcg_random.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/vendored/pcg/pcg_random.hpp" // IWYU pragma: export + +namespace arrow { +namespace random { + +using pcg32 = ::arrow_vendored::pcg32; +using pcg64 = ::arrow_vendored::pcg64; +using pcg32_fast = ::arrow_vendored::pcg32_fast; +using pcg64_fast = ::arrow_vendored::pcg64_fast; +using pcg32_oneseq = ::arrow_vendored::pcg32_oneseq; +using pcg64_oneseq = ::arrow_vendored::pcg64_oneseq; + +} // namespace random +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/prefetch.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/prefetch.h new file mode 100644 index 0000000000000000000000000000000000000000..1e9b5ae670ca173edb6448d6575fd5a946aaf4c9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/prefetch.h @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#if defined(__GNUC__) // GCC and compatible compilers (clang, Intel ICC) +# define ARROW_PREFETCH(addr) __builtin_prefetch(addr) +#elif defined(_MSC_VER) // MSVC +# if defined(ARROW_HAVE_SSE4_2) || defined(ARROW_HAVE_RUNTIME_SSE4_2) +# include +# define ARROW_PREFETCH(addr) _mm_prefetch((const char*)(addr), _MM_HINT_T0) +# else +# define ARROW_PREFETCH(addr) +# endif +#else +# define ARROW_PREFETCH(addr) +#endif diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/print.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/print.h new file mode 100644 index 0000000000000000000000000000000000000000..82cea473c5b277323772c6914ee28b1903b5240d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/print.h @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
template + +#pragma once + +#include +#include "arrow/util/string.h" + +using arrow::internal::ToChars; + +namespace arrow { +namespace internal { + +namespace detail { + +template +struct TuplePrinter { + static void Print(OStream* os, const Tuple& t) { + TuplePrinter::Print(os, t); + *os << std::get(t); + } +}; + +template +struct TuplePrinter { + static void Print(OStream* os, const Tuple& t) {} +}; + +} // namespace detail + +// Print elements from a tuple to a stream, in order. +// Typical use is to pack a bunch of existing values with std::forward_as_tuple() +// before passing it to this function. +template +void PrintTuple(OStream* os, const std::tuple& tup) { + detail::TuplePrinter, sizeof...(Args)>::Print(os, tup); +} + +template +struct PrintVector { + const Range& range_; + const Separator& separator_; + + template // template to dodge inclusion of + friend Os& operator<<(Os& os, PrintVector l) { + bool first = true; + os << "["; + for (const auto& element : l.range_) { + if (first) { + first = false; + } else { + os << l.separator_; + } + os << ToChars(element); // use ToChars to avoid locale dependence + } + os << "]"; + return os; + } +}; +template +PrintVector(const Range&, const Separator&) -> PrintVector; +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/queue.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/queue.h new file mode 100644 index 0000000000000000000000000000000000000000..6c71fa6e155e8818801db2ccb18127d75d6364a8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/queue.h @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/vendored/ProducerConsumerQueue.h" + +namespace arrow { +namespace util { + +template +using SpscQueue = arrow_vendored::folly::ProducerConsumerQueue; + +} +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/range.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/range.h new file mode 100644 index 0000000000000000000000000000000000000000..20553287985423970c228308742a7f85464a4a87 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/range.h @@ -0,0 +1,258 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace arrow::internal { + +/// Create a vector containing the values from start up to stop +template +std::vector Iota(T start, T stop) { + if (start > stop) { + return {}; + } + std::vector result(static_cast(stop - start)); + std::iota(result.begin(), result.end(), start); + return result; +} + +/// Create a vector containing the values from 0 up to length +template +std::vector Iota(T length) { + return Iota(static_cast(0), length); +} + +/// Create a range from a callable which takes a single index parameter +/// and returns the value of iterator on each call and a length. +/// Only iterators obtained from the same range should be compared, the +/// behaviour generally similar to other STL containers. +template +class LazyRange { + private: + // callable which generates the values + // has to be defined at the beginning of the class for type deduction + const Generator gen_; + // the length of the range + int64_t length_; +#ifdef _MSC_VER + // workaround to VS2010 not supporting decltype properly + // see https://stackoverflow.com/questions/21782846/decltype-for-class-member-function + static Generator gen_static_; +#endif + + public: +#ifdef _MSC_VER + using return_type = decltype(gen_static_(0)); +#else + using return_type = decltype(gen_(0)); +#endif + + /// Construct a new range from a callable and length + LazyRange(Generator gen, int64_t length) : gen_(gen), length_(length) {} + + // Class of the dependent iterator, created implicitly by begin and end + class RangeIter { + public: + using difference_type = int64_t; + using value_type = return_type; + using reference = const value_type&; + using pointer = const value_type*; + using iterator_category = std::forward_iterator_tag; + +#ifdef _MSC_VER + // msvc complains about unchecked iterators, + // see https://stackoverflow.com/questions/21655496/error-c4996-checked-iterators + using _Unchecked_type = typename 
LazyRange::RangeIter; +#endif + + RangeIter() = delete; + RangeIter(const RangeIter& other) = default; + RangeIter& operator=(const RangeIter& other) = default; + + RangeIter(const LazyRange& range, int64_t index) + : range_(&range), index_(index) {} + + const return_type operator*() const { return range_->gen_(index_); } + + RangeIter operator+(difference_type length) const { + return RangeIter(*range_, index_ + length); + } + + // pre-increment + RangeIter& operator++() { + ++index_; + return *this; + } + + // post-increment + RangeIter operator++(int) { + auto copy = RangeIter(*this); + ++index_; + return copy; + } + + bool operator==(const typename LazyRange::RangeIter& other) const { + return this->index_ == other.index_ && this->range_ == other.range_; + } + + bool operator!=(const typename LazyRange::RangeIter& other) const { + return this->index_ != other.index_ || this->range_ != other.range_; + } + + int64_t operator-(const typename LazyRange::RangeIter& other) const { + return this->index_ - other.index_; + } + + bool operator<(const typename LazyRange::RangeIter& other) const { + return this->index_ < other.index_; + } + + private: + // parent range reference + const LazyRange* range_; + // current index + int64_t index_; + }; + + friend class RangeIter; + + // Create a new begin const iterator + RangeIter begin() { return RangeIter(*this, 0); } + + // Create a new end const iterator + RangeIter end() { return RangeIter(*this, length_); } +}; + +/// Helper function to create a lazy range from a callable (e.g. lambda) and length +template +LazyRange MakeLazyRange(Generator&& gen, int64_t length) { + return LazyRange(std::forward(gen), length); +} + +/// \brief A helper for iterating multiple ranges simultaneously, similar to C++23's +/// zip() view adapter modelled after python's built-in zip() function. +/// +/// \code {.cpp} +/// const std::vector& tables = ... +/// std::function()> GetNames = ... 
+/// for (auto [table, name] : Zip(tables, GetNames())) { +/// static_assert(std::is_same_v); +/// static_assert(std::is_same_v); +/// // temporaries (like this vector of strings) are kept alive for the +/// // duration of a loop and are safely movable). +/// RegisterTableWithName(std::move(name), &table); +/// } +/// \endcode +/// +/// The zipped sequence ends as soon as any of its member ranges ends. +/// +/// Always use `auto` for the loop's declaration; it will always be a tuple +/// of references so for example using `const auto&` will compile but will +/// *look* like forcing const-ness even though the members of the tuple are +/// still mutable references. +/// +/// NOTE: we *could* make Zip a more full fledged range and enable things like +/// - gtest recognizing it as a container; it currently doesn't since Zip is +/// always mutable so this breaks: +/// EXPECT_THAT(Zip(std::vector{0}, std::vector{1}), +/// ElementsAre(std::tuple{0, 1})); +/// - letting it be random access when possible so we can do things like *sort* +/// parallel ranges +/// - ... +/// +/// However doing this will increase the compile time overhead of using Zip as +/// long as we're still using headers. Therefore until we can use c++20 modules: +/// *don't* extend Zip. +template +struct Zip; + +template +Zip(Ranges&&...) -> Zip, std::index_sequence_for>; + +template +struct Zip, std::index_sequence> { + explicit Zip(Ranges... ranges) : ranges_(std::forward(ranges)...) {} + + std::tuple ranges_; + + using sentinel = std::tuple(ranges_)))...>; + constexpr sentinel end() { return {std::end(std::get(ranges_))...}; } + + struct iterator : std::tuple(ranges_)))...> { + using std::tuple(ranges_)))...>::tuple; + + constexpr auto operator*() { + return std::tuple(*this))...>{*std::get(*this)...}; + } + + constexpr iterator& operator++() { + (++std::get(*this), ...); + return *this; + } + + constexpr bool operator!=(const sentinel& s) const { + bool all_iterators_valid = (... 
&& (std::get(*this) != std::get(s))); + return all_iterators_valid; + } + }; + constexpr iterator begin() { return {std::begin(std::get(ranges_))...}; } +}; + +/// \brief A lazy sequence of integers which starts from 0 and never stops. +/// +/// This can be used in conjunction with Zip() to emulate python's built-in +/// enumerate() function: +/// +/// \code {.cpp} +/// const std::vector& tables = ... +/// for (auto [i, table] : Zip(Enumerate<>, tables)) { +/// std::cout << "#" << i << ": " << table.name() << std::endl; +/// } +/// \endcode +template +constexpr auto Enumerate = [] { + struct { + struct sentinel {}; + constexpr sentinel end() const { return {}; } + + struct iterator { + I value{0}; + + constexpr I operator*() { return value; } + + constexpr iterator& operator++() { + ++value; + return *this; + } + + constexpr std::true_type operator!=(sentinel) const { return {}; } + }; + constexpr iterator begin() const { return {}; } + } out; + + return out; +}(); + +} // namespace arrow::internal diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/ree_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/ree_util.h new file mode 100644 index 0000000000000000000000000000000000000000..a3e745ba830a37fce75100fd4f87505607b3fa5b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/ree_util.h @@ -0,0 +1,582 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/array/data.h" +#include "arrow/type_traits.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace ree_util { + +/// \brief Get the child array holding the run ends from an REE array +inline const ArraySpan& RunEndsArray(const ArraySpan& span) { return span.child_data[0]; } + +/// \brief Get the child array holding the data values from an REE array +inline const ArraySpan& ValuesArray(const ArraySpan& span) { return span.child_data[1]; } + +/// \brief Get a pointer to run ends values of an REE array +template +const RunEndCType* RunEnds(const ArraySpan& span) { + assert(RunEndsArray(span).type->id() == CTypeTraits::ArrowType::type_id); + return RunEndsArray(span).GetValues(1); +} + +/// \brief Perform basic validations on the parameters of an REE array +/// and its two children arrays +/// +/// All the checks complete in O(1) time. 
Consequently, this function: +/// - DOES NOT check that run_ends is sorted and all-positive +/// - DOES NOT check the actual contents of the run_ends and values arrays +Status ValidateRunEndEncodedChildren(const RunEndEncodedType& type, + int64_t logical_length, + const std::shared_ptr& run_ends_data, + const std::shared_ptr& values_data, + int64_t null_count, int64_t logical_offset); + +/// \brief Compute the logical null count of an REE array +int64_t LogicalNullCount(const ArraySpan& span); + +namespace internal { + +/// \brief Uses binary-search to find the physical offset given a logical offset +/// and run-end values +/// +/// \return the physical offset or run_ends_size if the physical offset is not +/// found in run_ends +template +int64_t FindPhysicalIndex(const RunEndCType* run_ends, int64_t run_ends_size, int64_t i, + int64_t absolute_offset) { + assert(absolute_offset + i >= 0); + auto it = std::upper_bound(run_ends, run_ends + run_ends_size, absolute_offset + i); + int64_t result = std::distance(run_ends, it); + assert(result <= run_ends_size); + return result; +} + +/// \brief Uses binary-search to calculate the range of physical values (and +/// run-ends) necessary to represent the logical range of values from +/// offset to length +/// +/// \return a pair of physical offset and physical length +template +std::pair FindPhysicalRange(const RunEndCType* run_ends, + int64_t run_ends_size, int64_t length, + int64_t offset) { + const int64_t physical_offset = + FindPhysicalIndex(run_ends, run_ends_size, 0, offset); + // The physical length is calculated by finding the offset of the last element + // and adding 1 to it, so first we ensure there is at least one element. 
+ if (length == 0) { + return {physical_offset, 0}; + } + const int64_t physical_index_of_last = FindPhysicalIndex( + run_ends + physical_offset, run_ends_size - physical_offset, length - 1, offset); + + assert(physical_index_of_last < run_ends_size - physical_offset); + return {physical_offset, physical_index_of_last + 1}; +} + +/// \brief Uses binary-search to calculate the number of physical values (and +/// run-ends) necessary to represent the logical range of values from +/// offset to length +template +int64_t FindPhysicalLength(const RunEndCType* run_ends, int64_t run_ends_size, + int64_t length, int64_t offset) { + auto [_, physical_length] = + FindPhysicalRange(run_ends, run_ends_size, length, offset); + // GH-37107: This is a workaround for GCC 7. GCC 7 doesn't ignore + // variables in structured binding automatically from unused + // variables when one of these variables are used. + // See also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81767 + ARROW_UNUSED(_); + return physical_length; +} + +/// \brief Find the physical index into the values array of the REE ArraySpan +/// +/// This function uses binary-search, so it has a O(log N) cost. +template +int64_t FindPhysicalIndex(const ArraySpan& span, int64_t i, int64_t absolute_offset) { + const int64_t run_ends_size = RunEndsArray(span).length; + return FindPhysicalIndex(RunEnds(span), run_ends_size, i, absolute_offset); +} + +/// \brief Find the physical length of an REE ArraySpan +/// +/// The physical length of an REE is the number of physical values (and +/// run-ends) necessary to represent the logical range of values from +/// offset to length. +/// +/// Avoid calling this function if the physical length can be established in +/// some other way (e.g. when iterating over the runs sequentially until the +/// end). This function uses binary-search, so it has a O(log N) cost. 
+template +int64_t FindPhysicalLength(const ArraySpan& span) { + return FindPhysicalLength( + /*run_ends=*/RunEnds(span), + /*run_ends_size=*/RunEndsArray(span).length, + /*length=*/span.length, + /*offset=*/span.offset); +} + +template +struct PhysicalIndexFinder; + +// non-inline implementations for each run-end type +ARROW_EXPORT int64_t FindPhysicalIndexImpl16(PhysicalIndexFinder& self, + int64_t i); +ARROW_EXPORT int64_t FindPhysicalIndexImpl32(PhysicalIndexFinder& self, + int64_t i); +ARROW_EXPORT int64_t FindPhysicalIndexImpl64(PhysicalIndexFinder& self, + int64_t i); + +/// \brief Stateful version of FindPhysicalIndex() that caches the result of +/// the previous search and uses it to optimize the next search. +/// +/// When new queries for the physical index of a logical index come in, +/// binary search is performed again but the first candidate checked is the +/// result of the previous search (cached physical index) instead of the +/// midpoint of the run-ends array. +/// +/// If that test fails, internal::FindPhysicalIndex() is called with one of the +/// partitions defined by the cached index. If the queried logical indices +/// follow an increasing or decreasing pattern, this first test is much more +/// effective in (1) finding the answer right away (close logical indices belong +/// to the same runs) or (2) discarding many more candidates than probing +/// the midpoint would. +/// +/// The most adversarial case (i.e. alternating between 0 and length-1 queries) +/// only adds one extra binary search probe when compared to always starting +/// binary search from the midpoint without any of these optimizations. +/// +/// \tparam RunEndCType The numeric type of the run-ends array. 
+template +struct PhysicalIndexFinder { + const ArraySpan array_span; + const RunEndCType* run_ends; + int64_t last_physical_index = 0; + + explicit PhysicalIndexFinder(const ArrayData& data) + : array_span(data), + run_ends(RunEndsArray(array_span).template GetValues(1)) { + assert(CTypeTraits::ArrowType::type_id == + ::arrow::internal::checked_cast(*data.type) + .run_end_type() + ->id()); + } + + /// \brief Find the physical index into the values array of the REE array. + /// + /// \pre 0 <= i < array_span.length() + /// \param i the logical index into the REE array + /// \return the physical index into the values array + int64_t FindPhysicalIndex(int64_t i) { + if constexpr (std::is_same_v) { + return FindPhysicalIndexImpl16(*this, i); + } else if constexpr (std::is_same_v) { + return FindPhysicalIndexImpl32(*this, i); + } else { + static_assert(std::is_same_v, "Unsupported RunEndCType."); + return FindPhysicalIndexImpl64(*this, i); + } + } +}; + +} // namespace internal + +/// \brief Find the physical index into the values array of the REE ArraySpan +/// +/// This function uses binary-search, so it has a O(log N) cost. +ARROW_EXPORT int64_t FindPhysicalIndex(const ArraySpan& span, int64_t i, + int64_t absolute_offset); + +/// \brief Find the physical length of an REE ArraySpan +/// +/// The physical length of an REE is the number of physical values (and +/// run-ends) necessary to represent the logical range of values from +/// offset to length. +/// +/// Avoid calling this function if the physical length can be established in +/// some other way (e.g. when iterating over the runs sequentially until the +/// end). This function uses binary-search, so it has a O(log N) cost. 
+ARROW_EXPORT int64_t FindPhysicalLength(const ArraySpan& span); + +/// \brief Find the physical range of physical values referenced by the REE in +/// the logical range from offset to offset + length +/// +/// \return a pair of physical offset and physical length +ARROW_EXPORT std::pair FindPhysicalRange(const ArraySpan& span, + int64_t offset, + int64_t length); + +// Publish PhysicalIndexFinder outside of the internal namespace. +template +using PhysicalIndexFinder = internal::PhysicalIndexFinder; + +template +class RunEndEncodedArraySpan { + private: + struct PrivateTag {}; + + public: + /// \brief Iterator representing the current run during iteration over a + /// run-end encoded array + class Iterator { + public: + Iterator(PrivateTag, const RunEndEncodedArraySpan& span, int64_t logical_pos, + int64_t physical_pos) + : span(span), logical_pos_(logical_pos), physical_pos_(physical_pos) {} + + /// \brief Return the physical index of the run + /// + /// The values array can be addressed with this index to get the value + /// that makes up the run. + /// + /// NOTE: if this Iterator is equal to RunEndEncodedArraySpan::end(), + /// the value returned is undefined. + int64_t index_into_array() const { return physical_pos_; } + + /// \brief Return the initial logical position of the run + /// + /// If this Iterator is equal to RunEndEncodedArraySpan::end(), this is + /// the same as RunEndEncodedArraySpan::length(). + int64_t logical_position() const { return logical_pos_; } + + /// \brief Return the logical position immediately after the run. + /// + /// Pre-condition: *this != RunEndEncodedArraySpan::end() + int64_t run_end() const { return span.run_end(physical_pos_); } + + /// \brief Returns the logical length of the run. + /// + /// Pre-condition: *this != RunEndEncodedArraySpan::end() + int64_t run_length() const { return run_end() - logical_pos_; } + + /// \brief Check if the iterator is at the end of the array. 
+ /// + /// This can be used to avoid paying the cost of a call to + /// RunEndEncodedArraySpan::end(). + /// + /// \return true if the iterator is at the end of the array + bool is_end(const RunEndEncodedArraySpan& span) const { + return logical_pos_ >= span.length(); + } + + Iterator& operator++() { + logical_pos_ = span.run_end(physical_pos_); + physical_pos_ += 1; + return *this; + } + + Iterator operator++(int) { + const Iterator prev = *this; + ++(*this); + return prev; + } + + Iterator& operator--() { + physical_pos_ -= 1; + logical_pos_ = (physical_pos_ > 0) ? span.run_end(physical_pos_ - 1) : 0; + return *this; + } + + Iterator operator--(int) { + const Iterator prev = *this; + --(*this); + return prev; + } + + bool operator==(const Iterator& other) const { + return logical_pos_ == other.logical_pos_; + } + + bool operator!=(const Iterator& other) const { + return logical_pos_ != other.logical_pos_; + } + + public: + const RunEndEncodedArraySpan& span; + + private: + int64_t logical_pos_; + int64_t physical_pos_; + }; + + // Prevent implicit ArrayData -> ArraySpan conversion in + // RunEndEncodedArraySpan instantiation. + explicit RunEndEncodedArraySpan(const ArrayData& data) = delete; + + /// \brief Construct a RunEndEncodedArraySpan from an ArraySpan and new + /// absolute offset and length. + /// + /// RunEndEncodedArraySpan{span, off, len} is equivalent to: + /// + /// span.SetSlice(off, len); + /// RunEndEncodedArraySpan{span} + /// + /// ArraySpan::SetSlice() updates the null_count to kUnknownNullCount, but + /// we don't need that here as REE arrays have null_count set to 0 by + /// convention. 
+ explicit RunEndEncodedArraySpan(const ArraySpan& array_span, int64_t offset, + int64_t length) + : array_span_{array_span}, + run_ends_(RunEnds(array_span_)), + length_(length), + offset_(offset) { + assert(array_span_.type->id() == Type::RUN_END_ENCODED); + } + + explicit RunEndEncodedArraySpan(const ArraySpan& array_span) + : RunEndEncodedArraySpan(array_span, array_span.offset, array_span.length) {} + + int64_t offset() const { return offset_; } + int64_t length() const { return length_; } + + int64_t PhysicalIndex(int64_t logical_pos) const { + return internal::FindPhysicalIndex(run_ends_, RunEndsArray(array_span_).length, + logical_pos, offset_); + } + + /// \brief Create an iterator from a logical position and its + /// pre-computed physical offset into the run ends array + /// + /// \param logical_pos is an index in the [0, length()] range + /// \param physical_offset the pre-calculated PhysicalIndex(logical_pos) + Iterator iterator(int64_t logical_pos, int64_t physical_offset) const { + return Iterator{PrivateTag{}, *this, logical_pos, physical_offset}; + } + + /// \brief Create an iterator from a logical position + /// + /// \param logical_pos is an index in the [0, length()] range + Iterator iterator(int64_t logical_pos) const { + if (logical_pos < length()) { + return iterator(logical_pos, PhysicalIndex(logical_pos)); + } + // If logical_pos is above the valid range, use length() as the logical + // position and calculate the physical address right after the last valid + // physical position. Which is the physical index of the last logical + // position, plus 1. + return (length() == 0) ? 
iterator(0, PhysicalIndex(0)) + : iterator(length(), PhysicalIndex(length() - 1) + 1); + } + + /// \brief Create an iterator representing the logical begin of the run-end + /// encoded array + Iterator begin() const { return iterator(0, PhysicalIndex(0)); } + + /// \brief Create an iterator representing the first invalid logical position + /// of the run-end encoded array + /// + /// \warning Avoid calling end() in a loop, as it will recompute the physical + /// length of the array on each call (O(log N) cost per call). + /// + /// \par You can write your loops like this instead: + /// \code + /// for (auto it = array.begin(), end = array.end(); it != end; ++it) { + /// // ... + /// } + /// \endcode + /// + /// \par Or this version that does not look like idiomatic C++, but removes + /// the need for calling end() completely: + /// \code + /// for (auto it = array.begin(); !it.is_end(array); ++it) { + /// // ... + /// } + /// \endcode + Iterator end() const { + return iterator(length(), + (length() == 0) ? PhysicalIndex(0) : PhysicalIndex(length() - 1) + 1); + } + + // Pre-condition: physical_pos < RunEndsArray(array_span_).length); + inline int64_t run_end(int64_t physical_pos) const { + assert(physical_pos < RunEndsArray(array_span_).length); + // Logical index of the end of the run at physical_pos with offset applied + const int64_t logical_run_end = + std::max(static_cast(run_ends_[physical_pos]) - offset(), 0); + // The current run may go further than the logical length, cap it + return std::min(logical_run_end, length()); + } + + private: + const ArraySpan& array_span_; + const RunEndCType* run_ends_; + const int64_t length_; + const int64_t offset_; +}; + +/// \brief Iterate over two run-end encoded arrays in runs or sub-runs that are +/// inside run boundaries on both inputs +/// +/// Both RunEndEncodedArraySpan should have the same logical length. Instances +/// of this iterator only hold references to the RunEndEncodedArraySpan inputs. 
+template +class MergedRunsIterator { + private: + using LeftIterator = typename Left::Iterator; + using RightIterator = typename Right::Iterator; + + MergedRunsIterator(LeftIterator left_it, RightIterator right_it, + int64_t common_logical_length, int64_t common_logical_pos) + : ree_iterators_{std::move(left_it), std::move(right_it)}, + logical_length_(common_logical_length), + logical_pos_(common_logical_pos) {} + + public: + /// \brief Construct a MergedRunsIterator positioned at logical position 0. + /// + /// Pre-condition: left.length() == right.length() + MergedRunsIterator(const Left& left, const Right& right) + : MergedRunsIterator(left.begin(), right.begin(), left.length(), 0) { + assert(left.length() == right.length()); + } + + static Result MakeBegin(const Left& left, const Right& right) { + if (left.length() != right.length()) { + return Status::Invalid( + "MergedRunsIterator expects RunEndEncodedArraySpans of the same length"); + } + return MergedRunsIterator(left, right); + } + + static Result MakeEnd(const Left& left, const Right& right) { + if (left.length() != right.length()) { + return Status::Invalid( + "MergedRunsIterator expects RunEndEncodedArraySpans of the same length"); + } + return MergedRunsIterator(left.end(), right.end(), left.length(), left.length()); + } + + /// \brief Return the left RunEndEncodedArraySpan child + const Left& left() const { return std::get<0>(ree_iterators_).span; } + + /// \brief Return the right RunEndEncodedArraySpan child + const Right& right() const { return std::get<1>(ree_iterators_).span; } + + /// \brief Return the initial logical position of the run + /// + /// If is_end(), this is the same as length(). + int64_t logical_position() const { return logical_pos_; } + + /// \brief Whether the iterator is at logical position 0. 
+ bool is_begin() const { return logical_pos_ == 0; } + + /// \brief Whether the iterator has reached the end of both arrays + bool is_end() const { return logical_pos_ == logical_length_; } + + /// \brief Return the logical position immediately after the run. + /// + /// Pre-condition: !is_end() + int64_t run_end() const { + const auto& left_it = std::get<0>(ree_iterators_); + const auto& right_it = std::get<1>(ree_iterators_); + return std::min(left_it.run_end(), right_it.run_end()); + } + + /// \brief returns the logical length of the current run + /// + /// Pre-condition: !is_end() + int64_t run_length() const { return run_end() - logical_pos_; } + + /// \brief Return a physical index into the values array of a given input, + /// pointing to the value of the current run + template + int64_t index_into_array() const { + return std::get(ree_iterators_).index_into_array(); + } + + int64_t index_into_left_array() const { return index_into_array<0>(); } + int64_t index_into_right_array() const { return index_into_array<1>(); } + + MergedRunsIterator& operator++() { + auto& left_it = std::get<0>(ree_iterators_); + auto& right_it = std::get<1>(ree_iterators_); + + const int64_t left_run_end = left_it.run_end(); + const int64_t right_run_end = right_it.run_end(); + + if (left_run_end < right_run_end) { + logical_pos_ = left_run_end; + ++left_it; + } else if (left_run_end > right_run_end) { + logical_pos_ = right_run_end; + ++right_it; + } else { + logical_pos_ = left_run_end; + ++left_it; + ++right_it; + } + return *this; + } + + MergedRunsIterator operator++(int) { + MergedRunsIterator prev = *this; + ++(*this); + return prev; + } + + MergedRunsIterator& operator--() { + auto& left_it = std::get<0>(ree_iterators_); + auto& right_it = std::get<1>(ree_iterators_); + + // The logical position of each iterator is the run_end() of the previous run. 
+ const int64_t left_logical_pos = left_it.logical_position(); + const int64_t right_logical_pos = right_it.logical_position(); + + if (left_logical_pos < right_logical_pos) { + --right_it; + logical_pos_ = std::max(left_logical_pos, right_it.logical_position()); + } else if (left_logical_pos > right_logical_pos) { + --left_it; + logical_pos_ = std::max(left_it.logical_position(), right_logical_pos); + } else { + --left_it; + --right_it; + logical_pos_ = std::max(left_it.logical_position(), right_it.logical_position()); + } + return *this; + } + + MergedRunsIterator operator--(int) { + MergedRunsIterator prev = *this; + --(*this); + return prev; + } + + bool operator==(const MergedRunsIterator& other) const { + return logical_pos_ == other.logical_position(); + } + + bool operator!=(const MergedRunsIterator& other) const { return !(*this == other); } + + private: + std::tuple ree_iterators_; + const int64_t logical_length_; + int64_t logical_pos_; +}; + +} // namespace ree_util +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/simd.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/simd.h new file mode 100644 index 0000000000000000000000000000000000000000..cc1a7d6cc807cc2139d3bb0ee706e51f4c2a0192 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/simd.h @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#ifdef _MSC_VER +// MSVC x86_64/arm64 + +# if defined(_M_AMD64) || defined(_M_X64) +# include +# endif + +#else +// gcc/clang (possibly others) + +# if defined(ARROW_HAVE_BMI2) || defined(ARROW_HAVE_RUNTIME_BMI2) +# include +# endif + +# if defined(ARROW_HAVE_AVX2) || defined(ARROW_HAVE_AVX512) || \ + defined(ARROW_HAVE_RUNTIME_AVX2) || defined(ARROW_HAVE_RUNTIME_AVX512) +# include +# elif defined(ARROW_HAVE_SSE4_2) || defined(ARROW_HAVE_RUNTIME_SSE4_2) +# include +# endif + +# ifdef ARROW_HAVE_NEON +# include +# endif + +// GH-44098: Workaround for missing _mm256_set_m128i in older versions of GCC. +# if defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 8 +# define _mm256_set_m128i(hi, lo) \ + _mm256_inserti128_si256(_mm256_castsi128_si256(lo), (hi), 1) +# endif + +#endif diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/small_vector.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/small_vector.h new file mode 100644 index 0000000000000000000000000000000000000000..52e191c4c07846b922a5bd830c2cbbde50538eba --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/small_vector.h @@ -0,0 +1,511 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/util/aligned_storage.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +template +struct StaticVectorStorageBase { + using storage_type = AlignedStorage; + + storage_type static_data_[N]; + size_t size_ = 0; + + void destroy() noexcept {} +}; + +template +struct StaticVectorStorageBase { + using storage_type = AlignedStorage; + + storage_type static_data_[N]; + size_t size_ = 0; + + ~StaticVectorStorageBase() noexcept { destroy(); } + + void destroy() noexcept { storage_type::destroy_several(static_data_, size_); } +}; + +template ::value> +struct StaticVectorStorage : public StaticVectorStorageBase { + using Base = StaticVectorStorageBase; + using typename Base::storage_type; + + using Base::size_; + using Base::static_data_; + + StaticVectorStorage() noexcept = default; + + constexpr storage_type* storage_ptr() { return static_data_; } + + constexpr const storage_type* const_storage_ptr() const { return static_data_; } + + // Adjust storage size, but don't initialize any objects + void bump_size(size_t addend) { + assert(size_ + addend <= N); + size_ += addend; + } + + void ensure_capacity(size_t min_capacity) { assert(min_capacity <= N); } + + // Adjust storage size, but don't destroy any objects + void reduce_size(size_t 
reduce_by) { + assert(reduce_by <= size_); + size_ -= reduce_by; + } + + // Move objects from another storage, but don't destroy any objects currently + // stored in *this. + // You need to call destroy() first if necessary (e.g. in a + // move assignment operator). + void move_construct(StaticVectorStorage&& other) noexcept { + size_ = other.size_; + if (size_ != 0) { + // Use a compile-time memcpy size (N) for trivial types + storage_type::move_construct_several(other.static_data_, static_data_, size_, N); + } + } + + constexpr size_t capacity() const { return N; } + + constexpr size_t max_size() const { return N; } + + void reserve(size_t n) {} + + void clear() { + storage_type::destroy_several(static_data_, size_); + size_ = 0; + } +}; + +template +struct SmallVectorStorage { + using storage_type = AlignedStorage; + + storage_type static_data_[N]; + size_t size_ = 0; + storage_type* data_ = static_data_; + size_t dynamic_capacity_ = 0; + + SmallVectorStorage() noexcept = default; + + ~SmallVectorStorage() { destroy(); } + + constexpr storage_type* storage_ptr() { return data_; } + + constexpr const storage_type* const_storage_ptr() const { return data_; } + + void bump_size(size_t addend) { + const size_t new_size = size_ + addend; + ensure_capacity(new_size); + size_ = new_size; + } + + void ensure_capacity(size_t min_capacity) { + if (dynamic_capacity_) { + // Grow dynamic storage if necessary + if (min_capacity > dynamic_capacity_) { + size_t new_capacity = std::max(dynamic_capacity_ * 2, min_capacity); + reallocate_dynamic(new_capacity); + } + } else if (min_capacity > N) { + switch_to_dynamic(min_capacity); + } + } + + void reduce_size(size_t reduce_by) { + assert(reduce_by <= size_); + size_ -= reduce_by; + } + + void destroy() noexcept { + storage_type::destroy_several(data_, size_); + if (dynamic_capacity_) { + delete[] data_; + } + } + + void move_construct(SmallVectorStorage&& other) noexcept { + size_ = other.size_; + dynamic_capacity_ = 
other.dynamic_capacity_; + if (dynamic_capacity_) { + data_ = other.data_; + other.data_ = other.static_data_; + other.dynamic_capacity_ = 0; + other.size_ = 0; + } else if (size_ != 0) { + // Use a compile-time memcpy size (N) for trivial types + storage_type::move_construct_several(other.static_data_, static_data_, size_, N); + } + } + + constexpr size_t capacity() const { return dynamic_capacity_ ? dynamic_capacity_ : N; } + + constexpr size_t max_size() const { return std::numeric_limits::max(); } + + void reserve(size_t n) { + if (dynamic_capacity_) { + if (n > dynamic_capacity_) { + reallocate_dynamic(n); + } + } else if (n > N) { + switch_to_dynamic(n); + } + } + + void clear() { + storage_type::destroy_several(data_, size_); + size_ = 0; + } + + private: + void switch_to_dynamic(size_t new_capacity) { + dynamic_capacity_ = new_capacity; + data_ = new storage_type[new_capacity]; + storage_type::move_construct_several_and_destroy_source(static_data_, data_, size_); + } + + void reallocate_dynamic(size_t new_capacity) { + assert(new_capacity >= size_); + auto new_data = new storage_type[new_capacity]; + storage_type::move_construct_several_and_destroy_source(data_, new_data, size_); + delete[] data_; + dynamic_capacity_ = new_capacity; + data_ = new_data; + } +}; + +template +class StaticVectorImpl { + private: + Storage storage_; + + T* data_ptr() { return storage_.storage_ptr()->get(); } + + constexpr const T* const_data_ptr() const { + return storage_.const_storage_ptr()->get(); + } + + public: + using size_type = size_t; + using difference_type = ptrdiff_t; + using value_type = T; + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using iterator = T*; + using const_iterator = const T*; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + constexpr StaticVectorImpl() noexcept = default; + + // Move and copy constructors + 
StaticVectorImpl(StaticVectorImpl&& other) noexcept { + storage_.move_construct(std::move(other.storage_)); + } + + StaticVectorImpl& operator=(StaticVectorImpl&& other) noexcept { + if (ARROW_PREDICT_TRUE(&other != this)) { + // TODO move_assign? + storage_.destroy(); + storage_.move_construct(std::move(other.storage_)); + } + return *this; + } + + StaticVectorImpl(const StaticVectorImpl& other) { + init_by_copying(other.storage_.size_, other.const_data_ptr()); + } + + StaticVectorImpl& operator=(const StaticVectorImpl& other) noexcept { + if (ARROW_PREDICT_TRUE(&other != this)) { + assign_by_copying(other.storage_.size_, other.data()); + } + return *this; + } + + // Automatic conversion from std::vector, for convenience + StaticVectorImpl(const std::vector& other) { // NOLINT: explicit + init_by_copying(other.size(), other.data()); + } + + StaticVectorImpl(std::vector&& other) noexcept { // NOLINT: explicit + init_by_moving(other.size(), other.data()); + } + + StaticVectorImpl& operator=(const std::vector& other) { + assign_by_copying(other.size(), other.data()); + return *this; + } + + StaticVectorImpl& operator=(std::vector&& other) noexcept { + assign_by_moving(other.size(), other.data()); + return *this; + } + + // Constructing from count and optional initialization value + explicit StaticVectorImpl(size_t count) { + storage_.bump_size(count); + auto* p = storage_.storage_ptr(); + for (size_t i = 0; i < count; ++i) { + p[i].construct(); + } + } + + StaticVectorImpl(size_t count, const T& value) { + storage_.bump_size(count); + auto* p = storage_.storage_ptr(); + for (size_t i = 0; i < count; ++i) { + p[i].construct(value); + } + } + + StaticVectorImpl(std::initializer_list values) { + storage_.bump_size(values.size()); + auto* p = storage_.storage_ptr(); + for (auto&& v : values) { + // Unfortunately, cannot move initializer values + p++->construct(v); + } + } + + // Size inspection + + constexpr bool empty() const { return storage_.size_ == 0; } + + 
constexpr size_t size() const { return storage_.size_; } + + constexpr size_t capacity() const { return storage_.capacity(); } + + constexpr size_t max_size() const { return storage_.max_size(); } + + // Data access + + T& operator[](size_t i) { return data_ptr()[i]; } + + constexpr const T& operator[](size_t i) const { return const_data_ptr()[i]; } + + T& front() { return data_ptr()[0]; } + + constexpr const T& front() const { return const_data_ptr()[0]; } + + T& back() { return data_ptr()[storage_.size_ - 1]; } + + constexpr const T& back() const { return const_data_ptr()[storage_.size_ - 1]; } + + T* data() { return data_ptr(); } + + constexpr const T* data() const { return const_data_ptr(); } + + // Iterators + + iterator begin() { return iterator(data_ptr()); } + + constexpr const_iterator begin() const { return const_iterator(const_data_ptr()); } + + constexpr const_iterator cbegin() const { return const_iterator(const_data_ptr()); } + + iterator end() { return iterator(data_ptr() + storage_.size_); } + + constexpr const_iterator end() const { + return const_iterator(const_data_ptr() + storage_.size_); + } + + constexpr const_iterator cend() const { + return const_iterator(const_data_ptr() + storage_.size_); + } + + reverse_iterator rbegin() { return reverse_iterator(end()); } + + constexpr const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + + constexpr const_reverse_iterator crbegin() const { + return const_reverse_iterator(end()); + } + + reverse_iterator rend() { return reverse_iterator(begin()); } + + constexpr const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + constexpr const_reverse_iterator crend() const { + return const_reverse_iterator(begin()); + } + + // Mutations + + void reserve(size_t n) { storage_.reserve(n); } + + void clear() { storage_.clear(); } + + void push_back(const T& value) { + storage_.bump_size(1); + storage_.storage_ptr()[storage_.size_ - 1].construct(value); + 
} + + void push_back(T&& value) { + storage_.bump_size(1); + storage_.storage_ptr()[storage_.size_ - 1].construct(std::move(value)); + } + + template + void emplace_back(Args&&... args) { + storage_.bump_size(1); + storage_.storage_ptr()[storage_.size_ - 1].construct(std::forward(args)...); + } + + template + iterator insert(const_iterator insert_at, InputIt first, InputIt last) { + const size_t n = storage_.size_; + const size_t it_size = static_cast(last - first); // XXX might be O(n)? + const size_t pos = static_cast(insert_at - const_data_ptr()); + storage_.bump_size(it_size); + auto* p = storage_.storage_ptr(); + if (it_size == 0) { + return p[pos].get(); + } + const size_t end_pos = pos + it_size; + + // Move [pos; n) to [end_pos; end_pos + n - pos) + size_t i = n; + size_t j = end_pos + n - pos; + while (j > std::max(n, end_pos)) { + p[--j].move_construct(&p[--i]); + } + while (j > end_pos) { + p[--j].move_assign(&p[--i]); + } + assert(j == end_pos); + // Copy [first; last) to [pos; end_pos) + j = pos; + while (j < std::min(n, end_pos)) { + p[j++].assign(*first++); + } + while (j < end_pos) { + p[j++].construct(*first++); + } + assert(first == last); + return p[pos].get(); + } + + void resize(size_t n) { + const size_t old_size = storage_.size_; + if (n > storage_.size_) { + storage_.bump_size(n - old_size); + auto* p = storage_.storage_ptr(); + for (size_t i = old_size; i < n; ++i) { + p[i].construct(T{}); + } + } else { + auto* p = storage_.storage_ptr(); + for (size_t i = n; i < old_size; ++i) { + p[i].destroy(); + } + storage_.reduce_size(old_size - n); + } + } + + void resize(size_t n, const T& value) { + const size_t old_size = storage_.size_; + if (n > storage_.size_) { + storage_.bump_size(n - old_size); + auto* p = storage_.storage_ptr(); + for (size_t i = old_size; i < n; ++i) { + p[i].construct(value); + } + } else { + auto* p = storage_.storage_ptr(); + for (size_t i = n; i < old_size; ++i) { + p[i].destroy(); + } + storage_.reduce_size(old_size 
- n); + } + } + + private: + template + void init_by_copying(size_t n, InputIt src) { + storage_.bump_size(n); + auto* dest = storage_.storage_ptr(); + for (size_t i = 0; i < n; ++i, ++src) { + dest[i].construct(*src); + } + } + + template + void init_by_moving(size_t n, InputIt src) { + init_by_copying(n, std::make_move_iterator(src)); + } + + template + void assign_by_copying(size_t n, InputIt src) { + const size_t old_size = storage_.size_; + if (n > old_size) { + storage_.bump_size(n - old_size); + auto* dest = storage_.storage_ptr(); + for (size_t i = 0; i < old_size; ++i, ++src) { + dest[i].assign(*src); + } + for (size_t i = old_size; i < n; ++i, ++src) { + dest[i].construct(*src); + } + } else { + auto* dest = storage_.storage_ptr(); + for (size_t i = 0; i < n; ++i, ++src) { + dest[i].assign(*src); + } + for (size_t i = n; i < old_size; ++i) { + dest[i].destroy(); + } + storage_.reduce_size(old_size - n); + } + } + + template + void assign_by_moving(size_t n, InputIt src) { + assign_by_copying(n, std::make_move_iterator(src)); + } +}; + +template +using StaticVector = StaticVectorImpl>; + +template +using SmallVector = StaticVectorImpl>; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h new file mode 100644 index 0000000000000000000000000000000000000000..cdffe0b2317e5ba555c37ec16e5294bc912a49d4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace arrow { +namespace internal { + +template > +std::vector ArgSort(const std::vector& values, Cmp&& cmp = {}) { + std::vector indices(values.size()); + std::iota(indices.begin(), indices.end(), 0); + std::sort(indices.begin(), indices.end(), + [&](int64_t i, int64_t j) -> bool { return cmp(values[i], values[j]); }); + return indices; +} + +template +size_t Permute(const std::vector& indices, std::vector* values) { + if (indices.size() <= 1) { + return indices.size(); + } + + // mask indicating which of values are in the correct location + std::vector sorted(indices.size(), false); + + size_t cycle_count = 0; + + for (auto cycle_start = sorted.begin(); cycle_start != sorted.end(); + cycle_start = std::find(cycle_start, sorted.end(), false)) { + ++cycle_count; + + // position in which an element belongs WRT sort + auto sort_into = static_cast(cycle_start - sorted.begin()); + + if (indices[sort_into] == sort_into) { + // trivial cycle + sorted[sort_into] = true; + continue; + } + + // resolve this cycle + const auto end = sort_into; + for (int64_t take_from = indices[sort_into]; take_from != end; + take_from = indices[sort_into]) { + std::swap(values->at(sort_into), values->at(take_from)); + sorted[sort_into] = true; + sort_into = take_from; + } + sorted[sort_into] = true; + } + + return cycle_count; +} + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/string.h 
b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/string.h new file mode 100644 index 0000000000000000000000000000000000000000..f2081d0937b77ffacb9097808d08ca09291101bf --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/string.h @@ -0,0 +1,173 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#if __has_include() +# include +#endif + +#include "arrow/result.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Status; + +ARROW_EXPORT std::string HexEncode(const uint8_t* data, size_t length); + +ARROW_EXPORT std::string Escape(const char* data, size_t length); + +ARROW_EXPORT std::string HexEncode(const char* data, size_t length); + +ARROW_EXPORT std::string HexEncode(std::string_view str); + +ARROW_EXPORT std::string Escape(std::string_view str); + +ARROW_EXPORT Status ParseHexValue(const char* hex_pair, uint8_t* out); + +ARROW_EXPORT Status ParseHexValues(std::string_view hex_string, uint8_t* out); + +namespace internal { + +/// Like std::string_view::starts_with in C++20 +inline bool StartsWith(std::string_view s, std::string_view prefix) { + return s.length() >= prefix.length() && + (s.empty() || s.substr(0, prefix.length()) == prefix); +} + +/// Like std::string_view::ends_with in C++20 +inline bool EndsWith(std::string_view s, std::string_view suffix) { + return s.length() >= suffix.length() && + (s.empty() || s.substr(s.length() - suffix.length()) == suffix); +} + +/// \brief Split a string with a delimiter +ARROW_EXPORT +std::vector SplitString(std::string_view v, char delim, + int64_t limit = 0); + +/// \brief Join strings with a delimiter +ARROW_EXPORT +std::string JoinStrings(const std::vector& strings, + std::string_view delimiter); + +/// \brief Join strings with a delimiter +ARROW_EXPORT +std::string JoinStrings(const std::vector& strings, + std::string_view delimiter); + +/// \brief Trim whitespace from left and right sides of string +ARROW_EXPORT +std::string TrimString(std::string value); + +ARROW_EXPORT +bool AsciiEqualsCaseInsensitive(std::string_view left, std::string_view right); + +ARROW_EXPORT +std::string AsciiToLower(std::string_view value); + +ARROW_EXPORT +std::string AsciiToUpper(std::string_view value); + +/// \brief 
Search for the first instance of a token and replace it or return nullopt if +/// the token is not found. +ARROW_EXPORT +std::optional Replace(std::string_view s, std::string_view token, + std::string_view replacement); + +/// \brief Get boolean value from string +/// +/// If "1", "true" (case-insensitive), returns true +/// If "0", "false" (case-insensitive), returns false +/// Otherwise, returns Status::Invalid +ARROW_EXPORT +arrow::Result ParseBoolean(std::string_view value); + +#if __has_include() + +namespace detail { +template +struct can_to_chars : public std::false_type {}; + +template +struct can_to_chars< + T, std::void_t(), std::declval(), + std::declval>()))>> + : public std::true_type {}; +} // namespace detail + +/// \brief Whether std::to_chars exists for the current value type. +/// +/// This is useful as some C++ libraries do not implement all specified overloads +/// for std::to_chars. +template +inline constexpr bool have_to_chars = detail::can_to_chars::value; + +/// \brief An ergonomic wrapper around std::to_chars, returning a std::string +/// +/// For most inputs, the std::string result will not incur any heap allocation +/// thanks to small string optimization. +/// +/// Compared to std::to_string, this function gives locale-agnostic results +/// and might also be faster. +template +std::string ToChars(T value, Args&&... args) { + if constexpr (!have_to_chars) { + // Some C++ standard libraries do not yet implement std::to_chars for all types, + // in which case we have to fallback to std::string. + return std::to_string(value); + } else { + // According to various sources, the GNU libstdc++ and Microsoft's C++ STL + // allow up to 15 bytes of small string optimization, while clang's libc++ + // goes up to 22 bytes. Choose the pessimistic value. 
+ std::string out(15, 0); + auto res = std::to_chars(&out.front(), &out.back(), value, args...); + while (res.ec != std::errc{}) { + assert(res.ec == std::errc::value_too_large); + out.resize(out.capacity() * 2); + res = std::to_chars(&out.front(), &out.back(), value, args...); + } + const auto length = res.ptr - out.data(); + assert(length <= static_cast(out.length())); + out.resize(length); + return out; + } +} + +#else // !__has_include() + +template +inline constexpr bool have_to_chars = false; + +template +std::string ToChars(T value, Args&&... args) { + return std::to_string(value); +} + +#endif + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/task_group.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/task_group.h new file mode 100644 index 0000000000000000000000000000000000000000..3bb72f0d9cb7d7bb8b9ce8f2a65cc9f954924ca3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/task_group.h @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/cancel.h" +#include "arrow/util/functional.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief A group of related tasks +/// +/// A TaskGroup executes tasks with the signature `Status()`. +/// Execution can be serial or parallel, depending on the TaskGroup +/// implementation. When Finish() returns, it is guaranteed that all +/// tasks have finished, or at least one has errored. +/// +/// Once an error has occurred any tasks that are submitted to the task group +/// will not run. The call to Append will simply return without scheduling the +/// task. +/// +/// If the task group is parallel it is possible that multiple tasks could be +/// running at the same time and one of those tasks fails. This will put the +/// task group in a failure state (so additional tasks cannot be run) however +/// it will not interrupt running tasks. Finish will not complete +/// until all running tasks have finished, even if one task fails. +/// +/// Once a task group has finished new tasks may not be added to it. If you need to start +/// a new batch of work then you should create a new task group. +class ARROW_EXPORT TaskGroup : public std::enable_shared_from_this { + public: + /// Add a Status-returning function to execute. Execution order is + /// undefined. The function may be executed immediately or later. + template + void Append(Function&& func) { + return AppendReal(std::forward(func)); + } + + /// Wait for execution of all tasks (and subgroups) to be finished, + /// or for at least one task (or subgroup) to error out. + /// The returned Status propagates the error status of the first failing + /// task (or subgroup). + virtual Status Finish() = 0; + + /// Returns a future that will complete the first time all tasks are finished. 
+ /// This should be called only after all top level tasks + /// have been added to the task group. + /// + /// If you are using a TaskGroup asynchronously there are a few considerations to keep + /// in mind. The tasks should not block on I/O, etc (defeats the purpose of using + /// futures) and should not be doing any nested locking or you run the risk of the tasks + /// getting stuck in the thread pool waiting for tasks which cannot get scheduled. + /// + /// Primarily this call is intended to help migrate existing work written with TaskGroup + /// in mind to using futures without having to do a complete conversion on the first + /// pass. + virtual Future<> FinishAsync() = 0; + + /// The current aggregate error Status. Non-blocking, useful for stopping early. + virtual Status current_status() = 0; + + /// Whether some tasks have already failed. Non-blocking, useful for stopping early. + virtual bool ok() const = 0; + + /// How many tasks can typically be executed in parallel. + /// This is only a hint, useful for testing or debugging. + virtual int parallelism() = 0; + + static std::shared_ptr MakeSerial(StopToken = StopToken::Unstoppable()); + static std::shared_ptr MakeThreaded(internal::Executor*, + StopToken = StopToken::Unstoppable()); + + virtual ~TaskGroup() = default; + + protected: + TaskGroup() = default; + ARROW_DISALLOW_COPY_AND_ASSIGN(TaskGroup); + + virtual void AppendReal(FnOnce task) = 0; +}; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/tdigest.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/tdigest.h new file mode 100644 index 0000000000000000000000000000000000000000..ea033ed696d1b4f0badfc02c4a562940eac0565b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/tdigest.h @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// approximate quantiles from arbitrary length dataset with O(1) space +// based on 'Computing Extremely Accurate Quantiles Using t-Digests' from Dunning & Ertl +// - https://arxiv.org/abs/1902.04023 +// - https://github.com/tdunning/t-digest + +#pragma once + +#include +#include +#include + +#include "arrow/util/logging.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Status; + +namespace internal { + +class ARROW_EXPORT TDigest { + public: + explicit TDigest(uint32_t delta = 100, uint32_t buffer_size = 500); + ~TDigest(); + TDigest(TDigest&&); + TDigest& operator=(TDigest&&); + + // reset and re-use this tdigest + void Reset(); + + // validate data integrity + Status Validate() const; + + // dump internal data, only for debug + void Dump() const; + + // buffer a single data point, consume internal buffer if full + // this function is intensively called and performance critical + // call it only if you are sure no NAN exists in input data + void Add(double value) { + ARROW_DCHECK(!std::isnan(value)) << "cannot add NAN"; + if (ARROW_PREDICT_FALSE(input_.size() == input_.capacity())) { + MergeInput(); + } + input_.push_back(value); + } + + // skip NAN on adding + template + typename std::enable_if::value>::type NanAdd(T 
value) { + if (!std::isnan(value)) Add(value); + } + + template + typename std::enable_if::value>::type NanAdd(T value) { + Add(static_cast(value)); + } + + // merge with other t-digests, called infrequently + void Merge(const std::vector& others); + void Merge(const TDigest& other); + + // calculate quantile + double Quantile(double q) const; + + double Min() const { return Quantile(0); } + double Max() const { return Quantile(1); } + double Mean() const; + + // check if this tdigest contains no valid data points + bool is_empty() const; + + private: + // merge input data with current tdigest + void MergeInput() const; + + // input buffer, size = buffer_size * sizeof(double) + mutable std::vector input_; + + // hide other members with pimpl + class TDigestImpl; + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/test_common.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/test_common.h new file mode 100644 index 0000000000000000000000000000000000000000..511daed1ecaac688b6d444349bf1c63fb6c53ad6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/test_common.h @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/testing/gtest_util.h" +#include "arrow/util/iterator.h" + +namespace arrow { + +struct TestInt { + TestInt(); + TestInt(int i); // NOLINT runtime/explicit + int value; + + bool operator==(const TestInt& other) const; + + friend std::ostream& operator<<(std::ostream& os, const TestInt& v); +}; + +template <> +struct IterationTraits { + static TestInt End() { return TestInt(); } + static bool IsEnd(const TestInt& val) { return val == IterationTraits::End(); } +}; + +struct TestStr { + TestStr(); + TestStr(const std::string& s); // NOLINT runtime/explicit + TestStr(const char* s); // NOLINT runtime/explicit + explicit TestStr(const TestInt& test_int); + std::string value; + + bool operator==(const TestStr& other) const; + + friend std::ostream& operator<<(std::ostream& os, const TestStr& v); +}; + +template <> +struct IterationTraits { + static TestStr End() { return TestStr(); } + static bool IsEnd(const TestStr& val) { return val == IterationTraits::End(); } +}; + +std::vector RangeVector(unsigned int max, unsigned int step = 1); + +template +inline Iterator VectorIt(std::vector v) { + return MakeVectorIterator(std::move(v)); +} + +template +inline Iterator PossiblySlowVectorIt(std::vector v, bool slow = false) { + auto iterator = MakeVectorIterator(std::move(v)); + if (slow) { + return MakeTransformedIterator(std::move(iterator), + [](T item) -> Result> { + SleepABit(); + return TransformYield(item); + }); + } else { + return iterator; + } +} + +template +inline void AssertIteratorExhausted(Iterator& it) { + ASSERT_OK_AND_ASSIGN(T next, it.Next()); + ASSERT_TRUE(IsIterationEnd(next)); +} + +Transformer MakeFilter(std::function filter); + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/thread_pool.h 
b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/thread_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..cd32781aed756baf9fa11b168c36f9d76d99c6a8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/thread_pool.h @@ -0,0 +1,620 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/cancel.h" +#include "arrow/util/config.h" +#include "arrow/util/functional.h" +#include "arrow/util/future.h" +#include "arrow/util/iterator.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +#if defined(_MSC_VER) +// Disable harmless warning for decorated name length limit +# pragma warning(disable : 4503) +#endif + +namespace arrow { + +/// \brief Get the capacity of the global thread pool +/// +/// Return the number of worker threads in the thread pool to which +/// Arrow dispatches various CPU-bound tasks. This is an ideal number, +/// not necessarily the exact number of threads at a given point in time. +/// +/// You can change this number using SetCpuThreadPoolCapacity(). 
+ARROW_EXPORT int GetCpuThreadPoolCapacity(); + +/// \brief Set the capacity of the global thread pool +/// +/// Set the number of worker threads int the thread pool to which +/// Arrow dispatches various CPU-bound tasks. +/// +/// The current number is returned by GetCpuThreadPoolCapacity(). +ARROW_EXPORT Status SetCpuThreadPoolCapacity(int threads); + +namespace internal { + +// Hints about a task that may be used by an Executor. +// They are ignored by the provided ThreadPool implementation. +struct TaskHints { + // The lower, the more urgent + int32_t priority = 0; + // The IO transfer size in bytes + int64_t io_size = -1; + // The approximate CPU cost in number of instructions + int64_t cpu_cost = -1; + // An application-specific ID + int64_t external_id = -1; +}; + +class ARROW_EXPORT Executor { + public: + using StopCallback = internal::FnOnce; + + virtual ~Executor(); + + // Spawn a fire-and-forget task. + template + Status Spawn(Function&& func) { + return SpawnReal(TaskHints{}, std::forward(func), StopToken::Unstoppable(), + StopCallback{}); + } + template + Status Spawn(Function&& func, StopToken stop_token) { + return SpawnReal(TaskHints{}, std::forward(func), std::move(stop_token), + StopCallback{}); + } + template + Status Spawn(TaskHints hints, Function&& func) { + return SpawnReal(hints, std::forward(func), StopToken::Unstoppable(), + StopCallback{}); + } + template + Status Spawn(TaskHints hints, Function&& func, StopToken stop_token) { + return SpawnReal(hints, std::forward(func), std::move(stop_token), + StopCallback{}); + } + template + Status Spawn(TaskHints hints, Function&& func, StopToken stop_token, + StopCallback stop_callback) { + return SpawnReal(hints, std::forward(func), std::move(stop_token), + std::move(stop_callback)); + } + + // Transfers a future to this executor. Any continuations added to the + // returned future will run in this executor. Otherwise they would run + // on the same thread that called MarkFinished. 
+ // + // This is necessary when (for example) an I/O task is completing a future. + // The continuations of that future should run on the CPU thread pool keeping + // CPU heavy work off the I/O thread pool. So the I/O task should transfer + // the future to the CPU executor before returning. + // + // By default this method will only transfer if the future is not already completed. If + // the future is already completed then any callback would be run synchronously and so + // no transfer is typically necessary. However, in cases where you want to force a + // transfer (e.g. to help the scheduler break up units of work across multiple cores) + // then you can override this behavior with `always_transfer`. + template + Future Transfer(Future future) { + return DoTransfer(std::move(future), false); + } + + // Overload of Transfer which will always schedule callbacks on new threads even if the + // future is finished when the callback is added. + // + // This can be useful in cases where you want to ensure parallelism + template + Future TransferAlways(Future future) { + return DoTransfer(std::move(future), true); + } + + // Submit a callable and arguments for execution. Return a future that + // will return the callable's result value once. + // The callable's arguments are copied before execution. + template > + Result Submit(TaskHints hints, StopToken stop_token, Function&& func, + Args&&... 
args) { + using ValueType = typename FutureType::ValueType; + + auto future = FutureType::Make(); + auto task = std::bind(::arrow::detail::ContinueFuture{}, future, + std::forward(func), std::forward(args)...); + struct { + WeakFuture weak_fut; + + void operator()(const Status& st) { + auto fut = weak_fut.get(); + if (fut.is_valid()) { + fut.MarkFinished(st); + } + } + } stop_callback{WeakFuture(future)}; + ARROW_RETURN_NOT_OK(SpawnReal(hints, std::move(task), std::move(stop_token), + std::move(stop_callback))); + + return future; + } + + template > + Result Submit(StopToken stop_token, Function&& func, Args&&... args) { + return Submit(TaskHints{}, stop_token, std::forward(func), + std::forward(args)...); + } + + template > + Result Submit(TaskHints hints, Function&& func, Args&&... args) { + return Submit(std::move(hints), StopToken::Unstoppable(), + std::forward(func), std::forward(args)...); + } + + template > + Result Submit(Function&& func, Args&&... args) { + return Submit(TaskHints{}, StopToken::Unstoppable(), std::forward(func), + std::forward(args)...); + } + + // Return the level of parallelism (the number of tasks that may be executed + // concurrently). This may be an approximate number. + virtual int GetCapacity() = 0; + + // Return true if the thread from which this function is called is owned by this + // Executor. Returns false if this Executor does not support this property. + virtual bool OwnsThisThread() { return false; } + + // Return true if this is the current executor being called + // n.b. 
this defaults to just calling OwnsThisThread + // unless the threadpool is disabled + virtual bool IsCurrentExecutor() { return OwnsThisThread(); } + + /// \brief An interface to represent something with a custom destructor + /// + /// \see KeepAlive + class ARROW_EXPORT Resource { + public: + virtual ~Resource() = default; + }; + + /// \brief Keep a resource alive until all executor threads have terminated + /// + /// Executors may have static storage duration. In particular, the CPU and I/O + /// executors are currently implemented this way. These threads may access other + /// objects with static storage duration such as the OpenTelemetry runtime context + /// the default memory pool, or other static executors. + /// + /// The order in which these objects are destroyed is difficult to control. In order + /// to ensure those objects remain alive until all threads have finished those objects + /// should be wrapped in a Resource object and passed into this method. The given + /// shared_ptr will be kept alive until all threads have finished their worker loops. + virtual void KeepAlive(std::shared_ptr resource); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(Executor); + + Executor() = default; + + template , typename FTSync = typename FT::SyncType> + Future DoTransfer(Future future, bool always_transfer = false) { + auto transferred = Future::Make(); + if (always_transfer) { + CallbackOptions callback_options = CallbackOptions::Defaults(); + callback_options.should_schedule = ShouldSchedule::Always; + callback_options.executor = this; + auto sync_callback = [transferred](const FTSync& result) mutable { + transferred.MarkFinished(result); + }; + future.AddCallback(sync_callback, callback_options); + return transferred; + } + + // We could use AddCallback's ShouldSchedule::IfUnfinished but we can save a bit of + // work by doing the test here. 
+ auto callback = [this, transferred](const FTSync& result) mutable { + auto spawn_status = + Spawn([transferred, result]() mutable { transferred.MarkFinished(result); }); + if (!spawn_status.ok()) { + transferred.MarkFinished(spawn_status); + } + }; + auto callback_factory = [&callback]() { return callback; }; + if (future.TryAddCallback(callback_factory)) { + return transferred; + } + // If the future is already finished and we aren't going to force spawn a thread + // then we don't need to add another layer of callback and can return the original + // future + return future; + } + + // Subclassing API + virtual Status SpawnReal(TaskHints hints, FnOnce task, StopToken, + StopCallback&&) = 0; +}; + +/// \brief An executor implementation that runs all tasks on a single thread using an +/// event loop. +/// +/// Note: Any sort of nested parallelism will deadlock this executor. Blocking waits are +/// fine but if one task needs to wait for another task it must be expressed as an +/// asynchronous continuation. +class ARROW_EXPORT SerialExecutor : public Executor { + public: + template + using TopLevelTask = internal::FnOnce(Executor*)>; + + ~SerialExecutor() override; + + int GetCapacity() override { return 1; }; + bool OwnsThisThread() override; + Status SpawnReal(TaskHints hints, FnOnce task, StopToken, + StopCallback&&) override; + + // Return the number of tasks either running or in the queue. + int GetNumTasks(); + + /// \brief Runs the TopLevelTask and any scheduled tasks + /// + /// The TopLevelTask (or one of the tasks it schedules) must either return an invalid + /// status or call the finish signal. Failure to do this will result in a deadlock. For + /// this reason it is preferable (if possible) to use the helper methods (below) + /// RunSynchronously/RunSerially which delegates the responsibility onto a Future + /// producer's existing responsibility to always mark a future finished (which can + /// someday be aided by ARROW-12207). 
+ template , + typename FTSync = typename FT::SyncType> + static FTSync RunInSerialExecutor(TopLevelTask initial_task) { + Future fut = SerialExecutor().Run(std::move(initial_task)); + return FutureToSync(fut); + } + + /// \brief Transform an AsyncGenerator into an Iterator + /// + /// An event loop will be created and each call to Next will power the event loop with + /// the calling thread until the next item is ready to be delivered. + /// + /// Note: The iterator's destructor will run until the given generator is fully + /// exhausted. If you wish to abandon iteration before completion then the correct + /// approach is to use a stop token to cause the generator to exhaust early. + template + static Iterator IterateGenerator( + internal::FnOnce()>>(Executor*)> initial_task) { + auto serial_executor = std::unique_ptr(new SerialExecutor()); + auto maybe_generator = std::move(initial_task)(serial_executor.get()); + if (!maybe_generator.ok()) { + return MakeErrorIterator(maybe_generator.status()); + } + auto generator = maybe_generator.MoveValueUnsafe(); + struct SerialIterator { + SerialIterator(std::unique_ptr executor, + std::function()> generator) + : executor(std::move(executor)), generator(std::move(generator)) {} + ARROW_DISALLOW_COPY_AND_ASSIGN(SerialIterator); + ARROW_DEFAULT_MOVE_AND_ASSIGN(SerialIterator); + ~SerialIterator() { + // A serial iterator must be consumed before it can be destroyed. Allowing it to + // do otherwise would lead to resource leakage. There will likely be deadlocks at + // this spot in the future but these will be the result of other bugs and not the + // fact that we are forcing consumption here. + + // If a streaming API needs to support early abandonment then it should be done so + // with a cancellation token and not simply discarding the iterator and expecting + // the underlying work to clean up correctly. 
+ if (executor && !executor->IsFinished()) { + while (true) { + Result maybe_next = Next(); + if (!maybe_next.ok() || IsIterationEnd(*maybe_next)) { + break; + } + } + } + } + + Result Next() { + executor->Unpause(); + // This call may lead to tasks being scheduled in the serial executor + Future next_fut = generator(); + next_fut.AddCallback([this](const Result& res) { + // If we're done iterating we should drain the rest of the tasks in the executor + if (!res.ok() || IsIterationEnd(*res)) { + executor->Finish(); + return; + } + // Otherwise we will break out immediately, leaving the remaining tasks for + // the next call. + executor->Pause(); + }); +#ifdef ARROW_ENABLE_THREADING + // future must run on this thread + // Borrow this thread and run tasks until the future is finished + executor->RunLoop(); +#else + next_fut.Wait(); +#endif + if (!next_fut.is_finished()) { + // Not clear this is possible since RunLoop wouldn't generally exit + // unless we paused/finished which would imply next_fut has been + // finished. + return Status::Invalid( + "Serial executor terminated before next result computed"); + } + // At this point we may still have tasks in the executor, that is ok. + // We will run those tasks the next time through. + return next_fut.result(); + } + + std::unique_ptr executor; + std::function()> generator; + }; + return Iterator(SerialIterator{std::move(serial_executor), std::move(generator)}); + } + +#ifndef ARROW_ENABLE_THREADING + // run a pending task from loop + // returns true if any tasks were run in the last go round the loop (i.e. 
if it + // returns false, all executors are waiting) + static bool RunTasksOnAllExecutors(); + static SerialExecutor* GetCurrentExecutor(); + + bool IsCurrentExecutor() override; + +#endif + + protected: + virtual void RunLoop(); + + // State uses mutex + struct State; + std::shared_ptr state_; + + SerialExecutor(); + + // We mark the serial executor "finished" when there should be + // no more tasks scheduled on it. It's not strictly needed but + // can help catch bugs where we are trying to use the executor + // after we are done with it. + void Finish(); + bool IsFinished(); + // We pause the executor when we are running an async generator + // and we have received an item that we can deliver. + void Pause(); + void Unpause(); + + template ::SyncType> + Future Run(TopLevelTask initial_task) { + auto final_fut = std::move(initial_task)(this); + final_fut.AddCallback([this](const FTSync&) { Finish(); }); + RunLoop(); + return final_fut; + } + +#ifndef ARROW_ENABLE_THREADING + // we have to run tasks from all live executors + // during RunLoop if we don't have threading + static std::unordered_set all_executors; + // a pointer to the last one called by the loop + // so all tasks get spawned equally + // on multiple calls to RunTasksOnAllExecutors + static SerialExecutor* last_called_executor; + // without threading we can't tell which executor called the + // current process - so we set it in spawning the task + static SerialExecutor* current_executor; +#endif // ARROW_ENABLE_THREADING +}; + +#ifdef ARROW_ENABLE_THREADING + +/// An Executor implementation spawning tasks in FIFO manner on a fixed-size +/// pool of worker threads. +/// +/// Note: Any sort of nested parallelism will deadlock this executor. Blocking waits are +/// fine but if one task needs to wait for another task it must be expressed as an +/// asynchronous continuation. 
+class ARROW_EXPORT ThreadPool : public Executor { + public: + // Construct a thread pool with the given number of worker threads + static Result> Make(int threads); + + // Like Make(), but takes care that the returned ThreadPool is compatible + // with destruction late at process exit. + static Result> MakeEternal(int threads); + + // Destroy thread pool; the pool will first be shut down + ~ThreadPool() override; + + // Return the desired number of worker threads. + // The actual number of workers may lag a bit before being adjusted to + // match this value. + int GetCapacity() override; + + // Return the number of tasks either running or in the queue. + int GetNumTasks(); + + bool OwnsThisThread() override; + // Dynamically change the number of worker threads. + // + // This function always returns immediately. + // If fewer threads are running than this number, new threads are spawned + // on-demand when needed for task execution. + // If more threads are running than this number, excess threads are reaped + // as soon as possible. + Status SetCapacity(int threads); + + // Heuristic for the default capacity of a thread pool for CPU-bound tasks. + // This is exposed as a static method to help with testing. + static int DefaultCapacity(); + + // Shutdown the pool. Once the pool starts shutting down, new tasks + // cannot be submitted anymore. + // If "wait" is true, shutdown waits for all pending tasks to be finished. + // If "wait" is false, workers are stopped as soon as currently executing + // tasks are finished. 
+ Status Shutdown(bool wait = true); + + // Wait for the thread pool to become idle + // + // This is useful for sequencing tests + void WaitForIdle(); + + void KeepAlive(std::shared_ptr resource) override; + + struct State; + + protected: + FRIEND_TEST(TestThreadPool, SetCapacity); + FRIEND_TEST(TestGlobalThreadPool, Capacity); + ARROW_FRIEND_EXPORT friend ThreadPool* GetCpuThreadPool(); + + ThreadPool(); + + Status SpawnReal(TaskHints hints, FnOnce task, StopToken, + StopCallback&&) override; + + // Collect finished worker threads, making sure the OS threads have exited + void CollectFinishedWorkersUnlocked(); + // Launch a given number of additional workers + void LaunchWorkersUnlocked(int threads); + // Get the current actual capacity + int GetActualCapacity(); + + static std::shared_ptr MakeCpuThreadPool(); + + std::shared_ptr sp_state_; + State* state_; + bool shutdown_on_destroy_; +}; +#else // ARROW_ENABLE_THREADING +// an executor implementation which pretends to be a thread pool but runs everything +// on the main thread using a static queue (shared between all thread pools, otherwise +// cross-threadpool dependencies will break everything) +class ARROW_EXPORT ThreadPool : public SerialExecutor { + public: + ARROW_FRIEND_EXPORT friend ThreadPool* GetCpuThreadPool(); + + static Result> Make(int threads); + + // Like Make(), but takes care that the returned ThreadPool is compatible + // with destruction late at process exit. + static Result> MakeEternal(int threads); + + // Destroy thread pool; the pool will first be shut down + ~ThreadPool() override; + + // Return the desired number of worker threads. + // The actual number of workers may lag a bit before being adjusted to + // match this value. + int GetCapacity() override; + + virtual int GetActualCapacity(); + + bool OwnsThisThread() override { return true; } + + // Dynamically change the number of worker threads. 
+ // without threading this is equal to the + // number of tasks that can be running at once + // (inside each other) + Status SetCapacity(int threads); + + static int DefaultCapacity() { return 8; } + + // Shutdown the pool. Once the pool starts shutting down, new tasks + // cannot be submitted anymore. + // If "wait" is true, shutdown waits for all pending tasks to be finished. + // If "wait" is false, workers are stopped as soon as currently executing + // tasks are finished. + Status Shutdown(bool wait = true); + + // Wait for the thread pool to become idle + // + // This is useful for sequencing tests + void WaitForIdle(); + + protected: + static std::shared_ptr MakeCpuThreadPool(); + ThreadPool(); +}; + +#endif // ARROW_ENABLE_THREADING + +// Return the process-global thread pool for CPU-bound tasks. +ARROW_EXPORT ThreadPool* GetCpuThreadPool(); + +/// \brief Potentially run an async operation serially (if use_threads is false) +/// \see RunSerially +/// +/// If `use_threads` is true, the global CPU executor is used. +/// If `use_threads` is false, a temporary SerialExecutor is used. +/// `get_future` is called (from this thread) with the chosen executor and must +/// return a future that will eventually finish. This function returns once the +/// future has finished. +template +typename Fut::SyncType RunSynchronously(FnOnce get_future, + bool use_threads) { + if (use_threads) { + auto fut = std::move(get_future)(GetCpuThreadPool()); + return FutureToSync(fut); + } else { + return SerialExecutor::RunInSerialExecutor(std::move(get_future)); + } +} + +/// \brief Potentially iterate an async generator serially (if use_threads is false) +/// \see IterateGenerator +/// +/// If `use_threads` is true, the global CPU executor will be used. Each call to +/// the iterator will simply wait until the next item is available. Tasks may run in +/// the background between calls. +/// +/// If `use_threads` is false, the calling thread only will be used. 
Each call to +/// the iterator will use the calling thread to do enough work to generate one item. +/// Tasks will be left in a queue until the next call and no work will be done between +/// calls. +template +Iterator IterateSynchronously( + FnOnce()>>(Executor*)> get_gen, bool use_threads) { + if (use_threads) { + auto maybe_gen = std::move(get_gen)(GetCpuThreadPool()); + if (!maybe_gen.ok()) { + return MakeErrorIterator(maybe_gen.status()); + } + return MakeGeneratorIterator(*maybe_gen); + } else { + return SerialExecutor::IterateGenerator(std::move(get_gen)); + } +} + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/tracing.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/tracing.h new file mode 100644 index 0000000000000000000000000000000000000000..d7808256418eef0faaf54a189d11c6896583d68b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/tracing.h @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { +namespace tracing { + +class ARROW_EXPORT SpanDetails { + public: + virtual ~SpanDetails() {} +}; + +class ARROW_EXPORT Span { + public: + Span() noexcept; + /// True if this span has been started with START_SPAN + bool valid() const; + /// End the span early + void reset(); + std::unique_ptr details; +}; + +} // namespace tracing +} // namespace util +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/trie.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/trie.h new file mode 100644 index 0000000000000000000000000000000000000000..7815d4d1ecc1d66ba20c45eddb6c626833aa54e2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/trie.h @@ -0,0 +1,243 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +// A non-zero-terminated small string class. 
+// std::string usually has a small string optimization +// (see review at https://shaharmike.com/cpp/std-string/) +// but this one allows tight control and optimization of memory layout. +template +class SmallString { + public: + SmallString() : length_(0) {} + + template + SmallString(const T& v) { // NOLINT implicit constructor + *this = std::string_view(v); + } + + SmallString& operator=(const std::string_view s) { +#ifndef NDEBUG + CheckSize(s.size()); +#endif + length_ = static_cast(s.size()); + std::memcpy(data_, s.data(), length_); + return *this; + } + + SmallString& operator=(const std::string& s) { + *this = std::string_view(s); + return *this; + } + + SmallString& operator=(const char* s) { + *this = std::string_view(s); + return *this; + } + + explicit operator std::string_view() const { return std::string_view(data_, length_); } + + const char* data() const { return data_; } + size_t length() const { return length_; } + bool empty() const { return length_ == 0; } + char operator[](size_t pos) const { +#ifdef NDEBUG + assert(pos <= length_); +#endif + return data_[pos]; + } + + SmallString substr(size_t pos) const { + return SmallString(std::string_view(*this).substr(pos)); + } + + SmallString substr(size_t pos, size_t count) const { + return SmallString(std::string_view(*this).substr(pos, count)); + } + + template + bool operator==(T&& other) const { + return std::string_view(*this) == std::string_view(std::forward(other)); + } + + template + bool operator!=(T&& other) const { + return std::string_view(*this) != std::string_view(std::forward(other)); + } + + protected: + uint8_t length_; + char data_[N]; + + void CheckSize(size_t n) { assert(n <= N); } +}; + +template +std::ostream& operator<<(std::ostream& os, const SmallString& str) { + return os << std::string_view(str); +} + +// A trie class for byte strings, optimized for small sets of short strings. +// This class is immutable by design, use a TrieBuilder to construct it. 
+class ARROW_EXPORT Trie { + using index_type = int16_t; + using fast_index_type = int_fast16_t; + static constexpr auto kMaxIndex = std::numeric_limits::max(); + + public: + Trie() : size_(0) {} + Trie(Trie&&) = default; + Trie& operator=(Trie&&) = default; + + int32_t Find(std::string_view s) const { + const Node* node = &nodes_[0]; + fast_index_type pos = 0; + if (s.length() > static_cast(kMaxIndex)) { + return -1; + } + fast_index_type remaining = static_cast(s.length()); + + while (remaining > 0) { + auto substring_length = node->substring_length(); + if (substring_length > 0) { + auto substring_data = node->substring_data(); + if (remaining < substring_length) { + // Input too short + return -1; + } + for (fast_index_type i = 0; i < substring_length; ++i) { + if (s[pos++] != substring_data[i]) { + // Mismatching substring + return -1; + } + --remaining; + } + if (remaining == 0) { + // Matched node exactly + return node->found_index_; + } + } + // Lookup child using next input character + if (node->child_lookup_ == -1) { + // Input too long + return -1; + } + auto c = static_cast(s[pos++]); + --remaining; + auto child_index = lookup_table_[node->child_lookup_ * 256 + c]; + if (child_index == -1) { + // Child not found + return -1; + } + node = &nodes_[child_index]; + } + + // Input exhausted + if (node->substring_.empty()) { + // Matched node exactly + return node->found_index_; + } else { + return -1; + } + } + + Status Validate() const; + + void Dump() const; + + protected: + static constexpr size_t kNodeSize = 16; + static constexpr auto kMaxSubstringLength = + kNodeSize - 2 * sizeof(index_type) - sizeof(int8_t); + + struct Node { + // If this node is a valid end of string, index of found string, otherwise -1 + index_type found_index_; + // Base index for child lookup in lookup_table_ (-1 if no child nodes) + index_type child_lookup_; + // The substring for this node. 
+ SmallString substring_; + + fast_index_type substring_length() const { + return static_cast(substring_.length()); + } + const char* substring_data() const { return substring_.data(); } + }; + + static_assert(sizeof(Node) == kNodeSize, "Unexpected node size"); + + ARROW_DISALLOW_COPY_AND_ASSIGN(Trie); + + void Dump(const Node* node, const std::string& indent) const; + + // Node table: entry 0 is the root node + std::vector nodes_; + + // Indexed lookup structure: gives index in node table, or -1 if not found + std::vector lookup_table_; + + // Number of entries + index_type size_; + + friend class TrieBuilder; +}; + +class ARROW_EXPORT TrieBuilder { + using index_type = Trie::index_type; + using fast_index_type = Trie::fast_index_type; + + public: + TrieBuilder(); + Status Append(std::string_view s, bool allow_duplicate = false); + Trie Finish(); + + protected: + // Extend the lookup table by 256 entries, return the index of the new span + Status ExtendLookupTable(index_type* out_lookup_index); + // Split the node given by the index at the substring index `split_at` + Status SplitNode(fast_index_type node_index, fast_index_type split_at); + // Append an already constructed child node to the parent + Status AppendChildNode(Trie::Node* parent, uint8_t ch, Trie::Node&& node); + // Create a matching child node from this parent + Status CreateChildNode(Trie::Node* parent, uint8_t ch, std::string_view substring); + Status CreateChildNode(Trie::Node* parent, char ch, std::string_view substring); + + Trie trie_; + + static constexpr auto kMaxIndex = std::numeric_limits::max(); +}; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_traits.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_traits.h new file mode 100644 index 0000000000000000000000000000000000000000..c1906152423c97e11ef9f577f46c7f4d4d124597 --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_traits.h @@ -0,0 +1,46 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +namespace arrow { +namespace internal { + +/// \brief Metafunction to allow checking if a type matches any of another set of types +template +struct IsOneOf : std::false_type {}; /// Base case: nothing has matched + +template +struct IsOneOf { + /// Recursive case: T == U or T matches any other types provided (not including U). 
+ static constexpr bool value = std::is_same::value || IsOneOf::value; +}; + +/// \brief Shorthand for using IsOneOf + std::enable_if +template +using EnableIfIsOneOf = typename std::enable_if::value, T>::type; + +/// \brief is_null_pointer from C++17 +template +struct is_null_pointer : std::is_same::type> { +}; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/unreachable.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/unreachable.h new file mode 100644 index 0000000000000000000000000000000000000000..d2e383e714b3eb8e0a0b6a23b1086913093a5c29 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/unreachable.h @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/util/visibility.h" + +#include + +namespace arrow { + +[[noreturn]] ARROW_EXPORT void Unreachable(const char* message = "Unreachable"); + +[[noreturn]] ARROW_EXPORT void Unreachable(std::string_view message); + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h new file mode 100644 index 0000000000000000000000000000000000000000..ca93fab5b9f4e1f43d451689f0e75cb5572ce983 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h @@ -0,0 +1,59 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +// Convert a UTF8 string to a wstring (either UTF16 or UTF32, depending +// on the wchar_t width). +ARROW_EXPORT Result UTF8ToWideString(std::string_view source); + +// Similarly, convert a wstring to a UTF8 string. +ARROW_EXPORT Result WideStringToUTF8(const std::wstring& source); + +// Convert UTF8 string to a UTF16 string. 
+ARROW_EXPORT Result UTF8StringToUTF16(std::string_view source); + +// Convert UTF16 string to a UTF8 string. +ARROW_EXPORT Result UTF16StringToUTF8(std::u16string_view source); + +// This function needs to be called before doing UTF8 validation. +ARROW_EXPORT void InitializeUTF8(); + +ARROW_EXPORT bool ValidateUTF8(const uint8_t* data, int64_t size); + +ARROW_EXPORT bool ValidateUTF8(std::string_view str); + +// Skip UTF8 byte order mark, if any. +ARROW_EXPORT +Result SkipUTF8BOM(const uint8_t* data, int64_t size); + +static constexpr uint32_t kMaxUnicodeCodepoint = 0x110000; + +} // namespace util +} // namespace arrow