diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h new file mode 100644 index 0000000000000000000000000000000000000000..82e0a600513d4abd9bb956053a2a7e94a1033f39 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h @@ -0,0 +1,146 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Functions for converting between pandas's NumPy-based data representation +// and Arrow data structures + +#pragma once + +#include "arrow/python/platform.h" + +#include +#include +#include + +#include "arrow/memory_pool.h" +#include "arrow/python/visibility.h" + +namespace arrow { + +class Array; +class ChunkedArray; +class Column; +class DataType; +class MemoryPool; +class Status; +class Table; + +namespace py { + +enum class MapConversionType { + DEFAULT, // convert arrow maps to assoc lists (list of kev-value tuples) in Pandas + LOSSY, // report warnings when lossiness is encountered due to duplicate keys + STRICT_, // raise a Python exception when lossiness is encountered due to duplicate + // keys +}; + +struct PandasOptions { + /// arrow::MemoryPool to use for memory allocations + MemoryPool* pool = default_memory_pool(); + + /// If true, we will convert all string columns to categoricals + bool strings_to_categorical = false; + bool zero_copy_only = false; + bool integer_object_nulls = false; + bool date_as_object = false; + bool timestamp_as_object = false; + bool use_threads = false; + + /// Coerce all date and timestamp to datetime64[ns] + bool coerce_temporal_nanoseconds = false; + + /// Used to maintain backwards compatibility for + /// timezone bugs (see ARROW-9528). Should be removed + /// after Arrow 2.0 release. + bool ignore_timezone = false; + + /// \brief If true, do not create duplicate PyObject versions of equal + /// objects. This only applies to immutable objects like strings or datetime + /// objects + bool deduplicate_objects = false; + + /// \brief For certain data types, a cast is needed in order to store the + /// data in a pandas DataFrame or Series (e.g. timestamps are always stored + /// as nanoseconds in pandas). This option controls whether it is a safe + /// cast or not. + bool safe_cast = true; + + /// \brief If true, create one block per column rather than consolidated + /// blocks (1 per data type). 
Do zero-copy wrapping when there are no + /// nulls. pandas currently will consolidate the blocks on its own, causing + /// increased memory use, so keep this in mind if you are working on a + /// memory-constrained situation. + bool split_blocks = false; + + /// \brief If true, allow non-writable zero-copy views to be created for + /// single column blocks. This option is also used to provide zero copy for + /// Series data + bool allow_zero_copy_blocks = false; + + /// \brief If true, attempt to deallocate buffers in passed Arrow object if + /// it is the only remaining shared_ptr copy of it. See ARROW-3789 for + /// original context for this feature. Only currently implemented for Table + /// conversions + bool self_destruct = false; + + /// \brief The default behavior (DEFAULT), is to convert Arrow Map arrays to + /// Python association lists (list-of-tuples) in the same order as the Arrow + /// Map, as in [(key1, value1), (key2, value2), ...] + /// If LOSSY or STRICT, convert Arrow Map arrays to native Python dicts. + /// This can change the ordering of (key, value) pairs, and will deduplicate + /// multiple keys, resulting in a possible loss of data. + /// If 'lossy', this key deduplication results in a warning printed + /// when detected. If 'strict', this instead results in an exception + /// being raised when detected. + MapConversionType maps_as_pydicts = MapConversionType::DEFAULT; + + // Used internally for nested arrays. 
+ bool decode_dictionaries = false; + + // Columns that should be casted to categorical + std::unordered_set categorical_columns; + + // Columns that should be passed through to be converted to + // ExtensionArray/Block + std::unordered_set extension_columns; + + // Used internally to decipher between to_numpy() and to_pandas() when + // the expected output differs + bool to_numpy = false; +}; + +ARROW_PYTHON_EXPORT +Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr arr, + PyObject* py_ref, PyObject** out); + +ARROW_PYTHON_EXPORT +Status ConvertChunkedArrayToPandas(const PandasOptions& options, + std::shared_ptr col, PyObject* py_ref, + PyObject** out); + +// Convert a whole table as efficiently as possible to a pandas.DataFrame. +// +// The returned Python object is a list of tuples consisting of the exact 2D +// BlockManager structure of the pandas.DataFrame used as of pandas 0.19.x. +// +// tuple item: (indices: ndarray[int32], block: ndarray[TYPE, ndim=2]) +ARROW_PYTHON_EXPORT +Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr table, + PyObject** out); + +} // namespace py +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h new file mode 100644 index 0000000000000000000000000000000000000000..8060dd33722a08eb0935687ea5cb306dbd38a9f0 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/python/platform.h" + +#include "arrow/python/visibility.h" + +namespace arrow { +namespace py { +namespace benchmark { + +// Micro-benchmark routines for use from ASV + +// Run PandasObjectIsNull() once over every object in *list* +ARROW_PYTHON_EXPORT +void Benchmark_PandasObjectIsNull(PyObject* list); + +} // namespace benchmark +} // namespace py +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/csv.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/csv.h new file mode 100644 index 0000000000000000000000000000000000000000..34302e93667394d616692a6a4603e6d0be67d211 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/csv.h @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/csv/options.h" +#include "arrow/python/common.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace py { +namespace csv { + +using PyInvalidRowCallback = std::function<::arrow::csv::InvalidRowResult( + PyObject*, const ::arrow::csv::InvalidRow&)>; + +ARROW_PYTHON_EXPORT +::arrow::csv::InvalidRowHandler MakeInvalidRowHandler(PyInvalidRowCallback, + PyObject* handler); + +} // namespace csv +} // namespace py +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h new file mode 100644 index 0000000000000000000000000000000000000000..41b6a13a38875cf56abf8102d90526b66af3f9ab --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/python/serialize.h" +#include "arrow/python/visibility.h" +#include "arrow/status.h" + +namespace arrow { + +class RecordBatch; +class Tensor; + +namespace io { + +class RandomAccessFile; + +} // namespace io + +namespace py { + +struct ARROW_PYTHON_EXPORT SparseTensorCounts { + int coo; + int csr; + int csc; + int csf; + int ndim_csf; + + int num_total_tensors() const { return coo + csr + csc + csf; } + int num_total_buffers() const { + return coo * 3 + csr * 4 + csc * 4 + 2 * ndim_csf + csf; + } +}; + +/// \brief Read serialized Python sequence from file interface using Arrow IPC +/// \param[in] src a RandomAccessFile +/// \param[out] out the reconstructed data +/// \return Status +ARROW_PYTHON_EXPORT +Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out); + +/// \brief Reconstruct SerializedPyObject from representation produced by +/// SerializedPyObject::GetComponents. +/// +/// \param[in] num_tensors number of tensors in the object +/// \param[in] num_sparse_tensors number of sparse tensors in the object +/// \param[in] num_ndarrays number of numpy Ndarrays in the object +/// \param[in] num_buffers number of buffers in the object +/// \param[in] data a list containing pyarrow.Buffer instances. It must be 1 + +/// num_tensors * 2 + num_coo_tensors * 3 + num_csr_tensors * 4 + num_csc_tensors * 4 + +/// num_csf_tensors * (2 * ndim_csf + 3) + num_buffers in length +/// \param[out] out the reconstructed object +/// \return Status +ARROW_PYTHON_EXPORT +Status GetSerializedFromComponents(int num_tensors, + const SparseTensorCounts& num_sparse_tensors, + int num_ndarrays, int num_buffers, PyObject* data, + SerializedPyObject* out); + +/// \brief Reconstruct Python object from Arrow-serialized representation +/// \param[in] context Serialization context which contains custom serialization +/// and deserialization callbacks. 
Can be any Python object with a +/// _serialize_callback method for serialization and a _deserialize_callback +/// method for deserialization. If context is None, no custom serialization +/// will be attempted. +/// \param[in] object Object to deserialize +/// \param[in] base a Python object holding the underlying data that any NumPy +/// arrays will reference, to avoid premature deallocation +/// \param[out] out The returned object +/// \return Status +/// This acquires the GIL +ARROW_PYTHON_EXPORT +Status DeserializeObject(PyObject* context, const SerializedPyObject& object, + PyObject* base, PyObject** out); + +/// \brief Reconstruct Ndarray from Arrow-serialized representation +/// \param[in] object Object to deserialize +/// \param[out] out The deserialized tensor +/// \return Status +ARROW_PYTHON_EXPORT +Status DeserializeNdarray(const SerializedPyObject& object, std::shared_ptr* out); + +ARROW_PYTHON_EXPORT +Status NdarrayFromBuffer(std::shared_ptr src, std::shared_ptr* out); + +} // namespace py +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/gdb.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/gdb.h new file mode 100644 index 0000000000000000000000000000000000000000..1ddcbb51f6e0b70c1b16dc9a9ce6caf79fb2369e --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/gdb.h @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/python/visibility.h" + +namespace arrow { +namespace gdb { + +ARROW_PYTHON_EXPORT +void TestSession(); + +} // namespace gdb +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/helpers.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..e2fd8212ae68d0fb32f8858e9395be07a41350aa --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/helpers.h @@ -0,0 +1,162 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/python/platform.h" + +#include +#include +#include +#include + +#include "arrow/python/numpy_interop.h" + +#include + +#include "arrow/python/visibility.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" + +namespace arrow { + +namespace py { + +class OwnedRef; + +// \brief Get an arrow DataType instance from Arrow's Type::type enum +// \param[in] type One of the values of Arrow's Type::type enum +// \return A shared pointer to DataType +ARROW_PYTHON_EXPORT std::shared_ptr GetPrimitiveType(Type::type type); + +// \brief Construct a np.float16 object from a npy_half value. +ARROW_PYTHON_EXPORT PyObject* PyHalf_FromHalf(npy_half value); + +// \brief Convert a Python object to a npy_half value. +ARROW_PYTHON_EXPORT Status PyFloat_AsHalf(PyObject* obj, npy_half* out); + +namespace internal { + +// \brief Check that a Python module has been already imported +// \param[in] module_name The name of the module +Result IsModuleImported(const std::string& module_name); + +// \brief Import a Python module +// \param[in] module_name The name of the module +// \param[out] ref The OwnedRef containing the module PyObject* +ARROW_PYTHON_EXPORT +Status ImportModule(const std::string& module_name, OwnedRef* ref); + +// \brief Import an object from a Python module +// \param[in] module A Python module +// \param[in] name The name of the object to import +// \param[out] ref The OwnedRef containing the \c name attribute of the Python module \c +// module +ARROW_PYTHON_EXPORT +Status ImportFromModule(PyObject* module, const std::string& name, OwnedRef* ref); + +// \brief Check whether obj is an integer, independent of Python versions. 
+inline bool IsPyInteger(PyObject* obj) { return PyLong_Check(obj); } + +// \brief Import symbols from pandas that we need for various type-checking, +// like pandas.NaT or pandas.NA +void InitPandasStaticData(); + +// \brief Use pandas missing value semantics to check if a value is null +ARROW_PYTHON_EXPORT +bool PandasObjectIsNull(PyObject* obj); + +// \brief Check that obj is a pandas.Timedelta instance +ARROW_PYTHON_EXPORT +bool IsPandasTimedelta(PyObject* obj); + +// \brief Check that obj is a pandas.Timestamp instance +bool IsPandasTimestamp(PyObject* obj); + +// \brief Returned a borrowed reference to the pandas.tseries.offsets.DateOffset +PyObject* BorrowPandasDataOffsetType(); + +// \brief Check whether obj is a floating-point NaN +ARROW_PYTHON_EXPORT +bool PyFloat_IsNaN(PyObject* obj); + +inline bool IsPyBinary(PyObject* obj) { + return PyBytes_Check(obj) || PyByteArray_Check(obj) || PyMemoryView_Check(obj); +} + +// \brief Convert a Python integer into a C integer +// \param[in] obj A Python integer +// \param[out] out A pointer to a C integer to hold the result of the conversion +// \return The status of the operation +template +Status CIntFromPython(PyObject* obj, Int* out, const std::string& overflow_message = ""); + +// \brief Convert a Python unicode string to a std::string +ARROW_PYTHON_EXPORT +Status PyUnicode_AsStdString(PyObject* obj, std::string* out); + +// \brief Convert a Python bytes object to a std::string +ARROW_PYTHON_EXPORT +std::string PyBytes_AsStdString(PyObject* obj); + +// \brief Call str() on the given object and return the result as a std::string +ARROW_PYTHON_EXPORT +Status PyObject_StdStringStr(PyObject* obj, std::string* out); + +// \brief Return the repr() of the given object (always succeeds) +ARROW_PYTHON_EXPORT +std::string PyObject_StdStringRepr(PyObject* obj); + +// \brief Cast the given size to int32_t, with error checking +inline Status CastSize(Py_ssize_t size, int32_t* out, + const char* error_msg = "Maximum size 
exceeded (2GB)") { + // size is assumed to be positive + if (size > std::numeric_limits::max()) { + return Status::Invalid(error_msg); + } + *out = static_cast(size); + return Status::OK(); +} + +inline Status CastSize(Py_ssize_t size, int64_t* out, const char* error_msg = NULLPTR) { + // size is assumed to be positive + *out = static_cast(size); + return Status::OK(); +} + +// \brief Print the Python object's __str__ form along with the passed error +// message +ARROW_PYTHON_EXPORT +Status InvalidValue(PyObject* obj, const std::string& why); + +ARROW_PYTHON_EXPORT +Status InvalidType(PyObject* obj, const std::string& why); + +ARROW_PYTHON_EXPORT +Status IntegerScalarToDoubleSafe(PyObject* obj, double* result); +ARROW_PYTHON_EXPORT +Status IntegerScalarToFloat32Safe(PyObject* obj, float* result); + +// \brief Print Python object __repr__ +void DebugPrint(PyObject* obj); + +ARROW_PYTHON_EXPORT +bool IsThreadingEnabled(); + +} // namespace internal +} // namespace py +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h new file mode 100644 index 0000000000000000000000000000000000000000..983384db118a16141e49a679388b83c75d1d77d6 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Functions for converting between CPython built-in data structures and Arrow +// data structures + +#pragma once + +#include "arrow/python/platform.h" + +#include + +#include "arrow/python/visibility.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" + +#include "common.h" + +namespace arrow { + +class Array; +class Status; + +namespace py { + +// These functions take a sequence input, not arbitrary iterables + +/// \brief Infer Arrow type from a Python sequence +/// \param[in] obj the sequence of values +/// \param[in] mask an optional mask where True values are null. 
May +/// be nullptr +/// \param[in] pandas_null_sentinels use pandas's null value markers +ARROW_PYTHON_EXPORT +Result> InferArrowType(PyObject* obj, PyObject* mask, + bool pandas_null_sentinels); + +/// Checks whether the passed Python object is a boolean scalar +ARROW_PYTHON_EXPORT +bool IsPyBool(PyObject* obj); + +/// Checks whether the passed Python object is an integer scalar +ARROW_PYTHON_EXPORT +bool IsPyInt(PyObject* obj); + +/// Checks whether the passed Python object is a float scalar +ARROW_PYTHON_EXPORT +bool IsPyFloat(PyObject* obj); + +} // namespace py +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h new file mode 100644 index 0000000000000000000000000000000000000000..2e6c954862bd92af369baf04bf10a76e0c076fb5 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/python/platform.h" +#include "arrow/python/visibility.h" + +extern "C" { +ARROW_PYTHON_EXPORT +int arrow_init_numpy(); +} diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib_api.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib_api.h new file mode 100644 index 0000000000000000000000000000000000000000..6c4fee277774dba421569dd4691b775ab73e283a --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib_api.h @@ -0,0 +1,201 @@ +/* Generated by Cython 3.0.10 */ + +#ifndef __PYX_HAVE_API__pyarrow__lib +#define __PYX_HAVE_API__pyarrow__lib +#ifdef __MINGW64__ +#define MS_WIN64 +#endif +#include "Python.h" +#include "lib.h" + +static PyObject *(*__pyx_api_f_7pyarrow_3lib_box_memory_pool)( arrow::MemoryPool *) = 0; +#define box_memory_pool __pyx_api_f_7pyarrow_3lib_box_memory_pool +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer)(std::shared_ptr< arrow::Buffer> const &) = 0; +#define pyarrow_wrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer)(std::shared_ptr< arrow::ResizableBuffer> const &) = 0; +#define pyarrow_wrap_resizable_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type)(std::shared_ptr< arrow::DataType> const &) = 0; +#define pyarrow_wrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field)(std::shared_ptr< arrow::Field> const &) = 0; +#define pyarrow_wrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema)(std::shared_ptr< arrow::Schema> const &) = 0; +#define pyarrow_wrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar)(std::shared_ptr< arrow::Scalar> const &) = 0; 
+#define pyarrow_wrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array)(std::shared_ptr< arrow::Array> const &) = 0; +#define pyarrow_wrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array)(std::shared_ptr< arrow::ChunkedArray> const &) = 0; +#define pyarrow_wrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor)(std::shared_ptr< arrow::SparseCOOTensor> const &) = 0; +#define pyarrow_wrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix)(std::shared_ptr< arrow::SparseCSCMatrix> const &) = 0; +#define pyarrow_wrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor)(std::shared_ptr< arrow::SparseCSFTensor> const &) = 0; +#define pyarrow_wrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix)(std::shared_ptr< arrow::SparseCSRMatrix> const &) = 0; +#define pyarrow_wrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor)(std::shared_ptr< arrow::Tensor> const &) = 0; +#define pyarrow_wrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch)(std::shared_ptr< arrow::RecordBatch> const &) = 0; +#define pyarrow_wrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table)(std::shared_ptr< arrow::Table> const &) = 0; +#define pyarrow_wrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table +static std::shared_ptr< arrow::Buffer> 
(*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer)(PyObject *) = 0; +#define pyarrow_unwrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer +static std::shared_ptr< arrow::DataType> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type)(PyObject *) = 0; +#define pyarrow_unwrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type +static std::shared_ptr< arrow::Field> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field)(PyObject *) = 0; +#define pyarrow_unwrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field +static std::shared_ptr< arrow::Schema> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema)(PyObject *) = 0; +#define pyarrow_unwrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema +static std::shared_ptr< arrow::Scalar> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar)(PyObject *) = 0; +#define pyarrow_unwrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar +static std::shared_ptr< arrow::Array> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array)(PyObject *) = 0; +#define pyarrow_unwrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array +static std::shared_ptr< arrow::ChunkedArray> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array)(PyObject *) = 0; +#define pyarrow_unwrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array +static std::shared_ptr< arrow::SparseCOOTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor +static std::shared_ptr< arrow::SparseCSCMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix +static std::shared_ptr< arrow::SparseCSFTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor +static 
std::shared_ptr< arrow::SparseCSRMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix +static std::shared_ptr< arrow::Tensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor)(PyObject *) = 0; +#define pyarrow_unwrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor +static std::shared_ptr< arrow::RecordBatch> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch)(PyObject *) = 0; +#define pyarrow_unwrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch +static std::shared_ptr< arrow::Table> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table)(PyObject *) = 0; +#define pyarrow_unwrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status)(arrow::Status const &) = 0; +#define pyarrow_internal_check_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status)(arrow::Status const &) = 0; +#define pyarrow_internal_convert_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer)(PyObject *) = 0; +#define pyarrow_is_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type)(PyObject *) = 0; +#define pyarrow_is_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata)(PyObject *) = 0; +#define pyarrow_is_metadata __pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_field)(PyObject *) = 0; +#define pyarrow_is_field __pyx_api_f_7pyarrow_3lib_pyarrow_is_field +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema)(PyObject *) = 0; +#define pyarrow_is_schema __pyx_api_f_7pyarrow_3lib_pyarrow_is_schema +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_array)(PyObject *) = 
0; +#define pyarrow_is_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_array +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array)(PyObject *) = 0; +#define pyarrow_is_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar)(PyObject *) = 0; +#define pyarrow_is_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor)(PyObject *) = 0; +#define pyarrow_is_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor)(PyObject *) = 0; +#define pyarrow_is_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix)(PyObject *) = 0; +#define pyarrow_is_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix)(PyObject *) = 0; +#define pyarrow_is_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor)(PyObject *) = 0; +#define pyarrow_is_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_table)(PyObject *) = 0; +#define pyarrow_is_table __pyx_api_f_7pyarrow_3lib_pyarrow_is_table +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch)(PyObject *) = 0; +#define pyarrow_is_batch __pyx_api_f_7pyarrow_3lib_pyarrow_is_batch +#ifndef __PYX_HAVE_RT_ImportFunction_3_0_10 +#define __PYX_HAVE_RT_ImportFunction_3_0_10 +static int __Pyx_ImportFunction_3_0_10(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { + PyObject *d = 0; + PyObject *cobj = 0; + union { + void (*fp)(void); + void *p; + } tmp; + d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); + if (!d) + goto bad; + cobj = PyDict_GetItemString(d, funcname); + if (!cobj) { + 
PyErr_Format(PyExc_ImportError, + "%.200s does not export expected C function %.200s", + PyModule_GetName(module), funcname); + goto bad; + } + if (!PyCapsule_IsValid(cobj, sig)) { + PyErr_Format(PyExc_TypeError, + "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", + PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); + goto bad; + } + tmp.p = PyCapsule_GetPointer(cobj, sig); + *f = tmp.fp; + if (!(*f)) + goto bad; + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(d); + return -1; +} +#endif + + +static int import_pyarrow__lib(void) { + PyObject *module = 0; + module = PyImport_ImportModule("pyarrow.lib"); + if (!module) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "box_memory_pool", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_box_memory_pool, "PyObject *( arrow::MemoryPool *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer, "PyObject *(std::shared_ptr< arrow::Buffer> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_resizable_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer, "PyObject *(std::shared_ptr< arrow::ResizableBuffer> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type, "PyObject *(std::shared_ptr< arrow::DataType> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field, "PyObject *(std::shared_ptr< arrow::Field> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema, "PyObject *(std::shared_ptr< arrow::Schema> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_scalar", (void 
(**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar, "PyObject *(std::shared_ptr< arrow::Scalar> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array, "PyObject *(std::shared_ptr< arrow::Array> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array, "PyObject *(std::shared_ptr< arrow::ChunkedArray> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor, "PyObject *(std::shared_ptr< arrow::SparseCOOTensor> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSCMatrix> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor, "PyObject *(std::shared_ptr< arrow::SparseCSFTensor> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSRMatrix> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor, "PyObject *(std::shared_ptr< arrow::Tensor> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch, "PyObject *(std::shared_ptr< arrow::RecordBatch> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_table", (void 
(**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table, "PyObject *(std::shared_ptr< arrow::Table> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer, "std::shared_ptr< arrow::Buffer> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type, "std::shared_ptr< arrow::DataType> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field, "std::shared_ptr< arrow::Field> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema, "std::shared_ptr< arrow::Schema> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar, "std::shared_ptr< arrow::Scalar> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array, "std::shared_ptr< arrow::Array> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array, "std::shared_ptr< arrow::ChunkedArray> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor, "std::shared_ptr< arrow::SparseCOOTensor> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix, "std::shared_ptr< arrow::SparseCSCMatrix> (PyObject *)") < 0) goto bad; + if 
(__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor, "std::shared_ptr< arrow::SparseCSFTensor> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix, "std::shared_ptr< arrow::SparseCSRMatrix> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor, "std::shared_ptr< arrow::Tensor> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch, "std::shared_ptr< arrow::RecordBatch> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table, "std::shared_ptr< arrow::Table> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_internal_check_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status, "int (arrow::Status const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_internal_convert_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status, "PyObject *(arrow::Status const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_metadata", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_field", 
(void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_field, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_array, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_table, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch, "int (PyObject *)") < 0) goto bad; + Py_DECREF(module); module = 0; + 
return 0; + bad: + Py_XDECREF(module); + return -1; +} + +#endif /* !__PYX_HAVE_API__pyarrow__lib */ diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h new file mode 100644 index 0000000000000000000000000000000000000000..7ea7d6e16f5285f4b2dbec7c575c80e1e029b6f8 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/python/platform.h" // IWYU pragma: export + +#include // IWYU pragma: export + +// Don't use the deprecated Numpy functions +#ifdef NPY_1_7_API_VERSION +#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION +#else +#define NPY_ARRAY_NOTSWAPPED NPY_NOTSWAPPED +#define NPY_ARRAY_ALIGNED NPY_ALIGNED +#define NPY_ARRAY_WRITEABLE NPY_WRITEABLE +#define NPY_ARRAY_UPDATEIFCOPY NPY_UPDATEIFCOPY +#endif + +// This is required to be able to access the NumPy C API properly in C++ files +// other than init.cc. 
+#define PY_ARRAY_UNIQUE_SYMBOL arrow_ARRAY_API +#ifndef NUMPY_IMPORT_ARRAY +#define NO_IMPORT_ARRAY +#endif + +#include // IWYU pragma: export +#include // IWYU pragma: export +#include // IWYU pragma: export + +// A bit subtle. Numpy has 5 canonical integer types: +// (or, rather, type pairs: signed and unsigned) +// NPY_BYTE, NPY_SHORT, NPY_INT, NPY_LONG, NPY_LONGLONG +// It also has 4 fixed-width integer aliases. +// When mapping Arrow integer types to these 4 fixed-width aliases, +// we always miss one of the canonical types (even though it may +// have the same width as one of the aliases). +// Which one depends on the platform... +// On a LP64 system, NPY_INT64 maps to NPY_LONG and +// NPY_LONGLONG needs to be handled separately. +// On a LLP64 system, NPY_INT32 maps to NPY_LONG and +// NPY_INT needs to be handled separately. + +#if NPY_BITSOF_LONG == 32 && NPY_BITSOF_LONGLONG == 64 +#define NPY_INT64_IS_LONG_LONG 1 +#else +#define NPY_INT64_IS_LONG_LONG 0 +#endif + +#if NPY_BITSOF_INT == 32 && NPY_BITSOF_LONG == 64 +#define NPY_INT32_IS_INT 1 +#else +#define NPY_INT32_IS_INT 0 +#endif + +// Backported NumPy 2 API (can be removed if numpy 2 is required) +#if NPY_ABI_VERSION < 0x02000000 +#define PyDataType_ELSIZE(descr) ((descr)->elsize) +#define PyDataType_C_METADATA(descr) ((descr)->c_metadata) +#define PyDataType_FIELDS(descr) ((descr)->fields) +#endif + +namespace arrow { +namespace py { + +inline int import_numpy() { +#ifdef NUMPY_IMPORT_ARRAY + import_array1(-1); + import_umath1(-1); +#endif + + return 0; +} + +// See above about the missing Numpy integer type numbers +inline int fix_numpy_type_num(int type_num) { +#if !NPY_INT32_IS_INT && NPY_BITSOF_INT == 32 + if (type_num == NPY_INT) return NPY_INT32; + if (type_num == NPY_UINT) return NPY_UINT32; +#endif +#if !NPY_INT64_IS_LONG_LONG && NPY_BITSOF_LONGLONG == 64 + if (type_num == NPY_LONGLONG) return NPY_INT64; + if (type_num == NPY_ULONGLONG) return NPY_UINT64; +#endif + return type_num; +} + +} // 
namespace py +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h new file mode 100644 index 0000000000000000000000000000000000000000..b6cd093e5542008cf173f43de311e40c418e7c8d --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Converting from pandas memory representation to Arrow data structures + +#pragma once + +#include "arrow/python/platform.h" + +#include + +#include "arrow/compute/api.h" +#include "arrow/python/visibility.h" + +namespace arrow { + +class Array; +class ChunkedArray; +class DataType; +class MemoryPool; +class Status; + +namespace py { + +/// Convert NumPy arrays to Arrow. 
If target data type is not known, pass a +/// type with null +/// +/// \param[in] pool Memory pool for any memory allocations +/// \param[in] ao an ndarray with the array data +/// \param[in] mo an ndarray with a null mask (True is null), optional +/// \param[in] from_pandas If true, use pandas's null sentinels to determine +/// whether values are null +/// \param[in] type a specific type to cast to, may be null +/// \param[in] cast_options casting options +/// \param[out] out a ChunkedArray, to accommodate chunked output +ARROW_PYTHON_EXPORT +Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas, + const std::shared_ptr& type, + const compute::CastOptions& cast_options, + std::shared_ptr* out); + +/// Safely convert NumPy arrays to Arrow. If target data type is not known, +/// pass a type with null. +/// +/// \param[in] pool Memory pool for any memory allocations +/// \param[in] ao an ndarray with the array data +/// \param[in] mo an ndarray with a null mask (True is null), optional +/// \param[in] from_pandas If true, use pandas's null sentinels to determine +/// whether values are null +/// \param[in] type a specific type to cast to, may be null +/// \param[out] out a ChunkedArray, to accommodate chunked output +ARROW_PYTHON_EXPORT +Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas, + const std::shared_ptr& type, + std::shared_ptr* out); + +} // namespace py +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h new file mode 100644 index 0000000000000000000000000000000000000000..a1aaa30e260f5042c98f96bf081b4a49245ea656 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/python/common.h" +#include "arrow/python/visibility.h" +#include "arrow/util/macros.h" +#include "parquet/encryption/crypto_factory.h" +#include "parquet/encryption/kms_client.h" +#include "parquet/encryption/kms_client_factory.h" + +#if defined(_WIN32) || defined(__CYGWIN__) // Windows +#if defined(_MSC_VER) +#pragma warning(disable : 4251) +#else +#pragma GCC diagnostic ignored "-Wattributes" +#endif + +#ifdef ARROW_PYTHON_STATIC +#define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT +#elif defined(ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORTING) +#define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __declspec(dllexport) +#else +#define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __declspec(dllimport) +#endif + +#else // Not Windows +#ifndef ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT +#define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __attribute__((visibility("default"))) +#endif +#endif // Non-Windows + +namespace arrow { +namespace py { +namespace parquet { +namespace encryption { + +/// \brief A table of function pointers for calling from C++ into +/// Python. 
+class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientVtable { + public: + std::function + wrap_key; + std::function + unwrap_key; +}; + +/// \brief A helper for KmsClient implementation in Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClient + : public ::parquet::encryption::KmsClient { + public: + PyKmsClient(PyObject* handler, PyKmsClientVtable vtable); + ~PyKmsClient() override; + + std::string WrapKey(const std::string& key_bytes, + const std::string& master_key_identifier) override; + + std::string UnwrapKey(const std::string& wrapped_key, + const std::string& master_key_identifier) override; + + private: + OwnedRefNoGIL handler_; + PyKmsClientVtable vtable_; +}; + +/// \brief A table of function pointers for calling from C++ into +/// Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientFactoryVtable { + public: + std::function* out)> + create_kms_client; +}; + +/// \brief A helper for KmsClientFactory implementation in Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientFactory + : public ::parquet::encryption::KmsClientFactory { + public: + PyKmsClientFactory(PyObject* handler, PyKmsClientFactoryVtable vtable); + ~PyKmsClientFactory() override; + + std::shared_ptr<::parquet::encryption::KmsClient> CreateKmsClient( + const ::parquet::encryption::KmsConnectionConfig& kms_connection_config) override; + + private: + OwnedRefNoGIL handler_; + PyKmsClientFactoryVtable vtable_; +}; + +/// \brief A CryptoFactory that returns Results instead of throwing exceptions. 
+class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyCryptoFactory + : public ::parquet::encryption::CryptoFactory { + public: + arrow::Result> + SafeGetFileEncryptionProperties( + const ::parquet::encryption::KmsConnectionConfig& kms_connection_config, + const ::parquet::encryption::EncryptionConfiguration& encryption_config); + + /// The returned FileDecryptionProperties object will use the cache inside this + /// CryptoFactory object, so please keep this + /// CryptoFactory object alive along with the returned + /// FileDecryptionProperties object. + arrow::Result> + SafeGetFileDecryptionProperties( + const ::parquet::encryption::KmsConnectionConfig& kms_connection_config, + const ::parquet::encryption::DecryptionConfiguration& decryption_config); +}; + +} // namespace encryption +} // namespace parquet +} // namespace py +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pch.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pch.h new file mode 100644 index 0000000000000000000000000000000000000000..d1d688b4f17c4d0461ebd66105676083ebcb5b41 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pch.h @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Often-used headers, for precompiling. +// If updating this header, please make sure you check compilation speed +// before checking in. Adding headers which are not used extremely often +// may incur a slowdown, since it makes the precompiled header heavier to load. + +#include "arrow/pch.h" +#include "arrow/python/platform.h" diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/platform.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/platform.h new file mode 100644 index 0000000000000000000000000000000000000000..e71c7ac85399e4e3f7c93d4814fd7fdad774dc13 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/platform.h @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Functions for converting between pandas's NumPy-based data representation +// and Arrow data structures + +#pragma once + +// If PY_SSIZE_T_CLEAN is defined, argument parsing functions treat #-specifier +// to mean Py_ssize_t (defining this to suppress deprecation warning) +#define PY_SSIZE_T_CLEAN + +#include // IWYU pragma: export +#include + +// Work around C2528 error +#ifdef _MSC_VER +#if _MSC_VER >= 1900 +#undef timezone +#endif + +// https://bugs.python.org/issue36020 +// TODO(wjones127): Can remove once we drop support for CPython 3.9 +#ifdef snprintf +#undef snprintf +#endif +#endif diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h new file mode 100644 index 0000000000000000000000000000000000000000..113035500c0053dbb9dde5a99216aec1aefd1140 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/python/platform.h" + +#include + +#include "arrow/python/visibility.h" + +#include "arrow/sparse_tensor.h" + +// Work around ARROW-2317 (C linkage warning from Cython) +extern "C++" { + +namespace arrow { + +class Array; +class Buffer; +class DataType; +class Field; +class RecordBatch; +class Schema; +class Status; +class Table; +class Tensor; + +namespace py { + +// Returns 0 on success, -1 on error. +ARROW_PYTHON_EXPORT int import_pyarrow(); + +#define DECLARE_WRAP_FUNCTIONS(FUNC_SUFFIX, TYPE_NAME) \ + ARROW_PYTHON_EXPORT bool is_##FUNC_SUFFIX(PyObject*); \ + ARROW_PYTHON_EXPORT Result> unwrap_##FUNC_SUFFIX( \ + PyObject*); \ + ARROW_PYTHON_EXPORT PyObject* wrap_##FUNC_SUFFIX(const std::shared_ptr&); + +DECLARE_WRAP_FUNCTIONS(buffer, Buffer) + +DECLARE_WRAP_FUNCTIONS(data_type, DataType) +DECLARE_WRAP_FUNCTIONS(field, Field) +DECLARE_WRAP_FUNCTIONS(schema, Schema) + +DECLARE_WRAP_FUNCTIONS(scalar, Scalar) + +DECLARE_WRAP_FUNCTIONS(array, Array) +DECLARE_WRAP_FUNCTIONS(chunked_array, ChunkedArray) + +DECLARE_WRAP_FUNCTIONS(sparse_coo_tensor, SparseCOOTensor) +DECLARE_WRAP_FUNCTIONS(sparse_csc_matrix, SparseCSCMatrix) +DECLARE_WRAP_FUNCTIONS(sparse_csf_tensor, SparseCSFTensor) +DECLARE_WRAP_FUNCTIONS(sparse_csr_matrix, SparseCSRMatrix) +DECLARE_WRAP_FUNCTIONS(tensor, Tensor) + +DECLARE_WRAP_FUNCTIONS(batch, RecordBatch) +DECLARE_WRAP_FUNCTIONS(table, Table) + +#undef DECLARE_WRAP_FUNCTIONS + +namespace internal { + +// If status is ok, return 0. +// If status is not ok, set Python error indicator and return -1. +ARROW_PYTHON_EXPORT int check_status(const Status& status); + +// Convert status to a Python exception object. Status must not be ok. 
+ARROW_PYTHON_EXPORT PyObject* convert_status(const Status& status); + +} // namespace internal +} // namespace py +} // namespace arrow + +} // extern "C++" diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h new file mode 100644 index 0000000000000000000000000000000000000000..e509593c254468a62216e0e4a7ea073ad9a3f1d4 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h @@ -0,0 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// For backward compatibility. +#include "arrow/python/lib.h" diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_test.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_test.h new file mode 100644 index 0000000000000000000000000000000000000000..c2eb62fc29accb670f5d53e326381d68a6534335 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_test.h @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/status.h" + +#include "arrow/python/visibility.h" + +namespace arrow { +namespace py { +namespace testing { + +struct TestCase { + std::string name; + std::function func; +}; + +ARROW_PYTHON_EXPORT +std::vector GetCppTestCases(); + +} // namespace testing +} // namespace py +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h new file mode 100644 index 0000000000000000000000000000000000000000..d167996ba8da6796ac62da0fa0186419a3211930 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Functions for converting between CPython built-in data structures and Arrow +// data structures + +#pragma once + +#include "arrow/python/platform.h" + +#include +#include + +#include "arrow/python/visibility.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" + +#include "arrow/python/common.h" + +namespace arrow { + +class Array; +class Status; + +namespace py { + +struct PyConversionOptions { + PyConversionOptions() = default; + + PyConversionOptions(const std::shared_ptr& type, int64_t size, + MemoryPool* pool, bool from_pandas) + : type(type), size(size), from_pandas(from_pandas) {} + + // Set to null if to be inferred + std::shared_ptr type; + + // Default is -1, which indicates the size should the same as the input sequence + int64_t size = -1; + + bool from_pandas = false; + + /// Used to maintain backwards compatibility for + /// timezone bugs (see ARROW-9528). Should be removed + /// after Arrow 2.0 release. + bool ignore_timezone = false; + + bool strict = false; +}; + +/// \brief Convert sequence (list, generator, NumPy array with dtype object) of +/// Python objects. +/// \param[in] obj the sequence to convert +/// \param[in] mask a NumPy array of true/false values to indicate whether +/// values in the sequence are null (true) or not null (false). 
This parameter +/// may be null +/// \param[in] options various conversion options +/// \param[in] pool MemoryPool to use for allocations +/// \return Result ChunkedArray +ARROW_PYTHON_EXPORT +Result> ConvertPySequence( + PyObject* obj, PyObject* mask, PyConversionOptions options, + MemoryPool* pool = default_memory_pool()); + +} // namespace py + +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/udf.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/udf.h new file mode 100644 index 0000000000000000000000000000000000000000..d8c4e430e53d49a8fe7d237ffe7ba8feae5e452f --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/udf.h @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/compute/exec.h" +#include "arrow/compute/function.h" +#include "arrow/compute/registry.h" +#include "arrow/python/platform.h" +#include "arrow/record_batch.h" +#include "arrow/util/iterator.h" + +#include "arrow/python/common.h" +#include "arrow/python/pyarrow.h" +#include "arrow/python/visibility.h" + +namespace arrow { + +namespace py { + +// TODO: TODO(ARROW-16041): UDF Options are not exposed to the Python +// users. This feature will be included when extending to provide advanced +// options for the users. +struct ARROW_PYTHON_EXPORT UdfOptions { + std::string func_name; + compute::Arity arity; + compute::FunctionDoc func_doc; + std::vector> input_types; + std::shared_ptr output_type; +}; + +/// \brief A context passed as the first argument of UDF functions. +struct ARROW_PYTHON_EXPORT UdfContext { + MemoryPool* pool; + int64_t batch_length; +}; + +using UdfWrapperCallback = std::function; + +/// \brief register a Scalar user-defined-function from Python +Status ARROW_PYTHON_EXPORT RegisterScalarFunction( + PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options, + compute::FunctionRegistry* registry = NULLPTR); + +/// \brief register a Table user-defined-function from Python +Status ARROW_PYTHON_EXPORT RegisterTabularFunction( + PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options, + compute::FunctionRegistry* registry = NULLPTR); + +/// \brief register a Aggregate user-defined-function from Python +Status ARROW_PYTHON_EXPORT RegisterAggregateFunction( + PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options, + compute::FunctionRegistry* registry = NULLPTR); + +/// \brief register a Vector user-defined-function from Python +Status ARROW_PYTHON_EXPORT RegisterVectorFunction( + PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options, + compute::FunctionRegistry* registry = NULLPTR); + +Result> ARROW_PYTHON_EXPORT 
+CallTabularFunction(const std::string& func_name, const std::vector& args, + compute::FunctionRegistry* registry = NULLPTR); + +} // namespace py + +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h new file mode 100644 index 0000000000000000000000000000000000000000..dd43b32fd43ff46e195d0057cf3198b926b9fdd0 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#if defined(_WIN32) || defined(__CYGWIN__) // Windows +#if defined(_MSC_VER) +#pragma warning(disable : 4251) +#else +#pragma GCC diagnostic ignored "-Wattributes" +#endif + +#ifdef ARROW_PYTHON_STATIC +#define ARROW_PYTHON_EXPORT +#elif defined(ARROW_PYTHON_EXPORTING) +#define ARROW_PYTHON_EXPORT __declspec(dllexport) +#else +#define ARROW_PYTHON_EXPORT __declspec(dllimport) +#endif + +#else // Not Windows +#ifndef ARROW_PYTHON_EXPORT +#define ARROW_PYTHON_EXPORT __attribute__((visibility("default"))) +#endif +#endif // Non-Windows diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h new file mode 100644 index 0000000000000000000000000000000000000000..2a0e6ba709d974daebf81cf9e6cdb7aa8b947cc8 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/result.h" + +namespace arrow { + +template +Status MaybeTransform(InputIterator first, InputIterator last, OutputIterator out, + UnaryOperation unary_op) { + for (; first != last; ++first, (void)++out) { + ARROW_ASSIGN_OR_RAISE(*out, unary_op(*first)); + } + return Status::OK(); +} + +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h new file mode 100644 index 0000000000000000000000000000000000000000..71920e49f4aa2b1d92312b4aabaffafe35d323c7 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h @@ -0,0 +1,221 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/memory_pool.h" +#include "arrow/type_fwd.h" +#include "arrow/util/bit_util.h" + +namespace arrow { +namespace internal { + +struct BitmapWordAlignParams { + int64_t leading_bits; + int64_t trailing_bits; + int64_t trailing_bit_offset; + const uint8_t* aligned_start; + int64_t aligned_bits; + int64_t aligned_words; +}; + +// Compute parameters for accessing a bitmap using aligned word instructions. 
+// The returned parameters describe: +// - a leading area of size `leading_bits` before the aligned words +// - a word-aligned area of size `aligned_bits` +// - a trailing area of size `trailing_bits` after the aligned words +template +inline BitmapWordAlignParams BitmapWordAlign(const uint8_t* data, int64_t bit_offset, + int64_t length) { + static_assert(bit_util::IsPowerOf2(ALIGN_IN_BYTES), + "ALIGN_IN_BYTES should be a positive power of two"); + constexpr uint64_t ALIGN_IN_BITS = ALIGN_IN_BYTES * 8; + + BitmapWordAlignParams p; + + // Compute a "bit address" that we can align up to ALIGN_IN_BITS. + // We don't care about losing the upper bits since we are only interested in the + // difference between both addresses. + const uint64_t bit_addr = + reinterpret_cast(data) * 8 + static_cast(bit_offset); + const uint64_t aligned_bit_addr = bit_util::RoundUpToPowerOf2(bit_addr, ALIGN_IN_BITS); + + p.leading_bits = std::min(length, aligned_bit_addr - bit_addr); + p.aligned_words = (length - p.leading_bits) / ALIGN_IN_BITS; + p.aligned_bits = p.aligned_words * ALIGN_IN_BITS; + p.trailing_bits = length - p.leading_bits - p.aligned_bits; + p.trailing_bit_offset = bit_offset + p.leading_bits + p.aligned_bits; + + p.aligned_start = data + (bit_offset + p.leading_bits) / 8; + return p; +} +} // namespace internal + +namespace util { + +// Functions to check if the provided Arrow object is aligned by the specified alignment + +/// \brief Special alignment value to use data type-specific alignment +/// +/// If this is passed as the `alignment` in one of the CheckAlignment or EnsureAlignment +/// functions, then the function will ensure each buffer is suitably aligned +/// for the data type of the array. For example, given an int32 buffer the values +/// buffer's address must be a multiple of 4. Given a large_string buffer the offsets +/// buffer's address must be a multiple of 8. 
+constexpr int64_t kValueAlignment = -3; + +/// \brief Calculate if the buffer's address is a multiple of `alignment` +/// +/// If `alignment` is less than or equal to 0 then this method will always return true +/// \param buffer the buffer to check +/// \param alignment the alignment (in bytes) to check for +ARROW_EXPORT bool CheckAlignment(const Buffer& buffer, int64_t alignment); +/// \brief Calculate if all buffers in the array data are aligned +/// +/// This will also check the buffers in the dictionary and any children +/// \param array the array data to check +/// \param alignment the alignment (in bytes) to check for +ARROW_EXPORT bool CheckAlignment(const ArrayData& array, int64_t alignment); +/// \brief Calculate if all buffers in the array are aligned +/// +/// This will also check the buffers in the dictionary and any children +/// \param array the array to check +/// \param alignment the alignment (in bytes) to check for +ARROW_EXPORT bool CheckAlignment(const Array& array, int64_t alignment); + +// Following functions require an additional boolean vector which stores the +// alignment check bits of the constituent objects. +// For example, needs_alignment vector for a ChunkedArray will contain the +// check bits of the constituent Arrays. +// The boolean vector check was introduced to minimize the repetitive checks +// of the constituent objects during the EnsureAlignment function where certain +// objects can be ignored for further checking if we already know that they are +// completely aligned. + +/// \brief Calculate which (if any) chunks in a chunked array are unaligned +/// \param array the array to check +/// \param alignment the alignment (in bytes) to check for +/// \param needs_alignment an output vector that will store the results of the check +/// it must be set to a valid vector. Extra elements will be added to the end +/// of the vector for each chunk that is checked. `true` will be stored if +/// the chunk is unaligned. 
+/// \param offset the index of the chunk to start checking +/// \return true if all chunks (starting at `offset`) are aligned, false otherwise +ARROW_EXPORT bool CheckAlignment(const ChunkedArray& array, int64_t alignment, + std::vector* needs_alignment, int offset = 0); + +/// \brief calculate which (if any) columns in a record batch are unaligned +/// \param batch the batch to check +/// \param alignment the alignment (in bytes) to check for +/// \param needs_alignment an output vector that will store the results of the +/// check. It must be set to a valid vector. Extra elements will be added +/// to the end of the vector for each column that is checked. `true` will be +/// stored if the column is unaligned. +ARROW_EXPORT bool CheckAlignment(const RecordBatch& batch, int64_t alignment, + std::vector* needs_alignment); + +/// \brief calculate which (if any) columns in a table are unaligned +/// \param table the table to check +/// \param alignment the alignment (in bytes) to check for +/// \param needs_alignment an output vector that will store the results of the +/// check. It must be set to a valid vector. Extra elements will be added +/// to the end of the vector for each column that is checked. `true` will be +/// stored if the column is unaligned. +ARROW_EXPORT bool CheckAlignment(const Table& table, int64_t alignment, + std::vector* needs_alignment); + +/// \brief return a buffer that has the given alignment and the same data as the input +/// buffer +/// +/// If the input buffer is already aligned then this method will return the input buffer +/// If the input buffer is not already aligned then this method will allocate a new +/// buffer. The alignment of the new buffer will have at least +/// max(kDefaultBufferAlignment, alignment) bytes of alignment. 
+/// +/// \param buffer the buffer to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate a new buffer if the +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment( + std::shared_ptr buffer, int64_t alignment, MemoryPool* memory_pool); + +/// \brief return an array data where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. +/// +/// \param array_data the array data to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment( + std::shared_ptr array_data, int64_t alignment, MemoryPool* memory_pool); + +/// \brief return an array where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. +/// +/// \param array the array to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment(std::shared_ptr array, + int64_t alignment, + MemoryPool* memory_pool); + +/// \brief return a chunked array where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. 
+/// +/// \param array the chunked array to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment( + std::shared_ptr array, int64_t alignment, MemoryPool* memory_pool); + +/// \brief return a record batch where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. +/// +/// \param batch the batch to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment( + std::shared_ptr batch, int64_t alignment, MemoryPool* memory_pool); + +/// \brief return a table where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. +/// +/// \param table the table to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment(std::shared_ptr
table, + int64_t alignment, + MemoryPool* memory_pool); + +} // namespace util +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h new file mode 100644 index 0000000000000000000000000000000000000000..fd66298d1a9d61ee9276eeb2f162cd0fc628caea --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h @@ -0,0 +1,2058 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/util/async_generator_fwd.h" +#include "arrow/util/async_util.h" +#include "arrow/util/functional.h" +#include "arrow/util/future.h" +#include "arrow/util/io_util.h" +#include "arrow/util/iterator.h" +#include "arrow/util/mutex.h" +#include "arrow/util/queue.h" +#include "arrow/util/thread_pool.h" + +namespace arrow { + +// The methods in this file create, modify, and utilize AsyncGenerator which is an +// iterator of futures. 
This allows an asynchronous source (like file input) to be run +// through a pipeline in the same way that iterators can be used to create pipelined +// workflows. +// +// In order to support pipeline parallelism we introduce the concept of asynchronous +// reentrancy. This is different than synchronous reentrancy. With synchronous code a +// function is reentrant if the function can be called again while a previous call to that +// function is still running. Unless otherwise specified none of these generators are +// synchronously reentrant. Care should be taken to avoid calling them in such a way (and +// the utilities Visit/Collect/Await take care to do this). +// +// Asynchronous reentrancy on the other hand means the function is called again before the +// future returned by the function is marked finished (but after the call to get the +// future returns). Some of these generators are async-reentrant while others (e.g. +// those that depend on ordered processing like decompression) are not. Read the MakeXYZ +// function comments to determine which generators support async reentrancy. +// +// Note: Generators that are not asynchronously reentrant can still support readahead +// (\see MakeSerialReadaheadGenerator). +// +// Readahead operators, and some other operators, may introduce queueing. Any operators +// that introduce buffering should detail the amount of buffering they introduce in their +// MakeXYZ function comments. +// +// A generator should always be fully consumed before it is destroyed. +// A generator should not mark a future complete with an error status or a terminal value +// until all outstanding futures have completed. Generators that spawn multiple +// concurrent futures may need to hold onto an error while other concurrent futures wrap +// up. +template +struct IterationTraits> { + /// \brief by default when iterating through a sequence of AsyncGenerator, + /// an empty function indicates the end of iteration. 
+ static AsyncGenerator End() { return AsyncGenerator(); } + + static bool IsEnd(const AsyncGenerator& val) { return !val; } +}; + +template +Future AsyncGeneratorEnd() { + return Future::MakeFinished(IterationTraits::End()); +} + +/// returning a future that completes when all have been visited +template +Future<> VisitAsyncGenerator(AsyncGenerator generator, Visitor visitor) { + struct LoopBody { + struct Callback { + Result> operator()(const T& next) { + if (IsIterationEnd(next)) { + return Break(); + } else { + auto visited = visitor(next); + if (visited.ok()) { + return Continue(); + } else { + return visited; + } + } + } + + Visitor visitor; + }; + + Future> operator()() { + Callback callback{visitor}; + auto next = generator(); + return next.Then(std::move(callback)); + } + + AsyncGenerator generator; + Visitor visitor; + }; + + return Loop(LoopBody{std::move(generator), std::move(visitor)}); +} + +/// \brief Wait for an async generator to complete, discarding results. +template +Future<> DiscardAllFromAsyncGenerator(AsyncGenerator generator) { + std::function visitor = [](const T&) { return Status::OK(); }; + return VisitAsyncGenerator(generator, visitor); +} + +/// \brief Collect the results of an async generator into a vector +template +Future> CollectAsyncGenerator(AsyncGenerator generator) { + auto vec = std::make_shared>(); + auto loop_body = [generator = std::move(generator), + vec = std::move(vec)]() -> Future>> { + auto next = generator(); + return next.Then([vec](const T& result) -> Result>> { + if (IsIterationEnd(result)) { + return Break(*vec); + } else { + vec->push_back(result); + return Continue(); + } + }); + }; + return Loop(std::move(loop_body)); +} + +/// \see MakeMappedGenerator +template +class MappingGenerator { + public: + MappingGenerator(AsyncGenerator source, std::function(const T&)> map) + : state_(std::make_shared(std::move(source), std::move(map))) {} + + Future operator()() { + auto future = Future::Make(); + bool 
should_trigger; + { + auto guard = state_->mutex.Lock(); + if (state_->finished) { + return AsyncGeneratorEnd(); + } + should_trigger = state_->waiting_jobs.empty(); + state_->waiting_jobs.push_back(future); + } + if (should_trigger) { + state_->source().AddCallback(Callback{state_}); + } + return future; + } + + private: + struct State { + State(AsyncGenerator source, std::function(const T&)> map) + : source(std::move(source)), + map(std::move(map)), + waiting_jobs(), + mutex(), + finished(false) {} + + void Purge() { + // This might be called by an original callback (if the source iterator fails or + // ends) or by a mapped callback (if the map function fails or ends prematurely). + // Either way it should only be called once and after finished is set so there is no + // need to guard access to `waiting_jobs`. + while (!waiting_jobs.empty()) { + waiting_jobs.front().MarkFinished(IterationTraits::End()); + waiting_jobs.pop_front(); + } + } + + AsyncGenerator source; + std::function(const T&)> map; + std::deque> waiting_jobs; + util::Mutex mutex; + bool finished; + }; + + struct Callback; + + struct MappedCallback { + void operator()(const Result& maybe_next) { + bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next); + bool should_purge = false; + if (end) { + { + auto guard = state->mutex.Lock(); + should_purge = !state->finished; + state->finished = true; + } + } + sink.MarkFinished(maybe_next); + if (should_purge) { + state->Purge(); + } + } + std::shared_ptr state; + Future sink; + }; + + struct Callback { + void operator()(const Result& maybe_next) { + Future sink; + bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next); + bool should_purge = false; + bool should_trigger; + { + auto guard = state->mutex.Lock(); + // A MappedCallback may have purged or be purging the queue; + // we shouldn't do anything here. 
+ if (state->finished) return; + if (end) { + should_purge = !state->finished; + state->finished = true; + } + sink = state->waiting_jobs.front(); + state->waiting_jobs.pop_front(); + should_trigger = !end && !state->waiting_jobs.empty(); + } + if (should_purge) { + state->Purge(); + } + if (should_trigger) { + state->source().AddCallback(Callback{state}); + } + if (maybe_next.ok()) { + const T& val = maybe_next.ValueUnsafe(); + if (IsIterationEnd(val)) { + sink.MarkFinished(IterationTraits::End()); + } else { + Future mapped_fut = state->map(val); + mapped_fut.AddCallback(MappedCallback{std::move(state), std::move(sink)}); + } + } else { + sink.MarkFinished(maybe_next.status()); + } + } + + std::shared_ptr state; + }; + + std::shared_ptr state_; +}; + +/// \brief Create a generator that will apply the map function to each element of +/// source. The map function is not called on the end token. +/// +/// Note: This function makes a copy of `map` for each item +/// Note: Errors returned from the `map` function will be propagated +/// +/// If the source generator is async-reentrant then this generator will be also +template , + typename V = typename EnsureFuture::type::ValueType> +AsyncGenerator MakeMappedGenerator(AsyncGenerator source_generator, MapFn map) { + auto map_callback = [map = std::move(map)](const T& val) mutable -> Future { + return ToFuture(map(val)); + }; + return MappingGenerator(std::move(source_generator), std::move(map_callback)); +} + +/// \brief Create a generator that will apply the map function to +/// each element of source. The map function is not called on the end +/// token. The result of the map function should be another +/// generator; all these generators will then be flattened to produce +/// a single stream of items. 
+/// +/// Note: This function makes a copy of `map` for each item +/// Note: Errors returned from the `map` function will be propagated +/// +/// If the source generator is async-reentrant then this generator will be also +template , + typename V = typename EnsureFuture::type::ValueType> +AsyncGenerator MakeFlatMappedGenerator(AsyncGenerator source_generator, MapFn map) { + return MakeConcatenatedGenerator( + MakeMappedGenerator(std::move(source_generator), std::move(map))); +} + +/// \see MakeSequencingGenerator +template +class SequencingGenerator { + public: + SequencingGenerator(AsyncGenerator source, ComesAfter compare, IsNext is_next, + T initial_value) + : state_(std::make_shared(std::move(source), std::move(compare), + std::move(is_next), std::move(initial_value))) {} + + Future operator()() { + { + auto guard = state_->mutex.Lock(); + // We can send a result immediately if the top of the queue is either an + // error or the next item + if (!state_->queue.empty() && + (!state_->queue.top().ok() || + state_->is_next(state_->previous_value, *state_->queue.top()))) { + auto result = std::move(state_->queue.top()); + if (result.ok()) { + state_->previous_value = *result; + } + state_->queue.pop(); + return Future::MakeFinished(result); + } + if (state_->finished) { + return AsyncGeneratorEnd(); + } + // The next item is not in the queue so we will need to wait + auto new_waiting_fut = Future::Make(); + state_->waiting_future = new_waiting_fut; + guard.Unlock(); + state_->source().AddCallback(Callback{state_}); + return new_waiting_fut; + } + } + + private: + struct WrappedComesAfter { + bool operator()(const Result& left, const Result& right) { + if (!left.ok() || !right.ok()) { + // Should never happen + return false; + } + return compare(*left, *right); + } + ComesAfter compare; + }; + + struct State { + State(AsyncGenerator source, ComesAfter compare, IsNext is_next, T initial_value) + : source(std::move(source)), + is_next(std::move(is_next)), + 
previous_value(std::move(initial_value)), + waiting_future(), + queue(WrappedComesAfter{compare}), + finished(false), + mutex() {} + + AsyncGenerator source; + IsNext is_next; + T previous_value; + Future waiting_future; + std::priority_queue, std::vector>, WrappedComesAfter> queue; + bool finished; + util::Mutex mutex; + }; + + class Callback { + public: + explicit Callback(std::shared_ptr state) : state_(std::move(state)) {} + + void operator()(const Result result) { + Future to_deliver; + bool finished; + { + auto guard = state_->mutex.Lock(); + bool ready_to_deliver = false; + if (!result.ok()) { + // Clear any cached results + while (!state_->queue.empty()) { + state_->queue.pop(); + } + ready_to_deliver = true; + state_->finished = true; + } else if (IsIterationEnd(result.ValueUnsafe())) { + ready_to_deliver = state_->queue.empty(); + state_->finished = true; + } else { + ready_to_deliver = state_->is_next(state_->previous_value, *result); + } + + if (ready_to_deliver && state_->waiting_future.is_valid()) { + to_deliver = state_->waiting_future; + if (result.ok()) { + state_->previous_value = *result; + } + } else { + state_->queue.push(result); + } + // Capture state_->finished so we can access it outside the mutex + finished = state_->finished; + } + // Must deliver result outside of the mutex + if (to_deliver.is_valid()) { + to_deliver.MarkFinished(result); + } else { + // Otherwise, if we didn't get the next item (or a terminal item), we + // need to keep looking + if (!finished) { + state_->source().AddCallback(Callback{state_}); + } + } + } + + private: + const std::shared_ptr state_; + }; + + const std::shared_ptr state_; +}; + +/// \brief Buffer an AsyncGenerator to return values in sequence order ComesAfter +/// and IsNext determine the sequence order. 
+/// +/// ComesAfter should be a BinaryPredicate that only returns true if a comes after b +/// +/// IsNext should be a BinaryPredicate that returns true, given `a` and `b`, only if +/// `b` follows immediately after `a`. It should return true given `initial_value` and +/// `b` if `b` is the first item in the sequence. +/// +/// This operator will queue unboundedly while waiting for the next item. It is intended +/// for jittery sources that might scatter an ordered sequence. It is NOT intended to +/// sort. Using it to try and sort could result in excessive RAM usage. This generator +/// will queue up to N blocks where N is the max "out of order"ness of the source. +/// +/// For example, if the source is 1,6,2,5,4,3 it will queue 3 blocks because 3 is 3 +/// blocks beyond where it belongs. +/// +/// This generator is not async-reentrant but it consists only of a simple log(n) +/// insertion into a priority queue. +template +AsyncGenerator MakeSequencingGenerator(AsyncGenerator source_generator, + ComesAfter compare, IsNext is_next, + T initial_value) { + return SequencingGenerator( + std::move(source_generator), std::move(compare), std::move(is_next), + std::move(initial_value)); +} + +/// \see MakeTransformedGenerator +template +class TransformingGenerator { + // The transforming generator state will be referenced as an async generator but will + // also be referenced via callback to various futures. If the async generator owner + // moves it around we need the state to be consistent for future callbacks. 
+ struct TransformingGeneratorState + : std::enable_shared_from_this { + TransformingGeneratorState(AsyncGenerator generator, Transformer transformer) + : generator_(std::move(generator)), + transformer_(std::move(transformer)), + last_value_(), + finished_() {} + + Future operator()() { + while (true) { + auto maybe_next_result = Pump(); + if (!maybe_next_result.ok()) { + return Future::MakeFinished(maybe_next_result.status()); + } + auto maybe_next = std::move(maybe_next_result).ValueUnsafe(); + if (maybe_next.has_value()) { + return Future::MakeFinished(*std::move(maybe_next)); + } + + auto next_fut = generator_(); + // If finished already, process results immediately inside the loop to avoid + // stack overflow + if (next_fut.is_finished()) { + auto next_result = next_fut.result(); + if (next_result.ok()) { + last_value_ = *next_result; + } else { + return Future::MakeFinished(next_result.status()); + } + // Otherwise, if not finished immediately, add callback to process results + } else { + auto self = this->shared_from_this(); + return next_fut.Then([self](const T& next_result) { + self->last_value_ = next_result; + return (*self)(); + }); + } + } + } + + // See comment on TransformingIterator::Pump + Result> Pump() { + if (!finished_ && last_value_.has_value()) { + ARROW_ASSIGN_OR_RAISE(TransformFlow next, transformer_(*last_value_)); + if (next.ReadyForNext()) { + if (IsIterationEnd(*last_value_)) { + finished_ = true; + } + last_value_.reset(); + } + if (next.Finished()) { + finished_ = true; + } + if (next.HasValue()) { + return next.Value(); + } + } + if (finished_) { + return IterationTraits::End(); + } + return std::nullopt; + } + + AsyncGenerator generator_; + Transformer transformer_; + std::optional last_value_; + bool finished_; + }; + + public: + explicit TransformingGenerator(AsyncGenerator generator, + Transformer transformer) + : state_(std::make_shared(std::move(generator), + std::move(transformer))) {} + + Future operator()() { return 
(*state_)(); } + + protected: + std::shared_ptr state_; +}; + +/// \brief Transform an async generator using a transformer function returning a new +/// AsyncGenerator +/// +/// The transform function here behaves exactly the same as the transform function in +/// MakeTransformedIterator and you can safely use the same transform function to +/// transform both synchronous and asynchronous streams. +/// +/// This generator is not async-reentrant +/// +/// This generator may queue up to 1 instance of T but will not delay +template +AsyncGenerator MakeTransformedGenerator(AsyncGenerator generator, + Transformer transformer) { + return TransformingGenerator(generator, transformer); +} + +/// \see MakeSerialReadaheadGenerator +template +class SerialReadaheadGenerator { + public: + SerialReadaheadGenerator(AsyncGenerator source_generator, int max_readahead) + : state_(std::make_shared(std::move(source_generator), max_readahead)) {} + + Future operator()() { + if (state_->first_) { + // Lazy generator, need to wait for the first ask to prime the pump + state_->first_ = false; + auto next = state_->source_(); + return next.Then(Callback{state_}, ErrCallback{state_}); + } + + // This generator is not async-reentrant. 
We won't be called until the last + // future finished so we know there is something in the queue + auto finished = state_->finished_.load(); + if (finished && state_->readahead_queue_.IsEmpty()) { + return AsyncGeneratorEnd(); + } + + std::shared_ptr> next; + if (!state_->readahead_queue_.Read(next)) { + return Status::UnknownError("Could not read from readahead_queue"); + } + + auto last_available = state_->spaces_available_.fetch_add(1); + if (last_available == 0 && !finished) { + // Reader idled out, we need to restart it + ARROW_RETURN_NOT_OK(state_->Pump(state_)); + } + return *next; + } + + private: + struct State { + State(AsyncGenerator source, int max_readahead) + : first_(true), + source_(std::move(source)), + finished_(false), + // There is one extra "space" for the in-flight request + spaces_available_(max_readahead + 1), + // The SPSC queue has size-1 "usable" slots so we need to overallocate 1 + readahead_queue_(max_readahead + 1) {} + + Status Pump(const std::shared_ptr& self) { + // Can't do readahead_queue.write(source().Then(...)) because then the + // callback might run immediately and add itself to the queue before this gets added + // to the queue messing up the order. + auto next_slot = std::make_shared>(); + auto written = readahead_queue_.Write(next_slot); + if (!written) { + return Status::UnknownError("Could not write to readahead_queue"); + } + // If this Pump is being called from a callback it is possible for the source to + // poll and read from the queue between the Write and this spot where we fill the + // value in. However, it is not possible for the future to read this value we are + // writing. That is because this callback (the callback for future X) must be + // finished before future X is marked complete and this source is not pulled + // reentrantly so it will not poll for future X+1 until this callback has completed. 
+ *next_slot = source_().Then(Callback{self}, ErrCallback{self}); + return Status::OK(); + } + + // Only accessed by the consumer end + bool first_; + // Accessed by both threads + AsyncGenerator source_; + std::atomic finished_; + // The queue has a size but it is not atomic. We keep track of how many spaces are + // left in the queue here so we know if we've just written the last value and we need + // to stop reading ahead or if we've just read from a full queue and we need to + // restart reading ahead + std::atomic spaces_available_; + // Needs to be a queue of shared_ptr and not Future because we set the value of the + // future after we add it to the queue + util::SpscQueue>> readahead_queue_; + }; + + struct Callback { + Result operator()(const T& next) { + if (IsIterationEnd(next)) { + state_->finished_.store(true); + return next; + } + auto last_available = state_->spaces_available_.fetch_sub(1); + if (last_available > 1) { + ARROW_RETURN_NOT_OK(state_->Pump(state_)); + } + return next; + } + + std::shared_ptr state_; + }; + + struct ErrCallback { + Result operator()(const Status& st) { + state_->finished_.store(true); + return st; + } + + std::shared_ptr state_; + }; + + std::shared_ptr state_; +}; + +/// \see MakeFromFuture +template +class FutureFirstGenerator { + public: + explicit FutureFirstGenerator(Future> future) + : state_(std::make_shared(std::move(future))) {} + + Future operator()() { + if (state_->source_) { + return state_->source_(); + } else { + auto state = state_; + return state_->future_.Then([state](const AsyncGenerator& source) { + state->source_ = source; + return state->source_(); + }); + } + } + + private: + struct State { + explicit State(Future> future) : future_(future), source_() {} + + Future> future_; + AsyncGenerator source_; + }; + + std::shared_ptr state_; +}; + +/// \brief Transform a Future> into an AsyncGenerator +/// that waits for the future to complete as part of the first item. 
+/// +/// This generator is not async-reentrant (even if the generator yielded by future is) +/// +/// This generator does not queue +template +AsyncGenerator MakeFromFuture(Future> future) { + return FutureFirstGenerator(std::move(future)); +} + +/// \brief Create a generator that will pull from the source into a queue. Unlike +/// MakeReadaheadGenerator this will not pull reentrantly from the source. +/// +/// The source generator does not need to be async-reentrant +/// +/// This generator is not async-reentrant (even if the source is) +/// +/// This generator may queue up to max_readahead additional instances of T +template +AsyncGenerator MakeSerialReadaheadGenerator(AsyncGenerator source_generator, + int max_readahead) { + return SerialReadaheadGenerator(std::move(source_generator), max_readahead); +} + +/// \brief Create a generator that immediately pulls from the source +/// +/// Typical generators do not pull from their source until they themselves +/// are pulled. This generator does not follow that convention and will call +/// generator() once before it returns. The returned generator will otherwise +/// mirror the source. +/// +/// This generator forwards async-reentrant pressure to the source +/// This generator buffers one item (the first result) until it is delivered. 
+template +AsyncGenerator MakeAutoStartingGenerator(AsyncGenerator generator) { + struct AutostartGenerator { + Future operator()() { + if (first_future->is_valid()) { + Future result = *first_future; + *first_future = Future(); + return result; + } + return source(); + } + + std::shared_ptr> first_future; + AsyncGenerator source; + }; + + std::shared_ptr> first_future = std::make_shared>(generator()); + return AutostartGenerator{std::move(first_future), std::move(generator)}; +} + +/// \see MakeReadaheadGenerator +template +class ReadaheadGenerator { + public: + ReadaheadGenerator(AsyncGenerator source_generator, int max_readahead) + : state_(std::make_shared(std::move(source_generator), max_readahead)) {} + + Future AddMarkFinishedContinuation(Future fut) { + auto state = state_; + return fut.Then( + [state](const T& result) -> Future { + state->MarkFinishedIfDone(result); + if (state->finished.load()) { + if (state->num_running.fetch_sub(1) == 1) { + state->final_future.MarkFinished(); + } + } else { + state->num_running.fetch_sub(1); + } + return result; + }, + [state](const Status& err) -> Future { + // If there is an error we need to make sure all running + // tasks finish before we return the error. 
+ state->finished.store(true); + if (state->num_running.fetch_sub(1) == 1) { + state->final_future.MarkFinished(); + } + return state->final_future.Then([err]() -> Result { return err; }); + }); + } + + Future operator()() { + if (state_->readahead_queue.empty()) { + // This is the first request, let's pump the underlying queue + state_->num_running.store(state_->max_readahead); + for (int i = 0; i < state_->max_readahead; i++) { + auto next = state_->source_generator(); + auto next_after_check = AddMarkFinishedContinuation(std::move(next)); + state_->readahead_queue.push(std::move(next_after_check)); + } + } + // Pop one and add one + auto result = state_->readahead_queue.front(); + state_->readahead_queue.pop(); + if (state_->finished.load()) { + state_->readahead_queue.push(AsyncGeneratorEnd()); + } else { + state_->num_running.fetch_add(1); + auto back_of_queue = state_->source_generator(); + auto back_of_queue_after_check = + AddMarkFinishedContinuation(std::move(back_of_queue)); + state_->readahead_queue.push(std::move(back_of_queue_after_check)); + } + return result; + } + + private: + struct State { + State(AsyncGenerator source_generator, int max_readahead) + : source_generator(std::move(source_generator)), max_readahead(max_readahead) {} + + void MarkFinishedIfDone(const T& next_result) { + if (IsIterationEnd(next_result)) { + finished.store(true); + } + } + + AsyncGenerator source_generator; + int max_readahead; + Future<> final_future = Future<>::Make(); + std::atomic num_running{0}; + std::atomic finished{false}; + std::queue> readahead_queue; + }; + + std::shared_ptr state_; +}; + +/// \brief A generator where the producer pushes items on a queue. +/// +/// No back-pressure is applied, so this generator is mostly useful when +/// producing the values is neither CPU- nor memory-expensive (e.g. fetching +/// filesystem metadata). +/// +/// This generator is not async-reentrant. 
+template +class PushGenerator { + struct State { + State() {} + + util::Mutex mutex; + std::deque> result_q; + std::optional> consumer_fut; + bool finished = false; + }; + + public: + /// Producer API for PushGenerator + class Producer { + public: + explicit Producer(const std::shared_ptr& state) : weak_state_(state) {} + + /// \brief Push a value on the queue + /// + /// True is returned if the value was pushed, false if the generator is + /// already closed or destroyed. If the latter, it is recommended to stop + /// producing any further values. + bool Push(Result result) { + auto state = weak_state_.lock(); + if (!state) { + // Generator was destroyed + return false; + } + auto lock = state->mutex.Lock(); + if (state->finished) { + // Closed early + return false; + } + if (state->consumer_fut.has_value()) { + auto fut = std::move(state->consumer_fut.value()); + state->consumer_fut.reset(); + lock.Unlock(); // unlock before potentially invoking a callback + fut.MarkFinished(std::move(result)); + } else { + state->result_q.push_back(std::move(result)); + } + return true; + } + + /// \brief Tell the consumer we have finished producing + /// + /// It is allowed to call this and later call Push() again ("early close"). + /// In this case, calls to Push() after the queue is closed are silently + /// ignored. This can help implementing non-trivial cancellation cases. + /// + /// True is returned on success, false if the generator is already closed + /// or destroyed. 
+ bool Close() { + auto state = weak_state_.lock(); + if (!state) { + // Generator was destroyed + return false; + } + auto lock = state->mutex.Lock(); + if (state->finished) { + // Already closed + return false; + } + state->finished = true; + if (state->consumer_fut.has_value()) { + auto fut = std::move(state->consumer_fut.value()); + state->consumer_fut.reset(); + lock.Unlock(); // unlock before potentially invoking a callback + fut.MarkFinished(IterationTraits::End()); + } + return true; + } + + /// Return whether the generator was closed or destroyed. + bool is_closed() const { + auto state = weak_state_.lock(); + if (!state) { + // Generator was destroyed + return true; + } + auto lock = state->mutex.Lock(); + return state->finished; + } + + private: + const std::weak_ptr weak_state_; + }; + + PushGenerator() : state_(std::make_shared()) {} + + /// Read an item from the queue + Future operator()() const { + auto lock = state_->mutex.Lock(); + assert(!state_->consumer_fut.has_value()); // Non-reentrant + if (!state_->result_q.empty()) { + auto fut = Future::MakeFinished(std::move(state_->result_q.front())); + state_->result_q.pop_front(); + return fut; + } + if (state_->finished) { + return AsyncGeneratorEnd(); + } + auto fut = Future::Make(); + state_->consumer_fut = fut; + return fut; + } + + /// \brief Return producer-side interface + /// + /// The returned object must be used by the producer to push values on the queue. + /// Only a single Producer object should be instantiated. + Producer producer() { return Producer{state_}; } + + private: + const std::shared_ptr state_; +}; + +/// \brief Create a generator that pulls reentrantly from a source +/// This generator will pull reentrantly from a source, ensuring that max_readahead +/// requests are active at any given time. +/// +/// The source generator must be async-reentrant +/// +/// This generator itself is async-reentrant. 
+/// +/// This generator may queue up to max_readahead instances of T +template +AsyncGenerator MakeReadaheadGenerator(AsyncGenerator source_generator, + int max_readahead) { + return ReadaheadGenerator(std::move(source_generator), max_readahead); +} + +/// \brief Creates a generator that will yield finished futures from a vector +/// +/// This generator is async-reentrant +template +AsyncGenerator MakeVectorGenerator(std::vector vec) { + struct State { + explicit State(std::vector vec_) : vec(std::move(vec_)), vec_idx(0) {} + + std::vector vec; + std::atomic vec_idx; + }; + + auto state = std::make_shared(std::move(vec)); + return [state]() { + auto idx = state->vec_idx.fetch_add(1); + if (idx >= state->vec.size()) { + // Eagerly return memory + state->vec.clear(); + return AsyncGeneratorEnd(); + } + return Future::MakeFinished(state->vec[idx]); + }; +} + +/// \see MakeMergedGenerator +template +class MergedGenerator { + // Note, the implementation of this class is quite complex at the moment (PRs to + // simplify are always welcome) + // + // Terminology is borrowed from rxjs. This is a pull based implementation of the + // mergeAll operator. The "outer subscription" refers to the async + // generator that the caller provided when creating this. The outer subscription + // yields generators. + // + // Each of these generators is then subscribed to (up to max_subscriptions) and these + // are referred to as "inner subscriptions". + // + // As soon as we start we try and establish `max_subscriptions` inner subscriptions. For + // each inner subscription we will cache up to 1 value. This means we may have more + // values than we have been asked for. In our example, if a caller asks for one record + // batch we will start scanning `max_subscriptions` different files. For each file we + // will only queue up to 1 batch (so a separate readahead is needed on the file if batch + // readahead is desired). 
+ // + // If the caller is slow we may accumulate ready-to-deliver items. These are stored + // in `delivered_jobs`. + // + // If the caller is very quick we may accumulate requests. These are stored in + // `waiting_jobs`. + // + // It may be helpful to consider an example, in the scanner the outer subscription + // is some kind of asynchronous directory listing. The inner subscription is + // then a scan on a file yielded by the directory listing. + // + // An "outstanding" request is when we have polled either the inner or outer + // subscription but that future hasn't completed yet. + // + // There are three possible "events" that can happen. + // * A caller could request the next future + // * An outer callback occurs when the next subscription is ready (e.g. the directory + // listing has produced a new file) + // * An inner callback occurs when one of the inner subscriptions emits a value (e.g. + // a file scan emits a record batch) + // + // Any time an event happens the logic is broken into two phases. First, we grab the + // lock and modify the shared state. While doing this we figure out what callbacks we + // will need to execute. Then, we give up the lock and execute these callbacks. It is + // important to execute these callbacks without the lock to avoid deadlock. 
+ public: + explicit MergedGenerator(AsyncGenerator> source, + int max_subscriptions) + : state_(std::make_shared(std::move(source), max_subscriptions)) {} + + Future operator()() { + // A caller has requested a future + Future waiting_future; + std::shared_ptr delivered_job; + bool mark_generator_complete = false; + { + auto guard = state_->mutex.Lock(); + if (!state_->delivered_jobs.empty()) { + // If we have a job sitting around we can deliver it + delivered_job = std::move(state_->delivered_jobs.front()); + state_->delivered_jobs.pop_front(); + if (state_->IsCompleteUnlocked(guard)) { + // It's possible this waiting job was the only thing left to handle and + // we have now completed the generator. + mark_generator_complete = true; + } else { + // Since we had a job sitting around we also had an inner subscription + // that had paused. We are going to restart this inner subscription and + // so there will be a new outstanding request. + state_->outstanding_requests++; + } + } else if (state_->broken || + (!state_->first && state_->num_running_subscriptions == 0)) { + // If we are broken or exhausted then prepare a terminal item but + // we won't complete it until we've finished. + Result end_res = IterationEnd(); + if (!state_->final_error.ok()) { + end_res = state_->final_error; + state_->final_error = Status::OK(); + } + return state_->all_finished.Then([end_res]() -> Result { return end_res; }); + } else { + // Otherwise we just queue the request and it will be completed when one of the + // ongoing inner subscriptions delivers a result + waiting_future = Future::Make(); + state_->waiting_jobs.push_back(std::make_shared>(waiting_future)); + } + if (state_->first) { + // On the first request we are going to try and immediately fill our queue + // of subscriptions. We assume we are going to be able to start them all. 
+ state_->outstanding_requests += + static_cast(state_->active_subscriptions.size()); + state_->num_running_subscriptions += + static_cast(state_->active_subscriptions.size()); + } + } + // If we grabbed a finished item from the delivered_jobs queue then we may need + // to mark the generator finished or issue a request for a new item to fill in + // the spot we just vacated. Notice that we issue that request to the same + // subscription that delivered it (deliverer). + if (delivered_job) { + if (mark_generator_complete) { + state_->all_finished.MarkFinished(); + } else { + delivered_job->deliverer().AddCallback( + InnerCallback(state_, delivered_job->index)); + } + return std::move(delivered_job->value); + } + // On the first call we try and fill up our subscriptions. It's possible the outer + // generator only has a few items and we can't fill up to what we were hoping. In + // that case we have to bail early. + if (state_->first) { + state_->first = false; + mark_generator_complete = false; + for (int i = 0; i < static_cast(state_->active_subscriptions.size()); i++) { + state_->PullSource().AddCallback( + OuterCallback{state_, static_cast(i)}); + // If we have to bail early then we need to update the shared state again so + // we need to reacquire the lock. + auto guard = state_->mutex.Lock(); + if (state_->source_exhausted) { + int excess_requests = + static_cast(state_->active_subscriptions.size()) - i - 1; + state_->outstanding_requests -= excess_requests; + state_->num_running_subscriptions -= excess_requests; + if (excess_requests > 0) { + // It's possible that we are completing the generator by reducing the number + // of outstanding requests (e.g. 
this happens when the outer subscription and + // all inner subscriptions are synchronous) + mark_generator_complete = state_->IsCompleteUnlocked(guard); + } + break; + } + } + if (mark_generator_complete) { + state_->MarkFinishedAndPurge(); + } + } + return waiting_future; + } + + private: + struct DeliveredJob { + explicit DeliveredJob(AsyncGenerator deliverer_, Result value_, + std::size_t index_) + : deliverer(deliverer_), value(std::move(value_)), index(index_) {} + + // The generator that delivered this result, we will request another item + // from this generator once the result is delivered + AsyncGenerator deliverer; + // The result we received from the generator + Result value; + // The index of the generator (in active_subscriptions) that delivered this + // result. This is used if we need to replace a finished generator. + std::size_t index; + }; + + struct State { + State(AsyncGenerator> source, int max_subscriptions) + : source(std::move(source)), + active_subscriptions(max_subscriptions), + delivered_jobs(), + waiting_jobs(), + mutex(), + first(true), + broken(false), + source_exhausted(false), + outstanding_requests(0), + num_running_subscriptions(0), + final_error(Status::OK()) {} + + Future> PullSource() { + // Need to guard access to source() so we don't pull sync-reentrantly which + // is never valid. + auto lock = mutex.Lock(); + return source(); + } + + void SignalErrorUnlocked(const util::Mutex::Guard& guard) { + broken = true; + // Empty any results that have arrived but not asked for. 
+ while (!delivered_jobs.empty()) { + delivered_jobs.pop_front(); + } + } + + // This function is called outside the mutex but it will only ever be + // called once + void MarkFinishedAndPurge() { + all_finished.MarkFinished(); + while (!waiting_jobs.empty()) { + waiting_jobs.front()->MarkFinished(IterationEnd()); + waiting_jobs.pop_front(); + } + } + + // This is called outside the mutex but it is only ever called + // once and Future<>::AddCallback is thread-safe + void MarkFinalError(const Status& err, Future maybe_sink) { + if (maybe_sink.is_valid()) { + // Someone is waiting for this error so lets mark it complete when + // all the work is done + all_finished.AddCallback([maybe_sink, err](const Status& status) mutable { + maybe_sink.MarkFinished(err); + }); + } else { + // No one is waiting for this error right now so it will be delivered + // next. + final_error = err; + } + } + + bool IsCompleteUnlocked(const util::Mutex::Guard& guard) { + return outstanding_requests == 0 && + (broken || (source_exhausted && num_running_subscriptions == 0 && + delivered_jobs.empty())); + } + + bool MarkTaskFinishedUnlocked(const util::Mutex::Guard& guard) { + --outstanding_requests; + return IsCompleteUnlocked(guard); + } + + // The outer generator. Each item we pull from this will be its own generator + // and become an inner subscription + AsyncGenerator> source; + // active_subscriptions and delivered_jobs will be bounded by max_subscriptions + std::vector> active_subscriptions; + // Results delivered by the inner subscriptions that weren't yet asked for by the + // caller + std::deque> delivered_jobs; + // waiting_jobs is unbounded, reentrant pulls (e.g. AddReadahead) will provide the + // backpressure + std::deque>> waiting_jobs; + // A future that will be marked complete when the terminal item has arrived and all + // outstanding futures have completed. It is used to hold off emission of an error + // until all outstanding work is done. 
+ Future<> all_finished = Future<>::Make(); + util::Mutex mutex; + // A flag cleared when the caller firsts asks for a future. Used to start polling. + bool first; + // A flag set when an error arrives, prevents us from issuing new requests. + bool broken; + // A flag set when the outer subscription has been exhausted. Prevents us from + // pulling it further (even though it would be generally harmless) and lets us know we + // are finishing up. + bool source_exhausted; + // The number of futures that we have requested from either the outer or inner + // subscriptions that have not yet completed. We cannot mark all_finished until this + // reaches 0. This will never be greater than max_subscriptions + int outstanding_requests; + // The number of running subscriptions. We ramp this up to `max_subscriptions` as + // soon as the first item is requested and then it stays at that level (each exhausted + // inner subscription is replaced by a new inner subscription) until the outer + // subscription is exhausted at which point this descends to 0 (and source_exhausted) + // is then set to true. + int num_running_subscriptions; + // If an error arrives, and the caller hasn't asked for that item, we store the error + // here. It is analagous to delivered_jobs but for errors instead of finished + // results. + Status final_error; + }; + + struct InnerCallback { + InnerCallback(std::shared_ptr state, std::size_t index, bool recursive = false) + : state(std::move(state)), index(index), recursive(recursive) {} + + void operator()(const Result& maybe_next_ref) { + // An item has been delivered by one of the inner subscriptions + Future next_fut; + const Result* maybe_next = &maybe_next_ref; + + // When an item is delivered (and the caller has asked for it) we grab the + // next item from the inner subscription. To avoid this behavior leading to an + // infinite loop (this can happen if the caller's callback asks for the next item) + // we use a while loop. 
+ while (true) { + Future sink; + bool sub_finished = maybe_next->ok() && IsIterationEnd(**maybe_next); + bool pull_next_sub = false; + bool was_broken = false; + bool should_mark_gen_complete = false; + bool should_mark_final_error = false; + { + auto guard = state->mutex.Lock(); + if (state->broken) { + // We've errored out previously so ignore the result. If anyone was waiting + // for this they will get IterationEnd when we purge + was_broken = true; + } else { + if (!sub_finished) { + // There is a result to deliver. Either we can deliver it now or we will + // queue it up + if (state->waiting_jobs.empty()) { + state->delivered_jobs.push_back(std::make_shared( + state->active_subscriptions[index], *maybe_next, index)); + } else { + sink = std::move(*state->waiting_jobs.front()); + state->waiting_jobs.pop_front(); + } + } + + // If this is the first error then we transition the state to a broken state + if (!maybe_next->ok()) { + should_mark_final_error = true; + state->SignalErrorUnlocked(guard); + } + } + + // If we finished this inner subscription then we need to grab a new inner + // subscription to take its spot. If we can't (because we're broken or + // exhausted) then we aren't going to be starting any new futures and so + // the number of running subscriptions drops. + pull_next_sub = sub_finished && !state->source_exhausted && !was_broken; + if (sub_finished && !pull_next_sub) { + state->num_running_subscriptions--; + } + // There are three situations we won't pull again. If an error occurred or we + // are already finished or if no one was waiting for our result and so we queued + // it up. We will decrement outstanding_requests and possibly mark the + // generator completed. 
+ if (state->broken || (!sink.is_valid() && !sub_finished) || + (sub_finished && state->source_exhausted)) { + if (state->MarkTaskFinishedUnlocked(guard)) { + should_mark_gen_complete = true; + } + } + } + + // Now we have given up the lock and we can take all the actions we decided we + // need to take. + if (should_mark_final_error) { + state->MarkFinalError(maybe_next->status(), std::move(sink)); + } + + if (should_mark_gen_complete) { + state->MarkFinishedAndPurge(); + } + + // An error occurred elsewhere so there is no need to mark any future + // finished (will happen during the purge) or pull from anything + if (was_broken) { + return; + } + + if (pull_next_sub) { + if (recursive) { + was_empty = true; + return; + } + // We pulled an end token so we need to start a new subscription + // in our spot + state->PullSource().AddCallback(OuterCallback{state, index}); + } else if (sink.is_valid()) { + // We pulled a valid result and there was someone waiting for it + // so lets fetch the next result from our subscription + sink.MarkFinished(*maybe_next); + next_fut = state->active_subscriptions[index](); + if (next_fut.TryAddCallback([this]() { return InnerCallback(state, index); })) { + return; + } + // Already completed. Avoid very deep recursion by looping + // here instead of relying on the callback. + maybe_next = &next_fut.result(); + continue; + } + // else: We pulled a valid result but no one was waiting for it so + // we can just stop. 
+ return; + } + } + std::shared_ptr state; + std::size_t index; + bool recursive; + bool was_empty = false; + }; + + struct OuterCallback { + void operator()(const Result>& initial_maybe_next) { + Result> maybe_next = initial_maybe_next; + while (true) { + // We have been given a new inner subscription + bool should_continue = false; + bool should_mark_gen_complete = false; + bool should_deliver_error = false; + bool source_exhausted = maybe_next.ok() && IsIterationEnd(*maybe_next); + Future error_sink; + { + auto guard = state->mutex.Lock(); + if (!maybe_next.ok() || source_exhausted || state->broken) { + // If here then we will not pull any more from the outer source + if (!state->broken && !maybe_next.ok()) { + state->SignalErrorUnlocked(guard); + // If here then we are the first error so we need to deliver it + should_deliver_error = true; + if (!state->waiting_jobs.empty()) { + error_sink = std::move(*state->waiting_jobs.front()); + state->waiting_jobs.pop_front(); + } + } + if (source_exhausted) { + state->source_exhausted = true; + state->num_running_subscriptions--; + } + if (state->MarkTaskFinishedUnlocked(guard)) { + should_mark_gen_complete = true; + } + } else { + state->active_subscriptions[index] = *maybe_next; + should_continue = true; + } + } + if (should_deliver_error) { + state->MarkFinalError(maybe_next.status(), std::move(error_sink)); + } + if (should_mark_gen_complete) { + state->MarkFinishedAndPurge(); + } + if (should_continue) { + // There is a possibility that a large sequence of immediately available inner + // callbacks could lead to a stack overflow. To avoid this we need to + // synchronously loop through inner/outer callbacks until we either find an + // unfinished future or we find an actual item to deliver. 
+ Future next_item = (*maybe_next)(); + if (!next_item.TryAddCallback([this] { return InnerCallback(state, index); })) { + // By setting recursive to true we signal to the inner callback that, if it is + // empty, instead of adding a new outer callback, it should just immediately + // return, flagging was_empty so that we know we need to check the next + // subscription. + InnerCallback immediate_inner(state, index, /*recursive=*/true); + immediate_inner(next_item.result()); + if (immediate_inner.was_empty) { + Future> next_source = state->PullSource(); + if (next_source.TryAddCallback([this] { + return OuterCallback{state, index}; + })) { + // We hit an unfinished future so we can stop looping + return; + } + // The current subscription was immediately and synchronously empty + // and we were able to synchronously pull the next subscription so we + // can keep looping. + maybe_next = next_source.result(); + continue; + } + } + } + return; + } + } + std::shared_ptr state; + std::size_t index; + }; + + std::shared_ptr state_; +}; + +/// \brief Create a generator that takes in a stream of generators and pulls from up to +/// max_subscriptions at a time +/// +/// Note: This may deliver items out of sequence. For example, items from the third +/// AsyncGenerator generated by the source may be emitted before some items from the first +/// AsyncGenerator generated by the source. +/// +/// This generator will pull from source async-reentrantly unless max_subscriptions is 1 +/// This generator will not pull from the individual subscriptions reentrantly. Add +/// readahead to the individual subscriptions if that is desired. 
+/// This generator is async-reentrant +/// +/// This generator may queue up to max_subscriptions instances of T +template +AsyncGenerator MakeMergedGenerator(AsyncGenerator> source, + int max_subscriptions) { + return MergedGenerator(std::move(source), max_subscriptions); +} + +template +Result> MakeSequencedMergedGenerator( + AsyncGenerator> source, int max_subscriptions) { + if (max_subscriptions < 0) { + return Status::Invalid("max_subscriptions must be a positive integer"); + } + if (max_subscriptions == 1) { + return Status::Invalid("Use MakeConcatenatedGenerator if max_subscriptions is 1"); + } + AsyncGenerator> autostarting_source = MakeMappedGenerator( + std::move(source), + [](const AsyncGenerator& sub) { return MakeAutoStartingGenerator(sub); }); + AsyncGenerator> sub_readahead = + MakeSerialReadaheadGenerator(std::move(autostarting_source), max_subscriptions - 1); + return MakeConcatenatedGenerator(std::move(sub_readahead)); +} + +/// \brief Create a generator that takes in a stream of generators and pulls from each +/// one in sequence. +/// +/// This generator is async-reentrant but will never pull from source reentrantly and +/// will never pull from any subscription reentrantly. 
+/// +/// This generator may queue 1 instance of T +/// +/// TODO: Could potentially make a bespoke implementation instead of MergedGenerator that +/// forwards async-reentrant requests instead of buffering them (which is what +/// MergedGenerator does) +template +AsyncGenerator MakeConcatenatedGenerator(AsyncGenerator> source) { + return MergedGenerator(std::move(source), 1); +} + +template +struct Enumerated { + T value; + int index; + bool last; +}; + +template +struct IterationTraits> { + static Enumerated End() { return Enumerated{IterationEnd(), -1, false}; } + static bool IsEnd(const Enumerated& val) { return val.index < 0; } +}; + +/// \see MakeEnumeratedGenerator +template +class EnumeratingGenerator { + public: + EnumeratingGenerator(AsyncGenerator source, T initial_value) + : state_(std::make_shared(std::move(source), std::move(initial_value))) {} + + Future> operator()() { + if (state_->finished) { + return AsyncGeneratorEnd>(); + } else { + auto state = state_; + return state->source().Then([state](const T& next) { + auto finished = IsIterationEnd(next); + auto prev = Enumerated{state->prev_value, state->prev_index, finished}; + state->prev_value = next; + state->prev_index++; + state->finished = finished; + return prev; + }); + } + } + + private: + struct State { + State(AsyncGenerator source, T initial_value) + : source(std::move(source)), prev_value(std::move(initial_value)), prev_index(0) { + finished = IsIterationEnd(prev_value); + } + + AsyncGenerator source; + T prev_value; + int prev_index; + bool finished; + }; + + std::shared_ptr state_; +}; + +/// Wrap items from a source generator with positional information +/// +/// When used with MakeMergedGenerator and MakeSequencingGenerator this allows items to be +/// processed in a "first-available" fashion and later resequenced which can reduce the +/// impact of sources with erratic performance (e.g. a filesystem where some items may +/// take longer to read than others). 
+/// +/// TODO(ARROW-12371) Would require this generator be async-reentrant +/// +/// \see MakeSequencingGenerator for an example of putting items back in order +/// +/// This generator is not async-reentrant +/// +/// This generator buffers one item (so it knows which item is the last item) +template +AsyncGenerator> MakeEnumeratedGenerator(AsyncGenerator source) { + return FutureFirstGenerator>( + source().Then([source](const T& initial_value) -> AsyncGenerator> { + return EnumeratingGenerator(std::move(source), initial_value); + })); +} + +/// \see MakeTransferredGenerator +template +class TransferringGenerator { + public: + explicit TransferringGenerator(AsyncGenerator source, internal::Executor* executor) + : source_(std::move(source)), executor_(executor) {} + + Future operator()() { return executor_->Transfer(source_()); } + + private: + AsyncGenerator source_; + internal::Executor* executor_; +}; + +/// \brief Transfer a future to an underlying executor. +/// +/// Continuations run on the returned future will be run on the given executor +/// if they cannot be run synchronously. +/// +/// This is often needed to move computation off I/O threads or other external +/// completion sources and back on to the CPU executor so the I/O thread can +/// stay busy and focused on I/O +/// +/// Keep in mind that continuations called on an already completed future will +/// always be run synchronously and so no transfer will happen in that case. 
+/// +/// This generator is async reentrant if the source is +/// +/// This generator will not queue +template +AsyncGenerator MakeTransferredGenerator(AsyncGenerator source, + internal::Executor* executor) { + return TransferringGenerator(std::move(source), executor); +} + +/// \see MakeBackgroundGenerator +template +class BackgroundGenerator { + public: + explicit BackgroundGenerator(Iterator it, internal::Executor* io_executor, int max_q, + int q_restart) + : state_(std::make_shared(io_executor, std::move(it), max_q, q_restart)), + cleanup_(std::make_shared(state_.get())) {} + + Future operator()() { + auto guard = state_->mutex.Lock(); + Future waiting_future; + if (state_->queue.empty()) { + if (state_->finished) { + return AsyncGeneratorEnd(); + } else { + waiting_future = Future::Make(); + state_->waiting_future = waiting_future; + } + } else { + auto next = Future::MakeFinished(std::move(state_->queue.front())); + state_->queue.pop(); + if (state_->NeedsRestart()) { + return state_->RestartTask(state_, std::move(guard), std::move(next)); + } + return next; + } + // This should only trigger the very first time this method is called + if (state_->NeedsRestart()) { + return state_->RestartTask(state_, std::move(guard), std::move(waiting_future)); + } + return waiting_future; + } + + protected: + static constexpr uint64_t kUnlikelyThreadId{std::numeric_limits::max()}; + + struct State { + State(internal::Executor* io_executor, Iterator it, int max_q, int q_restart) + : io_executor(io_executor), + max_q(max_q), + q_restart(q_restart), + it(std::move(it)), + reading(false), + finished(false), + should_shutdown(false) {} + + void ClearQueue() { + while (!queue.empty()) { + queue.pop(); + } + } + + bool TaskIsRunning() const { return task_finished.is_valid(); } + + bool NeedsRestart() const { + return !finished && !reading && static_cast(queue.size()) <= q_restart; + } + + void DoRestartTask(std::shared_ptr state, util::Mutex::Guard guard) { + // If we get here we 
are actually going to start a new task so let's create a + // task_finished future for it + state->task_finished = Future<>::Make(); + state->reading = true; + auto spawn_status = io_executor->Spawn( + [state]() { BackgroundGenerator::WorkerTask(std::move(state)); }); + if (!spawn_status.ok()) { + // If we can't spawn a new task then send an error to the consumer (either via a + // waiting future or the queue) and mark ourselves finished + state->finished = true; + state->task_finished = Future<>(); + if (waiting_future.has_value()) { + auto to_deliver = std::move(waiting_future.value()); + waiting_future.reset(); + guard.Unlock(); + to_deliver.MarkFinished(spawn_status); + } else { + ClearQueue(); + queue.push(spawn_status); + } + } + } + + Future RestartTask(std::shared_ptr state, util::Mutex::Guard guard, + Future next) { + if (TaskIsRunning()) { + // If the task is still cleaning up we need to wait for it to finish before + // restarting. We also want to block the consumer until we've restarted the + // reader to avoid multiple restarts + return task_finished.Then([state, next]() { + // This may appear dangerous (recursive mutex) but we should be guaranteed the + // outer guard has been released by this point. We know... 
+ // * task_finished is not already finished (it would be invalid in that case) + // * task_finished will not be marked complete until we've given up the mutex + auto guard_ = state->mutex.Lock(); + state->DoRestartTask(state, std::move(guard_)); + return next; + }); + } + // Otherwise we can restart immediately + DoRestartTask(std::move(state), std::move(guard)); + return next; + } + + internal::Executor* io_executor; + const int max_q; + const int q_restart; + Iterator it; + std::atomic worker_thread_id{kUnlikelyThreadId}; + + // If true, the task is actively pumping items from the queue and does not need a + // restart + bool reading; + // Set to true when a terminal item arrives + bool finished; + // Signal to the background task to end early because consumers have given up on it + bool should_shutdown; + // If the queue is empty, the consumer will create a waiting future and wait for it + std::queue> queue; + std::optional> waiting_future; + // Every background task is given a future to complete when it is entirely finished + // processing and ready for the next task to start or for State to be destroyed + Future<> task_finished; + util::Mutex mutex; + }; + + // Cleanup task that will be run when all consumer references to the generator are lost + struct Cleanup { + explicit Cleanup(State* state) : state(state) {} + ~Cleanup() { + /// TODO: Once ARROW-13109 is available then we can be force consumers to spawn and + /// there is no need to perform this check. 
+ /// + /// It's a deadlock if we enter cleanup from + /// the worker thread but it can happen if the consumer doesn't transfer away + assert(state->worker_thread_id.load() != ::arrow::internal::GetThreadId()); + Future<> finish_fut; + { + auto lock = state->mutex.Lock(); + if (!state->TaskIsRunning()) { + return; + } + // Signal the current task to stop and wait for it to finish + state->should_shutdown = true; + finish_fut = state->task_finished; + } + // Using future as a condition variable here + Status st = finish_fut.status(); + ARROW_UNUSED(st); + } + State* state; + }; + + static void WorkerTask(std::shared_ptr state) { + state->worker_thread_id.store(::arrow::internal::GetThreadId()); + // We need to capture the state to read while outside the mutex + bool reading = true; + while (reading) { + auto next = state->it.Next(); + // Need to capture state->waiting_future inside the mutex to mark finished outside + Future waiting_future; + { + auto guard = state->mutex.Lock(); + + if (state->should_shutdown) { + state->finished = true; + break; + } + + if (!next.ok() || IsIterationEnd(*next)) { + // Terminal item. Mark finished to true, send this last item, and quit + state->finished = true; + if (!next.ok()) { + state->ClearQueue(); + } + } + // At this point we are going to send an item. Either we will add it to the + // queue or deliver it to a waiting future. + if (state->waiting_future.has_value()) { + waiting_future = std::move(state->waiting_future.value()); + state->waiting_future.reset(); + } else { + state->queue.push(std::move(next)); + // We just filled up the queue so it is time to quit. We may need to notify + // a cleanup task so we transition to Quitting + if (static_cast(state->queue.size()) >= state->max_q) { + state->reading = false; + } + } + reading = state->reading && !state->finished; + } + // This should happen outside the mutex. 
Presumably there is a + // transferring generator on the other end that will quickly transfer any + // callbacks off of this thread so we can continue looping. Still, best not to + // rely on that + if (waiting_future.is_valid()) { + waiting_future.MarkFinished(next); + } + } + // Once we've sent our last item we can notify any waiters that we are done and so + // either state can be cleaned up or a new background task can be started + Future<> task_finished; + { + auto guard = state->mutex.Lock(); + // After we give up the mutex state can be safely deleted. We will no longer + // reference it. We can safely transition to idle now. + task_finished = state->task_finished; + state->task_finished = Future<>(); + state->worker_thread_id.store(kUnlikelyThreadId); + } + task_finished.MarkFinished(); + } + + std::shared_ptr state_; + // state_ is held by both the generator and the background thread so it won't be cleaned + // up when all consumer references are relinquished. cleanup_ is only held by the + // generator so it will be destructed when the last consumer reference is gone. We use + // this to cleanup / stop the background generator in case the consuming end stops + // listening (e.g. due to a downstream error) + std::shared_ptr cleanup_; +}; + +constexpr int kDefaultBackgroundMaxQ = 32; +constexpr int kDefaultBackgroundQRestart = 16; + +/// \brief Create an AsyncGenerator by iterating over an Iterator on a background +/// thread +/// +/// The parameter max_q and q_restart control queue size and background thread task +/// management. If the background task is fast you typically don't want it creating a +/// thread task for every item. Instead the background thread will run until it fills +/// up a readahead queue. +/// +/// Once the queue has filled up the background thread task will terminate (allowing other +/// I/O tasks to use the thread). Once the queue has been drained enough (specified by +/// q_restart) then the background thread task will be restarted. 
If q_restart is too low +/// then you may exhaust the queue waiting for the background thread task to start running +/// again. If it is too high then it will be constantly stopping and restarting the +/// background queue task +/// +/// The "background thread" is a logical thread and will run as tasks on the io_executor. +/// This thread may stop and start when the queue fills up but there will only be one +/// active background thread task at any given time. You MUST transfer away from this +/// background generator. Otherwise there could be a race condition if a callback on the +/// background thread deletes the last consumer reference to the background generator. You +/// can transfer onto the same executor as the background thread, it is only necessary to +/// create a new thread task, not to switch executors. +/// +/// This generator is not async-reentrant +/// +/// This generator will queue up to max_q blocks +template +static Result> MakeBackgroundGenerator( + Iterator iterator, internal::Executor* io_executor, + int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart) { + if (max_q < q_restart) { + return Status::Invalid("max_q must be >= q_restart"); + } + return BackgroundGenerator(std::move(iterator), io_executor, max_q, q_restart); +} + +/// \brief Create an AsyncGenerator by iterating over an Iterator synchronously +/// +/// This should only be used if you know the source iterator does not involve any +/// I/O (or other blocking calls). Otherwise a CPU thread will be blocked and, depending +/// on the complexity of the iterator, it may lead to deadlock. +/// +/// If you are not certain if there will be I/O then it is better to use +/// MakeBackgroundGenerator. If helpful you can think of this as the AsyncGenerator +/// equivalent of Future::MakeFinished +/// +/// It is impossible to call this in an async-reentrant manner since the returned +/// future will be completed by the time it is polled. 
+/// +/// This generator does not queue +template +static Result> MakeBlockingGenerator( + std::shared_ptr> iterator) { + return [it = std::move(iterator)]() mutable -> Future { + return Future::MakeFinished(it->Next()); + }; +} + +template +static Result> MakeBlockingGenerator(Iterator iterator) { + return MakeBlockingGenerator(std::make_shared>(std::move(iterator))); +} + +/// \see MakeGeneratorIterator +template +class GeneratorIterator { + public: + explicit GeneratorIterator(AsyncGenerator source) : source_(std::move(source)) {} + + Result Next() { return source_().result(); } + + private: + AsyncGenerator source_; +}; + +/// \brief Convert an AsyncGenerator to an Iterator which blocks until each future +/// is finished +template +Iterator MakeGeneratorIterator(AsyncGenerator source) { + return Iterator(GeneratorIterator(std::move(source))); +} + +/// \brief Add readahead to an iterator using a background thread. +/// +/// Under the hood this is converting the iterator to a generator using +/// MakeBackgroundGenerator, adding readahead to the converted generator with +/// MakeReadaheadGenerator, and then converting back to an iterator using +/// MakeGeneratorIterator. +template +Result> MakeReadaheadIterator(Iterator it, int readahead_queue_size) { + ARROW_ASSIGN_OR_RAISE(auto io_executor, internal::ThreadPool::Make(1)); + auto max_q = readahead_queue_size; + auto q_restart = std::max(1, max_q / 2); + ARROW_ASSIGN_OR_RAISE( + auto background_generator, + MakeBackgroundGenerator(std::move(it), io_executor.get(), max_q, q_restart)); + // Capture io_executor to keep it alive as long as owned_bg_generator is still + // referenced + AsyncGenerator owned_bg_generator = [io_executor, background_generator]() { + return background_generator(); + }; + return MakeGeneratorIterator(std::move(owned_bg_generator)); +} + +/// \brief Make a generator that returns a single pre-generated future +/// +/// This generator is async-reentrant. 
+template +std::function()> MakeSingleFutureGenerator(Future future) { + assert(future.is_valid()); + auto state = std::make_shared>(std::move(future)); + return [state]() -> Future { + auto fut = std::move(*state); + if (fut.is_valid()) { + return fut; + } else { + return AsyncGeneratorEnd(); + } + }; +} + +/// \brief Make a generator that immediately ends. +/// +/// This generator is async-reentrant. +template +std::function()> MakeEmptyGenerator() { + return []() -> Future { return AsyncGeneratorEnd(); }; +} + +/// \brief Make a generator that always fails with a given error +/// +/// This generator is async-reentrant. +template +AsyncGenerator MakeFailingGenerator(Status st) { + assert(!st.ok()); + auto state = std::make_shared(std::move(st)); + return [state]() -> Future { + auto st = std::move(*state); + if (!st.ok()) { + return st; + } else { + return AsyncGeneratorEnd(); + } + }; +} + +/// \brief Make a generator that always fails with a given error +/// +/// This overload allows inferring the return type from the argument. +template +AsyncGenerator MakeFailingGenerator(const Result& result) { + return MakeFailingGenerator(result.status()); +} + +/// \brief Prepend initial_values onto a generator +/// +/// This generator is async-reentrant but will buffer requests and will not +/// pull from following_values async-reentrantly. 
+template +AsyncGenerator MakeGeneratorStartsWith(std::vector initial_values, + AsyncGenerator following_values) { + auto initial_values_vec_gen = MakeVectorGenerator(std::move(initial_values)); + auto gen_gen = MakeVectorGenerator>( + {std::move(initial_values_vec_gen), std::move(following_values)}); + return MakeConcatenatedGenerator(std::move(gen_gen)); +} + +template +struct CancellableGenerator { + Future operator()() { + if (stop_token.IsStopRequested()) { + return stop_token.Poll(); + } + return source(); + } + + AsyncGenerator source; + StopToken stop_token; +}; + +/// \brief Allow an async generator to be cancelled +/// +/// This generator is async-reentrant +template +AsyncGenerator MakeCancellable(AsyncGenerator source, StopToken stop_token) { + return CancellableGenerator{std::move(source), std::move(stop_token)}; +} + +template +class DefaultIfEmptyGenerator { + public: + DefaultIfEmptyGenerator(AsyncGenerator source, T or_value) + : state_(std::make_shared(std::move(source), std::move(or_value))) {} + + Future operator()() { + if (state_->first) { + state_->first = false; + struct { + T or_value; + + Result operator()(const T& value) { + if (IterationTraits::IsEnd(value)) { + return std::move(or_value); + } + return value; + } + } Continuation; + Continuation.or_value = std::move(state_->or_value); + return state_->source().Then(std::move(Continuation)); + } + return state_->source(); + } + + private: + struct State { + AsyncGenerator source; + T or_value; + bool first; + State(AsyncGenerator source_, T or_value_) + : source(std::move(source_)), or_value(std::move(or_value_)), first(true) {} + }; + std::shared_ptr state_; +}; + +/// \brief If the generator is empty, return the given value, else +/// forward the values from the generator. +/// +/// This generator is async-reentrant. 
+template +AsyncGenerator MakeDefaultIfEmptyGenerator(AsyncGenerator source, T or_value) { + return DefaultIfEmptyGenerator(std::move(source), std::move(or_value)); +} +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..f3c5bf9ef6f52b0a0737348c2a5bdc524e62c251 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include "arrow/type_fwd.h" + +namespace arrow { + +template +using AsyncGenerator = std::function()>; + +template +class MappingGenerator; + +template +class SequencingGenerator; + +template +class TransformingGenerator; + +template +class SerialReadaheadGenerator; + +template +class ReadaheadGenerator; + +template +class PushGenerator; + +template +class MergedGenerator; + +template +struct Enumerated; + +template +class EnumeratingGenerator; + +template +class TransferringGenerator; + +template +class BackgroundGenerator; + +template +class GeneratorIterator; + +template +struct CancellableGenerator; + +template +class DefaultIfEmptyGenerator; + +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h new file mode 100644 index 0000000000000000000000000000000000000000..5b80e19d896b746ccc4318bb2f8ce250c7892e66 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +ARROW_EXPORT +std::string base64_encode(std::string_view s); + +ARROW_EXPORT +std::string base64_decode(std::string_view s); + +} // namespace util +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h new file mode 100644 index 0000000000000000000000000000000000000000..94f7a5bdfa667a97bd00a91404a1dd9f64dfd2dd --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/type.h" +#include "arrow/util/span.h" + +namespace arrow::util { + +inline BinaryViewType::c_type ToInlineBinaryView(const void* data, int32_t size) { + // Small string: inlined. 
Bytes beyond size are zeroed + BinaryViewType::c_type out; + out.inlined = {size, {}}; + memcpy(&out.inlined.data, data, size); + return out; +} + +inline BinaryViewType::c_type ToInlineBinaryView(std::string_view v) { + return ToInlineBinaryView(v.data(), static_cast(v.size())); +} + +inline BinaryViewType::c_type ToBinaryView(const void* data, int32_t size, + int32_t buffer_index, int32_t offset) { + if (size <= BinaryViewType::kInlineSize) { + return ToInlineBinaryView(data, size); + } + + // Large string: store index/offset. + BinaryViewType::c_type out; + out.ref = {size, {}, buffer_index, offset}; + memcpy(&out.ref.prefix, data, sizeof(out.ref.prefix)); + return out; +} + +inline BinaryViewType::c_type ToBinaryView(std::string_view v, int32_t buffer_index, + int32_t offset) { + return ToBinaryView(v.data(), static_cast(v.size()), buffer_index, offset); +} + +template +std::string_view FromBinaryView(const BinaryViewType::c_type& v, + const BufferPtr* data_buffers) { + auto* data = v.is_inline() ? v.inlined.data.data() + : data_buffers[v.ref.buffer_index]->data() + v.ref.offset; + return {reinterpret_cast(data), static_cast(v.size())}; +} +template +std::string_view FromBinaryView(BinaryViewType::c_type&&, const BufferPtr*) = delete; + +template +bool EqualBinaryView(BinaryViewType::c_type l, BinaryViewType::c_type r, + const BufferPtr* l_buffers, const BufferPtr* r_buffers) { + int64_t l_size_and_prefix, r_size_and_prefix; + memcpy(&l_size_and_prefix, &l, sizeof(l_size_and_prefix)); + memcpy(&r_size_and_prefix, &r, sizeof(r_size_and_prefix)); + + if (l_size_and_prefix != r_size_and_prefix) return false; + + if (l.is_inline()) { + // The columnar spec mandates that the inlined part be zero-padded, so we can compare + // a word at a time regardless of the exact size. 
+ int64_t l_inlined, r_inlined; + memcpy(&l_inlined, l.inline_data() + BinaryViewType::kPrefixSize, sizeof(l_inlined)); + memcpy(&r_inlined, r.inline_data() + BinaryViewType::kPrefixSize, sizeof(r_inlined)); + return l_inlined == r_inlined; + } + + // Sizes are equal and this is not inline, therefore both are out + // of line and have kPrefixSize first in common. + const uint8_t* l_data = l_buffers[l.ref.buffer_index]->data() + l.ref.offset; + const uint8_t* r_data = r_buffers[r.ref.buffer_index]->data() + r.ref.offset; + return memcmp(l_data + BinaryViewType::kPrefixSize, + r_data + BinaryViewType::kPrefixSize, + l.size() - BinaryViewType::kPrefixSize) == 0; +} + +} // namespace arrow::util diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h new file mode 100644 index 0000000000000000000000000000000000000000..4750e697fc7972e8ad57766ffd1134cf3e99fd14 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h @@ -0,0 +1,466 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_ops.h" +#include "arrow/util/bitmap_reader.h" +#include "arrow/util/bitmap_writer.h" +#include "arrow/util/compare.h" +#include "arrow/util/endian.h" +#include "arrow/util/functional.h" +#include "arrow/util/span.h" +#include "arrow/util/string_builder.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class BooleanArray; + +namespace internal { + +class ARROW_EXPORT Bitmap : public util::ToStringOstreamable, + public util::EqualityComparable { + public: + Bitmap() = default; + + Bitmap(const std::shared_ptr& buffer, int64_t offset, int64_t length) + : data_(buffer->data()), offset_(offset), length_(length) { + if (buffer->is_mutable()) { + mutable_data_ = buffer->mutable_data(); + } + } + + Bitmap(const void* data, int64_t offset, int64_t length) + : data_(reinterpret_cast(data)), offset_(offset), length_(length) {} + + Bitmap(void* data, int64_t offset, int64_t length) + : data_(reinterpret_cast(data)), + mutable_data_(reinterpret_cast(data)), + offset_(offset), + length_(length) {} + + Bitmap Slice(int64_t offset) const { + if (mutable_data_ != NULLPTR) { + return {mutable_data_, offset_ + offset, length_ - offset}; + } else { + return {data_, offset_ + offset, length_ - offset}; + } + } + + Bitmap Slice(int64_t offset, int64_t length) const { + if (mutable_data_ != NULLPTR) { + return {mutable_data_, offset_ + offset, length}; + } else { + return {data_, offset_ + offset, length}; + } + } + + std::string ToString() const; + + bool Equals(const Bitmap& other) const; + + std::string Diff(const Bitmap& other) const; + + bool GetBit(int64_t i) const { return bit_util::GetBit(data_, i + offset_); } + + bool operator[](int64_t i) const { return GetBit(i); } + + void SetBitTo(int64_t i, bool v) const { + bit_util::SetBitTo(mutable_data_, i + offset_, 
v); + } + + void SetBitsTo(bool v) { bit_util::SetBitsTo(mutable_data_, offset_, length_, v); } + + void CopyFrom(const Bitmap& other); + void CopyFromInverted(const Bitmap& other); + + /// \brief Visit bits from each bitmap as bitset + /// + /// All bitmaps must have identical length. + template + static void VisitBits(const Bitmap (&bitmaps)[N], Visitor&& visitor) { + int64_t bit_length = BitLength(bitmaps, N); + std::bitset bits; + for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) { + for (size_t i = 0; i < N; ++i) { + bits[i] = bitmaps[i].GetBit(bit_i); + } + visitor(bits); + } + } + + /// \brief Visit bits from each bitmap as bitset + /// + /// All bitmaps must have identical length. + template + static void VisitBits(const std::array& bitmaps, Visitor&& visitor) { + int64_t bit_length = BitLength(bitmaps); + std::bitset bits; + for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) { + for (size_t i = 0; i < N; ++i) { + bits[i] = bitmaps[i].GetBit(bit_i); + } + visitor(bits); + } + } + + /// \brief Visit words of bits from each bitmap as array + /// + /// All bitmaps must have identical length. The first bit in a visited bitmap + /// may be offset within the first visited word, but words will otherwise contain + /// densely packed bits loaded from the bitmap. That offset within the first word is + /// returned. + /// + /// TODO(bkietz) allow for early termination + // NOTE: this function is efficient on 3+ sufficiently large bitmaps. + // It also has a large prolog / epilog overhead and should be used + // carefully in other cases. + // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid + // and BitmapUInt64Reader. 
+ template >::type::value_type> + static int64_t VisitWords(const Bitmap (&bitmaps_arg)[N], Visitor&& visitor) { + constexpr int64_t kBitWidth = sizeof(Word) * 8; + + // local, mutable variables which will be sliced/decremented to represent consumption: + Bitmap bitmaps[N]; + int64_t offsets[N]; + int64_t bit_length = BitLength(bitmaps_arg, N); + util::span words[N]; + for (size_t i = 0; i < N; ++i) { + bitmaps[i] = bitmaps_arg[i]; + offsets[i] = bitmaps[i].template word_offset(); + assert(offsets[i] >= 0 && offsets[i] < kBitWidth); + words[i] = bitmaps[i].template words(); + } + + auto consume = [&](int64_t consumed_bits) { + for (size_t i = 0; i < N; ++i) { + bitmaps[i] = bitmaps[i].Slice(consumed_bits, bit_length - consumed_bits); + offsets[i] = bitmaps[i].template word_offset(); + assert(offsets[i] >= 0 && offsets[i] < kBitWidth); + words[i] = bitmaps[i].template words(); + } + bit_length -= consumed_bits; + }; + + std::array visited_words; + visited_words.fill(0); + + if (bit_length <= kBitWidth * 2) { + // bitmaps fit into one or two words so don't bother with optimization + while (bit_length > 0) { + auto leading_bits = std::min(bit_length, kBitWidth); + SafeLoadWords(bitmaps, 0, leading_bits, false, &visited_words); + visitor(visited_words); + consume(leading_bits); + } + return 0; + } + + int64_t max_offset = *std::max_element(offsets, offsets + N); + int64_t min_offset = *std::min_element(offsets, offsets + N); + if (max_offset > 0) { + // consume leading bits + auto leading_bits = kBitWidth - min_offset; + SafeLoadWords(bitmaps, 0, leading_bits, true, &visited_words); + visitor(visited_words); + consume(leading_bits); + } + assert(*std::min_element(offsets, offsets + N) == 0); + + int64_t whole_word_count = bit_length / kBitWidth; + assert(whole_word_count >= 1); + + if (min_offset == max_offset) { + // all offsets were identical, all leading bits have been consumed + assert( + std::all_of(offsets, offsets + N, [](int64_t offset) { return offset == 0; 
})); + + for (int64_t word_i = 0; word_i < whole_word_count; ++word_i) { + for (size_t i = 0; i < N; ++i) { + visited_words[i] = words[i][word_i]; + } + visitor(visited_words); + } + consume(whole_word_count * kBitWidth); + } else { + // leading bits from potentially incomplete words have been consumed + + // word_i such that words[i][word_i] and words[i][word_i + 1] are lie entirely + // within the bitmap for all i + for (int64_t word_i = 0; word_i < whole_word_count - 1; ++word_i) { + for (size_t i = 0; i < N; ++i) { + if (offsets[i] == 0) { + visited_words[i] = words[i][word_i]; + } else { + auto words0 = bit_util::ToLittleEndian(words[i][word_i]); + auto words1 = bit_util::ToLittleEndian(words[i][word_i + 1]); + visited_words[i] = bit_util::FromLittleEndian( + (words0 >> offsets[i]) | (words1 << (kBitWidth - offsets[i]))); + } + } + visitor(visited_words); + } + consume((whole_word_count - 1) * kBitWidth); + + SafeLoadWords(bitmaps, 0, kBitWidth, false, &visited_words); + + visitor(visited_words); + consume(kBitWidth); + } + + // load remaining bits + if (bit_length > 0) { + SafeLoadWords(bitmaps, 0, bit_length, false, &visited_words); + visitor(visited_words); + } + + return min_offset; + } + + template >::type::value_type> + static void RunVisitWordsAndWriteLoop(int64_t bit_length, + std::array& readers, + std::array& writers, + Visitor&& visitor) { + constexpr int64_t kBitWidth = sizeof(Word) * 8; + + std::array visited_words; + std::array output_words; + + // every reader will have same number of words, since they are same length'ed + // TODO($JIRA) this will be inefficient in some cases. 
When there are offsets beyond + // Word boundary, every Word would have to be created from 2 adjoining Words + auto n_words = readers[0].words(); + bit_length -= n_words * kBitWidth; + while (n_words--) { + // first collect all words to visited_words array + for (size_t i = 0; i < N; i++) { + visited_words[i] = readers[i].NextWord(); + } + visitor(visited_words, &output_words); + for (size_t i = 0; i < M; i++) { + writers[i].PutNextWord(output_words[i]); + } + } + + // every reader will have same number of trailing bytes, because of the above reason + // tailing portion could be more than one word! (ref: BitmapWordReader constructor) + // remaining full/ partial words to write + + if (bit_length) { + // convert the word visitor lambda to a byte_visitor + auto byte_visitor = [&](const std::array& in, + std::array* out) { + std::array in_words; + std::array out_words; + std::copy(in.begin(), in.end(), in_words.begin()); + visitor(in_words, &out_words); + for (size_t i = 0; i < M; i++) { + out->at(i) = static_cast(out_words[i]); + } + }; + + std::array visited_bytes; + std::array output_bytes; + int n_bytes = readers[0].trailing_bytes(); + while (n_bytes--) { + visited_bytes.fill(0); + output_bytes.fill(0); + int valid_bits; + for (size_t i = 0; i < N; i++) { + visited_bytes[i] = readers[i].NextTrailingByte(valid_bits); + } + byte_visitor(visited_bytes, &output_bytes); + for (size_t i = 0; i < M; i++) { + writers[i].PutNextTrailingByte(output_bytes[i], valid_bits); + } + } + } + } + + /// \brief Visit words of bits from each input bitmap as array and collects + /// outputs to an array, to be written into the output bitmaps accordingly. + /// + /// All bitmaps must have identical length. The first bit in a visited bitmap + /// may be offset within the first visited word, but words will otherwise contain + /// densely packed bits loaded from the bitmap. That offset within the first word is + /// returned. 
+ /// Visitor is expected to have the following signature + /// [](const std::array& in_words, std::array* out_words){...} + /// + // NOTE: this function is efficient on 3+ sufficiently large bitmaps. + // It also has a large prolog / epilog overhead and should be used + // carefully in other cases. + // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid + // and BitmapUInt64Reader. + template >::type::value_type> + static void VisitWordsAndWrite(const std::array& bitmaps_arg, + std::array* out_bitmaps_arg, + Visitor&& visitor) { + int64_t bit_length = BitLength(bitmaps_arg); + assert(bit_length == BitLength(*out_bitmaps_arg)); + + // if both input and output bitmaps have no byte offset, then use special template + if (std::all_of(bitmaps_arg.begin(), bitmaps_arg.end(), + [](const Bitmap& b) { return b.offset_ % 8 == 0; }) && + std::all_of(out_bitmaps_arg->begin(), out_bitmaps_arg->end(), + [](const Bitmap& b) { return b.offset_ % 8 == 0; })) { + std::array, N> readers; + for (size_t i = 0; i < N; ++i) { + const Bitmap& in_bitmap = bitmaps_arg[i]; + readers[i] = BitmapWordReader( + in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_); + } + + std::array, M> writers; + for (size_t i = 0; i < M; ++i) { + const Bitmap& out_bitmap = out_bitmaps_arg->at(i); + writers[i] = BitmapWordWriter( + out_bitmap.mutable_data_, out_bitmap.offset_, out_bitmap.length_); + } + + RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor); + } else { + std::array, N> readers; + for (size_t i = 0; i < N; ++i) { + const Bitmap& in_bitmap = bitmaps_arg[i]; + readers[i] = + BitmapWordReader(in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_); + } + + std::array, M> writers; + for (size_t i = 0; i < M; ++i) { + const Bitmap& out_bitmap = out_bitmaps_arg->at(i); + writers[i] = BitmapWordWriter(out_bitmap.mutable_data_, out_bitmap.offset_, + out_bitmap.length_); + } + + RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor); + } + } + + const 
uint8_t* data() const { return data_; } + uint8_t* mutable_data() { return mutable_data_; } + + /// offset of first bit relative to buffer().data() + int64_t offset() const { return offset_; } + + /// number of bits in this Bitmap + int64_t length() const { return length_; } + + /// span of all bytes which contain any bit in this Bitmap + util::span bytes() const { + auto byte_offset = offset_ / 8; + auto byte_count = bit_util::CeilDiv(offset_ + length_, 8) - byte_offset; + return {data_ + byte_offset, static_cast(byte_count)}; + } + + private: + /// span of all Words which contain any bit in this Bitmap + /// + /// For example, given Word=uint16_t and a bitmap spanning bits [20, 36) + /// words() would span bits [16, 48). + /// + /// 0 16 32 48 64 + /// |-------|-------|------|------| (buffer) + /// [ ] (bitmap) + /// |-------|------| (returned words) + /// + /// \warning The words may contain bytes which lie outside the buffer or are + /// uninitialized. + template + util::span words() const { + auto bytes_addr = reinterpret_cast(bytes().data()); + auto words_addr = bytes_addr - bytes_addr % sizeof(Word); + auto word_byte_count = + bit_util::RoundUpToPowerOf2(static_cast(bytes_addr + bytes().size()), + static_cast(sizeof(Word))) - + words_addr; + return {reinterpret_cast(words_addr), + static_cast(word_byte_count / sizeof(Word))}; + } + + /// offset of first bit relative to words().data() + template + int64_t word_offset() const { + return offset_ + 8 * (reinterpret_cast(data_) - + reinterpret_cast(words().data())); + } + + /// load words from bitmaps bitwise + template + static void SafeLoadWords(const Bitmap (&bitmaps)[N], int64_t offset, + int64_t out_length, bool set_trailing_bits, + std::array* out) { + out->fill(0); + + int64_t out_offset = set_trailing_bits ? 
sizeof(Word) * 8 - out_length : 0; + + Bitmap slices[N], out_bitmaps[N]; + for (size_t i = 0; i < N; ++i) { + slices[i] = bitmaps[i].Slice(offset, out_length); + out_bitmaps[i] = Bitmap(&out->at(i), out_offset, out_length); + } + + int64_t bit_i = 0; + Bitmap::VisitBits(slices, [&](std::bitset bits) { + for (size_t i = 0; i < N; ++i) { + out_bitmaps[i].SetBitTo(bit_i, bits[i]); + } + ++bit_i; + }); + } + + /// assert bitmaps have identical length and return that length + static int64_t BitLength(const Bitmap* bitmaps, size_t N); + + template + static int64_t BitLength(const std::array& bitmaps) { + for (size_t i = 1; i < N; ++i) { + assert(bitmaps[i].length() == bitmaps[0].length()); + } + return bitmaps[0].length(); + } + + const uint8_t* data_ = NULLPTR; + uint8_t* mutable_data_ = NULLPTR; + int64_t offset_ = 0, length_ = 0; +}; + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h new file mode 100644 index 0000000000000000000000000000000000000000..5bd2ad44140834487b02d5899d3515e7b7eafefc --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief Generate Bitmap with all position to `value` except for one found +/// at `straggler_pos`. +ARROW_EXPORT +Result> BitmapAllButOne(MemoryPool* pool, int64_t length, + int64_t straggler_pos, bool value = true); + +/// \brief Convert vector of bytes to bitmap buffer +ARROW_EXPORT +Result> BytesToBits(const std::vector&, + MemoryPool* pool = default_memory_pool()); + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h new file mode 100644 index 0000000000000000000000000000000000000000..52a1e228e01f1d6c3c37a5e2d49d843f0a4573f9 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/buffer.h" +#include "arrow/memory_pool.h" +#include "arrow/result.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +// A std::generate() like function to write sequential bits into a bitmap area. +// Bits preceding the bitmap area are preserved, bits following the bitmap +// area may be clobbered. + +template +void GenerateBits(uint8_t* bitmap, int64_t start_offset, int64_t length, Generator&& g) { + if (length == 0) { + return; + } + uint8_t* cur = bitmap + start_offset / 8; + uint8_t bit_mask = bit_util::kBitmask[start_offset % 8]; + uint8_t current_byte = *cur & bit_util::kPrecedingBitmask[start_offset % 8]; + + for (int64_t index = 0; index < length; ++index) { + const bool bit = g(); + current_byte = bit ? (current_byte | bit_mask) : current_byte; + bit_mask = static_cast(bit_mask << 1); + if (bit_mask == 0) { + bit_mask = 1; + *cur++ = current_byte; + current_byte = 0; + } + } + if (bit_mask != 1) { + *cur++ = current_byte; + } +} + +// Like GenerateBits(), but unrolls its main loop for higher performance. 
+ +template +void GenerateBitsUnrolled(uint8_t* bitmap, int64_t start_offset, int64_t length, + Generator&& g) { + static_assert(std::is_same()()), bool>::value, + "Functor passed to GenerateBitsUnrolled must return bool"); + + if (length == 0) { + return; + } + uint8_t current_byte; + uint8_t* cur = bitmap + start_offset / 8; + const uint64_t start_bit_offset = start_offset % 8; + uint8_t bit_mask = bit_util::kBitmask[start_bit_offset]; + int64_t remaining = length; + + if (bit_mask != 0x01) { + current_byte = *cur & bit_util::kPrecedingBitmask[start_bit_offset]; + while (bit_mask != 0 && remaining > 0) { + current_byte |= g() * bit_mask; + bit_mask = static_cast(bit_mask << 1); + --remaining; + } + *cur++ = current_byte; + } + + int64_t remaining_bytes = remaining / 8; + uint8_t out_results[8]; + while (remaining_bytes-- > 0) { + for (int i = 0; i < 8; ++i) { + out_results[i] = g(); + } + *cur++ = static_cast(out_results[0] | out_results[1] << 1 | + out_results[2] << 2 | out_results[3] << 3 | + out_results[4] << 4 | out_results[5] << 5 | + out_results[6] << 6 | out_results[7] << 7); + } + + int64_t remaining_bits = remaining % 8; + if (remaining_bits) { + current_byte = 0; + bit_mask = 0x01; + while (remaining_bits-- > 0) { + current_byte |= g() * bit_mask; + bit_mask = static_cast(bit_mask << 1); + } + *cur++ = current_byte; + } +} + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h new file mode 100644 index 0000000000000000000000000000000000000000..c29589013e4b7863705e1de4cf8c69293451eb8b --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h @@ -0,0 +1,88 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_reader.h" + +namespace arrow { +namespace internal { + +// A function that visits each bit in a bitmap and calls a visitor function with a +// boolean representation of that bit. This is intended to be analogous to +// GenerateBits. +template +void VisitBits(const uint8_t* bitmap, int64_t start_offset, int64_t length, + Visitor&& visit) { + BitmapReader reader(bitmap, start_offset, length); + for (int64_t index = 0; index < length; ++index) { + visit(reader.IsSet()); + reader.Next(); + } +} + +// Like VisitBits(), but unrolls its main loop for better performance. +template +void VisitBitsUnrolled(const uint8_t* bitmap, int64_t start_offset, int64_t length, + Visitor&& visit) { + if (length == 0) { + return; + } + + // Start by visiting any bits preceding the first full byte. + int64_t num_bits_before_full_bytes = + bit_util::RoundUpToMultipleOf8(start_offset) - start_offset; + // Truncate num_bits_before_full_bytes if it is greater than length. 
+ if (num_bits_before_full_bytes > length) { + num_bits_before_full_bytes = length; + } + // Use the non loop-unrolled VisitBits since we don't want to add branches + VisitBits(bitmap, start_offset, num_bits_before_full_bytes, visit); + + // Shift the start pointer to the first full byte and compute the + // number of full bytes to be read. + const uint8_t* first_full_byte = bitmap + bit_util::CeilDiv(start_offset, 8); + const int64_t num_full_bytes = (length - num_bits_before_full_bytes) / 8; + + // Iterate over each full byte of the input bitmap and call the visitor in + // a loop-unrolled manner. + for (int64_t byte_index = 0; byte_index < num_full_bytes; ++byte_index) { + // Get the current bit-packed byte value from the bitmap. + const uint8_t byte = *(first_full_byte + byte_index); + + // Execute the visitor function on each bit of the current byte. + visit(bit_util::GetBitFromByte(byte, 0)); + visit(bit_util::GetBitFromByte(byte, 1)); + visit(bit_util::GetBitFromByte(byte, 2)); + visit(bit_util::GetBitFromByte(byte, 3)); + visit(bit_util::GetBitFromByte(byte, 4)); + visit(bit_util::GetBitFromByte(byte, 5)); + visit(bit_util::GetBitFromByte(byte, 6)); + visit(bit_util::GetBitFromByte(byte, 7)); + } + + // Write any leftover bits in the last byte. 
+ const int64_t num_bits_after_full_bytes = (length - num_bits_before_full_bytes) % 8; + VisitBits(first_full_byte + num_full_bytes, 0, num_bits_after_full_bytes, + visit); +} + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h new file mode 100644 index 0000000000000000000000000000000000000000..c9ce8012f3eb5a65ec91b1321b687bc0d77f7557 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h @@ -0,0 +1,286 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/util/bit_util.h" +#include "arrow/util/endian.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +class BitmapWriter { + // A sequential bitwise writer that preserves surrounding bit values. 
+ + public: + BitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length) + : bitmap_(bitmap), position_(0), length_(length) { + byte_offset_ = start_offset / 8; + bit_mask_ = bit_util::kBitmask[start_offset % 8]; + if (length > 0) { + current_byte_ = bitmap[byte_offset_]; + } else { + current_byte_ = 0; + } + } + + void Set() { current_byte_ |= bit_mask_; } + + void Clear() { current_byte_ &= bit_mask_ ^ 0xFF; } + + void Next() { + bit_mask_ = static_cast(bit_mask_ << 1); + ++position_; + if (bit_mask_ == 0) { + // Finished this byte, need advancing + bit_mask_ = 0x01; + bitmap_[byte_offset_++] = current_byte_; + if (ARROW_PREDICT_TRUE(position_ < length_)) { + current_byte_ = bitmap_[byte_offset_]; + } + } + } + + void Finish() { + // Store current byte if we didn't went past bitmap storage + if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) { + bitmap_[byte_offset_] = current_byte_; + } + } + + int64_t position() const { return position_; } + + private: + uint8_t* bitmap_; + int64_t position_; + int64_t length_; + + uint8_t current_byte_; + uint8_t bit_mask_; + int64_t byte_offset_; +}; + +class FirstTimeBitmapWriter { + // Like BitmapWriter, but any bit values *following* the bits written + // might be clobbered. It is hence faster than BitmapWriter, and can + // also avoid false positives with Valgrind. + + public: + FirstTimeBitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length) + : bitmap_(bitmap), position_(0), length_(length) { + current_byte_ = 0; + byte_offset_ = start_offset / 8; + bit_mask_ = bit_util::kBitmask[start_offset % 8]; + if (length > 0) { + current_byte_ = + bitmap[byte_offset_] & bit_util::kPrecedingBitmask[start_offset % 8]; + } else { + current_byte_ = 0; + } + } + + /// Appends number_of_bits from word to valid_bits and valid_bits_offset. + /// + /// \param[in] word The LSB bitmap to append. Any bits past number_of_bits are assumed + /// to be unset (i.e. 0). 
+ /// \param[in] number_of_bits The number of bits to append from word. + void AppendWord(uint64_t word, int64_t number_of_bits) { + if (ARROW_PREDICT_FALSE(number_of_bits == 0)) { + return; + } + + // Location that the first byte needs to be written to. + uint8_t* append_position = bitmap_ + byte_offset_; + + // Update state variables except for current_byte_ here. + position_ += number_of_bits; + int64_t bit_offset = bit_util::CountTrailingZeros(static_cast(bit_mask_)); + bit_mask_ = bit_util::kBitmask[(bit_offset + number_of_bits) % 8]; + byte_offset_ += (bit_offset + number_of_bits) / 8; + + if (bit_offset != 0) { + // We are in the middle of the byte. This code updates the byte and shifts + // bits appropriately within word so it can be memcpy'd below. + int64_t bits_to_carry = 8 - bit_offset; + // Carry over bits from word to current_byte_. We assume any extra bits in word + // unset so no additional accounting is needed for when number_of_bits < + // bits_to_carry. + current_byte_ |= (word & bit_util::kPrecedingBitmask[bits_to_carry]) << bit_offset; + // Check if everything is transferred into current_byte_. + if (ARROW_PREDICT_FALSE(number_of_bits < bits_to_carry)) { + return; + } + *append_position = current_byte_; + append_position++; + // Move the carry bits off of word. + word = word >> bits_to_carry; + number_of_bits -= bits_to_carry; + } + word = bit_util::ToLittleEndian(word); + int64_t bytes_for_word = ::arrow::bit_util::BytesForBits(number_of_bits); + std::memcpy(append_position, &word, bytes_for_word); + // At this point, the previous current_byte_ has been written to bitmap_. + // The new current_byte_ is either the last relevant byte in 'word' + // or cleared if the new position is byte aligned (i.e. a fresh byte). 
+ if (bit_mask_ == 0x1) { + current_byte_ = 0; + } else { + current_byte_ = *(append_position + bytes_for_word - 1); + } + } + + void Set() { current_byte_ |= bit_mask_; } + + void Clear() {} + + void Next() { + bit_mask_ = static_cast(bit_mask_ << 1); + ++position_; + if (bit_mask_ == 0) { + // Finished this byte, need advancing + bit_mask_ = 0x01; + bitmap_[byte_offset_++] = current_byte_; + current_byte_ = 0; + } + } + + void Finish() { + // Store current byte if we didn't went go bitmap storage + if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) { + bitmap_[byte_offset_] = current_byte_; + } + } + + int64_t position() const { return position_; } + + private: + uint8_t* bitmap_; + int64_t position_; + int64_t length_; + + uint8_t current_byte_; + uint8_t bit_mask_; + int64_t byte_offset_; +}; + +template +class BitmapWordWriter { + public: + BitmapWordWriter() = default; + BitmapWordWriter(uint8_t* bitmap, int64_t offset, int64_t length) + : offset_(static_cast(may_have_byte_offset) * (offset % 8)), + bitmap_(bitmap + offset / 8), + bitmap_end_(bitmap_ + bit_util::BytesForBits(offset_ + length)), + mask_((1U << offset_) - 1) { + if (offset_) { + if (length >= static_cast(sizeof(Word) * 8)) { + current_data.word_ = load(bitmap_); + } else if (length > 0) { + current_data.epi.byte_ = load(bitmap_); + } + } + } + + void PutNextWord(Word word) { + if (may_have_byte_offset && offset_) { + // split one word into two adjacent words, don't touch unused bits + // |<------ word ----->| + // +-----+-------------+ + // | A | B | + // +-----+-------------+ + // | | + // v v offset + // +-------------+-----+-------------+-----+ + // | --- | A | B | --- | + // +-------------+-----+-------------+-----+ + // |<------ next ----->|<---- current ---->| + word = (word << offset_) | (word >> (sizeof(Word) * 8 - offset_)); + Word next_word = load(bitmap_ + sizeof(Word)); + current_data.word_ = (current_data.word_ & mask_) | (word & ~mask_); + next_word = (next_word & 
~mask_) | (word & mask_); + store(bitmap_, current_data.word_); + store(bitmap_ + sizeof(Word), next_word); + current_data.word_ = next_word; + } else { + store(bitmap_, word); + } + bitmap_ += sizeof(Word); + } + + void PutNextTrailingByte(uint8_t byte, int valid_bits) { + if (valid_bits == 8) { + if (may_have_byte_offset && offset_) { + byte = (byte << offset_) | (byte >> (8 - offset_)); + uint8_t next_byte = load(bitmap_ + 1); + current_data.epi.byte_ = (current_data.epi.byte_ & mask_) | (byte & ~mask_); + next_byte = (next_byte & ~mask_) | (byte & mask_); + store(bitmap_, current_data.epi.byte_); + store(bitmap_ + 1, next_byte); + current_data.epi.byte_ = next_byte; + } else { + store(bitmap_, byte); + } + ++bitmap_; + } else { + assert(valid_bits > 0); + assert(valid_bits < 8); + assert(bitmap_ + bit_util::BytesForBits(offset_ + valid_bits) <= bitmap_end_); + internal::BitmapWriter writer(bitmap_, offset_, valid_bits); + for (int i = 0; i < valid_bits; ++i) { + (byte & 0x01) ? writer.Set() : writer.Clear(); + writer.Next(); + byte >>= 1; + } + writer.Finish(); + } + } + + private: + int64_t offset_; + uint8_t* bitmap_; + + const uint8_t* bitmap_end_; + uint64_t mask_; + union { + Word word_; + struct { +#if ARROW_LITTLE_ENDIAN == 0 + uint8_t padding_bytes_[sizeof(Word) - 1]; +#endif + uint8_t byte_; + } epi; + } current_data; + + template + DType load(const uint8_t* bitmap) { + assert(bitmap + sizeof(DType) <= bitmap_end_); + return bit_util::ToLittleEndian(util::SafeLoadAs(bitmap)); + } + + template + void store(uint8_t* bitmap, DType data) { + assert(bitmap + sizeof(DType) <= bitmap_end_); + util::SafeStore(bitmap, bit_util::FromLittleEndian(data)); + } +}; + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h new file mode 100644 index 
0000000000000000000000000000000000000000..9b334b3605eeee020a2e717b64f530c5ba82bdcd --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/memory_pool.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/compare.h" +#include "arrow/util/functional.h" +#include "arrow/util/macros.h" +#include "arrow/util/string_builder.h" +#include "arrow/util/type_traits.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief Store a stack of bitsets efficiently. The top bitset may be +/// accessed and its bits may be modified, but it may not be resized. 
+class BitsetStack { + public: + using reference = typename std::vector::reference; + + /// \brief push a bitset onto the stack + /// \param size number of bits in the next bitset + /// \param value initial value for bits in the pushed bitset + void Push(int size, bool value) { + offsets_.push_back(bit_count()); + bits_.resize(bit_count() + size, value); + } + + /// \brief number of bits in the bitset at the top of the stack + int TopSize() const { + if (offsets_.size() == 0) return 0; + return bit_count() - offsets_.back(); + } + + /// \brief pop a bitset off the stack + void Pop() { + bits_.resize(offsets_.back()); + offsets_.pop_back(); + } + + /// \brief get the value of a bit in the top bitset + /// \param i index of the bit to access + bool operator[](int i) const { return bits_[offsets_.back() + i]; } + + /// \brief get a mutable reference to a bit in the top bitset + /// \param i index of the bit to access + reference operator[](int i) { return bits_[offsets_.back() + i]; } + + private: + int bit_count() const { return static_cast(bits_.size()); } + std::vector bits_; + std::vector offsets_; +}; + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h new file mode 100644 index 0000000000000000000000000000000000000000..dd85c1638c7bfcd9cfd4034fb80ce775aaa92ce9 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/endian.h" +#include "arrow/util/visibility.h" + +#include + +namespace arrow { +namespace internal { + +ARROW_EXPORT +int unpack32(const uint32_t* in, uint32_t* out, int batch_size, int num_bits); +ARROW_EXPORT +int unpack64(const uint8_t* in, uint64_t* out, int batch_size, int num_bits); + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h new file mode 100644 index 0000000000000000000000000000000000000000..96723f803e0c1a64ef753ab6a51d8f2bd8c173d1 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +namespace arrow { +namespace internal { + +int unpack32_avx512(const uint32_t* in, uint32_t* out, int batch_size, int num_bits); + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h new file mode 100644 index 0000000000000000000000000000000000000000..9d02cd568acbc9661f763259e1d4ed134f609e4d --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +namespace arrow { +namespace internal { + +int unpack32_neon(const uint32_t* in, uint32_t* out, int batch_size, int num_bits); + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h new file mode 100644 index 0000000000000000000000000000000000000000..97f6b61a1f8cebd297a5f4a8fe4401b6073de45f --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +namespace arrow { +namespace internal { + +template +inline OutputType checked_cast(InputType&& value) { + static_assert(std::is_class::type>::type>::value, + "checked_cast input type must be a class"); + static_assert(std::is_class::type>::type>::value, + "checked_cast output type must be a class"); +#ifdef NDEBUG + return static_cast(value); +#else + return dynamic_cast(value); +#endif +} + +template +std::shared_ptr checked_pointer_cast(std::shared_ptr r) noexcept { +#ifdef NDEBUG + return std::static_pointer_cast(std::move(r)); +#else + return std::dynamic_pointer_cast(std::move(r)); +#endif +} + +template +std::unique_ptr checked_pointer_cast(std::unique_ptr r) noexcept { +#ifdef NDEBUG + return std::unique_ptr(static_cast(r.release())); +#else + return std::unique_ptr(dynamic_cast(r.release())); +#endif +} + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h new file mode 100644 index 0000000000000000000000000000000000000000..f7bf4d5e12d02d349c3a0e0fce43f6be5ef4d585 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h @@ -0,0 +1,241 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +constexpr int kUseDefaultCompressionLevel = std::numeric_limits::min(); + +/// \brief Streaming compressor interface +/// +class ARROW_EXPORT Compressor { + public: + virtual ~Compressor() = default; + + struct CompressResult { + int64_t bytes_read; + int64_t bytes_written; + }; + struct FlushResult { + int64_t bytes_written; + bool should_retry; + }; + struct EndResult { + int64_t bytes_written; + bool should_retry; + }; + + /// \brief Compress some input. + /// + /// If bytes_read is 0 on return, then a larger output buffer should be supplied. + virtual Result Compress(int64_t input_len, const uint8_t* input, + int64_t output_len, uint8_t* output) = 0; + + /// \brief Flush part of the compressed output. + /// + /// If should_retry is true on return, Flush() should be called again + /// with a larger buffer. + virtual Result Flush(int64_t output_len, uint8_t* output) = 0; + + /// \brief End compressing, doing whatever is necessary to end the stream. + /// + /// If should_retry is true on return, End() should be called again + /// with a larger buffer. Otherwise, the Compressor should not be used anymore. + /// + /// End() implies Flush(). + virtual Result End(int64_t output_len, uint8_t* output) = 0; + + // XXX add methods for buffer size heuristics? 
+}; + +/// \brief Streaming decompressor interface +/// +class ARROW_EXPORT Decompressor { + public: + virtual ~Decompressor() = default; + + struct DecompressResult { + // XXX is need_more_output necessary? (Brotli?) + int64_t bytes_read; + int64_t bytes_written; + bool need_more_output; + }; + + /// \brief Decompress some input. + /// + /// If need_more_output is true on return, a larger output buffer needs + /// to be supplied. + virtual Result Decompress(int64_t input_len, const uint8_t* input, + int64_t output_len, uint8_t* output) = 0; + + /// \brief Return whether the compressed stream is finished. + /// + /// This is a heuristic. If true is returned, then it is guaranteed + /// that the stream is finished. If false is returned, however, it may + /// simply be that the underlying library isn't able to provide the information. + virtual bool IsFinished() = 0; + + /// \brief Reinitialize decompressor, making it ready for a new compressed stream. + virtual Status Reset() = 0; + + // XXX add methods for buffer size heuristics? 
+}; + +/// \brief Compression codec options +class ARROW_EXPORT CodecOptions { + public: + explicit CodecOptions(int compression_level = kUseDefaultCompressionLevel) + : compression_level(compression_level) {} + + virtual ~CodecOptions() = default; + + int compression_level; +}; + +// ---------------------------------------------------------------------- +// GZip codec options implementation + +enum class GZipFormat { + ZLIB, + DEFLATE, + GZIP, +}; + +class ARROW_EXPORT GZipCodecOptions : public CodecOptions { + public: + GZipFormat gzip_format = GZipFormat::GZIP; + std::optional window_bits; +}; + +// ---------------------------------------------------------------------- +// brotli codec options implementation + +class ARROW_EXPORT BrotliCodecOptions : public CodecOptions { + public: + std::optional window_bits; +}; + +/// \brief Compression codec +class ARROW_EXPORT Codec { + public: + virtual ~Codec() = default; + + /// \brief Return special value to indicate that a codec implementation + /// should use its default compression level + static int UseDefaultCompressionLevel(); + + /// \brief Return a string name for compression type + static const std::string& GetCodecAsString(Compression::type t); + + /// \brief Return compression type for name (all lower case) + static Result GetCompressionType(const std::string& name); + + /// \brief Create a codec for the given compression algorithm with CodecOptions + static Result> Create( + Compression::type codec, const CodecOptions& codec_options = CodecOptions{}); + + /// \brief Create a codec for the given compression algorithm + static Result> Create(Compression::type codec, + int compression_level); + + /// \brief Return true if support for indicated codec has been enabled + static bool IsAvailable(Compression::type codec); + + /// \brief Return true if indicated codec supports setting a compression level + static bool SupportsCompressionLevel(Compression::type codec); + + /// \brief Return the smallest supported 
compression level for the codec + /// Note: This function creates a temporary Codec instance + static Result MinimumCompressionLevel(Compression::type codec); + + /// \brief Return the largest supported compression level for the codec + /// Note: This function creates a temporary Codec instance + static Result MaximumCompressionLevel(Compression::type codec); + + /// \brief Return the default compression level + /// Note: This function creates a temporary Codec instance + static Result DefaultCompressionLevel(Compression::type codec); + + /// \brief Return the smallest supported compression level + virtual int minimum_compression_level() const = 0; + + /// \brief Return the largest supported compression level + virtual int maximum_compression_level() const = 0; + + /// \brief Return the default compression level + virtual int default_compression_level() const = 0; + + /// \brief One-shot decompression function + /// + /// output_buffer_len must be correct and therefore be obtained in advance. + /// The actual decompressed length is returned. + /// + /// \note One-shot decompression is not always compatible with streaming + /// compression. Depending on the codec (e.g. LZ4), different formats may + /// be used. + virtual Result Decompress(int64_t input_len, const uint8_t* input, + int64_t output_buffer_len, + uint8_t* output_buffer) = 0; + + /// \brief One-shot compression function + /// + /// output_buffer_len must first have been computed using MaxCompressedLen(). + /// The actual compressed length is returned. + /// + /// \note One-shot compression is not always compatible with streaming + /// decompression. Depending on the codec (e.g. LZ4), different formats may + /// be used. 
+ virtual Result Compress(int64_t input_len, const uint8_t* input, + int64_t output_buffer_len, uint8_t* output_buffer) = 0; + + virtual int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) = 0; + + /// \brief Create a streaming compressor instance + virtual Result> MakeCompressor() = 0; + + /// \brief Create a streaming compressor instance + virtual Result> MakeDecompressor() = 0; + + /// \brief This Codec's compression type + virtual Compression::type compression_type() const = 0; + + /// \brief The name of this Codec's compression type + const std::string& name() const { return GetCodecAsString(compression_type()); } + + /// \brief This Codec's compression level, if applicable + virtual int compression_level() const { return UseDefaultCompressionLevel(); } + + private: + /// \brief Initializes the codec's resources. + virtual Status Init(); +}; + +} // namespace util +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h new file mode 100644 index 0000000000000000000000000000000000000000..c23d6ccd9886e4539d52d537abb85da1dcc93385 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h @@ -0,0 +1,411 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/chunked_array.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/checked_cast.h" +#include "arrow/visit_type_inline.h" + +namespace arrow { +namespace internal { + +template class ConverterTrait> +static Result> MakeConverter( + std::shared_ptr type, typename BaseConverter::OptionsType options, + MemoryPool* pool); + +template +class Converter { + public: + using Self = Converter; + using InputType = Input; + using OptionsType = Options; + + virtual ~Converter() = default; + + Status Construct(std::shared_ptr type, OptionsType options, + MemoryPool* pool) { + type_ = std::move(type); + options_ = std::move(options); + return Init(pool); + } + + virtual Status Append(InputType value) { return Status::NotImplemented("Append"); } + + virtual Status Extend(InputType values, int64_t size, int64_t offset = 0) { + return Status::NotImplemented("Extend"); + } + + virtual Status ExtendMasked(InputType values, InputType mask, int64_t size, + int64_t offset = 0) { + return Status::NotImplemented("ExtendMasked"); + } + + const std::shared_ptr& builder() const { return builder_; } + + const std::shared_ptr& type() const { return type_; } + + OptionsType options() const { return options_; } + + bool may_overflow() const { return may_overflow_; } + + bool rewind_on_overflow() const { return rewind_on_overflow_; } + + virtual Status Reserve(int64_t additional_capacity) { + return builder_->Reserve(additional_capacity); + 
} + + Status AppendNull() { return builder_->AppendNull(); } + + virtual Result> ToArray() { return builder_->Finish(); } + + virtual Result> ToArray(int64_t length) { + ARROW_ASSIGN_OR_RAISE(auto arr, this->ToArray()); + return arr->Slice(0, length); + } + + virtual Result> ToChunkedArray() { + ARROW_ASSIGN_OR_RAISE(auto array, ToArray()); + std::vector> chunks = {std::move(array)}; + return std::make_shared(chunks); + } + + protected: + virtual Status Init(MemoryPool* pool) { return Status::OK(); } + + std::shared_ptr type_; + std::shared_ptr builder_; + OptionsType options_; + bool may_overflow_ = false; + bool rewind_on_overflow_ = false; +}; + +template +class PrimitiveConverter : public BaseConverter { + public: + using BuilderType = typename TypeTraits::BuilderType; + + protected: + Status Init(MemoryPool* pool) override { + this->builder_ = std::make_shared(this->type_, pool); + // Narrow variable-sized binary types may overflow + this->may_overflow_ = is_binary_like(this->type_->id()); + primitive_type_ = checked_cast(this->type_.get()); + primitive_builder_ = checked_cast(this->builder_.get()); + return Status::OK(); + } + + const ArrowType* primitive_type_; + BuilderType* primitive_builder_; +}; + +template class ConverterTrait> +class ListConverter : public BaseConverter { + public: + using BuilderType = typename TypeTraits::BuilderType; + using ConverterType = typename ConverterTrait::type; + + protected: + Status Init(MemoryPool* pool) override { + list_type_ = checked_cast(this->type_.get()); + ARROW_ASSIGN_OR_RAISE(value_converter_, + (MakeConverter( + list_type_->value_type(), this->options_, pool))); + this->builder_ = + std::make_shared(pool, value_converter_->builder(), this->type_); + list_builder_ = checked_cast(this->builder_.get()); + // Narrow list types may overflow + this->may_overflow_ = this->rewind_on_overflow_ = + sizeof(typename ArrowType::offset_type) < sizeof(int64_t); + return Status::OK(); + } + + const ArrowType* list_type_; + 
BuilderType* list_builder_; + std::unique_ptr value_converter_; +}; + +template class ConverterTrait> +class StructConverter : public BaseConverter { + public: + using ConverterType = typename ConverterTrait::type; + + Status Reserve(int64_t additional_capacity) override { + ARROW_RETURN_NOT_OK(this->builder_->Reserve(additional_capacity)); + for (const auto& child : children_) { + ARROW_RETURN_NOT_OK(child->Reserve(additional_capacity)); + } + return Status::OK(); + } + + protected: + Status Init(MemoryPool* pool) override { + std::unique_ptr child_converter; + std::vector> child_builders; + + struct_type_ = checked_cast(this->type_.get()); + for (const auto& field : struct_type_->fields()) { + ARROW_ASSIGN_OR_RAISE(child_converter, + (MakeConverter( + field->type(), this->options_, pool))); + this->may_overflow_ |= child_converter->may_overflow(); + this->rewind_on_overflow_ = this->may_overflow_; + child_builders.push_back(child_converter->builder()); + children_.push_back(std::move(child_converter)); + } + + this->builder_ = + std::make_shared(this->type_, pool, std::move(child_builders)); + struct_builder_ = checked_cast(this->builder_.get()); + + return Status::OK(); + } + + const StructType* struct_type_; + StructBuilder* struct_builder_; + std::vector> children_; +}; + +template +class DictionaryConverter : public BaseConverter { + public: + using BuilderType = DictionaryBuilder; + + protected: + Status Init(MemoryPool* pool) override { + std::unique_ptr builder; + ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, this->type_, NULLPTR, &builder)); + this->builder_ = std::move(builder); + this->may_overflow_ = false; + dict_type_ = checked_cast(this->type_.get()); + value_type_ = checked_cast(dict_type_->value_type().get()); + value_builder_ = checked_cast(this->builder_.get()); + return Status::OK(); + } + + const DictionaryType* dict_type_; + const ValueType* value_type_; + BuilderType* value_builder_; +}; + +template class ConverterTrait> +struct 
MakeConverterImpl { + template ::type> + Status Visit(const T&) { + out.reset(new ConverterType()); + return out->Construct(std::move(type), std::move(options), pool); + } + + Status Visit(const DictionaryType& t) { + switch (t.value_type()->id()) { +#define DICTIONARY_CASE(TYPE) \ + case TYPE::type_id: \ + out = std::make_unique< \ + typename ConverterTrait::template dictionary_type>(); \ + break; + DICTIONARY_CASE(BooleanType); + DICTIONARY_CASE(Int8Type); + DICTIONARY_CASE(Int16Type); + DICTIONARY_CASE(Int32Type); + DICTIONARY_CASE(Int64Type); + DICTIONARY_CASE(UInt8Type); + DICTIONARY_CASE(UInt16Type); + DICTIONARY_CASE(UInt32Type); + DICTIONARY_CASE(UInt64Type); + DICTIONARY_CASE(FloatType); + DICTIONARY_CASE(DoubleType); + DICTIONARY_CASE(BinaryType); + DICTIONARY_CASE(StringType); + DICTIONARY_CASE(FixedSizeBinaryType); +#undef DICTIONARY_CASE + default: + return Status::NotImplemented("DictionaryArray converter for type ", t.ToString(), + " not implemented"); + } + return out->Construct(std::move(type), std::move(options), pool); + } + + Status Visit(const DataType& t) { return Status::NotImplemented(t.name()); } + + std::shared_ptr type; + typename BaseConverter::OptionsType options; + MemoryPool* pool; + std::unique_ptr out; +}; + +template class ConverterTrait> +static Result> MakeConverter( + std::shared_ptr type, typename BaseConverter::OptionsType options, + MemoryPool* pool) { + MakeConverterImpl visitor{ + std::move(type), std::move(options), pool, NULLPTR}; + ARROW_RETURN_NOT_OK(VisitTypeInline(*visitor.type, &visitor)); + return std::move(visitor.out); +} + +template +class Chunker { + public: + using InputType = typename Converter::InputType; + + explicit Chunker(std::unique_ptr converter) + : converter_(std::move(converter)) {} + + Status Reserve(int64_t additional_capacity) { + ARROW_RETURN_NOT_OK(converter_->Reserve(additional_capacity)); + reserved_ += additional_capacity; + return Status::OK(); + } + + Status AppendNull() { + auto status = 
converter_->AppendNull(); + if (ARROW_PREDICT_FALSE(status.IsCapacityError())) { + if (converter_->builder()->length() == 0) { + // Builder length == 0 means the individual element is too large to append. + // In this case, no need to try again. + return status; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + return converter_->AppendNull(); + } + ++length_; + return status; + } + + Status Append(InputType value) { + auto status = converter_->Append(value); + if (ARROW_PREDICT_FALSE(status.IsCapacityError())) { + if (converter_->builder()->length() == 0) { + return status; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + return Append(value); + } + ++length_; + return status; + } + + Status Extend(InputType values, int64_t size, int64_t offset = 0) { + while (offset < size) { + auto length_before = converter_->builder()->length(); + auto status = converter_->Extend(values, size, offset); + auto length_after = converter_->builder()->length(); + auto num_converted = length_after - length_before; + + offset += num_converted; + length_ += num_converted; + + if (status.IsCapacityError()) { + if (converter_->builder()->length() == 0) { + // Builder length == 0 means the individual element is too large to append. + // In this case, no need to try again. + return status; + } else if (converter_->rewind_on_overflow()) { + // The list-like and binary-like conversion paths may raise a capacity error, + // we need to handle them differently. While the binary-like converters check + // the capacity before append/extend the list-like converters just check after + // append/extend. Thus depending on the implementation semantics we may need + // to rewind (slice) the output chunk by one. 
+ length_ -= 1; + offset -= 1; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + } else if (!status.ok()) { + return status; + } + } + return Status::OK(); + } + + Status ExtendMasked(InputType values, InputType mask, int64_t size, + int64_t offset = 0) { + while (offset < size) { + auto length_before = converter_->builder()->length(); + auto status = converter_->ExtendMasked(values, mask, size, offset); + auto length_after = converter_->builder()->length(); + auto num_converted = length_after - length_before; + + offset += num_converted; + length_ += num_converted; + + if (status.IsCapacityError()) { + if (converter_->builder()->length() == 0) { + // Builder length == 0 means the individual element is too large to append. + // In this case, no need to try again. + return status; + } else if (converter_->rewind_on_overflow()) { + // The list-like and binary-like conversion paths may raise a capacity error, + // we need to handle them differently. While the binary-like converters check + // the capacity before append/extend the list-like converters just check after + // append/extend. Thus depending on the implementation semantics we may need + // to rewind (slice) the output chunk by one. + length_ -= 1; + offset -= 1; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + } else if (!status.ok()) { + return status; + } + } + return Status::OK(); + } + + Status FinishChunk() { + ARROW_ASSIGN_OR_RAISE(auto chunk, converter_->ToArray(length_)); + chunks_.push_back(chunk); + // Reserve space for the remaining items. + // Besides being an optimization, it is also required if the converter's + // implementation relies on unsafe builder methods in converter->Append(). 
+ auto remaining = reserved_ - length_; + Reset(); + return Reserve(remaining); + } + + Result> ToChunkedArray() { + ARROW_RETURN_NOT_OK(FinishChunk()); + return std::make_shared(chunks_); + } + + protected: + void Reset() { + converter_->builder()->Reset(); + length_ = 0; + reserved_ = 0; + } + + int64_t length_ = 0; + int64_t reserved_ = 0; + std::unique_ptr converter_; + std::vector> chunks_; +}; + +template +static Result>> MakeChunker(std::unique_ptr converter) { + return std::make_unique>(std::move(converter)); +} + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h new file mode 100644 index 0000000000000000000000000000000000000000..a3c13cc3bea4d6be639b521051021f7cb1c07f14 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#ifndef ARROW_COUNTING_SEMAPHORE_H +#define ARROW_COUNTING_SEMAPHORE_H + +#include + +#include "arrow/status.h" + +namespace arrow { +namespace util { + +/// \brief Simple mutex-based counting semaphore with timeout +class ARROW_EXPORT CountingSemaphore { + public: + /// \brief Create an instance with initial_avail starting permits + /// + /// \param[in] initial_avail The semaphore will start with this many permits available + /// \param[in] timeout_seconds A timeout to be applied to all operations. Operations + /// will return Status::Invalid if this timeout elapses + explicit CountingSemaphore(uint32_t initial_avail = 0, double timeout_seconds = 10); + ~CountingSemaphore(); + /// \brief Block until num_permits permits are available + Status Acquire(uint32_t num_permits); + /// \brief Make num_permits permits available + Status Release(uint32_t num_permits); + /// \brief Wait until num_waiters are waiting on permits + /// + /// This method is non-standard but useful in unit tests to ensure sequencing + Status WaitForWaiters(uint32_t num_waiters); + /// \brief Immediately time out any waiters + /// + /// This method will return Status::OK only if there were no waiters to time out. + /// Once closed any operation on this instance will return an invalid status. + Status Close(); + + private: + class Impl; + std::unique_ptr impl_; +}; + +} // namespace util +} // namespace arrow + +#endif // ARROW_COUNTING_SEMAPHORE_H diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h new file mode 100644 index 0000000000000000000000000000000000000000..949719b97ed84da6277139a70e22203706ed6055 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h @@ -0,0 +1,114 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// From Apache Impala (incubating) as of 2016-01-29. Pared down to a minimal +// set of functions needed for Apache Arrow / Apache parquet-cpp + +#pragma once + +#include +#include +#include + +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// CpuInfo is an interface to query for cpu information at runtime. The caller can +/// ask for the sizes of the caches and what hardware features are supported. 
+/// On Linux, this information is pulled from a couple of sys files (/proc/cpuinfo and +/// /sys/devices) +class ARROW_EXPORT CpuInfo { + public: + ~CpuInfo(); + + /// x86 features + static constexpr int64_t SSSE3 = (1LL << 0); + static constexpr int64_t SSE4_1 = (1LL << 1); + static constexpr int64_t SSE4_2 = (1LL << 2); + static constexpr int64_t POPCNT = (1LL << 3); + static constexpr int64_t AVX = (1LL << 4); + static constexpr int64_t AVX2 = (1LL << 5); + static constexpr int64_t AVX512F = (1LL << 6); + static constexpr int64_t AVX512CD = (1LL << 7); + static constexpr int64_t AVX512VL = (1LL << 8); + static constexpr int64_t AVX512DQ = (1LL << 9); + static constexpr int64_t AVX512BW = (1LL << 10); + static constexpr int64_t AVX512 = AVX512F | AVX512CD | AVX512VL | AVX512DQ | AVX512BW; + static constexpr int64_t BMI1 = (1LL << 11); + static constexpr int64_t BMI2 = (1LL << 12); + + /// Arm features + static constexpr int64_t ASIMD = (1LL << 32); + + /// Cache enums for L1 (data), L2 and L3 + enum class CacheLevel { L1 = 0, L2, L3, Last = L3 }; + + /// CPU vendors + enum class Vendor { Unknown, Intel, AMD }; + + static const CpuInfo* GetInstance(); + + /// Returns all the flags for this cpu + int64_t hardware_flags() const; + + /// Returns the number of cores (including hyper-threaded) on this machine. + int num_cores() const; + + /// Returns the vendor of the cpu. + Vendor vendor() const; + + /// Returns the model name of the cpu (e.g. Intel i7-2600) + const std::string& model_name() const; + + /// Returns the size of the cache in KB at this cache level + int64_t CacheSize(CacheLevel level) const; + + /// \brief Returns whether or not the given feature is enabled. + /// + /// IsSupported() is true iff IsDetected() is also true and the feature + /// wasn't disabled by the user (for example by setting the ARROW_USER_SIMD_LEVEL + /// environment variable). 
+ bool IsSupported(int64_t flags) const; + + /// Returns whether or not the given feature is available on the CPU. + bool IsDetected(int64_t flags) const; + + /// Determine if the CPU meets the minimum CPU requirements and if not, issue an error + /// and terminate. + void VerifyCpuRequirements() const; + + /// Toggle a hardware feature on and off. It is not valid to turn on a feature + /// that the underlying hardware cannot support. This is useful for testing. + void EnableFeature(int64_t flag, bool enable); + + bool HasEfficientBmi2() const { + // BMI2 (pext, pdep) is only efficient on Intel X86 processors. + return vendor() == Vendor::Intel && IsSupported(BMI2); + } + + private: + CpuInfo(); + + struct Impl; + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h new file mode 100644 index 0000000000000000000000000000000000000000..14c7103d5ac0dc88d950976e528860f3b764df84 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h @@ -0,0 +1,298 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/basic_decimal.h" + +namespace arrow { + +/// Represents a signed 128-bit integer in two's complement. +/// Calculations wrap around and overflow is ignored. +/// The max decimal precision that can be safely represented is +/// 38 significant digits. +/// +/// For a discussion of the algorithms, look at Knuth's volume 2, +/// Semi-numerical Algorithms section 4.3.1. +/// +/// Adapted from the Apache ORC C++ implementation +/// +/// The implementation is split into two parts : +/// +/// 1. BasicDecimal128 +/// - can be safely compiled to IR without references to libstdc++. +/// 2. Decimal128 +/// - has additional functionality on top of BasicDecimal128 to deal with +/// strings and streams. +class ARROW_EXPORT Decimal128 : public BasicDecimal128 { + public: + /// \cond FALSE + // (need to avoid a duplicate definition in Sphinx) + using BasicDecimal128::BasicDecimal128; + /// \endcond + + /// \brief constructor creates a Decimal128 from a BasicDecimal128. + constexpr Decimal128(const BasicDecimal128& value) noexcept // NOLINT runtime/explicit + : BasicDecimal128(value) {} + + /// \brief Parse the number from a base 10 string representation. + explicit Decimal128(const std::string& value); + + /// \brief Empty constructor creates a Decimal128 with a value of 0. + // This is required on some older compilers. + constexpr Decimal128() noexcept : BasicDecimal128() {} + + /// Divide this number by right and return the result. + /// + /// This operation is not destructive. + /// The answer rounds to zero. 
Signs work like: + /// 21 / 5 -> 4, 1 + /// -21 / 5 -> -4, -1 + /// 21 / -5 -> -4, 1 + /// -21 / -5 -> 4, -1 + /// \param[in] divisor the number to divide by + /// \return the pair of the quotient and the remainder + Result> Divide(const Decimal128& divisor) const { + std::pair result; + auto dstatus = BasicDecimal128::Divide(divisor, &result.first, &result.second); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return result; + } + + /// \brief Convert the Decimal128 value to a base 10 decimal string with the given + /// scale. + std::string ToString(int32_t scale) const; + + /// \brief Convert the value to an integer string + std::string ToIntegerString() const; + + /// \brief Cast this value to an int64_t. + explicit operator int64_t() const; + + /// \brief Convert a decimal string to a Decimal128 value, optionally including + /// precision and scale if they're passed in and not null. + static Status FromString(std::string_view s, Decimal128* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const std::string& s, Decimal128* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const char* s, Decimal128* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Result FromString(std::string_view s); + static Result FromString(const std::string& s); + static Result FromString(const char* s); + + static Result FromReal(double real, int32_t precision, int32_t scale); + static Result FromReal(float real, int32_t precision, int32_t scale); + + /// \brief Convert from a big-endian byte representation. The length must be + /// between 1 and 16. 
+ /// \return error status if the length is an invalid value + static Result FromBigEndian(const uint8_t* data, int32_t length); + + /// \brief Convert Decimal128 from one scale to another + Result Rescale(int32_t original_scale, int32_t new_scale) const { + Decimal128 out; + auto dstatus = BasicDecimal128::Rescale(original_scale, new_scale, &out); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return out; + } + + /// \brief Convert to a signed integer + template > + Result ToInteger() const { + constexpr auto min_value = std::numeric_limits::min(); + constexpr auto max_value = std::numeric_limits::max(); + const auto& self = *this; + if (self < min_value || self > max_value) { + return Status::Invalid("Invalid cast from Decimal128 to ", sizeof(T), + " byte integer"); + } + return static_cast(low_bits()); + } + + /// \brief Convert to a signed integer + template > + Status ToInteger(T* out) const { + return ToInteger().Value(out); + } + + /// \brief Convert to a floating-point number (scaled) + float ToFloat(int32_t scale) const; + /// \brief Convert to a floating-point number (scaled) + double ToDouble(int32_t scale) const; + + /// \brief Convert to a floating-point number (scaled) + template >> + T ToReal(int32_t scale) const { + static_assert(std::is_same_v || std::is_same_v, + "Unexpected floating-point type"); + if constexpr (std::is_same_v) { + return ToFloat(scale); + } else { + return ToDouble(scale); + } + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Decimal128& decimal); + + private: + /// Converts internal error code to Status + Status ToArrowStatus(DecimalStatus dstatus) const; +}; + +/// Represents a signed 256-bit integer in two's complement. +/// The max decimal precision that can be safely represented is +/// 76 significant digits. +/// +/// The implementation is split into two parts : +/// +/// 1. BasicDecimal256 +/// - can be safely compiled to IR without references to libstdc++. +/// 2. 
Decimal256 +/// - (TODO) has additional functionality on top of BasicDecimal256 to deal with +/// strings and streams. +class ARROW_EXPORT Decimal256 : public BasicDecimal256 { + public: + /// \cond FALSE + // (need to avoid a duplicate definition in Sphinx) + using BasicDecimal256::BasicDecimal256; + /// \endcond + + /// \brief constructor creates a Decimal256 from a BasicDecimal256. + constexpr Decimal256(const BasicDecimal256& value) noexcept // NOLINT(runtime/explicit) + : BasicDecimal256(value) {} + + /// \brief Parse the number from a base 10 string representation. + explicit Decimal256(const std::string& value); + + /// \brief Empty constructor creates a Decimal256 with a value of 0. + // This is required on some older compilers. + constexpr Decimal256() noexcept : BasicDecimal256() {} + + /// \brief Convert the Decimal256 value to a base 10 decimal string with the given + /// scale. + std::string ToString(int32_t scale) const; + + /// \brief Convert the value to an integer string + std::string ToIntegerString() const; + + /// \brief Convert a decimal string to a Decimal256 value, optionally including + /// precision and scale if they're passed in and not null. 
+ static Status FromString(std::string_view s, Decimal256* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const std::string& s, Decimal256* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const char* s, Decimal256* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Result FromString(std::string_view s); + static Result FromString(const std::string& s); + static Result FromString(const char* s); + + /// \brief Convert Decimal256 from one scale to another + Result Rescale(int32_t original_scale, int32_t new_scale) const { + Decimal256 out; + auto dstatus = BasicDecimal256::Rescale(original_scale, new_scale, &out); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return out; + } + + /// Divide this number by right and return the result. + /// + /// This operation is not destructive. + /// The answer rounds to zero. Signs work like: + /// 21 / 5 -> 4, 1 + /// -21 / 5 -> -4, -1 + /// 21 / -5 -> -4, 1 + /// -21 / -5 -> 4, -1 + /// \param[in] divisor the number to divide by + /// \return the pair of the quotient and the remainder + Result> Divide(const Decimal256& divisor) const { + std::pair result; + auto dstatus = BasicDecimal256::Divide(divisor, &result.first, &result.second); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return result; + } + + /// \brief Convert from a big-endian byte representation. The length must be + /// between 1 and 32. + /// \return error status if the length is an invalid value + static Result FromBigEndian(const uint8_t* data, int32_t length); + + static Result FromReal(double real, int32_t precision, int32_t scale); + static Result FromReal(float real, int32_t precision, int32_t scale); + + /// \brief Convert to a floating-point number (scaled). + /// May return infinity in case of overflow. 
+ float ToFloat(int32_t scale) const; + /// \brief Convert to a floating-point number (scaled) + double ToDouble(int32_t scale) const; + + /// \brief Convert to a floating-point number (scaled) + template >> + T ToReal(int32_t scale) const { + static_assert(std::is_same_v || std::is_same_v, + "Unexpected floating-point type"); + if constexpr (std::is_same_v) { + return ToFloat(scale); + } else { + return ToDouble(scale); + } + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Decimal256& decimal); + + private: + /// Converts internal error code to Status + Status ToArrowStatus(DecimalStatus dstatus) const; +}; + +/// For an integer type, return the max number of decimal digits +/// (=minimal decimal precision) it can represent. +inline Result MaxDecimalDigitsForInteger(Type::type type_id) { + switch (type_id) { + case Type::INT8: + case Type::UINT8: + return 3; + case Type::INT16: + case Type::UINT16: + return 5; + case Type::INT32: + case Type::UINT32: + return 10; + case Type::INT64: + return 19; + case Type::UINT64: + return 20; + default: + break; + } + return Status::Invalid("Not an integer type: ", type_id); +} + +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h new file mode 100644 index 0000000000000000000000000000000000000000..161ad0bfddfc5a52040256a9cb39b5af96b876db --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h @@ -0,0 +1,181 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Buffer; + +class ARROW_EXPORT BoundaryFinder { + public: + BoundaryFinder() = default; + + virtual ~BoundaryFinder(); + + /// \brief Find the position of the first delimiter inside block + /// + /// `partial` is taken to be the beginning of the block, and `block` + /// its continuation. Also, `partial` doesn't contain a delimiter. + /// + /// The returned `out_pos` is relative to `block`'s start and should point + /// to the first character after the first delimiter. + /// `out_pos` will be -1 if no delimiter is found. + virtual Status FindFirst(std::string_view partial, std::string_view block, + int64_t* out_pos) = 0; + + /// \brief Find the position of the last delimiter inside block + /// + /// The returned `out_pos` is relative to `block`'s start and should point + /// to the first character after the last delimiter. + /// `out_pos` will be -1 if no delimiter is found. + virtual Status FindLast(std::string_view block, int64_t* out_pos) = 0; + + /// \brief Find the position of the Nth delimiter inside the block + /// + /// `partial` is taken to be the beginning of the block, and `block` + /// its continuation. Also, `partial` doesn't contain a delimiter. + /// + /// The returned `out_pos` is relative to `block`'s start and should point + /// to the first character after the first delimiter. + /// `out_pos` will be -1 if no delimiter is found. 
+ /// + /// The returned `num_found` is the number of delimiters actually found + virtual Status FindNth(std::string_view partial, std::string_view block, int64_t count, + int64_t* out_pos, int64_t* num_found) = 0; + + static constexpr int64_t kNoDelimiterFound = -1; + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(BoundaryFinder); +}; + +ARROW_EXPORT +std::shared_ptr MakeNewlineBoundaryFinder(); + +/// \brief A reusable block-based chunker for delimited data +/// +/// The chunker takes a block of delimited data and helps carve a sub-block +/// which begins and ends on delimiters (suitable for consumption by parsers +/// which can only parse whole objects). +class ARROW_EXPORT Chunker { + public: + explicit Chunker(std::shared_ptr delimiter); + ~Chunker(); + + /// \brief Carve up a chunk in a block of data to contain only whole objects + /// + /// Pre-conditions: + /// - `block` is the start of a valid block of delimited data + /// (i.e. starts just after a delimiter) + /// + /// Post-conditions: + /// - block == whole + partial + /// - `whole` is a valid block of delimited data + /// (i.e. starts just after a delimiter and ends with a delimiter) + /// - `partial` doesn't contain an entire delimited object + /// (IOW: `partial` is generally small) + /// + /// This method will look for the last delimiter in `block` and may + /// therefore be costly. + /// + /// \param[in] block data to be chunked + /// \param[out] whole subrange of block containing whole delimited objects + /// \param[out] partial subrange of block starting with a partial delimited object + Status Process(std::shared_ptr block, std::shared_ptr* whole, + std::shared_ptr* partial); + + /// \brief Carve the completion of a partial object out of a block + /// + /// Pre-conditions: + /// - `partial` is the start of a valid block of delimited data + /// (i.e. 
starts just after a delimiter) + /// - `block` follows `partial` in file order + /// + /// Post-conditions: + /// - block == completion + rest + /// - `partial + completion` is a valid block of delimited data + /// (i.e. starts just after a delimiter and ends with a delimiter) + /// - `completion` doesn't contain an entire delimited object + /// (IOW: `completion` is generally small) + /// + /// This method will look for the first delimiter in `block` and should + /// therefore be reasonably cheap. + /// + /// \param[in] partial incomplete delimited data + /// \param[in] block delimited data following partial + /// \param[out] completion subrange of block containing the completion of partial + /// \param[out] rest subrange of block containing what completion does not cover + Status ProcessWithPartial(std::shared_ptr partial, + std::shared_ptr block, + std::shared_ptr* completion, + std::shared_ptr* rest); + + /// \brief Like ProcessWithPartial, but for the last block of a file + /// + /// This method allows for a final delimited object without a trailing delimiter + /// (ProcessWithPartial would return an error in that case). + /// + /// Pre-conditions: + /// - `partial` is the start of a valid block of delimited data + /// - `block` follows `partial` in file order and is the last data block + /// + /// Post-conditions: + /// - block == completion + rest + /// - `partial + completion` is a valid block of delimited data + /// - `completion` doesn't contain an entire delimited object + /// (IOW: `completion` is generally small) + /// + Status ProcessFinal(std::shared_ptr partial, std::shared_ptr block, + std::shared_ptr* completion, std::shared_ptr* rest); + + /// \brief Skip count number of rows + /// Pre-conditions: + /// - `partial` is the start of a valid block of delimited data + /// (i.e. 
starts just after a delimiter) + /// - `block` follows `partial` in file order + /// + /// Post-conditions: + /// - `count` is updated to indicate the number of rows that still need to be skipped + /// - If `count` is > 0 then `rest` is an incomplete block that should be a future + /// `partial` + /// - Else `rest` could be one or more valid blocks of delimited data which need to be + /// parsed + /// + /// \param[in] partial incomplete delimited data + /// \param[in] block delimited data following partial + /// \param[in] final whether this is the final chunk + /// \param[in,out] count number of rows that need to be skipped + /// \param[out] rest subrange of block containing what was not skipped + Status ProcessSkip(std::shared_ptr partial, std::shared_ptr block, + bool final, int64_t* count, std::shared_ptr* rest); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(Chunker); + + std::shared_ptr boundary_finder_; +}; + +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/dict_util.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/dict_util.h new file mode 100644 index 0000000000000000000000000000000000000000..a92733ae0f63d589e8dbb381c020e009c453ab4e --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/dict_util.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/array/data.h" + +namespace arrow { +namespace dict_util { + +int64_t LogicalNullCount(const ArraySpan& span); + +} // namespace dict_util +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fae9293f9e79891dcd85b536d697291289804ce5 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h @@ -0,0 +1,115 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include "arrow/status.h" +#include "arrow/util/cpu_info.h" + +namespace arrow { +namespace internal { + +enum class DispatchLevel : int { + // These dispatch levels, corresponding to instruction set features, + // are sorted in increasing order of preference. + NONE = 0, + SSE4_2, + AVX2, + AVX512, + NEON, + MAX +}; + +/* + A facility for dynamic dispatch according to available DispatchLevel. + + Typical use: + + static void my_function_default(...); + static void my_function_avx2(...); + + struct MyDynamicFunction { + using FunctionType = decltype(&my_function_default); + + static std::vector> implementations() { + return { + { DispatchLevel::NONE, my_function_default } + #if defined(ARROW_HAVE_RUNTIME_AVX2) + , { DispatchLevel::AVX2, my_function_avx2 } + #endif + }; + } + }; + + void my_function(...) { + static DynamicDispatch dispatch; + return dispatch.func(...); + } +*/ +template +class DynamicDispatch { + protected: + using FunctionType = typename DynamicFunction::FunctionType; + using Implementation = std::pair; + + public: + DynamicDispatch() { Resolve(DynamicFunction::implementations()); } + + FunctionType func = {}; + + protected: + // Use the Implementation with the highest DispatchLevel + void Resolve(const std::vector& implementations) { + Implementation cur{DispatchLevel::NONE, {}}; + + for (const auto& impl : implementations) { + if (impl.first >= cur.first && IsSupported(impl.first)) { + // Higher (or same) level than current + cur = impl; + } + } + + if (!cur.second) { + Status::Invalid("No appropriate implementation found").Abort(); + } + func = cur.second; + } + + private: + bool IsSupported(DispatchLevel level) const { + static const auto cpu_info = arrow::internal::CpuInfo::GetInstance(); + + switch (level) { + case DispatchLevel::NONE: + return true; + case DispatchLevel::SSE4_2: + return cpu_info->IsSupported(CpuInfo::SSE4_2); + case DispatchLevel::AVX2: + return cpu_info->IsSupported(CpuInfo::AVX2); 
+ case DispatchLevel::AVX512: + return cpu_info->IsSupported(CpuInfo::AVX512); + default: + return false; + } + } +}; + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h new file mode 100644 index 0000000000000000000000000000000000000000..dd9af907ecc374e94138e0fec20e87739a271658 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h @@ -0,0 +1,656 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This is a private header for number-to-string formatting utilities + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/double_conversion.h" +#include "arrow/util/macros.h" +#include "arrow/util/string.h" +#include "arrow/util/time.h" +#include "arrow/util/visibility.h" +#include "arrow/vendored/datetime.h" + +namespace arrow { +namespace internal { + +/// \brief The entry point for conversion to strings. 
+template +class StringFormatter; + +template +struct is_formattable { + template ::value_type> + static std::true_type Test(U*); + + template + static std::false_type Test(...); + + static constexpr bool value = decltype(Test(NULLPTR))::value; +}; + +template +using enable_if_formattable = enable_if_t::value, R>; + +template +using Return = decltype(std::declval()(std::string_view{})); + +///////////////////////////////////////////////////////////////////////// +// Boolean formatting + +template <> +class StringFormatter { + public: + explicit StringFormatter(const DataType* = NULLPTR) {} + + using value_type = bool; + + template + Return operator()(bool value, Appender&& append) { + if (value) { + const char string[] = "true"; + return append(std::string_view(string)); + } else { + const char string[] = "false"; + return append(std::string_view(string)); + } + } +}; + +///////////////////////////////////////////////////////////////////////// +// Decimals formatting + +template +class DecimalToStringFormatterMixin { + public: + explicit DecimalToStringFormatterMixin(const DataType* type) + : scale_(static_cast(type)->scale()) {} + + using value_type = typename TypeTraits::CType; + + template + Return operator()(const value_type& value, Appender&& append) { + return append(value.ToString(scale_)); + } + + private: + int32_t scale_; +}; + +template <> +class StringFormatter + : public DecimalToStringFormatterMixin { + using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin; +}; + +template <> +class StringFormatter + : public DecimalToStringFormatterMixin { + using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin; +}; + +///////////////////////////////////////////////////////////////////////// +// Integer formatting + +namespace detail { + +// A 2x100 direct table mapping integers in [0..99] to their decimal representations. 
+ARROW_EXPORT extern const char digit_pairs[]; + +// Based on fmtlib's format_int class: +// Write digits from right to left into a stack allocated buffer. +// \pre *cursor points to the byte after the one that will be written. +// \post *cursor points to the byte that was written. +inline void FormatOneChar(char c, char** cursor) { *(--(*cursor)) = c; } + +template +void FormatOneDigit(Int value, char** cursor) { + assert(value >= 0 && value <= 9); + FormatOneChar(static_cast('0' + value), cursor); +} + +// GH-35662: I don't know why but the following combination causes SEGV: +// * template implementation without inline +// * MinGW +// * Release build +template +inline void FormatTwoDigits(Int value, char** cursor) { + assert(value >= 0 && value <= 99); + auto digit_pair = &digit_pairs[value * 2]; + FormatOneChar(digit_pair[1], cursor); + FormatOneChar(digit_pair[0], cursor); +} + +template +void FormatAllDigits(Int value, char** cursor) { + assert(value >= 0); + while (value >= 100) { + FormatTwoDigits(value % 100, cursor); + value /= 100; + } + + if (value >= 10) { + FormatTwoDigits(value, cursor); + } else { + FormatOneDigit(value, cursor); + } +} + +template +void FormatAllDigitsLeftPadded(Int value, size_t pad, char pad_char, char** cursor) { + auto end = *cursor - pad; + FormatAllDigits(value, cursor); + while (*cursor > end) { + FormatOneChar(pad_char, cursor); + } +} + +template +std::string_view ViewDigitBuffer(const std::array& buffer, + char* cursor) { + auto buffer_end = buffer.data() + BUFFER_SIZE; + return {cursor, static_cast(buffer_end - cursor)}; +} + +template ::type> +constexpr UInt Abs(Int value) { + return value < 0 ? ~static_cast(value) + 1 : static_cast(value); +} + +template +constexpr size_t Digits10(Int value) { + return value <= 9 ? 
1 : Digits10(value / 10) + 1; +} + +} // namespace detail + +template +class IntToStringFormatterMixin { + public: + explicit IntToStringFormatterMixin(const DataType* = NULLPTR) {} + + using value_type = typename ARROW_TYPE::c_type; + + template + Return operator()(value_type value, Appender&& append) { + constexpr size_t buffer_size = + detail::Digits10(std::numeric_limits::max()) + 1; + + std::array buffer; + char* cursor = buffer.data() + buffer_size; + detail::FormatAllDigits(detail::Abs(value), &cursor); + if (value < 0) { + detail::FormatOneChar('-', &cursor); + } + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +///////////////////////////////////////////////////////////////////////// +// Floating-point formatting + +class ARROW_EXPORT FloatToStringFormatter { + public: + FloatToStringFormatter(); + FloatToStringFormatter(int flags, 
const char* inf_symbol, const char* nan_symbol, + char exp_character, int decimal_in_shortest_low, + int decimal_in_shortest_high, + int max_leading_padding_zeroes_in_precision_mode, + int max_trailing_padding_zeroes_in_precision_mode); + ~FloatToStringFormatter(); + + // Returns the number of characters written + int FormatFloat(float v, char* out_buffer, int out_size); + int FormatFloat(double v, char* out_buffer, int out_size); + int FormatFloat(uint16_t v, char* out_buffer, int out_size); + + protected: + struct Impl; + std::unique_ptr impl_; +}; + +template +class FloatToStringFormatterMixin : public FloatToStringFormatter { + public: + using value_type = typename ARROW_TYPE::c_type; + + static constexpr int buffer_size = 50; + + explicit FloatToStringFormatterMixin(const DataType* = NULLPTR) {} + + FloatToStringFormatterMixin(int flags, const char* inf_symbol, const char* nan_symbol, + char exp_character, int decimal_in_shortest_low, + int decimal_in_shortest_high, + int max_leading_padding_zeroes_in_precision_mode, + int max_trailing_padding_zeroes_in_precision_mode) + : FloatToStringFormatter(flags, inf_symbol, nan_symbol, exp_character, + decimal_in_shortest_low, decimal_in_shortest_high, + max_leading_padding_zeroes_in_precision_mode, + max_trailing_padding_zeroes_in_precision_mode) {} + + template + Return operator()(value_type value, Appender&& append) { + char buffer[buffer_size]; + int size = FormatFloat(value, buffer, buffer_size); + return append(std::string_view(buffer, size)); + } +}; + +template <> +class StringFormatter : public FloatToStringFormatterMixin { + public: + using FloatToStringFormatterMixin::FloatToStringFormatterMixin; +}; + +template <> +class StringFormatter : public FloatToStringFormatterMixin { + public: + using FloatToStringFormatterMixin::FloatToStringFormatterMixin; +}; + +template <> +class StringFormatter : public FloatToStringFormatterMixin { + public: + using FloatToStringFormatterMixin::FloatToStringFormatterMixin; +}; 
+ +///////////////////////////////////////////////////////////////////////// +// Temporal formatting + +namespace detail { + +constexpr size_t BufferSizeYYYY_MM_DD() { + // "-"? "99999-12-31" + return 1 + detail::Digits10(99999) + 1 + detail::Digits10(12) + 1 + + detail::Digits10(31); +} + +inline void FormatYYYY_MM_DD(arrow_vendored::date::year_month_day ymd, char** cursor) { + FormatTwoDigits(static_cast(ymd.day()), cursor); + FormatOneChar('-', cursor); + FormatTwoDigits(static_cast(ymd.month()), cursor); + FormatOneChar('-', cursor); + auto year = static_cast(ymd.year()); + const auto is_neg_year = year < 0; + year = std::abs(year); + assert(year <= 99999); + FormatTwoDigits(year % 100, cursor); + year /= 100; + FormatTwoDigits(year % 100, cursor); + if (year >= 100) { + FormatOneDigit(year / 100, cursor); + } + if (is_neg_year) { + FormatOneChar('-', cursor); + } +} + +template +constexpr size_t BufferSizeHH_MM_SS() { + // "23:59:59" ("." "9"+)? + return detail::Digits10(23) + 1 + detail::Digits10(59) + 1 + detail::Digits10(59) + 1 + + detail::Digits10(Duration::period::den) - 1; +} + +template +void FormatHH_MM_SS(arrow_vendored::date::hh_mm_ss hms, char** cursor) { + constexpr size_t subsecond_digits = Digits10(Duration::period::den) - 1; + if (subsecond_digits != 0) { + FormatAllDigitsLeftPadded(hms.subseconds().count(), subsecond_digits, '0', cursor); + FormatOneChar('.', cursor); + } + FormatTwoDigits(hms.seconds().count(), cursor); + FormatOneChar(':', cursor); + FormatTwoDigits(hms.minutes().count(), cursor); + FormatOneChar(':', cursor); + FormatTwoDigits(hms.hours().count(), cursor); +} + +// Some out-of-bound datetime values would result in erroneous printing +// because of silent integer wraparound in the `arrow_vendored::date` library. +// +// To avoid such misprinting, we must therefore check the bounds explicitly. 
+// The bounds correspond to start of year -32767 and end of year 32767, +// respectively (-32768 is an invalid year value in `arrow_vendored::date`). +// +// Note these values are the same as documented for C++20: +// https://en.cppreference.com/w/cpp/chrono/year_month_day/operator_days +template +bool IsDateTimeInRange(Unit duration) { + constexpr Unit kMinIncl = + std::chrono::duration_cast(arrow_vendored::date::days{-12687428}); + constexpr Unit kMaxExcl = + std::chrono::duration_cast(arrow_vendored::date::days{11248738}); + return duration >= kMinIncl && duration < kMaxExcl; +} + +// IsDateTimeInRange() specialization for nanoseconds: a 64-bit number of +// nanoseconds cannot represent years outside of the [-32767, 32767] +// range, and the {kMinIncl, kMaxExcl} constants above would overflow. +constexpr bool IsDateTimeInRange(std::chrono::nanoseconds duration) { return true; } + +template +bool IsTimeInRange(Unit duration) { + constexpr Unit kMinIncl = std::chrono::duration_cast(std::chrono::seconds{0}); + constexpr Unit kMaxExcl = std::chrono::duration_cast(std::chrono::seconds{86400}); + return duration >= kMinIncl && duration < kMaxExcl; +} + +template +Return FormatOutOfRange(RawValue&& raw_value, Appender&& append) { + // XXX locale-sensitive but good enough for now + std::string formatted = ""; + return append(std::move(formatted)); +} + +const auto kEpoch = arrow_vendored::date::sys_days{arrow_vendored::date::jan / 1 / 1970}; + +} // namespace detail + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +class DateToStringFormatterMixin { + public: + explicit DateToStringFormatterMixin(const DataType* = NULLPTR) {} + + protected: + template + Return FormatDays(arrow_vendored::date::days since_epoch, Appender&& append) { + arrow_vendored::date::sys_days timepoint_days{since_epoch}; + + constexpr size_t buffer_size = detail::BufferSizeYYYY_MM_DD(); + + std::array 
buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatYYYY_MM_DD(arrow_vendored::date::year_month_day{timepoint_days}, + &cursor); + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +template <> +class StringFormatter : public DateToStringFormatterMixin { + public: + using value_type = typename Date32Type::c_type; + + using DateToStringFormatterMixin::DateToStringFormatterMixin; + + template + Return operator()(value_type value, Appender&& append) { + const auto since_epoch = arrow_vendored::date::days{value}; + if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) { + return detail::FormatOutOfRange(value, append); + } + return FormatDays(since_epoch, std::forward(append)); + } +}; + +template <> +class StringFormatter : public DateToStringFormatterMixin { + public: + using value_type = typename Date64Type::c_type; + + using DateToStringFormatterMixin::DateToStringFormatterMixin; + + template + Return operator()(value_type value, Appender&& append) { + const auto since_epoch = std::chrono::milliseconds{value}; + if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) { + return detail::FormatOutOfRange(value, append); + } + return FormatDays(std::chrono::duration_cast(since_epoch), + std::forward(append)); + } +}; + +template <> +class StringFormatter { + public: + using value_type = int64_t; + + explicit StringFormatter(const DataType* type) + : unit_(checked_cast(*type).unit()), + timezone_(checked_cast(*type).timezone()) {} + + template + Return operator()(Duration, value_type value, Appender&& append) { + using arrow_vendored::date::days; + + const Duration since_epoch{value}; + if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) { + return detail::FormatOutOfRange(value, append); + } + + const auto timepoint = detail::kEpoch + since_epoch; + // Round days towards zero + // (the naive approach of using arrow_vendored::date::floor() would + // result in UB for very large negative timestamps, 
similarly as + // https://github.com/HowardHinnant/date/issues/696) + auto timepoint_days = std::chrono::time_point_cast(timepoint); + Duration since_midnight; + if (timepoint_days <= timepoint) { + // Year >= 1970 + since_midnight = timepoint - timepoint_days; + } else { + // Year < 1970 + since_midnight = days(1) - (timepoint_days - timepoint); + timepoint_days -= days(1); + } + + // YYYY_MM_DD " " HH_MM_SS "Z"? + constexpr size_t buffer_size = + detail::BufferSizeYYYY_MM_DD() + 1 + detail::BufferSizeHH_MM_SS() + 1; + + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + if (timezone_.size() > 0) { + detail::FormatOneChar('Z', &cursor); + } + detail::FormatHH_MM_SS(arrow_vendored::date::make_time(since_midnight), &cursor); + detail::FormatOneChar(' ', &cursor); + detail::FormatYYYY_MM_DD(timepoint_days, &cursor); + return append(detail::ViewDigitBuffer(buffer, cursor)); + } + + template + Return operator()(value_type value, Appender&& append) { + return util::VisitDuration(unit_, *this, value, std::forward(append)); + } + + private: + TimeUnit::type unit_; + std::string timezone_; +}; + +template +class StringFormatter> { + public: + using value_type = typename T::c_type; + + explicit StringFormatter(const DataType* type) + : unit_(checked_cast(*type).unit()) {} + + template + Return operator()(Duration, value_type count, Appender&& append) { + const Duration since_midnight{count}; + if (!ARROW_PREDICT_TRUE(detail::IsTimeInRange(since_midnight))) { + return detail::FormatOutOfRange(count, append); + } + + constexpr size_t buffer_size = detail::BufferSizeHH_MM_SS(); + + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatHH_MM_SS(arrow_vendored::date::make_time(since_midnight), &cursor); + return append(detail::ViewDigitBuffer(buffer, cursor)); + } + + template + Return operator()(value_type value, Appender&& append) { + return util::VisitDuration(unit_, *this, value, std::forward(append)); + } + + private: + 
TimeUnit::type unit_; +}; + +template <> +class StringFormatter { + public: + using value_type = MonthIntervalType::c_type; + + explicit StringFormatter(const DataType*) {} + + template + Return operator()(value_type interval, Appender&& append) { + constexpr size_t buffer_size = + /*'m'*/ 3 + /*negative signs*/ 1 + + /*months*/ detail::Digits10(std::numeric_limits::max()); + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatOneChar('M', &cursor); + detail::FormatAllDigits(detail::Abs(interval), &cursor); + if (interval < 0) detail::FormatOneChar('-', &cursor); + + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +template <> +class StringFormatter { + public: + using value_type = DayTimeIntervalType::DayMilliseconds; + + explicit StringFormatter(const DataType*) {} + + template + Return operator()(value_type interval, Appender&& append) { + constexpr size_t buffer_size = + /*d, ms*/ 3 + /*negative signs*/ 2 + + /*days/milliseconds*/ 2 * detail::Digits10(std::numeric_limits::max()); + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatOneChar('s', &cursor); + detail::FormatOneChar('m', &cursor); + detail::FormatAllDigits(detail::Abs(interval.milliseconds), &cursor); + if (interval.milliseconds < 0) detail::FormatOneChar('-', &cursor); + + detail::FormatOneChar('d', &cursor); + detail::FormatAllDigits(detail::Abs(interval.days), &cursor); + if (interval.days < 0) detail::FormatOneChar('-', &cursor); + + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +template <> +class StringFormatter { + public: + using value_type = MonthDayNanoIntervalType::MonthDayNanos; + + explicit StringFormatter(const DataType*) {} + + template + Return operator()(value_type interval, Appender&& append) { + constexpr size_t buffer_size = + /*m, d, ns*/ 4 + /*negative signs*/ 3 + + /*months/days*/ 2 * detail::Digits10(std::numeric_limits::max()) + + /*nanoseconds*/ 
detail::Digits10(std::numeric_limits::max()); + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatOneChar('s', &cursor); + detail::FormatOneChar('n', &cursor); + detail::FormatAllDigits(detail::Abs(interval.nanoseconds), &cursor); + if (interval.nanoseconds < 0) detail::FormatOneChar('-', &cursor); + + detail::FormatOneChar('d', &cursor); + detail::FormatAllDigits(detail::Abs(interval.days), &cursor); + if (interval.days < 0) detail::FormatOneChar('-', &cursor); + + detail::FormatOneChar('M', &cursor); + detail::FormatAllDigits(detail::Abs(interval.months), &cursor); + if (interval.months < 0) detail::FormatOneChar('-', &cursor); + + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/future.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/future.h new file mode 100644 index 0000000000000000000000000000000000000000..0aa2842703712d0245f47c2b0e1885067a4f8f90 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/future.h @@ -0,0 +1,882 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/type_traits.h" +#include "arrow/util/config.h" +#include "arrow/util/functional.h" +#include "arrow/util/macros.h" +#include "arrow/util/tracing.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +template +struct EnsureFuture; + +namespace detail { + +template +struct is_future : std::false_type {}; + +template +struct is_future> : std::true_type {}; + +template +struct result_of; + +template +struct result_of()(std::declval()...))>> { + using type = decltype(std::declval()(std::declval()...)); +}; + +template +using result_of_t = typename result_of::type; + +// Helper to find the synchronous counterpart for a Future +template +struct SyncType { + using type = Result; +}; + +template <> +struct SyncType { + using type = Status; +}; + +template +using first_arg_is_status = + std::is_same>::type, + Status>; + +template > +using if_has_no_args = typename std::conditional::type; + +/// Creates a callback that can be added to a future to mark a `dest` future finished +template +struct MarkNextFinished {}; + +/// If the source and dest are both empty we can pass on the status +template +struct MarkNextFinished { + void operator()(const Status& status) && { next.MarkFinished(status); } + Dest next; +}; + +/// If the source is not empty but the dest is then we can take the +/// status out of the result +template +struct MarkNextFinished { + void operator()(const Result& res) && { + next.MarkFinished(internal::Empty::ToResult(res.status())); + } + Dest next; +}; + +/// If neither are empty we pass on the result +template +struct MarkNextFinished { + void operator()(const Result& res) && { + next.MarkFinished(res); + } + Dest next; +}; + +/// Helper that contains information about how to apply a continuation +struct ContinueFuture { 
+ template + struct ForReturnImpl; + + template + using ForReturn = typename ForReturnImpl::type; + + template + using ForSignature = ForReturn>; + + // If the callback returns void then we return Future<> that always finishes OK. + template , + typename NextFuture = ForReturn> + typename std::enable_if::value>::type operator()( + NextFuture next, ContinueFunc&& f, Args&&... a) const { + std::forward(f)(std::forward(a)...); + next.MarkFinished(); + } + + /// If the callback returns a non-future then we return Future + /// and mark the future finished with the callback result. It will get promoted + /// to Result as part of MarkFinished if it isn't already. + /// + /// If the callback returns Status and we return Future<> then also send the callback + /// result as-is to the destination future. + template , + typename NextFuture = ForReturn> + typename std::enable_if< + !std::is_void::value && !is_future::value && + (!NextFuture::is_empty || std::is_same::value)>::type + operator()(NextFuture next, ContinueFunc&& f, Args&&... a) const { + next.MarkFinished(std::forward(f)(std::forward(a)...)); + } + + /// If the callback returns a Result and the next future is Future<> then we mark + /// the future finished with the callback result. + /// + /// It may seem odd that the next future is Future<> when the callback returns a + /// result but this can occur if the OnFailure callback returns a result while the + /// OnSuccess callback is void/Status (e.g. you would get this calling the one-arg + /// version of Then with an OnSuccess callback that returns void) + template , + typename NextFuture = ForReturn> + typename std::enable_if::value && + !is_future::value && NextFuture::is_empty && + !std::is_same::value>::type + operator()(NextFuture next, ContinueFunc&& f, Args&&... a) const { + next.MarkFinished(std::forward(f)(std::forward(a)...).status()); + } + + /// If the callback returns a Future then we return Future. 
We create a new + /// future and add a callback to the future given to us by the user that forwards the + /// result to the future we just created + template , + typename NextFuture = ForReturn> + typename std::enable_if::value>::type operator()( + NextFuture next, ContinueFunc&& f, Args&&... a) const { + ContinueResult signal_to_complete_next = + std::forward(f)(std::forward(a)...); + MarkNextFinished callback{std::move(next)}; + signal_to_complete_next.AddCallback(std::move(callback)); + } + + /// Helpers to conditionally ignore arguments to ContinueFunc + template + void IgnoringArgsIf(std::true_type, NextFuture&& next, ContinueFunc&& f, + Args&&...) const { + operator()(std::forward(next), std::forward(f)); + } + template + void IgnoringArgsIf(std::false_type, NextFuture&& next, ContinueFunc&& f, + Args&&... a) const { + operator()(std::forward(next), std::forward(f), + std::forward(a)...); + } +}; + +/// Helper struct which tells us what kind of Future gets returned from `Then` based on +/// the return type of the OnSuccess callback +template <> +struct ContinueFuture::ForReturnImpl { + using type = Future<>; +}; + +template <> +struct ContinueFuture::ForReturnImpl { + using type = Future<>; +}; + +template +struct ContinueFuture::ForReturnImpl { + using type = Future; +}; + +template +struct ContinueFuture::ForReturnImpl> { + using type = Future; +}; + +template +struct ContinueFuture::ForReturnImpl> { + using type = Future; +}; + +} // namespace detail + +/// A Future's execution or completion status +enum class FutureState : int8_t { PENDING, SUCCESS, FAILURE }; + +inline bool IsFutureFinished(FutureState state) { return state != FutureState::PENDING; } + +/// \brief Describe whether the callback should be scheduled or run synchronously +enum class ShouldSchedule { + /// Always run the callback synchronously (the default) + Never = 0, + /// Schedule a new task only if the future is not finished when the + /// callback is added + IfUnfinished = 1, + /// 
Always schedule the callback as a new task + Always = 2, + /// Schedule a new task only if it would run on an executor other than + /// the specified executor. + IfDifferentExecutor = 3, +}; + +/// \brief Options that control how a continuation is run +struct CallbackOptions { + /// Describe whether the callback should be run synchronously or scheduled + ShouldSchedule should_schedule = ShouldSchedule::Never; + /// If the callback is scheduled then this is the executor it should be scheduled + /// on. If this is NULL then should_schedule must be Never + internal::Executor* executor = NULLPTR; + + static CallbackOptions Defaults() { return {}; } +}; + +// Untyped private implementation +class ARROW_EXPORT FutureImpl : public std::enable_shared_from_this { + public: + FutureImpl(); + virtual ~FutureImpl() = default; + + FutureState state() { return state_.load(); } + + static std::unique_ptr Make(); + static std::unique_ptr MakeFinished(FutureState state); + +#ifdef ARROW_WITH_OPENTELEMETRY + void SetSpan(util::tracing::Span* span) { span_ = span; } +#endif + + // Future API + void MarkFinished(); + void MarkFailed(); + void Wait(); + bool Wait(double seconds); + template + Result* CastResult() const { + return static_cast*>(result_.get()); + } + + using Callback = internal::FnOnce; + void AddCallback(Callback callback, CallbackOptions opts); + bool TryAddCallback(const std::function& callback_factory, + CallbackOptions opts); + + std::atomic state_{FutureState::PENDING}; + + // Type erased storage for arbitrary results + // XXX small objects could be stored inline instead of boxed in a pointer + using Storage = std::unique_ptr; + Storage result_{NULLPTR, NULLPTR}; + + struct CallbackRecord { + Callback callback; + CallbackOptions options; + }; + std::vector callbacks_; +#ifdef ARROW_WITH_OPENTELEMETRY + util::tracing::Span* span_ = NULLPTR; +#endif +}; + +// --------------------------------------------------------------------- +// Public API + +/// \brief 
EXPERIMENTAL A std::future-like class with more functionality. +/// +/// A Future represents the results of a past or future computation. +/// The Future API has two sides: a producer side and a consumer side. +/// +/// The producer API allows creating a Future and setting its result or +/// status, possibly after running a computation function. +/// +/// The consumer API allows querying a Future's current state, wait for it +/// to complete, and composing futures with callbacks. +template +class [[nodiscard]] Future { + public: + using ValueType = T; + using SyncType = typename detail::SyncType::type; + static constexpr bool is_empty = std::is_same::value; + // The default constructor creates an invalid Future. Use Future::Make() + // for a valid Future. This constructor is mostly for the convenience + // of being able to presize a vector of Futures. + Future() = default; + +#ifdef ARROW_WITH_OPENTELEMETRY + void SetSpan(util::tracing::Span* span) { impl_->SetSpan(span); } +#endif + + // Consumer API + + bool is_valid() const { return impl_ != NULLPTR; } + + /// \brief Return the Future's current state + /// + /// A return value of PENDING is only indicative, as the Future can complete + /// concurrently. A return value of FAILURE or SUCCESS is definitive, though. + FutureState state() const { + CheckValid(); + return impl_->state(); + } + + /// \brief Whether the Future is finished + /// + /// A false return value is only indicative, as the Future can complete + /// concurrently. A true return value is definitive, though. + bool is_finished() const { + CheckValid(); + return IsFutureFinished(impl_->state()); + } + + /// \brief Wait for the Future to complete and return its Result + const Result& result() const& { + Wait(); + return *GetResult(); + } + + /// \brief Returns an rvalue to the result. This method is potentially unsafe + /// + /// The future is not the unique owner of the result, copies of a future will + /// also point to the same result. 
You must make sure that no other copies + /// of the future exist. Attempts to add callbacks after you move the result + /// will result in undefined behavior. + Result&& MoveResult() { + Wait(); + return std::move(*GetResult()); + } + + /// \brief Wait for the Future to complete and return its Status + const Status& status() const { return result().status(); } + + /// \brief Future is convertible to Future<>, which views only the + /// Status of the original. Marking the returned Future Finished is not supported. + explicit operator Future<>() const { + Future<> status_future; + status_future.impl_ = impl_; + return status_future; + } + + /// \brief Wait for the Future to complete + void Wait() const { + CheckValid(); + impl_->Wait(); + } + + /// \brief Wait for the Future to complete, or for the timeout to expire + /// + /// `true` is returned if the Future completed, `false` if the timeout expired. + /// Note a `false` value is only indicative, as the Future can complete + /// concurrently. + bool Wait(double seconds) const { + CheckValid(); + return impl_->Wait(seconds); + } + + // Producer API + + /// \brief Producer API: mark Future finished + /// + /// The Future's result is set to `res`. + void MarkFinished(Result res) { DoMarkFinished(std::move(res)); } + + /// \brief Mark a Future<> completed with the provided Status. + template ::value>::type> + void MarkFinished(Status s = Status::OK()) { + return DoMarkFinished(E::ToResult(std::move(s))); + } + + /// \brief Producer API: instantiate a valid Future + /// + /// The Future's state is initialized with PENDING. If you are creating a future with + /// this method you must ensure that future is eventually completed (with success or + /// failure). Creating a future, returning it, and never completing the future can lead + /// to memory leaks (for example, see Loop). 
+ static Future Make() { + Future fut; + fut.impl_ = FutureImpl::Make(); + return fut; + } + + /// \brief Producer API: instantiate a finished Future + static Future MakeFinished(Result res) { + Future fut; + fut.InitializeFromResult(std::move(res)); + return fut; + } + + /// \brief Make a finished Future<> with the provided Status. + template ::value>::type> + static Future<> MakeFinished(Status s = Status::OK()) { + return MakeFinished(E::ToResult(std::move(s))); + } + + struct WrapResultOnComplete { + template + struct Callback { + void operator()(const FutureImpl& impl) && { + std::move(on_complete)(*impl.CastResult()); + } + OnComplete on_complete; + }; + }; + + struct WrapStatusyOnComplete { + template + struct Callback { + static_assert(std::is_same::value, + "Only callbacks for Future<> should accept Status and not Result"); + + void operator()(const FutureImpl& impl) && { + std::move(on_complete)(impl.CastResult()->status()); + } + OnComplete on_complete; + }; + }; + + template + using WrapOnComplete = typename std::conditional< + detail::first_arg_is_status::value, WrapStatusyOnComplete, + WrapResultOnComplete>::type::template Callback; + + /// \brief Consumer API: Register a callback to run when this future completes + /// + /// The callback should receive the result of the future (const Result&) + /// For a void or statusy future this should be (const Status&) + /// + /// There is no guarantee to the order in which callbacks will run. In + /// particular, callbacks added while the future is being marked complete + /// may be executed immediately, ahead of, or even the same time as, other + /// callbacks that have been previously added. + /// + /// WARNING: callbacks may hold arbitrary references, including cyclic references. 
+ /// Since callbacks will only be destroyed after they are invoked, this can lead to + /// memory leaks if a Future is never marked finished (abandoned): + /// + /// { + /// auto fut = Future<>::Make(); + /// fut.AddCallback([fut]() {}); + /// } + /// + /// In this example `fut` falls out of scope but is not destroyed because it holds a + /// cyclic reference to itself through the callback. + template > + void AddCallback(OnComplete on_complete, + CallbackOptions opts = CallbackOptions::Defaults()) const { + // We know impl_ will not be dangling when invoking callbacks because at least one + // thread will be waiting for MarkFinished to return. Thus it's safe to keep a + // weak reference to impl_ here + impl_->AddCallback(Callback{std::move(on_complete)}, opts); + } + + /// \brief Overload of AddCallback that will return false instead of running + /// synchronously + /// + /// This overload will guarantee the callback is never run synchronously. If the future + /// is already finished then it will simply return false. This can be useful to avoid + /// stack overflow in a situation where you have recursive Futures. For an example + /// see the Loop function + /// + /// Takes in a callback factory function to allow moving callbacks (the factory function + /// will only be called if the callback can successfully be added) + /// + /// Returns true if a callback was actually added and false if the callback failed + /// to add because the future was marked complete. 
+ template , + typename Callback = WrapOnComplete> + bool TryAddCallback(CallbackFactory callback_factory, + CallbackOptions opts = CallbackOptions::Defaults()) const { + return impl_->TryAddCallback([&]() { return Callback{callback_factory()}; }, opts); + } + + template + struct ThenOnComplete { + static constexpr bool has_no_args = + internal::call_traits::argument_count::value == 0; + + using ContinuedFuture = detail::ContinueFuture::ForSignature< + detail::if_has_no_args>; + + static_assert( + std::is_same, + ContinuedFuture>::value, + "OnSuccess and OnFailure must continue with the same future type"); + + struct DummyOnSuccess { + void operator()(const T&); + }; + using OnSuccessArg = typename std::decay>>::type; + + static_assert( + !std::is_same::type>::value, + "OnSuccess' argument should not be a Result"); + + void operator()(const Result& result) && { + detail::ContinueFuture continue_future; + if (ARROW_PREDICT_TRUE(result.ok())) { + // move on_failure to a(n immediately destroyed) temporary to free its resources + ARROW_UNUSED(OnFailure(std::move(on_failure))); + continue_future.IgnoringArgsIf( + detail::if_has_no_args{}, + std::move(next), std::move(on_success), result.ValueOrDie()); + } else { + ARROW_UNUSED(OnSuccess(std::move(on_success))); + continue_future(std::move(next), std::move(on_failure), result.status()); + } + } + + OnSuccess on_success; + OnFailure on_failure; + ContinuedFuture next; + }; + + template + struct PassthruOnFailure { + using ContinuedFuture = detail::ContinueFuture::ForSignature< + detail::if_has_no_args>; + + Result operator()(const Status& s) { return s; } + }; + + /// \brief Consumer API: Register a continuation to run when this future completes + /// + /// The continuation will run in the same thread that called MarkFinished (whatever + /// callback is registered with this function will run before MarkFinished returns). 
+ /// Avoid long-running callbacks in favor of submitting a task to an Executor and + /// returning the future. + /// + /// Two callbacks are supported: + /// - OnSuccess, called with the result (const ValueType&) on successful completion. + /// for an empty future this will be called with nothing () + /// - OnFailure, called with the error (const Status&) on failed completion. + /// This callback is optional and defaults to a passthru of any errors. + /// + /// Then() returns a Future whose ValueType is derived from the return type of the + /// callbacks. If a callback returns: + /// - void, a Future<> will be returned which will completes successfully as soon + /// as the callback runs. + /// - Status, a Future<> will be returned which will complete with the returned Status + /// as soon as the callback runs. + /// - V or Result, a Future will be returned which will complete with the result + /// of invoking the callback as soon as the callback runs. + /// - Future, a Future will be returned which will be marked complete when the + /// future returned by the callback completes (and will complete with the same + /// result). + /// + /// The continued Future type must be the same for both callbacks. + /// + /// Note that OnFailure can swallow errors, allowing continued Futures to successfully + /// complete even if this Future fails. + /// + /// If this future is already completed then the callback will be run immediately + /// and the returned future may already be marked complete. + /// + /// See AddCallback for general considerations when writing callbacks. 
+ template , + typename OnComplete = ThenOnComplete, + typename ContinuedFuture = typename OnComplete::ContinuedFuture> + ContinuedFuture Then(OnSuccess on_success, OnFailure on_failure = {}, + CallbackOptions options = CallbackOptions::Defaults()) const { + auto next = ContinuedFuture::Make(); + AddCallback(OnComplete{std::forward(on_success), + std::forward(on_failure), next}, + options); + return next; + } + + /// \brief Implicit constructor to create a finished future from a value + Future(ValueType val) : Future() { // NOLINT runtime/explicit + impl_ = FutureImpl::MakeFinished(FutureState::SUCCESS); + SetResult(std::move(val)); + } + + /// \brief Implicit constructor to create a future from a Result, enabling use + /// of macros like ARROW_ASSIGN_OR_RAISE. + Future(Result res) : Future() { // NOLINT runtime/explicit + if (ARROW_PREDICT_TRUE(res.ok())) { + impl_ = FutureImpl::MakeFinished(FutureState::SUCCESS); + } else { + impl_ = FutureImpl::MakeFinished(FutureState::FAILURE); + } + SetResult(std::move(res)); + } + + /// \brief Implicit constructor to create a future from a Status, enabling use + /// of macros like ARROW_RETURN_NOT_OK. 
+ Future(Status s) // NOLINT runtime/explicit + : Future(Result(std::move(s))) {} + + protected: + void InitializeFromResult(Result res) { + if (ARROW_PREDICT_TRUE(res.ok())) { + impl_ = FutureImpl::MakeFinished(FutureState::SUCCESS); + } else { + impl_ = FutureImpl::MakeFinished(FutureState::FAILURE); + } + SetResult(std::move(res)); + } + + void Initialize() { impl_ = FutureImpl::Make(); } + + Result* GetResult() const { return impl_->CastResult(); } + + void SetResult(Result res) { + impl_->result_ = {new Result(std::move(res)), + [](void* p) { delete static_cast*>(p); }}; + } + + void DoMarkFinished(Result res) { + SetResult(std::move(res)); + + if (ARROW_PREDICT_TRUE(GetResult()->ok())) { + impl_->MarkFinished(); + } else { + impl_->MarkFailed(); + } + } + + void CheckValid() const { +#ifndef NDEBUG + if (!is_valid()) { + Status::Invalid("Invalid Future (default-initialized?)").Abort(); + } +#endif + } + + explicit Future(std::shared_ptr impl) : impl_(std::move(impl)) {} + + std::shared_ptr impl_; + + friend struct detail::ContinueFuture; + + template + friend class Future; + friend class WeakFuture; + + FRIEND_TEST(FutureRefTest, ChainRemoved); + FRIEND_TEST(FutureRefTest, TailRemoved); + FRIEND_TEST(FutureRefTest, HeadRemoved); +}; + +template +typename Future::SyncType FutureToSync(const Future& fut) { + return fut.result(); +} + +template <> +inline typename Future::SyncType FutureToSync( + const Future& fut) { + return fut.status(); +} + +template <> +inline Future<>::Future(Status s) : Future(internal::Empty::ToResult(std::move(s))) {} + +template +class WeakFuture { + public: + explicit WeakFuture(const Future& future) : impl_(future.impl_) {} + + Future get() { return Future{impl_.lock()}; } + + private: + std::weak_ptr impl_; +}; + +/// \defgroup future-utilities Functions for working with Futures +/// @{ + +/// If a Result holds an error instead of a Future, construct a finished Future +/// holding that error. 
+template +static Future DeferNotOk(Result> maybe_future) { + if (ARROW_PREDICT_FALSE(!maybe_future.ok())) { + return Future::MakeFinished(std::move(maybe_future).status()); + } + return std::move(maybe_future).MoveValueUnsafe(); +} + +/// \brief Create a Future which completes when all of `futures` complete. +/// +/// The future's result is a vector of the results of `futures`. +/// Note that this future will never be marked "failed"; failed results +/// will be stored in the result vector alongside successful results. +template +Future>> All(std::vector> futures) { + struct State { + explicit State(std::vector> f) + : futures(std::move(f)), n_remaining(futures.size()) {} + + std::vector> futures; + std::atomic n_remaining; + }; + + if (futures.size() == 0) { + return {std::vector>{}}; + } + + auto state = std::make_shared(std::move(futures)); + + auto out = Future>>::Make(); + for (const Future& future : state->futures) { + future.AddCallback([state, out](const Result&) mutable { + if (state->n_remaining.fetch_sub(1) != 1) return; + + std::vector> results(state->futures.size()); + for (size_t i = 0; i < results.size(); ++i) { + results[i] = state->futures[i].result(); + } + out.MarkFinished(std::move(results)); + }); + } + return out; +} + +/// \brief Create a Future which completes when all of `futures` complete. +/// +/// The future will be marked complete if all `futures` complete +/// successfully. Otherwise, it will be marked failed with the status of +/// the first failing future. +ARROW_EXPORT +Future<> AllComplete(const std::vector>& futures); + +/// \brief Create a Future which completes when all of `futures` complete. +/// +/// The future will finish with an ok status if all `futures` finish with +/// an ok status. Otherwise, it will be marked failed with the status of +/// one of the failing futures. +/// +/// Unlike AllComplete this Future will not complete immediately when a +/// failure occurs. It will wait until all futures have finished. 
+ARROW_EXPORT +Future<> AllFinished(const std::vector>& futures); + +/// @} + +struct Continue { + template + operator std::optional() && { // NOLINT explicit + return {}; + } +}; + +template +std::optional Break(T break_value = {}) { + return std::optional{std::move(break_value)}; +} + +template +using ControlFlow = std::optional; + +/// \brief Loop through an asynchronous sequence +/// +/// \param[in] iterate A generator of Future>. On completion +/// of each yielded future the resulting ControlFlow will be examined. A Break will +/// terminate the loop, while a Continue will re-invoke `iterate`. +/// +/// \return A future which will complete when a Future returned by iterate completes with +/// a Break +template ::ValueType, + typename BreakValueType = typename Control::value_type> +Future Loop(Iterate iterate) { + struct Callback { + bool CheckForTermination(const Result& control_res) { + if (!control_res.ok()) { + break_fut.MarkFinished(control_res.status()); + return true; + } + if (control_res->has_value()) { + break_fut.MarkFinished(**control_res); + return true; + } + return false; + } + + void operator()(const Result& maybe_control) && { + if (CheckForTermination(maybe_control)) return; + + auto control_fut = iterate(); + while (true) { + if (control_fut.TryAddCallback([this]() { return *this; })) { + // Adding a callback succeeded; control_fut was not finished + // and we must wait to CheckForTermination. + return; + } + // Adding a callback failed; control_fut was finished and we + // can CheckForTermination immediately. This also avoids recursion and potential + // stack overflow. + if (CheckForTermination(control_fut.result())) return; + + control_fut = iterate(); + } + } + + Iterate iterate; + + // If the future returned by control_fut is never completed then we will be hanging on + // to break_fut forever even if the listener has given up listening on it. 
Instead we + // rely on the fact that a producer (the caller of Future<>::Make) is always + // responsible for completing the futures they create. + // TODO: Could avoid this kind of situation with "future abandonment" similar to mesos + Future break_fut; + }; + + auto break_fut = Future::Make(); + auto control_fut = iterate(); + control_fut.AddCallback(Callback{std::move(iterate), break_fut}); + + return break_fut; +} + +inline Future<> ToFuture(Status status) { + return Future<>::MakeFinished(std::move(status)); +} + +template +Future ToFuture(T value) { + return Future::MakeFinished(std::move(value)); +} + +template +Future ToFuture(Result maybe_value) { + return Future::MakeFinished(std::move(maybe_value)); +} + +template +Future ToFuture(Future fut) { + return fut; +} + +template +struct EnsureFuture { + using type = decltype(ToFuture(std::declval())); +}; + +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h new file mode 100644 index 0000000000000000000000000000000000000000..2de9f4153248f0acebf4589fc492eed912a847a9 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h @@ -0,0 +1,944 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Private header, not to be exported + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array/builder_binary.h" +#include "arrow/buffer_builder.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/type_traits.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_builders.h" +#include "arrow/util/endian.h" +#include "arrow/util/logging.h" +#include "arrow/util/macros.h" +#include "arrow/util/ubsan.h" + +#define XXH_INLINE_ALL + +#include "arrow/vendored/xxhash.h" // IWYU pragma: keep + +namespace arrow { +namespace internal { + +// XXX would it help to have a 32-bit hash value on large datasets? +typedef uint64_t hash_t; + +// Notes about the choice of a hash function. +// - XXH3 is extremely fast on most data sizes, from small to huge; +// faster even than HW CRC-based hashing schemes +// - our custom hash function for tiny values (< 16 bytes) is still +// significantly faster (~30%), at least on this machine and compiler + +template +inline hash_t ComputeStringHash(const void* data, int64_t length); + +/// \brief A hash function for bitmaps that can handle offsets and lengths in +/// terms of number of bits. The hash only depends on the bits actually hashed. +/// +/// It's the caller's responsibility to ensure that bits_offset + num_bits are +/// readable from the bitmap. +/// +/// \pre bits_offset >= 0 +/// \pre num_bits >= 0 +/// \pre (bits_offset + num_bits + 7) / 8 <= readable length in bytes from bitmap +/// +/// \param bitmap The pointer to the bitmap. +/// \param seed The seed for the hash function (useful when chaining hash functions). +/// \param bits_offset The offset in bits relative to the start of the bitmap. +/// \param num_bits The number of bits after the offset to be hashed. 
+ARROW_EXPORT hash_t ComputeBitmapHash(const uint8_t* bitmap, hash_t seed, + int64_t bits_offset, int64_t num_bits); + +template +struct ScalarHelperBase { + static bool CompareScalars(Scalar u, Scalar v) { return u == v; } + + static hash_t ComputeHash(const Scalar& value) { + // Generic hash computation for scalars. Simply apply the string hash + // to the bit representation of the value. + + // XXX in the case of FP values, we'd like equal values to have the same hash, + // even if they have different bit representations... + return ComputeStringHash(&value, sizeof(value)); + } +}; + +template +struct ScalarHelper : public ScalarHelperBase {}; + +template +struct ScalarHelper::value>> + : public ScalarHelperBase { + // ScalarHelper specialization for integers + + static hash_t ComputeHash(const Scalar& value) { + // Faster hash computation for integers. + + // Two of xxhash's prime multipliers (which are chosen for their + // bit dispersion properties) + static constexpr uint64_t multipliers[] = {11400714785074694791ULL, + 14029467366897019727ULL}; + + // Multiplying by the prime number mixes the low bits into the high bits, + // then byte-swapping (which is a single CPU instruction) allows the + // combined high and low bits to participate in the initial hash table index. + auto h = static_cast(value); + return bit_util::ByteSwap(multipliers[AlgNum] * h); + } +}; + +template +struct ScalarHelper::value>> + : public ScalarHelperBase { + // ScalarHelper specialization for std::string_view + + static hash_t ComputeHash(std::string_view value) { + return ComputeStringHash(value.data(), static_cast(value.size())); + } +}; + +template +struct ScalarHelper::value>> + : public ScalarHelperBase { + // ScalarHelper specialization for reals + + static bool CompareScalars(Scalar u, Scalar v) { + if (std::isnan(u)) { + // XXX should we do a bit-precise comparison? 
+ return std::isnan(v); + } + return u == v; + } +}; + +template +hash_t ComputeStringHash(const void* data, int64_t length) { + if (ARROW_PREDICT_TRUE(length <= 16)) { + // Specialize for small hash strings, as they are quite common as + // hash table keys. Even XXH3 isn't quite as fast. + auto p = reinterpret_cast(data); + auto n = static_cast(length); + if (n <= 8) { + if (n <= 3) { + if (n == 0) { + return 1U; + } + uint32_t x = (n << 24) ^ (p[0] << 16) ^ (p[n / 2] << 8) ^ p[n - 1]; + return ScalarHelper::ComputeHash(x); + } + // 4 <= length <= 8 + // We can read the string as two overlapping 32-bit ints, apply + // different hash functions to each of them in parallel, then XOR + // the results + uint32_t x, y; + hash_t hx, hy; + x = util::SafeLoadAs(p + n - 4); + y = util::SafeLoadAs(p); + hx = ScalarHelper::ComputeHash(x); + hy = ScalarHelper::ComputeHash(y); + return n ^ hx ^ hy; + } + // 8 <= length <= 16 + // Apply the same principle as above + uint64_t x, y; + hash_t hx, hy; + x = util::SafeLoadAs(p + n - 8); + y = util::SafeLoadAs(p); + hx = ScalarHelper::ComputeHash(x); + hy = ScalarHelper::ComputeHash(y); + return n ^ hx ^ hy; + } + +#if XXH3_SECRET_SIZE_MIN != 136 +#error XXH3_SECRET_SIZE_MIN changed, please fix kXxh3Secrets +#endif + + // XXH3_64bits_withSeed generates a secret based on the seed, which is too slow. + // Instead, we use hard-coded random secrets. To maximize cache efficiency, + // they reuse the same memory area. 
+ static constexpr unsigned char kXxh3Secrets[XXH3_SECRET_SIZE_MIN + 1] = { + 0xe7, 0x8b, 0x13, 0xf9, 0xfc, 0xb5, 0x8e, 0xef, 0x81, 0x48, 0x2c, 0xbf, 0xf9, 0x9f, + 0xc1, 0x1e, 0x43, 0x6d, 0xbf, 0xa6, 0x6d, 0xb5, 0x72, 0xbc, 0x97, 0xd8, 0x61, 0x24, + 0x0f, 0x12, 0xe3, 0x05, 0x21, 0xf7, 0x5c, 0x66, 0x67, 0xa5, 0x65, 0x03, 0x96, 0x26, + 0x69, 0xd8, 0x29, 0x20, 0xf8, 0xc7, 0xb0, 0x3d, 0xdd, 0x7d, 0x18, 0xa0, 0x60, 0x75, + 0x92, 0xa4, 0xce, 0xba, 0xc0, 0x77, 0xf4, 0xac, 0xb7, 0x03, 0x53, 0xf0, 0x98, 0xce, + 0xe6, 0x2b, 0x20, 0xc7, 0x82, 0x91, 0xab, 0xbf, 0x68, 0x5c, 0x62, 0x4d, 0x33, 0xa3, + 0xe1, 0xb3, 0xff, 0x97, 0x54, 0x4c, 0x44, 0x34, 0xb5, 0xb9, 0x32, 0x4c, 0x75, 0x42, + 0x89, 0x53, 0x94, 0xd4, 0x9f, 0x2b, 0x76, 0x4d, 0x4e, 0xe6, 0xfa, 0x15, 0x3e, 0xc1, + 0xdb, 0x71, 0x4b, 0x2c, 0x94, 0xf5, 0xfc, 0x8c, 0x89, 0x4b, 0xfb, 0xc1, 0x82, 0xa5, + 0x6a, 0x53, 0xf9, 0x4a, 0xba, 0xce, 0x1f, 0xc0, 0x97, 0x1a, 0x87}; + + static_assert(AlgNum < 2, "AlgNum too large"); + static constexpr auto secret = kXxh3Secrets + AlgNum; + return XXH3_64bits_withSecret(data, static_cast(length), secret, + XXH3_SECRET_SIZE_MIN); +} + +// XXX add a HashEq struct with both hash and compare functions? 
+ +// ---------------------------------------------------------------------- +// An open-addressing insert-only hash table (no deletes) + +template +class HashTable { + public: + static constexpr hash_t kSentinel = 0ULL; + static constexpr int64_t kLoadFactor = 2UL; + + struct Entry { + hash_t h; + Payload payload; + + // An entry is valid if the hash is different from the sentinel value + operator bool() const { return h != kSentinel; } + }; + + HashTable(MemoryPool* pool, uint64_t capacity) : entries_builder_(pool) { + DCHECK_NE(pool, nullptr); + // Minimum of 32 elements + capacity = std::max(capacity, 32UL); + capacity_ = bit_util::NextPower2(capacity); + capacity_mask_ = capacity_ - 1; + size_ = 0; + + DCHECK_OK(UpsizeBuffer(capacity_)); + } + + // Lookup with non-linear probing + // cmp_func should have signature bool(const Payload*). + // Return a (Entry*, found) pair. + template + std::pair Lookup(hash_t h, CmpFunc&& cmp_func) { + auto p = Lookup(h, entries_, capacity_mask_, + std::forward(cmp_func)); + return {&entries_[p.first], p.second}; + } + + template + std::pair Lookup(hash_t h, CmpFunc&& cmp_func) const { + auto p = Lookup(h, entries_, capacity_mask_, + std::forward(cmp_func)); + return {&entries_[p.first], p.second}; + } + + Status Insert(Entry* entry, hash_t h, const Payload& payload) { + // Ensure entry is empty before inserting + assert(!*entry); + entry->h = FixHash(h); + entry->payload = payload; + ++size_; + + if (ARROW_PREDICT_FALSE(NeedUpsizing())) { + // Resize less frequently since it is expensive + return Upsize(capacity_ * kLoadFactor * 2); + } + return Status::OK(); + } + + uint64_t size() const { return size_; } + + // Visit all non-empty entries in the table + // The visit_func should have signature void(const Entry*) + template + void VisitEntries(VisitFunc&& visit_func) const { + for (uint64_t i = 0; i < capacity_; i++) { + const auto& entry = entries_[i]; + if (entry) { + visit_func(&entry); + } + } + } + + protected: + // 
NoCompare is for when the value is known not to exist in the table + enum CompareKind { DoCompare, NoCompare }; + + // The workhorse lookup function + template + std::pair Lookup(hash_t h, const Entry* entries, uint64_t size_mask, + CmpFunc&& cmp_func) const { + static constexpr uint8_t perturb_shift = 5; + + uint64_t index, perturb; + const Entry* entry; + + h = FixHash(h); + index = h & size_mask; + perturb = (h >> perturb_shift) + 1U; + + while (true) { + entry = &entries[index]; + if (CompareEntry(h, entry, std::forward(cmp_func))) { + // Found + return {index, true}; + } + if (entry->h == kSentinel) { + // Empty slot + return {index, false}; + } + + // Perturbation logic inspired from CPython's set / dict object. + // The goal is that all 64 bits of the unmasked hash value eventually + // participate in the probing sequence, to minimize clustering. + index = (index + perturb) & size_mask; + perturb = (perturb >> perturb_shift) + 1U; + } + } + + template + bool CompareEntry(hash_t h, const Entry* entry, CmpFunc&& cmp_func) const { + if (CKind == NoCompare) { + return false; + } else { + return entry->h == h && cmp_func(&entry->payload); + } + } + + bool NeedUpsizing() const { + // Keep the load factor <= 1/2 + return size_ * kLoadFactor >= capacity_; + } + + Status UpsizeBuffer(uint64_t capacity) { + RETURN_NOT_OK(entries_builder_.Resize(capacity)); + entries_ = entries_builder_.mutable_data(); + memset(static_cast(entries_), 0, capacity * sizeof(Entry)); + + return Status::OK(); + } + + Status Upsize(uint64_t new_capacity) { + assert(new_capacity > capacity_); + uint64_t new_mask = new_capacity - 1; + assert((new_capacity & new_mask) == 0); // it's a power of two + + // Stash old entries and seal builder, effectively resetting the Buffer + const Entry* old_entries = entries_; + ARROW_ASSIGN_OR_RAISE(auto previous, entries_builder_.FinishWithLength(capacity_)); + // Allocate new buffer + RETURN_NOT_OK(UpsizeBuffer(new_capacity)); + + for (uint64_t i = 0; i < 
capacity_; i++) { + const auto& entry = old_entries[i]; + if (entry) { + // Dummy compare function will not be called + auto p = Lookup(entry.h, entries_, new_mask, + [](const Payload*) { return false; }); + // Lookup (and CompareEntry) ensure that an + // empty slots is always returned + assert(!p.second); + entries_[p.first] = entry; + } + } + capacity_ = new_capacity; + capacity_mask_ = new_mask; + + return Status::OK(); + } + + hash_t FixHash(hash_t h) const { return (h == kSentinel) ? 42U : h; } + + // The number of slots available in the hash table array. + uint64_t capacity_; + uint64_t capacity_mask_; + // The number of used slots in the hash table array. + uint64_t size_; + + Entry* entries_; + TypedBufferBuilder entries_builder_; +}; + +// XXX typedef memo_index_t int32_t ? + +constexpr int32_t kKeyNotFound = -1; + +// ---------------------------------------------------------------------- +// A base class for memoization table. + +class MemoTable { + public: + virtual ~MemoTable() = default; + + virtual int32_t size() const = 0; +}; + +// ---------------------------------------------------------------------- +// A memoization table for memory-cheap scalar values. + +// The memoization table remembers and allows to look up the insertion +// index for each key. 
+ +template class HashTableTemplateType = HashTable> +class ScalarMemoTable : public MemoTable { + public: + explicit ScalarMemoTable(MemoryPool* pool, int64_t entries = 0) + : hash_table_(pool, static_cast(entries)) {} + + int32_t Get(const Scalar& value) const { + auto cmp_func = [value](const Payload* payload) -> bool { + return ScalarHelper::CompareScalars(payload->value, value); + }; + hash_t h = ComputeHash(value); + auto p = hash_table_.Lookup(h, cmp_func); + if (p.second) { + return p.first->payload.memo_index; + } else { + return kKeyNotFound; + } + } + + template + Status GetOrInsert(const Scalar& value, Func1&& on_found, Func2&& on_not_found, + int32_t* out_memo_index) { + auto cmp_func = [value](const Payload* payload) -> bool { + return ScalarHelper::CompareScalars(value, payload->value); + }; + hash_t h = ComputeHash(value); + auto p = hash_table_.Lookup(h, cmp_func); + int32_t memo_index; + if (p.second) { + memo_index = p.first->payload.memo_index; + on_found(memo_index); + } else { + memo_index = size(); + RETURN_NOT_OK(hash_table_.Insert(p.first, h, {value, memo_index})); + on_not_found(memo_index); + } + *out_memo_index = memo_index; + return Status::OK(); + } + + Status GetOrInsert(const Scalar& value, int32_t* out_memo_index) { + return GetOrInsert( + value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index); + } + + int32_t GetNull() const { return null_index_; } + + template + int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) { + int32_t memo_index = GetNull(); + if (memo_index != kKeyNotFound) { + on_found(memo_index); + } else { + null_index_ = memo_index = size(); + on_not_found(memo_index); + } + return memo_index; + } + + int32_t GetOrInsertNull() { + return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {}); + } + + // The number of entries in the memo table +1 if null was added. 
+ // (which is also 1 + the largest memo index) + int32_t size() const override { + return static_cast(hash_table_.size()) + (GetNull() != kKeyNotFound); + } + + // Copy values starting from index `start` into `out_data` + void CopyValues(int32_t start, Scalar* out_data) const { + hash_table_.VisitEntries([=](const HashTableEntry* entry) { + int32_t index = entry->payload.memo_index - start; + if (index >= 0) { + out_data[index] = entry->payload.value; + } + }); + // Zero-initialize the null entry + if (null_index_ != kKeyNotFound) { + int32_t index = null_index_ - start; + if (index >= 0) { + out_data[index] = Scalar{}; + } + } + } + + void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); } + + protected: + struct Payload { + Scalar value; + int32_t memo_index; + }; + + using HashTableType = HashTableTemplateType; + using HashTableEntry = typename HashTableType::Entry; + HashTableType hash_table_; + int32_t null_index_ = kKeyNotFound; + + hash_t ComputeHash(const Scalar& value) const { + return ScalarHelper::ComputeHash(value); + } + + public: + // defined here so that `HashTableType` is visible + // Merge entries from `other_table` into `this->hash_table_`. + Status MergeTable(const ScalarMemoTable& other_table) { + const HashTableType& other_hashtable = other_table.hash_table_; + + other_hashtable.VisitEntries([this](const HashTableEntry* other_entry) { + int32_t unused; + DCHECK_OK(this->GetOrInsert(other_entry->payload.value, &unused)); + }); + // TODO: ARROW-17074 - implement proper error handling + return Status::OK(); + } +}; + +// ---------------------------------------------------------------------- +// A memoization table for small scalar values, using direct indexing + +template +struct SmallScalarTraits {}; + +template <> +struct SmallScalarTraits { + static constexpr int32_t cardinality = 2; + + static uint32_t AsIndex(bool value) { return value ? 
1 : 0; } +}; + +template +struct SmallScalarTraits::value>> { + using Unsigned = typename std::make_unsigned::type; + + static constexpr int32_t cardinality = 1U + std::numeric_limits::max(); + + static uint32_t AsIndex(Scalar value) { return static_cast(value); } +}; + +template class HashTableTemplateType = HashTable> +class SmallScalarMemoTable : public MemoTable { + public: + explicit SmallScalarMemoTable(MemoryPool* pool, int64_t entries = 0) { + std::fill(value_to_index_, value_to_index_ + cardinality + 1, kKeyNotFound); + index_to_value_.reserve(cardinality); + } + + int32_t Get(const Scalar value) const { + auto value_index = AsIndex(value); + return value_to_index_[value_index]; + } + + template + Status GetOrInsert(const Scalar value, Func1&& on_found, Func2&& on_not_found, + int32_t* out_memo_index) { + auto value_index = AsIndex(value); + auto memo_index = value_to_index_[value_index]; + if (memo_index == kKeyNotFound) { + memo_index = static_cast(index_to_value_.size()); + index_to_value_.push_back(value); + value_to_index_[value_index] = memo_index; + DCHECK_LT(memo_index, cardinality + 1); + on_not_found(memo_index); + } else { + on_found(memo_index); + } + *out_memo_index = memo_index; + return Status::OK(); + } + + Status GetOrInsert(const Scalar value, int32_t* out_memo_index) { + return GetOrInsert( + value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index); + } + + int32_t GetNull() const { return value_to_index_[cardinality]; } + + template + int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) { + auto memo_index = GetNull(); + if (memo_index == kKeyNotFound) { + memo_index = value_to_index_[cardinality] = size(); + index_to_value_.push_back(0); + on_not_found(memo_index); + } else { + on_found(memo_index); + } + return memo_index; + } + + int32_t GetOrInsertNull() { + return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {}); + } + + // The number of entries in the memo table + // (which is also 1 + the largest memo index) + 
int32_t size() const override { return static_cast(index_to_value_.size()); } + + // Merge entries from `other_table` into `this`. + Status MergeTable(const SmallScalarMemoTable& other_table) { + for (const Scalar& other_val : other_table.index_to_value_) { + int32_t unused; + RETURN_NOT_OK(this->GetOrInsert(other_val, &unused)); + } + return Status::OK(); + } + + // Copy values starting from index `start` into `out_data` + void CopyValues(int32_t start, Scalar* out_data) const { + DCHECK_GE(start, 0); + DCHECK_LE(static_cast(start), index_to_value_.size()); + int64_t offset = start * static_cast(sizeof(Scalar)); + memcpy(out_data, index_to_value_.data() + offset, (size() - start) * sizeof(Scalar)); + } + + void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); } + + const std::vector& values() const { return index_to_value_; } + + protected: + static constexpr auto cardinality = SmallScalarTraits::cardinality; + static_assert(cardinality <= 256, "cardinality too large for direct-addressed table"); + + uint32_t AsIndex(Scalar value) const { + return SmallScalarTraits::AsIndex(value); + } + + // The last index is reserved for the null element. + int32_t value_to_index_[cardinality + 1]; + std::vector index_to_value_; +}; + +// ---------------------------------------------------------------------- +// A memoization table for variable-sized binary data. + +template +class BinaryMemoTable : public MemoTable { + public: + using builder_offset_type = typename BinaryBuilderT::offset_type; + explicit BinaryMemoTable(MemoryPool* pool, int64_t entries = 0, + int64_t values_size = -1) + : hash_table_(pool, static_cast(entries)), binary_builder_(pool) { + const int64_t data_size = (values_size < 0) ? 
entries * 4 : values_size; + DCHECK_OK(binary_builder_.Resize(entries)); + DCHECK_OK(binary_builder_.ReserveData(data_size)); + } + + int32_t Get(const void* data, builder_offset_type length) const { + hash_t h = ComputeStringHash<0>(data, length); + auto p = Lookup(h, data, length); + if (p.second) { + return p.first->payload.memo_index; + } else { + return kKeyNotFound; + } + } + + int32_t Get(std::string_view value) const { + return Get(value.data(), static_cast(value.length())); + } + + template + Status GetOrInsert(const void* data, builder_offset_type length, Func1&& on_found, + Func2&& on_not_found, int32_t* out_memo_index) { + hash_t h = ComputeStringHash<0>(data, length); + auto p = Lookup(h, data, length); + int32_t memo_index; + if (p.second) { + memo_index = p.first->payload.memo_index; + on_found(memo_index); + } else { + memo_index = size(); + // Insert string value + RETURN_NOT_OK(binary_builder_.Append(static_cast(data), length)); + // Insert hash entry + RETURN_NOT_OK( + hash_table_.Insert(const_cast(p.first), h, {memo_index})); + + on_not_found(memo_index); + } + *out_memo_index = memo_index; + return Status::OK(); + } + + template + Status GetOrInsert(std::string_view value, Func1&& on_found, Func2&& on_not_found, + int32_t* out_memo_index) { + return GetOrInsert(value.data(), static_cast(value.length()), + std::forward(on_found), std::forward(on_not_found), + out_memo_index); + } + + Status GetOrInsert(const void* data, builder_offset_type length, + int32_t* out_memo_index) { + return GetOrInsert( + data, length, [](int32_t i) {}, [](int32_t i) {}, out_memo_index); + } + + Status GetOrInsert(std::string_view value, int32_t* out_memo_index) { + return GetOrInsert(value.data(), static_cast(value.length()), + out_memo_index); + } + + int32_t GetNull() const { return null_index_; } + + template + int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) { + int32_t memo_index = GetNull(); + if (memo_index == kKeyNotFound) { + memo_index = 
null_index_ = size(); + DCHECK_OK(binary_builder_.AppendNull()); + on_not_found(memo_index); + } else { + on_found(memo_index); + } + return memo_index; + } + + int32_t GetOrInsertNull() { + return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {}); + } + + // The number of entries in the memo table + // (which is also 1 + the largest memo index) + int32_t size() const override { + return static_cast(hash_table_.size() + (GetNull() != kKeyNotFound)); + } + + int64_t values_size() const { return binary_builder_.value_data_length(); } + + // Copy (n + 1) offsets starting from index `start` into `out_data` + template + void CopyOffsets(int32_t start, Offset* out_data) const { + DCHECK_LE(start, size()); + + const builder_offset_type* offsets = binary_builder_.offsets_data(); + const builder_offset_type delta = + start < binary_builder_.length() ? offsets[start] : 0; + for (int32_t i = start; i < size(); ++i) { + const builder_offset_type adjusted_offset = offsets[i] - delta; + Offset cast_offset = static_cast(adjusted_offset); + assert(static_cast(cast_offset) == + adjusted_offset); // avoid truncation + *out_data++ = cast_offset; + } + + // Copy last value since BinaryBuilder only materializes it on in Finish() + *out_data = static_cast(binary_builder_.value_data_length() - delta); + } + + template + void CopyOffsets(Offset* out_data) const { + CopyOffsets(0, out_data); + } + + // Copy values starting from index `start` into `out_data` + void CopyValues(int32_t start, uint8_t* out_data) const { + CopyValues(start, -1, out_data); + } + + // Same as above, but check output size in debug mode + void CopyValues(int32_t start, int64_t out_size, uint8_t* out_data) const { + DCHECK_LE(start, size()); + + // The absolute byte offset of `start` value in the binary buffer. 
 + const builder_offset_type offset = binary_builder_.offset(start); + const auto length = binary_builder_.value_data_length() - static_cast(offset); + + if (out_size != -1) { + assert(static_cast(length) <= out_size); + } + + auto view = binary_builder_.GetView(start); + memcpy(out_data, view.data(), length); + } + + void CopyValues(uint8_t* out_data) const { CopyValues(0, -1, out_data); } + + void CopyValues(int64_t out_size, uint8_t* out_data) const { + CopyValues(0, out_size, out_data); + } + + void CopyFixedWidthValues(int32_t start, int32_t width_size, int64_t out_size, + uint8_t* out_data) const { + // This method exists to cope with the fact that the BinaryMemoTable does + // not know the fixed width when inserting the null value. The data + // buffer holds a zero-length string for the null value (if found). + // + // Thus, the method will properly inject an empty value of the proper width + // in the output buffer. + // + if (start >= size()) { + return; + } + + int32_t null_index = GetNull(); + if (null_index < start) { + // Nothing to skip, proceed as usual. + CopyValues(start, out_size, out_data); + return; + } + + builder_offset_type left_offset = binary_builder_.offset(start); + + // Ensure that the data length is exactly missing width_size bytes to fit + // in the expected output (n_values * width_size). +#ifndef NDEBUG + int64_t data_length = values_size() - static_cast(left_offset); + assert(data_length + width_size == out_size); + ARROW_UNUSED(data_length); +#endif + + auto in_data = binary_builder_.value_data() + left_offset; + // The null entry uses 0-length data, so slice the data in 2 and skip by + // width_size in out_data. 
[part_1][width_size][part_2] + auto null_data_offset = binary_builder_.offset(null_index); + auto left_size = null_data_offset - left_offset; + if (left_size > 0) { + memcpy(out_data, in_data + left_offset, left_size); + } + // Zero-initialize the null entry + memset(out_data + left_size, 0, width_size); + + auto right_size = values_size() - static_cast(null_data_offset); + if (right_size > 0) { + // skip the null fixed size value. + auto out_offset = left_size + width_size; + assert(out_data + out_offset + right_size == out_data + out_size); + memcpy(out_data + out_offset, in_data + null_data_offset, right_size); + } + } + + // Visit the stored values in insertion order. + // The visitor function should have the signature `void(std::string_view)` + // or `void(const std::string_view&)`. + template + void VisitValues(int32_t start, VisitFunc&& visit) const { + for (int32_t i = start; i < size(); ++i) { + visit(binary_builder_.GetView(i)); + } + } + + protected: + struct Payload { + int32_t memo_index; + }; + + using HashTableType = HashTable; + using HashTableEntry = typename HashTable::Entry; + HashTableType hash_table_; + BinaryBuilderT binary_builder_; + + int32_t null_index_ = kKeyNotFound; + + std::pair Lookup(hash_t h, const void* data, + builder_offset_type length) const { + auto cmp_func = [&](const Payload* payload) { + std::string_view lhs = binary_builder_.GetView(payload->memo_index); + std::string_view rhs(static_cast(data), length); + return lhs == rhs; + }; + return hash_table_.Lookup(h, cmp_func); + } + + public: + Status MergeTable(const BinaryMemoTable& other_table) { + other_table.VisitValues(0, [this](std::string_view other_value) { + int32_t unused; + DCHECK_OK(this->GetOrInsert(other_value, &unused)); + }); + return Status::OK(); + } +}; + +template +struct HashTraits {}; + +template <> +struct HashTraits { + using MemoTableType = SmallScalarMemoTable; +}; + +template +struct HashTraits> { + using c_type = typename T::c_type; + using 
MemoTableType = SmallScalarMemoTable; +}; + +template +struct HashTraits::value && !is_8bit_int::value>> { + using c_type = typename T::c_type; + using MemoTableType = ScalarMemoTable; +}; + +template +struct HashTraits::value && + !std::is_base_of::value>> { + using MemoTableType = BinaryMemoTable; +}; + +template +struct HashTraits> { + using MemoTableType = BinaryMemoTable; +}; + +template +struct HashTraits::value>> { + using MemoTableType = BinaryMemoTable; +}; + +template +static inline Status ComputeNullBitmap(MemoryPool* pool, const MemoTableType& memo_table, + int64_t start_offset, int64_t* null_count, + std::shared_ptr* null_bitmap) { + int64_t dict_length = static_cast(memo_table.size()) - start_offset; + int64_t null_index = memo_table.GetNull(); + + *null_count = 0; + *null_bitmap = nullptr; + + if (null_index != kKeyNotFound && null_index >= start_offset) { + null_index -= start_offset; + *null_count = 1; + ARROW_ASSIGN_OR_RAISE(*null_bitmap, + internal::BitmapAllButOne(pool, dict_length, null_index)); + } + + return Status::OK(); +} + +struct StringViewHash { + // std::hash compatible hasher for use with std::unordered_* + // (the std::hash specialization provided by nonstd constructs std::string + // temporaries then invokes std::hash against those) + hash_t operator()(std::string_view value) const { + return ComputeStringHash<0>(value.data(), static_cast(value.size())); + } +}; + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h new file mode 100644 index 0000000000000000000000000000000000000000..59a2ac7109a3c08b4cd265f88b7ca0ecffe5ae9d --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h @@ -0,0 +1,137 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/status.h" + +#include "arrow/util/visibility.h" + +namespace arrow { + +class DataType; +struct ArraySpan; +struct Scalar; + +namespace internal { + +ARROW_EXPORT +uint8_t DetectUIntWidth(const uint64_t* values, int64_t length, uint8_t min_width = 1); + +ARROW_EXPORT +uint8_t DetectUIntWidth(const uint64_t* values, const uint8_t* valid_bytes, + int64_t length, uint8_t min_width = 1); + +ARROW_EXPORT +uint8_t DetectIntWidth(const int64_t* values, int64_t length, uint8_t min_width = 1); + +ARROW_EXPORT +uint8_t DetectIntWidth(const int64_t* values, const uint8_t* valid_bytes, int64_t length, + uint8_t min_width = 1); + +ARROW_EXPORT +void DowncastInts(const int64_t* source, int8_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastInts(const int64_t* source, int16_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastInts(const int64_t* source, int32_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastInts(const int64_t* source, int64_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastUInts(const uint64_t* source, uint8_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastUInts(const uint64_t* source, uint16_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastUInts(const uint64_t* source, uint32_t* 
dest, int64_t length); + +ARROW_EXPORT +void DowncastUInts(const uint64_t* source, uint64_t* dest, int64_t length); + +ARROW_EXPORT +void UpcastInts(const int32_t* source, int64_t* dest, int64_t length); + +template +inline typename std::enable_if<(sizeof(InputInt) >= sizeof(OutputInt))>::type CastInts( + const InputInt* source, OutputInt* dest, int64_t length) { + DowncastInts(source, dest, length); +} + +template +inline typename std::enable_if<(sizeof(InputInt) < sizeof(OutputInt))>::type CastInts( + const InputInt* source, OutputInt* dest, int64_t length) { + UpcastInts(source, dest, length); +} + +template +ARROW_EXPORT void TransposeInts(const InputInt* source, OutputInt* dest, int64_t length, + const int32_t* transpose_map); + +ARROW_EXPORT +Status TransposeInts(const DataType& src_type, const DataType& dest_type, + const uint8_t* src, uint8_t* dest, int64_t src_offset, + int64_t dest_offset, int64_t length, const int32_t* transpose_map); + +/// \brief Do vectorized boundschecking of integer-type array indices. The +/// indices must be nonnegative and strictly less than the passed upper +/// limit (which is usually the length of an array that is being indexed-into). +ARROW_EXPORT +Status CheckIndexBounds(const ArraySpan& values, uint64_t upper_limit); + +/// \brief Boundscheck integer values to determine if they are all between the +/// passed upper and lower limits (inclusive). Upper and lower bounds must be +/// the same type as the data and are not currently casted. +ARROW_EXPORT +Status CheckIntegersInRange(const ArraySpan& values, const Scalar& bound_lower, + const Scalar& bound_upper); + +/// \brief Use CheckIntegersInRange to determine whether the passed integers +/// can fit safely in the passed integer type. This helps quickly determine if +/// integer narrowing (e.g. int64->int32) is safe to do. 
+ARROW_EXPORT +Status IntegersCanFit(const ArraySpan& values, const DataType& target_type); + +/// \brief Convenience for boundschecking a single Scalar value +ARROW_EXPORT +Status IntegersCanFit(const Scalar& value, const DataType& target_type); + +/// Upcast an integer to the largest possible width (currently 64 bits) + +template +typename std::enable_if< + std::is_integral::value && std::is_signed::value, int64_t>::type +UpcastInt(Integer v) { + return v; +} + +template +typename std::enable_if< + std::is_integral::value && std::is_unsigned::value, uint64_t>::type +UpcastInt(Integer v) { + return v; +} + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h new file mode 100644 index 0000000000000000000000000000000000000000..ffe78be2470ddb846b5816be632e9921c041a23e --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +// "safe-math.h" includes from the Windows headers. +#include "arrow/util/windows_compatibility.h" +#include "arrow/vendored/portable-snippets/safe-math.h" +// clang-format off (avoid include reordering) +#include "arrow/util/windows_fixup.h" +// clang-format on + +namespace arrow { +namespace internal { + +// Define functions AddWithOverflow, SubtractWithOverflow, MultiplyWithOverflow +// with the signature `bool(T u, T v, T* out)` where T is an integer type. +// On overflow, these functions return true. Otherwise, false is returned +// and `out` is updated with the result of the operation. + +#define OP_WITH_OVERFLOW(_func_name, _psnip_op, _type, _psnip_type) \ + [[nodiscard]] static inline bool _func_name(_type u, _type v, _type* out) { \ + return !psnip_safe_##_psnip_type##_##_psnip_op(out, u, v); \ + } + +#define OPS_WITH_OVERFLOW(_func_name, _psnip_op) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int8_t, int8) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int16_t, int16) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int32_t, int32) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int64_t, int64) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint8_t, uint8) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint16_t, uint16) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint32_t, uint32) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint64_t, uint64) + +OPS_WITH_OVERFLOW(AddWithOverflow, add) +OPS_WITH_OVERFLOW(SubtractWithOverflow, sub) +OPS_WITH_OVERFLOW(MultiplyWithOverflow, mul) +OPS_WITH_OVERFLOW(DivideWithOverflow, div) + +#undef OP_WITH_OVERFLOW +#undef OPS_WITH_OVERFLOW + +// Define function NegateWithOverflow with the signature `bool(T u, T* out)` +// where T is a signed integer type. On overflow, these functions return true. +// Otherwise, false is returned and `out` is updated with the result of the +// operation. 
+ +#define UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, _type, _psnip_type) \ + [[nodiscard]] static inline bool _func_name(_type u, _type* out) { \ + return !psnip_safe_##_psnip_type##_##_psnip_op(out, u); \ + } + +#define SIGNED_UNARY_OPS_WITH_OVERFLOW(_func_name, _psnip_op) \ + UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int8_t, int8) \ + UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int16_t, int16) \ + UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int32_t, int32) \ + UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int64_t, int64) + +SIGNED_UNARY_OPS_WITH_OVERFLOW(NegateWithOverflow, neg) + +#undef UNARY_OP_WITH_OVERFLOW +#undef SIGNED_UNARY_OPS_WITH_OVERFLOW + +/// Signed addition with well-defined behaviour on overflow (as unsigned) +template +SignedInt SafeSignedAdd(SignedInt u, SignedInt v) { + using UnsignedInt = typename std::make_unsigned::type; + return static_cast(static_cast(u) + + static_cast(v)); +} + +/// Signed subtraction with well-defined behaviour on overflow (as unsigned) +template +SignedInt SafeSignedSubtract(SignedInt u, SignedInt v) { + using UnsignedInt = typename std::make_unsigned::type; + return static_cast(static_cast(u) - + static_cast(v)); +} + +/// Signed negation with well-defined behaviour on overflow (as unsigned) +template +SignedInt SafeSignedNegate(SignedInt u) { + using UnsignedInt = typename std::make_unsigned::type; + return static_cast(~static_cast(u) + 1); +} + +/// Signed left shift with well-defined behaviour on negative numbers or overflow +template +SignedInt SafeLeftShift(SignedInt u, Shift shift) { + using UnsignedInt = typename std::make_unsigned::type; + return static_cast(static_cast(u) << shift); +} + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h new file mode 100644 index 
0000000000000000000000000000000000000000..5f5bbd169e2eb60e97958d7375f63c15ae5d9fe4 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h @@ -0,0 +1,452 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#ifndef _WIN32 +#define ARROW_HAVE_SIGACTION 1 +#endif + +#include +#include +#include +#include +#include +#include + +#if ARROW_HAVE_SIGACTION +#include // Needed for struct sigaction +#endif + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/windows_fixup.h" + +namespace arrow::internal { + +// NOTE: 8-bit path strings on Windows are encoded using UTF-8. +// Using MBCS would fail encoding some paths. 
+ +#if defined(_WIN32) +using NativePathString = std::wstring; +#else +using NativePathString = std::string; +#endif + +class ARROW_EXPORT PlatformFilename { + public: + struct Impl; + + ~PlatformFilename(); + PlatformFilename(); + PlatformFilename(const PlatformFilename&); + PlatformFilename(PlatformFilename&&); + PlatformFilename& operator=(const PlatformFilename&); + PlatformFilename& operator=(PlatformFilename&&); + explicit PlatformFilename(NativePathString path); + explicit PlatformFilename(const NativePathString::value_type* path); + + const NativePathString& ToNative() const; + std::string ToString() const; + + PlatformFilename Parent() const; + Result Real() const; + + // These functions can fail for character encoding reasons. + static Result FromString(std::string_view file_name); + Result Join(std::string_view child_name) const; + + PlatformFilename Join(const PlatformFilename& child_name) const; + + bool operator==(const PlatformFilename& other) const; + bool operator!=(const PlatformFilename& other) const; + + // Made public to avoid the proliferation of friend declarations. + const Impl* impl() const { return impl_.get(); } + + private: + std::unique_ptr impl_; + + explicit PlatformFilename(Impl impl); +}; + +/// Create a directory if it doesn't exist. +/// +/// Return whether the directory was created. +ARROW_EXPORT +Result CreateDir(const PlatformFilename& dir_path); + +/// Create a directory and its parents if it doesn't exist. +/// +/// Return whether the directory was created. +ARROW_EXPORT +Result CreateDirTree(const PlatformFilename& dir_path); + +/// Delete a directory's contents (but not the directory itself) if it exists. +/// +/// Return whether the directory existed. +ARROW_EXPORT +Result DeleteDirContents(const PlatformFilename& dir_path, + bool allow_not_found = true); + +/// Delete a directory tree if it exists. +/// +/// Return whether the directory existed. 
+ARROW_EXPORT +Result DeleteDirTree(const PlatformFilename& dir_path, bool allow_not_found = true); + +// Non-recursively list the contents of the given directory. +// The returned names are the children's base names, not including dir_path. +ARROW_EXPORT +Result> ListDir(const PlatformFilename& dir_path); + +/// Delete a file if it exists. +/// +/// Return whether the file existed. +ARROW_EXPORT +Result DeleteFile(const PlatformFilename& file_path, bool allow_not_found = true); + +/// Return whether a file exists. +ARROW_EXPORT +Result FileExists(const PlatformFilename& path); + +// TODO expose this more publicly to make it available from io/file.h? +/// A RAII wrapper for a file descriptor. +/// +/// The underlying file descriptor is automatically closed on destruction. +/// Moving is supported with well-defined semantics. +/// Furthermore, closing is idempotent. +class ARROW_EXPORT FileDescriptor { + public: + FileDescriptor() = default; + explicit FileDescriptor(int fd) : fd_(fd) {} + FileDescriptor(FileDescriptor&&); + FileDescriptor& operator=(FileDescriptor&&); + + ~FileDescriptor(); + + Status Close(); + + /// May return -1 if closed or default-initialized + int fd() const { return fd_.load(); } + + /// Detach and return the underlying file descriptor + int Detach(); + + bool closed() const { return fd_.load() == -1; } + + protected: + static void CloseFromDestructor(int fd); + + std::atomic fd_{-1}; +}; + +/// Open a file for reading and return a file descriptor. +ARROW_EXPORT +Result FileOpenReadable(const PlatformFilename& file_name); + +/// Open a file for writing and return a file descriptor. +ARROW_EXPORT +Result FileOpenWritable(const PlatformFilename& file_name, + bool write_only = true, bool truncate = true, + bool append = false); + +/// Read from current file position. Return number of bytes read. +ARROW_EXPORT +Result FileRead(int fd, uint8_t* buffer, int64_t nbytes); +/// Read from given file position. Return number of bytes read. 
+ARROW_EXPORT +Result FileReadAt(int fd, uint8_t* buffer, int64_t position, int64_t nbytes); + +ARROW_EXPORT +Status FileWrite(int fd, const uint8_t* buffer, const int64_t nbytes); +ARROW_EXPORT +Status FileTruncate(int fd, const int64_t size); + +ARROW_EXPORT +Status FileSeek(int fd, int64_t pos); +ARROW_EXPORT +Status FileSeek(int fd, int64_t pos, int whence); +ARROW_EXPORT +Result FileTell(int fd); +ARROW_EXPORT +Result FileGetSize(int fd); + +ARROW_EXPORT +Status FileClose(int fd); + +struct Pipe { + FileDescriptor rfd; + FileDescriptor wfd; + + Status Close() { return rfd.Close() & wfd.Close(); } +}; + +ARROW_EXPORT +Result CreatePipe(); + +ARROW_EXPORT +Status SetPipeFileDescriptorNonBlocking(int fd); + +class ARROW_EXPORT SelfPipe { + public: + static Result> Make(bool signal_safe); + virtual ~SelfPipe(); + + /// \brief Wait for a wakeup. + /// + /// Status::Invalid is returned if the pipe has been shutdown. + /// Otherwise the next sent payload is returned. + virtual Result Wait() = 0; + + /// \brief Wake up the pipe by sending a payload. + /// + /// This method is async-signal-safe if `signal_safe` was set to true. + virtual void Send(uint64_t payload) = 0; + + /// \brief Wake up the pipe and shut it down. 
+ virtual Status Shutdown() = 0; +}; + +ARROW_EXPORT +int64_t GetPageSize(); + +struct MemoryRegion { + void* addr; + size_t size; +}; + +ARROW_EXPORT +Status MemoryMapRemap(void* addr, size_t old_size, size_t new_size, int fildes, + void** new_addr); +ARROW_EXPORT +Status MemoryAdviseWillNeed(const std::vector& regions); + +ARROW_EXPORT +Result GetEnvVar(const char* name); +ARROW_EXPORT +Result GetEnvVar(const std::string& name); +ARROW_EXPORT +Result GetEnvVarNative(const char* name); +ARROW_EXPORT +Result GetEnvVarNative(const std::string& name); + +ARROW_EXPORT +Status SetEnvVar(const char* name, const char* value); +ARROW_EXPORT +Status SetEnvVar(const std::string& name, const std::string& value); +ARROW_EXPORT +Status DelEnvVar(const char* name); +ARROW_EXPORT +Status DelEnvVar(const std::string& name); + +ARROW_EXPORT +std::string ErrnoMessage(int errnum); +#if _WIN32 +ARROW_EXPORT +std::string WinErrorMessage(int errnum); +#endif + +ARROW_EXPORT +std::shared_ptr StatusDetailFromErrno(int errnum); +ARROW_EXPORT +std::optional ErrnoFromStatusDetail(const StatusDetail& detail); +#if _WIN32 +ARROW_EXPORT +std::shared_ptr StatusDetailFromWinError(int errnum); +#endif +ARROW_EXPORT +std::shared_ptr StatusDetailFromSignal(int signum); + +template +Status StatusFromErrno(int errnum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromErrno(errnum), + std::forward(args)...); +} + +template +Status IOErrorFromErrno(int errnum, Args&&... args) { + return StatusFromErrno(errnum, StatusCode::IOError, std::forward(args)...); +} + +#if _WIN32 +template +Status StatusFromWinError(int errnum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromWinError(errnum), + std::forward(args)...); +} + +template +Status IOErrorFromWinError(int errnum, Args&&... 
args) { + return StatusFromWinError(errnum, StatusCode::IOError, std::forward(args)...); +} +#endif + +template +Status StatusFromSignal(int signum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromSignal(signum), + std::forward(args)...); +} + +template +Status CancelledFromSignal(int signum, Args&&... args) { + return StatusFromSignal(signum, StatusCode::Cancelled, std::forward(args)...); +} + +ARROW_EXPORT +int ErrnoFromStatus(const Status&); + +// Always returns 0 on non-Windows platforms (for Python). +ARROW_EXPORT +int WinErrorFromStatus(const Status&); + +ARROW_EXPORT +int SignalFromStatus(const Status&); + +class ARROW_EXPORT TemporaryDir { + public: + ~TemporaryDir(); + + /// '/'-terminated path to the temporary dir + const PlatformFilename& path() { return path_; } + + /// Create a temporary subdirectory in the system temporary dir, + /// named starting with `prefix`. + static Result> Make(const std::string& prefix); + + private: + PlatformFilename path_; + + explicit TemporaryDir(PlatformFilename&&); +}; + +class ARROW_EXPORT SignalHandler { + public: + using Callback = void (*)(int); + + SignalHandler(); + explicit SignalHandler(Callback cb); +#if ARROW_HAVE_SIGACTION + explicit SignalHandler(const struct sigaction& sa); +#endif + + Callback callback() const; +#if ARROW_HAVE_SIGACTION + const struct sigaction& action() const; +#endif + + protected: +#if ARROW_HAVE_SIGACTION + // Storing the full sigaction allows to restore the entire signal handling + // configuration. + struct sigaction sa_; +#else + Callback cb_; +#endif +}; + +/// \brief Return the current handler for the given signal number. +ARROW_EXPORT +Result GetSignalHandler(int signum); + +/// \brief Set a new handler for the given signal number. +/// +/// The old signal handler is returned. +ARROW_EXPORT +Result SetSignalHandler(int signum, const SignalHandler& handler); + +/// \brief Reinstate the signal handler +/// +/// For use in signal handlers. 
This is needed on platforms without sigaction() +/// such as Windows, as the default signal handler is restored there as +/// soon as a signal is raised. +ARROW_EXPORT +void ReinstateSignalHandler(int signum, SignalHandler::Callback handler); + +/// \brief Send a signal to the current process +/// +/// The thread which will receive the signal is unspecified. +ARROW_EXPORT +Status SendSignal(int signum); + +/// \brief Send a signal to the given thread +/// +/// This function isn't supported on Windows. +ARROW_EXPORT +Status SendSignalToThread(int signum, uint64_t thread_id); + +/// \brief Get an unpredictable random seed +/// +/// This function may be slightly costly, so should only be used to initialize +/// a PRNG, not to generate a large amount of random numbers. +/// It is better to use this function rather than std::random_device, unless +/// absolutely necessary (e.g. to generate a cryptographic secret). +ARROW_EXPORT +int64_t GetRandomSeed(); + +/// \brief Get the current thread id +/// +/// In addition to having the same properties as std::thread, the returned value +/// is a regular integer value, which is more convenient than an opaque type. +ARROW_EXPORT +uint64_t GetThreadId(); + +/// \brief Get the current memory used by the current process in bytes +/// +/// This function supports Windows, Linux, and Mac and will return 0 otherwise +ARROW_EXPORT +int64_t GetCurrentRSS(); + +/// \brief Get the total memory available to the system in bytes +/// +/// This function supports Windows, Linux, and Mac and will return 0 otherwise +ARROW_EXPORT +int64_t GetTotalMemoryBytes(); + +/// \brief Load a dynamic library +/// +/// This wraps dlopen() except on Windows, where LoadLibrary() is called. +/// These two platforms handle absolute paths consistently; relative paths +/// or the library's bare name may be handled but inconsistently. +/// +/// \return An opaque handle for the dynamic library, which can be used for +/// subsequent symbol lookup. 
Nullptr will never be returned; instead +/// an error will be raised. +ARROW_EXPORT Result LoadDynamicLibrary(const PlatformFilename& path); + +/// \brief Load a dynamic library +/// +/// An overload taking null terminated string. +ARROW_EXPORT Result LoadDynamicLibrary(const char* path); + +/// \brief Retrieve a symbol by name from a library handle. +/// +/// This wraps dlsym() except on Windows, where GetProcAddress() is called. +/// +/// \return The address associated with the named symbol. Nullptr will never be +/// returned; instead an error will be raised. +ARROW_EXPORT Result GetSymbol(void* handle, const char* name); + +template +Result GetSymbolAs(void* handle, const char* name) { + ARROW_ASSIGN_OR_RAISE(void* sym, GetSymbol(handle, name)); + return reinterpret_cast(sym); +} + +} // namespace arrow::internal diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..5025799b9a37254835c41d3e66751e6337c4eff6 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h @@ -0,0 +1,575 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/compare.h" +#include "arrow/util/functional.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +template +class Iterator; + +template +struct IterationTraits { + /// \brief a reserved value which indicates the end of iteration. By + /// default this is NULLPTR since most iterators yield pointer types. + /// Specialize IterationTraits if different end semantics are required. + /// + /// Note: This should not be used to determine if a given value is a + /// terminal value. Use IsIterationEnd (which uses IsEnd) instead. This + /// is only for returning terminal values. + static T End() { return T(NULLPTR); } + + /// \brief Checks to see if the value is a terminal value. + /// A method is used here since T is not necessarily comparable in many + /// cases even though it has a distinct final value + static bool IsEnd(const T& val) { return val == End(); } +}; + +template +T IterationEnd() { + return IterationTraits::End(); +} + +template +bool IsIterationEnd(const T& val) { + return IterationTraits::IsEnd(val); +} + +template +struct IterationTraits> { + /// \brief by default when iterating through a sequence of optional, + /// nullopt indicates the end of iteration. + /// Specialize IterationTraits if different end semantics are required. + static std::optional End() { return std::nullopt; } + + /// \brief by default when iterating through a sequence of optional, + /// nullopt (!has_value()) indicates the end of iteration. + /// Specialize IterationTraits if different end semantics are required. 
+ static bool IsEnd(const std::optional& val) { return !val.has_value(); } + + // TODO(bkietz) The range-for loop over Iterator> yields + // Result> which is unnecessary (since only the unyielded end optional + // is nullopt. Add IterationTraits::GetRangeElement() to handle this case +}; + +/// \brief A generic Iterator that can return errors +template +class Iterator : public util::EqualityComparable> { + public: + /// \brief Iterator may be constructed from any type which has a member function + /// with signature Result Next(); + /// End of iterator is signalled by returning IteratorTraits::End(); + /// + /// The argument is moved or copied to the heap and kept in a unique_ptr. Only + /// its destructor and its Next method (which are stored in function pointers) are + /// referenced after construction. + /// + /// This approach is used to dodge MSVC linkage hell (ARROW-6244, ARROW-6558) when using + /// an abstract template base class: instead of being inlined as usual for a template + /// function the base's virtual destructor will be exported, leading to multiple + /// definition errors when linking to any other TU where the base is instantiated. + template + explicit Iterator(Wrapped has_next) + : ptr_(new Wrapped(std::move(has_next)), Delete), next_(Next) {} + + Iterator() : ptr_(NULLPTR, [](void*) {}) {} + + /// \brief Return the next element of the sequence, IterationTraits::End() when the + /// iteration is completed. + Result Next() { + if (ptr_) { + auto next_result = next_(ptr_.get()); + if (next_result.ok() && IsIterationEnd(next_result.ValueUnsafe())) { + ptr_.reset(NULLPTR); + } + return next_result; + } else { + return IterationTraits::End(); + } + } + + /// Pass each element of the sequence to a visitor. Will return any error status + /// returned by the visitor, terminating iteration. 
+ template + Status Visit(Visitor&& visitor) { + for (;;) { + ARROW_ASSIGN_OR_RAISE(auto value, Next()); + + if (IsIterationEnd(value)) break; + + ARROW_RETURN_NOT_OK(visitor(std::move(value))); + } + + return Status::OK(); + } + + /// Iterators will only compare equal if they are both null. + /// Equality comparability is required to make an Iterator of Iterators + /// (to check for the end condition). + bool Equals(const Iterator& other) const { return ptr_ == other.ptr_; } + + explicit operator bool() const { return ptr_ != NULLPTR; } + + class RangeIterator { + public: + RangeIterator() : value_(IterationTraits::End()) {} + + explicit RangeIterator(Iterator i) + : value_(IterationTraits::End()), + iterator_(std::make_shared(std::move(i))) { + Next(); + } + + bool operator!=(const RangeIterator& other) const { return value_ != other.value_; } + + RangeIterator& operator++() { + Next(); + return *this; + } + + Result operator*() { + ARROW_RETURN_NOT_OK(value_.status()); + + auto value = std::move(value_); + value_ = IterationTraits::End(); + return value; + } + + private: + void Next() { + if (!value_.ok()) { + value_ = IterationTraits::End(); + return; + } + value_ = iterator_->Next(); + } + + Result value_; + std::shared_ptr iterator_; + }; + + RangeIterator begin() { return RangeIterator(std::move(*this)); } + + RangeIterator end() { return RangeIterator(); } + + /// \brief Move every element of this iterator into a vector. + Result> ToVector() { + std::vector out; + for (auto maybe_element : *this) { + ARROW_ASSIGN_OR_RAISE(auto element, maybe_element); + out.push_back(std::move(element)); + } + return out; + } + + private: + /// Implementation of deleter for ptr_: Casts from void* to the wrapped type and + /// deletes that. + template + static void Delete(void* ptr) { + delete static_cast(ptr); + } + + /// Implementation of Next: Casts from void* to the wrapped type and invokes that + /// type's Next member function. 
+ template + static Result Next(void* ptr) { + return static_cast(ptr)->Next(); + } + + /// ptr_ is a unique_ptr to void with a custom deleter: a function pointer which first + /// casts from void* to a pointer to the wrapped type then deletes that. + std::unique_ptr ptr_; + + /// next_ is a function pointer which first casts from void* to a pointer to the wrapped + /// type then invokes its Next member function. + Result (*next_)(void*) = NULLPTR; +}; + +template +struct TransformFlow { + using YieldValueType = T; + + TransformFlow(YieldValueType value, bool ready_for_next) + : finished_(false), + ready_for_next_(ready_for_next), + yield_value_(std::move(value)) {} + TransformFlow(bool finished, bool ready_for_next) + : finished_(finished), ready_for_next_(ready_for_next), yield_value_() {} + + bool HasValue() const { return yield_value_.has_value(); } + bool Finished() const { return finished_; } + bool ReadyForNext() const { return ready_for_next_; } + T Value() const { return *yield_value_; } + + bool finished_ = false; + bool ready_for_next_ = false; + std::optional yield_value_; +}; + +struct TransformFinish { + template + operator TransformFlow() && { // NOLINT explicit + return TransformFlow(true, true); + } +}; + +struct TransformSkip { + template + operator TransformFlow() && { // NOLINT explicit + return TransformFlow(false, true); + } +}; + +template +TransformFlow TransformYield(T value = {}, bool ready_for_next = true) { + return TransformFlow(std::move(value), ready_for_next); +} + +template +using Transformer = std::function>(T)>; + +template +class TransformIterator { + public: + explicit TransformIterator(Iterator it, Transformer transformer) + : it_(std::move(it)), + transformer_(std::move(transformer)), + last_value_(), + finished_() {} + + Result Next() { + while (!finished_) { + ARROW_ASSIGN_OR_RAISE(std::optional next, Pump()); + if (next.has_value()) { + return std::move(*next); + } + ARROW_ASSIGN_OR_RAISE(last_value_, it_.Next()); + } + 
return IterationTraits::End(); + } + + private: + // Calls the transform function on the current value. Can return in several ways + // * If the next value is requested (e.g. skip) it will return an empty optional + // * If an invalid status is encountered that will be returned + // * If finished it will return IterationTraits::End() + // * If a value is returned by the transformer that will be returned + Result> Pump() { + if (!finished_ && last_value_.has_value()) { + auto next_res = transformer_(*last_value_); + if (!next_res.ok()) { + finished_ = true; + return next_res.status(); + } + auto next = *next_res; + if (next.ReadyForNext()) { + if (IsIterationEnd(*last_value_)) { + finished_ = true; + } + last_value_.reset(); + } + if (next.Finished()) { + finished_ = true; + } + if (next.HasValue()) { + return next.Value(); + } + } + if (finished_) { + return IterationTraits::End(); + } + return std::nullopt; + } + + Iterator it_; + Transformer transformer_; + std::optional last_value_; + bool finished_ = false; +}; + +/// \brief Transforms an iterator according to a transformer, returning a new Iterator. +/// +/// The transformer will be called on each element of the source iterator and for each +/// call it can yield a value, skip, or finish the iteration. When yielding a value the +/// transformer can choose to consume the source item (the default, ready_for_next = true) +/// or to keep it and it will be called again on the same value. +/// +/// This is essentially a more generic form of the map operation that can return 0, 1, or +/// many values for each of the source items. +/// +/// The transformer will be exposed to the end of the source sequence +/// (IterationTraits::End) in case it needs to return some penultimate item(s). +/// +/// Any invalid status returned by the transformer will be returned immediately. 
+template +Iterator MakeTransformedIterator(Iterator it, Transformer op) { + return Iterator(TransformIterator(std::move(it), std::move(op))); +} + +template +struct IterationTraits> { + // The end condition for an Iterator of Iterators is a default constructed (null) + // Iterator. + static Iterator End() { return Iterator(); } + static bool IsEnd(const Iterator& val) { return !val; } +}; + +template +class FunctionIterator { + public: + explicit FunctionIterator(Fn fn) : fn_(std::move(fn)) {} + + Result Next() { return fn_(); } + + private: + Fn fn_; +}; + +/// \brief Construct an Iterator which invokes a callable on Next() +template ::ValueType> +Iterator MakeFunctionIterator(Fn fn) { + return Iterator(FunctionIterator(std::move(fn))); +} + +template +Iterator MakeEmptyIterator() { + return MakeFunctionIterator([]() -> Result { return IterationTraits::End(); }); +} + +template +Iterator MakeErrorIterator(Status s) { + return MakeFunctionIterator([s]() -> Result { + ARROW_RETURN_NOT_OK(s); + return IterationTraits::End(); + }); +} + +/// \brief Simple iterator which yields the elements of a std::vector +template +class VectorIterator { + public: + explicit VectorIterator(std::vector v) : elements_(std::move(v)) {} + + Result Next() { + if (i_ == elements_.size()) { + return IterationTraits::End(); + } + return std::move(elements_[i_++]); + } + + private: + std::vector elements_; + size_t i_ = 0; +}; + +template +Iterator MakeVectorIterator(std::vector v) { + return Iterator(VectorIterator(std::move(v))); +} + +/// \brief Simple iterator which yields *pointers* to the elements of a std::vector. 
+/// This is provided to support T where IterationTraits::End is not specialized +template +class VectorPointingIterator { + public: + explicit VectorPointingIterator(std::vector v) : elements_(std::move(v)) {} + + Result Next() { + if (i_ == elements_.size()) { + return NULLPTR; + } + return &elements_[i_++]; + } + + private: + std::vector elements_; + size_t i_ = 0; +}; + +template +Iterator MakeVectorPointingIterator(std::vector v) { + return Iterator(VectorPointingIterator(std::move(v))); +} + +/// \brief MapIterator takes ownership of an iterator and a function to apply +/// on every element. The mapped function is not allowed to fail. +template +class MapIterator { + public: + explicit MapIterator(Fn map, Iterator it) + : map_(std::move(map)), it_(std::move(it)) {} + + Result Next() { + ARROW_ASSIGN_OR_RAISE(I i, it_.Next()); + + if (IsIterationEnd(i)) { + return IterationTraits::End(); + } + + return map_(std::move(i)); + } + + private: + Fn map_; + Iterator it_; +}; + +/// \brief MapIterator takes ownership of an iterator and a function to apply +/// on every element. The mapped function is not allowed to fail. +template , + typename To = internal::call_traits::return_type> +Iterator MakeMapIterator(Fn map, Iterator it) { + return Iterator(MapIterator(std::move(map), std::move(it))); +} + +/// \brief Like MapIterator, but where the function can fail. 
+template , + typename To = typename internal::call_traits::return_type::ValueType> +Iterator MakeMaybeMapIterator(Fn map, Iterator it) { + return Iterator(MapIterator(std::move(map), std::move(it))); +} + +struct FilterIterator { + enum Action { ACCEPT, REJECT }; + + template + static Result> Reject() { + return std::make_pair(IterationTraits::End(), REJECT); + } + + template + static Result> Accept(To out) { + return std::make_pair(std::move(out), ACCEPT); + } + + template + static Result> MaybeAccept(Result maybe_out) { + return std::move(maybe_out).Map(Accept); + } + + template + static Result> Error(Status s) { + return s; + } + + template + class Impl { + public: + explicit Impl(Fn filter, Iterator it) : filter_(filter), it_(std::move(it)) {} + + Result Next() { + To out = IterationTraits::End(); + Action action; + + for (;;) { + ARROW_ASSIGN_OR_RAISE(From i, it_.Next()); + + if (IsIterationEnd(i)) { + return IterationTraits::End(); + } + + ARROW_ASSIGN_OR_RAISE(std::tie(out, action), filter_(std::move(i))); + + if (action == ACCEPT) return out; + } + } + + private: + Fn filter_; + Iterator it_; + }; +}; + +/// \brief Like MapIterator, but where the function can fail or reject elements. +template < + typename Fn, typename From = typename internal::call_traits::argument_type<0, Fn>, + typename Ret = typename internal::call_traits::return_type::ValueType, + typename To = typename std::tuple_element<0, Ret>::type, + typename Enable = typename std::enable_if::type, FilterIterator::Action>::value>::type> +Iterator MakeFilterIterator(Fn filter, Iterator it) { + return Iterator( + FilterIterator::Impl(std::move(filter), std::move(it))); +} + +/// \brief FlattenIterator takes an iterator generating iterators and yields a +/// unified iterator that flattens/concatenates in a single stream. 
+template +class FlattenIterator { + public: + explicit FlattenIterator(Iterator> it) : parent_(std::move(it)) {} + + Result Next() { + if (IsIterationEnd(child_)) { + // Pop from parent's iterator. + ARROW_ASSIGN_OR_RAISE(child_, parent_.Next()); + + // Check if final iteration reached. + if (IsIterationEnd(child_)) { + return IterationTraits::End(); + } + + return Next(); + } + + // Pop from child_ and check for depletion. + ARROW_ASSIGN_OR_RAISE(T out, child_.Next()); + if (IsIterationEnd(out)) { + // Reset state such that we pop from parent on the recursive call + child_ = IterationTraits>::End(); + + return Next(); + } + + return out; + } + + private: + Iterator> parent_; + Iterator child_ = IterationTraits>::End(); +}; + +template +Iterator MakeFlattenIterator(Iterator> it) { + return Iterator(FlattenIterator(std::move(it))); +} + +template +Iterator MakeIteratorFromReader( + const std::shared_ptr& reader) { + return MakeFunctionIterator([reader] { return reader->Next(); }); +} + +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h new file mode 100644 index 0000000000000000000000000000000000000000..9e4533c4b4760a416b0aca4b91c32ffd324d7f08 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +namespace arrow { +namespace internal { + +#if __cpp_lib_launder +using std::launder; +#else +template +constexpr T* launder(T* p) noexcept { + return p; +} +#endif + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/logger.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/logger.h new file mode 100644 index 0000000000000000000000000000000000000000..5200503bb4fdb454b7e2d5acd787914c6f5fdbaf --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/logger.h @@ -0,0 +1,186 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/logging.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +struct SourceLocation { + const char* file = ""; + int line = 0; +}; + +struct LogDetails { + ArrowLogLevel severity = ArrowLogLevel::ARROW_INFO; + std::chrono::system_clock::time_point timestamp = std::chrono::system_clock::now(); + SourceLocation source_location{}; + std::string_view message = ""; +}; + +/// \brief A base interface for custom loggers. +/// +/// Loggers can be added to the LoggerRegistry for global access or directly provided to +/// certain logging utilities. +class Logger { + public: + virtual ~Logger() = default; + + virtual void Log(const LogDetails& details) = 0; + + virtual bool Flush(std::chrono::microseconds timeout) { return true; } + bool Flush() { return this->Flush(std::chrono::microseconds::max()); } + + virtual bool is_enabled() const { return true; } + + virtual ArrowLogLevel severity_threshold() const { return ArrowLogLevel::ARROW_TRACE; } +}; + +/// \brief Creates a simple logger that redirects output to std::cerr +ARROW_EXPORT std::shared_ptr MakeOStreamLogger(ArrowLogLevel severity_threshold); +/// \brief Creates a simple logger that redirects output to the provided ostream +ARROW_EXPORT std::shared_ptr MakeOStreamLogger(ArrowLogLevel severity_threshold, + std::ostream& sink); + +class ARROW_EXPORT LoggerRegistry { + public: + /// \brief Add a logger to the registry with the associated name + /// + /// Returns Invalid if a logger with the provided name already exists. Users should call + /// `UnregisterLogger` first if they wish to overwrite it. 
+ static Status RegisterLogger(std::string_view name, std::shared_ptr logger); + + /// \brief Remove a logger from the registry + static void UnregisterLogger(std::string_view name); + + /// \brief Return the logger associated with the provided name + /// + /// If `name` is empty, the default logger is returned. If `name` doesn't match any of + /// the registered loggers then a non-null noop logger is returned + static std::shared_ptr GetLogger(std::string_view name = ""); + + /// \brief Return the default logger + static std::shared_ptr GetDefaultLogger(); + /// \brief Set the default logger + static void SetDefaultLogger(std::shared_ptr logger); +}; + +/// \brief Represents a single log record to be emitted by an underlying logger +class ARROW_EXPORT LogMessage { + public: + /// \brief Construct a LogMessage with the provided underlying logger + LogMessage(ArrowLogLevel severity, std::shared_ptr logger, + SourceLocation source_location = {}); + /// \brief Construct a LogMessage with the provided logger name, which will be used to + /// find an underlying logger in the registry + LogMessage(ArrowLogLevel severity, std::string_view logger_name, + SourceLocation source_location = {}); + + std::ostream& Stream(); + + // Convenience method - mainly for use in ARROW_LOG_* macros. This prevents unnecessary + // argument evaluation when log statements are stripped in certain builds + template + LogMessage& Append(Args&&... args) { + if constexpr (sizeof...(Args) > 0) { + if (CheckIsEnabled()) { + (Stream() << ... 
<< args); + } + } + return *this; + } + + private: + bool CheckIsEnabled(); + + class Impl; + std::shared_ptr impl_; +}; + +} // namespace util +} // namespace arrow + +// For the following macros, log statements with a lower severity than +// `ARROW_MINIMUM_LOG_LEVEL` will be stripped from the build +#ifndef ARROW_MINIMUM_LOG_LEVEL +#define ARROW_MINIMUM_LOG_LEVEL -1000 +#endif + +#define ARROW_LOGGER_INTERNAL(LOGGER, LEVEL) \ + (::arrow::util::LogMessage(::arrow::util::ArrowLogLevel::ARROW_##LEVEL, LOGGER, \ + ::arrow::util::SourceLocation{__FILE__, __LINE__})) + +static_assert(static_cast(::arrow::util::ArrowLogLevel::ARROW_TRACE) == -2); +#if ARROW_MINIMUM_LOG_LEVEL <= -2 +#define ARROW_LOGGER_TRACE(LOGGER, ...) \ + (ARROW_LOGGER_INTERNAL(LOGGER, TRACE).Append(__VA_ARGS__)) +#else +#define ARROW_LOGGER_TRACE(...) ARROW_UNUSED(0) +#endif + +static_assert(static_cast(::arrow::util::ArrowLogLevel::ARROW_DEBUG) == -1); +#if ARROW_MINIMUM_LOG_LEVEL <= -1 +#define ARROW_LOGGER_DEBUG(LOGGER, ...) \ + (ARROW_LOGGER_INTERNAL(LOGGER, DEBUG).Append(__VA_ARGS__)) +#else +#define ARROW_LOGGER_DEBUG(...) ARROW_UNUSED(0) +#endif + +static_assert(static_cast(::arrow::util::ArrowLogLevel::ARROW_INFO) == 0); +#if ARROW_MINIMUM_LOG_LEVEL <= 0 +#define ARROW_LOGGER_INFO(LOGGER, ...) \ + (ARROW_LOGGER_INTERNAL(LOGGER, INFO).Append(__VA_ARGS__)) +#else +#define ARROW_LOGGER_INFO(...) ARROW_UNUSED(0) +#endif + +static_assert(static_cast(::arrow::util::ArrowLogLevel::ARROW_WARNING) == 1); +#if ARROW_MINIMUM_LOG_LEVEL <= 1 +#define ARROW_LOGGER_WARNING(LOGGER, ...) \ + (ARROW_LOGGER_INTERNAL(LOGGER, WARNING).Append(__VA_ARGS__)) +#else +#define ARROW_LOGGER_WARNING(...) ARROW_UNUSED(0) +#endif + +static_assert(static_cast(::arrow::util::ArrowLogLevel::ARROW_ERROR) == 2); +#if ARROW_MINIMUM_LOG_LEVEL <= 2 +#define ARROW_LOGGER_ERROR(LOGGER, ...) \ + (ARROW_LOGGER_INTERNAL(LOGGER, ERROR).Append(__VA_ARGS__)) +#else +#define ARROW_LOGGER_ERROR(...) 
ARROW_UNUSED(0) +#endif + +static_assert(static_cast(::arrow::util::ArrowLogLevel::ARROW_FATAL) == 3); +#if ARROW_MINIMUM_LOG_LEVEL <= 3 +#define ARROW_LOGGER_FATAL(LOGGER, ...) \ + (ARROW_LOGGER_INTERNAL(LOGGER, FATAL).Append(__VA_ARGS__)) +#else +#define ARROW_LOGGER_FATAL(...) ARROW_UNUSED(0) +#endif + +#define ARROW_LOGGER_CALL(LOGGER, LEVEL, ...) ARROW_LOGGER_##LEVEL(LOGGER, __VA_ARGS__) diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..2a2175ec0fc72a8a25461e8e22f3fe7adc29a0f5 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h @@ -0,0 +1,260 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#ifdef GANDIVA_IR + +// The LLVM IR code doesn't have an NDEBUG mode. And, it shouldn't include references to +// streams or stdc++. So, making the DCHECK calls void in that case. 
+ +#define ARROW_IGNORE_EXPR(expr) ((void)(expr)) + +#define DCHECK(condition) ARROW_IGNORE_EXPR(condition) +#define DCHECK_OK(status) ARROW_IGNORE_EXPR(status) +#define DCHECK_EQ(val1, val2) ARROW_IGNORE_EXPR(val1) +#define DCHECK_NE(val1, val2) ARROW_IGNORE_EXPR(val1) +#define DCHECK_LE(val1, val2) ARROW_IGNORE_EXPR(val1) +#define DCHECK_LT(val1, val2) ARROW_IGNORE_EXPR(val1) +#define DCHECK_GE(val1, val2) ARROW_IGNORE_EXPR(val1) +#define DCHECK_GT(val1, val2) ARROW_IGNORE_EXPR(val1) + +#else // !GANDIVA_IR + +#include +#include +#include + +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +enum class ArrowLogLevel : int { + ARROW_TRACE = -2, + ARROW_DEBUG = -1, + ARROW_INFO = 0, + ARROW_WARNING = 1, + ARROW_ERROR = 2, + ARROW_FATAL = 3 +}; + +#define ARROW_LOG_INTERNAL(level) ::arrow::util::ArrowLog(__FILE__, __LINE__, level) +#define ARROW_LOG(level) ARROW_LOG_INTERNAL(::arrow::util::ArrowLogLevel::ARROW_##level) + +#define ARROW_IGNORE_EXPR(expr) ((void)(expr)) + +#define ARROW_CHECK_OR_LOG(condition, level) \ + ARROW_PREDICT_TRUE(condition) \ + ? ARROW_IGNORE_EXPR(0) \ + : ::arrow::util::Voidify() & ARROW_LOG(level) << " Check failed: " #condition " " + +#define ARROW_CHECK(condition) ARROW_CHECK_OR_LOG(condition, FATAL) + +// If 'to_call' returns a bad status, CHECK immediately with a logged message +// of 'msg' followed by the status. +#define ARROW_CHECK_OK_PREPEND(to_call, msg, level) \ + do { \ + ::arrow::Status _s = (to_call); \ + ARROW_CHECK_OR_LOG(_s.ok(), level) \ + << "Operation failed: " << ARROW_STRINGIFY(to_call) << "\n" \ + << (msg) << ": " << _s.ToString(); \ + } while (false) + +// If the status is bad, CHECK immediately, appending the status to the +// logged message. 
+#define ARROW_CHECK_OK(s) ARROW_CHECK_OK_PREPEND(s, "Bad status", FATAL) + +#define ARROW_CHECK_EQ(val1, val2) ARROW_CHECK((val1) == (val2)) +#define ARROW_CHECK_NE(val1, val2) ARROW_CHECK((val1) != (val2)) +#define ARROW_CHECK_LE(val1, val2) ARROW_CHECK((val1) <= (val2)) +#define ARROW_CHECK_LT(val1, val2) ARROW_CHECK((val1) < (val2)) +#define ARROW_CHECK_GE(val1, val2) ARROW_CHECK((val1) >= (val2)) +#define ARROW_CHECK_GT(val1, val2) ARROW_CHECK((val1) > (val2)) + +#ifdef NDEBUG +#define ARROW_DFATAL ::arrow::util::ArrowLogLevel::ARROW_WARNING + +// CAUTION: DCHECK_OK() always evaluates its argument, but other DCHECK*() macros +// only do so in debug mode. + +#define ARROW_DCHECK(condition) \ + while (false) ARROW_IGNORE_EXPR(condition); \ + while (false) ::arrow::util::detail::NullLog() +#define ARROW_DCHECK_OK(s) \ + ARROW_IGNORE_EXPR(s); \ + while (false) ::arrow::util::detail::NullLog() +#define ARROW_DCHECK_EQ(val1, val2) \ + while (false) ARROW_IGNORE_EXPR(val1); \ + while (false) ARROW_IGNORE_EXPR(val2); \ + while (false) ::arrow::util::detail::NullLog() +#define ARROW_DCHECK_NE(val1, val2) \ + while (false) ARROW_IGNORE_EXPR(val1); \ + while (false) ARROW_IGNORE_EXPR(val2); \ + while (false) ::arrow::util::detail::NullLog() +#define ARROW_DCHECK_LE(val1, val2) \ + while (false) ARROW_IGNORE_EXPR(val1); \ + while (false) ARROW_IGNORE_EXPR(val2); \ + while (false) ::arrow::util::detail::NullLog() +#define ARROW_DCHECK_LT(val1, val2) \ + while (false) ARROW_IGNORE_EXPR(val1); \ + while (false) ARROW_IGNORE_EXPR(val2); \ + while (false) ::arrow::util::detail::NullLog() +#define ARROW_DCHECK_GE(val1, val2) \ + while (false) ARROW_IGNORE_EXPR(val1); \ + while (false) ARROW_IGNORE_EXPR(val2); \ + while (false) ::arrow::util::detail::NullLog() +#define ARROW_DCHECK_GT(val1, val2) \ + while (false) ARROW_IGNORE_EXPR(val1); \ + while (false) ARROW_IGNORE_EXPR(val2); \ + while (false) ::arrow::util::detail::NullLog() + +#else +#define ARROW_DFATAL 
::arrow::util::ArrowLogLevel::ARROW_FATAL + +#define ARROW_DCHECK ARROW_CHECK +#define ARROW_DCHECK_OK ARROW_CHECK_OK +#define ARROW_DCHECK_EQ ARROW_CHECK_EQ +#define ARROW_DCHECK_NE ARROW_CHECK_NE +#define ARROW_DCHECK_LE ARROW_CHECK_LE +#define ARROW_DCHECK_LT ARROW_CHECK_LT +#define ARROW_DCHECK_GE ARROW_CHECK_GE +#define ARROW_DCHECK_GT ARROW_CHECK_GT + +#endif // NDEBUG + +#define DCHECK ARROW_DCHECK +#define DCHECK_OK ARROW_DCHECK_OK +#define DCHECK_EQ ARROW_DCHECK_EQ +#define DCHECK_NE ARROW_DCHECK_NE +#define DCHECK_LE ARROW_DCHECK_LE +#define DCHECK_LT ARROW_DCHECK_LT +#define DCHECK_GE ARROW_DCHECK_GE +#define DCHECK_GT ARROW_DCHECK_GT + +// This code is adapted from +// https://github.com/ray-project/ray/blob/master/src/ray/util/logging.h. + +// To make the logging lib pluggable with other logging libs and make +// the implementation unawared by the user, ArrowLog is only a declaration +// which hide the implementation into logging.cc file. +// In logging.cc, we can choose different log libs using different macros. + +// This is also a null log which does not output anything. +class ARROW_EXPORT ArrowLogBase { + public: + virtual ~ArrowLogBase() {} + + virtual bool IsEnabled() const { return false; } + + template + ArrowLogBase& operator<<(const T& t) { + if (IsEnabled()) { + Stream() << t; + } + return *this; + } + + protected: + virtual std::ostream& Stream() = 0; +}; + +class ARROW_EXPORT ArrowLog : public ArrowLogBase { + public: + ArrowLog(const char* file_name, int line_number, ArrowLogLevel severity); + ~ArrowLog() override; + + /// Return whether or not current logging instance is enabled. + /// + /// \return True if logging is enabled and false otherwise. + bool IsEnabled() const override; + + /// The init function of arrow log for a program which should be called only once. + /// + /// \param appName The app name which starts the log. + /// \param severity_threshold Logging threshold for the program. 
+ /// \param logDir Logging output file name. If empty, the log won't output to file. + static void StartArrowLog(const std::string& appName, + ArrowLogLevel severity_threshold = ArrowLogLevel::ARROW_INFO, + const std::string& logDir = ""); + + /// The shutdown function of arrow log, it should be used with StartArrowLog as a pair. + static void ShutDownArrowLog(); + + /// Install the failure signal handler to output call stack when crash. + /// If glog is not installed, this function won't do anything. + static void InstallFailureSignalHandler(); + + /// Uninstall the signal actions installed by InstallFailureSignalHandler. + static void UninstallSignalAction(); + + /// Return whether or not the log level is enabled in current setting. + /// + /// \param log_level The input log level to test. + /// \return True if input log level is not lower than the threshold. + static bool IsLevelEnabled(ArrowLogLevel log_level); + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(ArrowLog); + + // Hide the implementation of log provider by void *. + // Otherwise, lib user may define the same macro to use the correct header file. + void* logging_provider_; + /// True if log messages should be logged and false if they should be ignored. + bool is_enabled_; + + static ArrowLogLevel severity_threshold_; + + protected: + std::ostream& Stream() override; +}; + +// This class make ARROW_CHECK compilation pass to change the << operator to void. +// This class is copied from glog. +class ARROW_EXPORT Voidify { + public: + Voidify() {} + // This has to be an operator with a precedence lower than << but + // higher than ?: + void operator&(ArrowLogBase&) {} +}; + +namespace detail { + +/// @brief A helper for the nil log sink. +/// +/// Using this helper is analogous to sending log messages to /dev/null: +/// nothing gets logged. +class NullLog { + public: + /// The no-op output operator. + /// + /// @param [in] t + /// The object to send into the nil sink. 
+ /// @return Reference to the updated object. + template + NullLog& operator<<(const T& t) { + return *this; + } +}; + +} // namespace detail +} // namespace util +} // namespace arrow + +#endif // GANDIVA_IR diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/macros.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/macros.h new file mode 100644 index 0000000000000000000000000000000000000000..484df3400d92d1ec1f53777324cc3582f315c660 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/macros.h @@ -0,0 +1,241 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#define ARROW_EXPAND(x) x +#define ARROW_STRINGIFY(x) #x +#define ARROW_CONCAT(x, y) x##y + +// From Google gutil +#ifndef ARROW_DISALLOW_COPY_AND_ASSIGN +#define ARROW_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + void operator=(const TypeName&) = delete +#endif + +#ifndef ARROW_DEFAULT_MOVE_AND_ASSIGN +#define ARROW_DEFAULT_MOVE_AND_ASSIGN(TypeName) \ + TypeName(TypeName&&) = default; \ + TypeName& operator=(TypeName&&) = default +#endif + +// With ARROW_PREDICT_FALSE, GCC and clang can be told that a certain branch is +// not likely to be taken (for instance, a CHECK failure), and use that information in +// static analysis. Giving the compiler this information can affect the generated code +// layout in the absence of better information (i.e. -fprofile-arcs). [1] explains how +// this feature can be used to improve code generation. It was written as a positive +// comment to a negative article about the use of these annotations. +// +// ARROW_COMPILER_ASSUME allows the compiler to assume that a given expression is +// true, without evaluating it, and to optimise based on this assumption [2]. If this +// condition is violated at runtime, the behavior is undefined. This can be useful to +// generate both faster and smaller code in compute kernels. +// +// IMPORTANT: Different optimisers are likely to react differently to this annotation! +// It should be used with care when we can prove by some means that the assumption +// is (1) guaranteed to always hold and (2) is useful for optimization [3]. If the +// assumption is pessimistic, it might even block the compiler from decisions that +// could lead to better code [4]. If you have a good intuition for what the compiler +// can do with assumptions [5], you can use this macro to guide it and end up with +// results you would only get with more complex code transformations. 
+// `clang -S -emit-llvm` can be used to check how the generated code changes with +// your specific use of this macro. +// +// [1] https://lobste.rs/s/uwgtkt/don_t_use_likely_unlikely_attributes#c_xi3wmc +// [2] "Portable assumptions" +// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p1774r4.pdf +// [3] "Assertions Are Pessimistic, Assumptions Are Optimistic" +// https://blog.regehr.org/archives/1096 +// [4] https://discourse.llvm.org/t/llvm-assume-blocks-optimization/71609 +// [5] J. Doerfert et al. 2019. "Performance Exploration Through Optimistic Static +// Program Annotations". https://github.com/jdoerfert/PETOSPA/blob/master/ISC19.pdf +#define ARROW_UNUSED(x) (void)(x) +#ifdef ARROW_WARN_DOCUMENTATION +#define ARROW_ARG_UNUSED(x) x +#else +#define ARROW_ARG_UNUSED(x) +#endif +#if defined(__GNUC__) // GCC and compatible compilers (clang, Intel ICC) +#define ARROW_NORETURN __attribute__((noreturn)) +#define ARROW_NOINLINE __attribute__((noinline)) +#define ARROW_FORCE_INLINE __attribute__((always_inline)) +#define ARROW_PREDICT_FALSE(x) (__builtin_expect(!!(x), 0)) +#define ARROW_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1)) +#define ARROW_PREFETCH(addr) __builtin_prefetch(addr) +#define ARROW_RESTRICT __restrict +#if defined(__clang__) // clang-specific +#define ARROW_COMPILER_ASSUME(expr) __builtin_assume(expr) +#else // GCC-specific +#if __GNUC__ >= 13 +#define ARROW_COMPILER_ASSUME(expr) __attribute__((assume(expr))) +#else +// GCC does not have a built-in assume intrinsic before GCC 13, so we use an +// if statement and __builtin_unreachable() to achieve the same effect [2]. +// Unlike clang's __builtin_assume and C++23's [[assume(expr)]], using this +// on GCC won't warn about side-effects in the expression, so make sure expr +// is side-effect free when working with GCC versions before 13 (Jan-2024), +// otherwise clang/MSVC builds will fail in CI. 
+#define ARROW_COMPILER_ASSUME(expr) \ + if (expr) { \ + } else { \ + __builtin_unreachable(); \ + } +#endif // __GNUC__ >= 13 +#endif +#elif defined(_MSC_VER) // MSVC +#define ARROW_NORETURN __declspec(noreturn) +#define ARROW_NOINLINE __declspec(noinline) +#define ARROW_FORCE_INLINE __forceinline +#define ARROW_PREDICT_FALSE(x) (x) +#define ARROW_PREDICT_TRUE(x) (x) +#define ARROW_PREFETCH(addr) +#define ARROW_RESTRICT __restrict +#define ARROW_COMPILER_ASSUME(expr) __assume(expr) +#else +#define ARROW_NORETURN +#define ARROW_NOINLINE +#define ARROW_FORCE_INLINE +#define ARROW_PREDICT_FALSE(x) (x) +#define ARROW_PREDICT_TRUE(x) (x) +#define ARROW_PREFETCH(addr) +#define ARROW_RESTRICT +#define ARROW_COMPILER_ASSUME(expr) +#endif + +// ---------------------------------------------------------------------- +// C++/CLI support macros (see ARROW-1134) + +#ifndef NULLPTR + +#ifdef __cplusplus_cli +#define NULLPTR __nullptr +#else +#define NULLPTR nullptr +#endif + +#endif // ifndef NULLPTR + +// ---------------------------------------------------------------------- + +// clang-format off +// [[deprecated]] is only available in C++14, use this for the time being +// This macro takes an optional deprecation message +#ifdef __COVERITY__ +# define ARROW_DEPRECATED(...) +#else +# define ARROW_DEPRECATED(...) [[deprecated(__VA_ARGS__)]] +#endif + +#ifdef __COVERITY__ +# define ARROW_DEPRECATED_ENUM_VALUE(...) +#else +# define ARROW_DEPRECATED_ENUM_VALUE(...) 
[[deprecated(__VA_ARGS__)]] +#endif + +// clang-format on + +// Macros to disable deprecation warnings + +#ifdef __clang__ +#define ARROW_SUPPRESS_DEPRECATION_WARNING \ + _Pragma("clang diagnostic push"); \ + _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") +#define ARROW_UNSUPPRESS_DEPRECATION_WARNING _Pragma("clang diagnostic pop") +#elif defined(__GNUC__) +#define ARROW_SUPPRESS_DEPRECATION_WARNING \ + _Pragma("GCC diagnostic push"); \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#define ARROW_UNSUPPRESS_DEPRECATION_WARNING _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +#define ARROW_SUPPRESS_DEPRECATION_WARNING \ + __pragma(warning(push)) __pragma(warning(disable : 4996)) +#define ARROW_UNSUPPRESS_DEPRECATION_WARNING __pragma(warning(pop)) +#else +#define ARROW_SUPPRESS_DEPRECATION_WARNING +#define ARROW_UNSUPPRESS_DEPRECATION_WARNING +#endif + +// ---------------------------------------------------------------------- + +// macros to disable padding +// these macros are portable across different compilers and platforms +//[https://github.com/google/flatbuffers/blob/master/include/flatbuffers/flatbuffers.h#L1355] +#if !defined(MANUALLY_ALIGNED_STRUCT) +#if defined(_MSC_VER) +#define MANUALLY_ALIGNED_STRUCT(alignment) \ + __pragma(pack(1)); \ + struct __declspec(align(alignment)) +#define STRUCT_END(name, size) \ + __pragma(pack()); \ + static_assert(sizeof(name) == size, "compiler breaks packing rules") +#elif defined(__GNUC__) || defined(__clang__) +#define MANUALLY_ALIGNED_STRUCT(alignment) \ + _Pragma("pack(1)") struct __attribute__((aligned(alignment))) +#define STRUCT_END(name, size) \ + _Pragma("pack()") static_assert(sizeof(name) == size, "compiler breaks packing rules") +#else +#error Unknown compiler, please define structure alignment macros +#endif +#endif // !defined(MANUALLY_ALIGNED_STRUCT) + +// ---------------------------------------------------------------------- +// Convenience macro disabling a 
particular UBSan check in a function + +#if defined(__clang__) +#define ARROW_DISABLE_UBSAN(feature) __attribute__((no_sanitize(feature))) +#else +#define ARROW_DISABLE_UBSAN(feature) +#endif + +// ---------------------------------------------------------------------- +// Machine information + +#if INTPTR_MAX == INT64_MAX +#define ARROW_BITNESS 64 +#elif INTPTR_MAX == INT32_MAX +#define ARROW_BITNESS 32 +#else +#error Unexpected INTPTR_MAX +#endif + +// ---------------------------------------------------------------------- +// From googletest +// (also in parquet-cpp) + +// When you need to test the private or protected members of a class, +// use the FRIEND_TEST macro to declare your tests as friends of the +// class. For example: +// +// class MyClass { +// private: +// void MyMethod(); +// FRIEND_TEST(MyClassTest, MyMethod); +// }; +// +// class MyClassTest : public testing::Test { +// // ... +// }; +// +// TEST_F(MyClassTest, MyMethod) { +// // Can call MyClass::MyMethod() here. +// } + +#define FRIEND_TEST(test_case_name, test_name) \ + friend class test_case_name##_##test_name##_Test diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/math_constants.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/math_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..7ee87c5d6ac8160c921ce83153e30112335ad7fe --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/math_constants.h @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +// Not provided by default in MSVC, +// and _USE_MATH_DEFINES is not reliable with unity builds +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif +#ifndef M_PI_2 +#define M_PI_2 1.57079632679489661923 +#endif +#ifndef M_PI_4 +#define M_PI_4 0.785398163397448309616 +#endif diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/memory.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/memory.h new file mode 100644 index 0000000000000000000000000000000000000000..4250d0694b7dd283aad6bbb159bd3e36328fe7ae --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/memory.h @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +// A helper function for doing memcpy with multiple threads. This is required +// to saturate the memory bandwidth of modern cpus. +void parallel_memcopy(uint8_t* dst, const uint8_t* src, int64_t nbytes, + uintptr_t block_size, int num_threads); + +// A helper function for checking if two wrapped objects implementing `Equals` +// are equal. +template +bool SharedPtrEquals(const std::shared_ptr& left, const std::shared_ptr& right) { + if (left == right) return true; + if (left == NULLPTR || right == NULLPTR) return false; + return left->Equals(*right); +} + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/mutex.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/mutex.h new file mode 100644 index 0000000000000000000000000000000000000000..ac63cf70cd9ae9c05189f89e2f96c4d216d09573 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/mutex.h @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +/// A wrapper around std::mutex since we can't use it directly in +/// public headers due to C++/CLI. +/// https://docs.microsoft.com/en-us/cpp/standard-library/mutex#remarks +class ARROW_EXPORT Mutex { + public: + Mutex(); + Mutex(Mutex&&) = default; + Mutex& operator=(Mutex&&) = default; + + /// A Guard is falsy if a lock could not be acquired. + class ARROW_EXPORT Guard { + public: + Guard() : locked_(NULLPTR, [](Mutex* mutex) {}) {} + Guard(Guard&&) = default; + Guard& operator=(Guard&&) = default; + + explicit operator bool() const { return bool(locked_); } + + void Unlock() { locked_.reset(); } + + private: + explicit Guard(Mutex* locked); + + std::unique_ptr locked_; + friend Mutex; + }; + + Guard TryLock(); + Guard Lock(); + + private: + struct Impl; + std::unique_ptr impl_; +}; + +#ifndef _WIN32 +/// Return a pointer to a process-wide, process-specific Mutex that can be used +/// at any point in a child process. NULL is returned when called in the parent. +/// +/// The rule is to first check that getpid() corresponds to the parent process pid +/// and, if not, call this function to lock any after-fork reinitialization code. +/// Like this: +/// +/// std::atomic pid{getpid()}; +/// ... +/// if (pid.load() != getpid()) { +/// // In child process +/// auto lock = GlobalForkSafeMutex()->Lock(); +/// if (pid.load() != getpid()) { +/// // Reinitialize internal structures after fork +/// ... 
+/// pid.store(getpid()); +ARROW_EXPORT +Mutex* GlobalForkSafeMutex(); +#endif + +} // namespace util +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/pcg_random.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/pcg_random.h new file mode 100644 index 0000000000000000000000000000000000000000..768f2328200fb2635213358226cfdb3f9273c808 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/pcg_random.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/vendored/pcg/pcg_random.hpp" // IWYU pragma: export + +namespace arrow { +namespace random { + +using pcg32 = ::arrow_vendored::pcg32; +using pcg64 = ::arrow_vendored::pcg64; +using pcg32_fast = ::arrow_vendored::pcg32_fast; +using pcg64_fast = ::arrow_vendored::pcg64_fast; +using pcg32_oneseq = ::arrow_vendored::pcg32_oneseq; +using pcg64_oneseq = ::arrow_vendored::pcg64_oneseq; + +} // namespace random +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/print.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/print.h new file mode 100644 index 0000000000000000000000000000000000000000..82cea473c5b277323772c6914ee28b1903b5240d --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/print.h @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
template + +#pragma once + +#include +#include "arrow/util/string.h" + +using arrow::internal::ToChars; + +namespace arrow { +namespace internal { + +namespace detail { + +template +struct TuplePrinter { + static void Print(OStream* os, const Tuple& t) { + TuplePrinter::Print(os, t); + *os << std::get(t); + } +}; + +template +struct TuplePrinter { + static void Print(OStream* os, const Tuple& t) {} +}; + +} // namespace detail + +// Print elements from a tuple to a stream, in order. +// Typical use is to pack a bunch of existing values with std::forward_as_tuple() +// before passing it to this function. +template +void PrintTuple(OStream* os, const std::tuple& tup) { + detail::TuplePrinter, sizeof...(Args)>::Print(os, tup); +} + +template +struct PrintVector { + const Range& range_; + const Separator& separator_; + + template // template to dodge inclusion of + friend Os& operator<<(Os& os, PrintVector l) { + bool first = true; + os << "["; + for (const auto& element : l.range_) { + if (first) { + first = false; + } else { + os << l.separator_; + } + os << ToChars(element); // use ToChars to avoid locale dependence + } + os << "]"; + return os; + } +}; +template +PrintVector(const Range&, const Separator&) -> PrintVector; +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/queue.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/queue.h new file mode 100644 index 0000000000000000000000000000000000000000..6c71fa6e155e8818801db2ccb18127d75d6364a8 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/queue.h @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/vendored/ProducerConsumerQueue.h" + +namespace arrow { +namespace util { + +template +using SpscQueue = arrow_vendored::folly::ProducerConsumerQueue; + +} +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/range.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/range.h new file mode 100644 index 0000000000000000000000000000000000000000..20553287985423970c228308742a7f85464a4a87 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/range.h @@ -0,0 +1,258 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace arrow::internal { + +/// Create a vector containing the values from start up to stop +template +std::vector Iota(T start, T stop) { + if (start > stop) { + return {}; + } + std::vector result(static_cast(stop - start)); + std::iota(result.begin(), result.end(), start); + return result; +} + +/// Create a vector containing the values from 0 up to length +template +std::vector Iota(T length) { + return Iota(static_cast(0), length); +} + +/// Create a range from a callable which takes a single index parameter +/// and returns the value of iterator on each call and a length. +/// Only iterators obtained from the same range should be compared, the +/// behaviour generally similar to other STL containers. +template +class LazyRange { + private: + // callable which generates the values + // has to be defined at the beginning of the class for type deduction + const Generator gen_; + // the length of the range + int64_t length_; +#ifdef _MSC_VER + // workaround to VS2010 not supporting decltype properly + // see https://stackoverflow.com/questions/21782846/decltype-for-class-member-function + static Generator gen_static_; +#endif + + public: +#ifdef _MSC_VER + using return_type = decltype(gen_static_(0)); +#else + using return_type = decltype(gen_(0)); +#endif + + /// Construct a new range from a callable and length + LazyRange(Generator gen, int64_t length) : gen_(gen), length_(length) {} + + // Class of the dependent iterator, created implicitly by begin and end + class RangeIter { + public: + using difference_type = int64_t; + using value_type = return_type; + using reference = const value_type&; + using pointer = const value_type*; + using iterator_category = std::forward_iterator_tag; + +#ifdef _MSC_VER + // msvc complains about unchecked iterators, + // see https://stackoverflow.com/questions/21655496/error-c4996-checked-iterators + using _Unchecked_type = typename 
LazyRange::RangeIter; +#endif + + RangeIter() = delete; + RangeIter(const RangeIter& other) = default; + RangeIter& operator=(const RangeIter& other) = default; + + RangeIter(const LazyRange& range, int64_t index) + : range_(&range), index_(index) {} + + const return_type operator*() const { return range_->gen_(index_); } + + RangeIter operator+(difference_type length) const { + return RangeIter(*range_, index_ + length); + } + + // pre-increment + RangeIter& operator++() { + ++index_; + return *this; + } + + // post-increment + RangeIter operator++(int) { + auto copy = RangeIter(*this); + ++index_; + return copy; + } + + bool operator==(const typename LazyRange::RangeIter& other) const { + return this->index_ == other.index_ && this->range_ == other.range_; + } + + bool operator!=(const typename LazyRange::RangeIter& other) const { + return this->index_ != other.index_ || this->range_ != other.range_; + } + + int64_t operator-(const typename LazyRange::RangeIter& other) const { + return this->index_ - other.index_; + } + + bool operator<(const typename LazyRange::RangeIter& other) const { + return this->index_ < other.index_; + } + + private: + // parent range reference + const LazyRange* range_; + // current index + int64_t index_; + }; + + friend class RangeIter; + + // Create a new begin const iterator + RangeIter begin() { return RangeIter(*this, 0); } + + // Create a new end const iterator + RangeIter end() { return RangeIter(*this, length_); } +}; + +/// Helper function to create a lazy range from a callable (e.g. lambda) and length +template +LazyRange MakeLazyRange(Generator&& gen, int64_t length) { + return LazyRange(std::forward(gen), length); +} + +/// \brief A helper for iterating multiple ranges simultaneously, similar to C++23's +/// zip() view adapter modelled after python's built-in zip() function. +/// +/// \code {.cpp} +/// const std::vector& tables = ... +/// std::function()> GetNames = ... 
+/// for (auto [table, name] : Zip(tables, GetNames())) { +/// static_assert(std::is_same_v); +/// static_assert(std::is_same_v); +/// // temporaries (like this vector of strings) are kept alive for the +/// // duration of a loop and are safely movable). +/// RegisterTableWithName(std::move(name), &table); +/// } +/// \endcode +/// +/// The zipped sequence ends as soon as any of its member ranges ends. +/// +/// Always use `auto` for the loop's declaration; it will always be a tuple +/// of references so for example using `const auto&` will compile but will +/// *look* like forcing const-ness even though the members of the tuple are +/// still mutable references. +/// +/// NOTE: we *could* make Zip a more full fledged range and enable things like +/// - gtest recognizing it as a container; it currently doesn't since Zip is +/// always mutable so this breaks: +/// EXPECT_THAT(Zip(std::vector{0}, std::vector{1}), +/// ElementsAre(std::tuple{0, 1})); +/// - letting it be random access when possible so we can do things like *sort* +/// parallel ranges +/// - ... +/// +/// However doing this will increase the compile time overhead of using Zip as +/// long as we're still using headers. Therefore until we can use c++20 modules: +/// *don't* extend Zip. +template +struct Zip; + +template +Zip(Ranges&&...) -> Zip, std::index_sequence_for>; + +template +struct Zip, std::index_sequence> { + explicit Zip(Ranges... ranges) : ranges_(std::forward(ranges)...) {} + + std::tuple ranges_; + + using sentinel = std::tuple(ranges_)))...>; + constexpr sentinel end() { return {std::end(std::get(ranges_))...}; } + + struct iterator : std::tuple(ranges_)))...> { + using std::tuple(ranges_)))...>::tuple; + + constexpr auto operator*() { + return std::tuple(*this))...>{*std::get(*this)...}; + } + + constexpr iterator& operator++() { + (++std::get(*this), ...); + return *this; + } + + constexpr bool operator!=(const sentinel& s) const { + bool all_iterators_valid = (... 
&& (std::get(*this) != std::get(s))); + return all_iterators_valid; + } + }; + constexpr iterator begin() { return {std::begin(std::get(ranges_))...}; } +}; + +/// \brief A lazy sequence of integers which starts from 0 and never stops. +/// +/// This can be used in conjunction with Zip() to emulate python's built-in +/// enumerate() function: +/// +/// \code {.cpp} +/// const std::vector& tables = ... +/// for (auto [i, table] : Zip(Enumerate<>, tables)) { +/// std::cout << "#" << i << ": " << table.name() << std::endl; +/// } +/// \endcode +template +constexpr auto Enumerate = [] { + struct { + struct sentinel {}; + constexpr sentinel end() const { return {}; } + + struct iterator { + I value{0}; + + constexpr I operator*() { return value; } + + constexpr iterator& operator++() { + ++value; + return *this; + } + + constexpr std::true_type operator!=(sentinel) const { return {}; } + }; + constexpr iterator begin() const { return {}; } + } out; + + return out; +}(); + +} // namespace arrow::internal diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/rle_encoding.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/rle_encoding.h new file mode 100644 index 0000000000000000000000000000000000000000..e0f5690062a049dd2485fe68461237eb6d9e0265 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/rle_encoding.h @@ -0,0 +1,826 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Imported from Apache Impala (incubating) on 2016-01-29 and modified for use +// in parquet-cpp, Arrow + +#pragma once + +#include +#include +#include +#include + +#include "arrow/util/bit_block_counter.h" +#include "arrow/util/bit_run_reader.h" +#include "arrow/util/bit_stream_utils.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace util { + +/// Utility classes to do run length encoding (RLE) for fixed bit width values. If runs +/// are sufficiently long, RLE is used, otherwise, the values are just bit-packed +/// (literal encoding). +/// For both types of runs, there is a byte-aligned indicator which encodes the length +/// of the run and the type of the run. +/// This encoding has the benefit that when there aren't any long enough runs, values +/// are always decoded at fixed (can be precomputed) bit offsets OR both the value and +/// the run length are byte aligned. This allows for very efficient decoding +/// implementations. +/// The encoding is: +/// encoded-block := run* +/// run := literal-run | repeated-run +/// literal-run := literal-indicator < literal bytes > +/// repeated-run := repeated-indicator < repeated value. padded to byte boundary > +/// literal-indicator := varint_encode( number_of_groups << 1 | 1) +/// repeated-indicator := varint_encode( number_of_repetitions << 1 ) +// +/// Each run is preceded by a varint. The varint's least significant bit is +/// used to indicate whether the run is a literal run or a repeated run. 
The rest +/// of the varint is used to determine the length of the run (eg how many times the +/// value repeats). +// +/// In the case of literal runs, the run length is always a multiple of 8 (i.e. encode +/// in groups of 8), so that no matter the bit-width of the value, the sequence will end +/// on a byte boundary without padding. +/// Given that we know it is a multiple of 8, we store the number of 8-groups rather than +/// the actual number of encoded ints. (This means that the total number of encoded values +/// cannot be determined from the encoded data, since the number of values in the last +/// group may not be a multiple of 8). For the last group of literal runs, we pad +/// the group to 8 with zeros. This allows for 8 at a time decoding on the read side +/// without the need for additional checks. +// +/// There is a break-even point when it is more storage efficient to do run length +/// encoding. For 1 bit-width values, that point is 8 values. They require 2 bytes +/// for both the repeated encoding or the literal encoding. This value can always +/// be computed based on the bit-width. +/// TODO: think about how to use this for strings. The bit packing isn't quite the same. +// +/// Examples with bit-width 1 (eg encoding booleans): +/// ---------------------------------------- +/// 100 1s followed by 100 0s: +/// <1, padded to 1 byte> <0, padded to 1 byte> +/// - (total 4 bytes) +// +/// alternating 1s and 0s (200 total): +/// 200 ints = 25 groups of 8 +/// <25 bytes of values, bitpacked> +/// (total 26 bytes, 1 byte overhead) +// + +/// Decoder class for RLE encoded data. +class RleDecoder { + public: + /// Create a decoder object. buffer/buffer_len is the decoded data. + /// bit_width is the width of each value (before encoding). 
+ RleDecoder(const uint8_t* buffer, int buffer_len, int bit_width) + : bit_reader_(buffer, buffer_len), + bit_width_(bit_width), + current_value_(0), + repeat_count_(0), + literal_count_(0) { + DCHECK_GE(bit_width_, 0); + DCHECK_LE(bit_width_, 64); + } + + RleDecoder() : bit_width_(-1) {} + + void Reset(const uint8_t* buffer, int buffer_len, int bit_width) { + DCHECK_GE(bit_width, 0); + DCHECK_LE(bit_width, 64); + bit_reader_.Reset(buffer, buffer_len); + bit_width_ = bit_width; + current_value_ = 0; + repeat_count_ = 0; + literal_count_ = 0; + } + + /// Gets the next value. Returns false if there are no more. + template + bool Get(T* val); + + /// Gets a batch of values. Returns the number of decoded elements. + template + int GetBatch(T* values, int batch_size); + + /// Like GetBatch but add spacing for null entries + template + int GetBatchSpaced(int batch_size, int null_count, const uint8_t* valid_bits, + int64_t valid_bits_offset, T* out); + + /// Like GetBatch but the values are then decoded using the provided dictionary + template + int GetBatchWithDict(const T* dictionary, int32_t dictionary_length, T* values, + int batch_size); + + /// Like GetBatchWithDict but add spacing for null entries + /// + /// Null entries will be zero-initialized in `values` to avoid leaking + /// private data. + template + int GetBatchWithDictSpaced(const T* dictionary, int32_t dictionary_length, T* values, + int batch_size, int null_count, const uint8_t* valid_bits, + int64_t valid_bits_offset); + + protected: + ::arrow::bit_util::BitReader bit_reader_; + /// Number of bits needed to encode the value. Must be between 0 and 64. + int bit_width_; + uint64_t current_value_; + int32_t repeat_count_; + int32_t literal_count_; + + private: + /// Fills literal_count_ and repeat_count_ with next values. Returns false if there + /// are no more. + template + bool NextCounts(); + + /// Utility methods for retrieving spaced values. 
+ template + int GetSpaced(Converter converter, int batch_size, int null_count, + const uint8_t* valid_bits, int64_t valid_bits_offset, T* out); +}; + +/// Class to incrementally build the rle data. This class does not allocate any memory. +/// The encoding has two modes: encoding repeated runs and literal runs. +/// If the run is sufficiently short, it is more efficient to encode as a literal run. +/// This class does so by buffering 8 values at a time. If they are not all the same +/// they are added to the literal run. If they are the same, they are added to the +/// repeated run. When we switch modes, the previous run is flushed out. +class RleEncoder { + public: + /// buffer/buffer_len: preallocated output buffer. + /// bit_width: max number of bits for value. + /// TODO: consider adding a min_repeated_run_length so the caller can control + /// when values should be encoded as repeated runs. Currently this is derived + /// based on the bit_width, which can determine a storage optimal choice. + /// TODO: allow 0 bit_width (and have dict encoder use it) + RleEncoder(uint8_t* buffer, int buffer_len, int bit_width) + : bit_width_(bit_width), bit_writer_(buffer, buffer_len) { + DCHECK_GE(bit_width_, 0); + DCHECK_LE(bit_width_, 64); + max_run_byte_size_ = MinBufferSize(bit_width); + DCHECK_GE(buffer_len, max_run_byte_size_) << "Input buffer not big enough."; + Clear(); + } + + /// Returns the minimum buffer size needed to use the encoder for 'bit_width' + /// This is the maximum length of a single run for 'bit_width'. + /// It is not valid to pass a buffer less than this length. + static int MinBufferSize(int bit_width) { + /// 1 indicator byte and MAX_VALUES_PER_LITERAL_RUN 'bit_width' values. + int max_literal_run_size = 1 + static_cast(::arrow::bit_util::BytesForBits( + MAX_VALUES_PER_LITERAL_RUN * bit_width)); + /// Up to kMaxVlqByteLength indicator and a single 'bit_width' value. 
+ int max_repeated_run_size = + ::arrow::bit_util::BitReader::kMaxVlqByteLength + + static_cast(::arrow::bit_util::BytesForBits(bit_width)); + return std::max(max_literal_run_size, max_repeated_run_size); + } + + /// Returns the maximum byte size it could take to encode 'num_values'. + static int MaxBufferSize(int bit_width, int num_values) { + // For a bit_width > 1, the worst case is the repetition of "literal run of length 8 + // and then a repeated run of length 8". + // 8 values per smallest run, 8 bits per byte + int bytes_per_run = bit_width; + int num_runs = static_cast(::arrow::bit_util::CeilDiv(num_values, 8)); + int literal_max_size = num_runs + num_runs * bytes_per_run; + + // In the very worst case scenario, the data is a concatenation of repeated + // runs of 8 values. Repeated run has a 1 byte varint followed by the + // bit-packed repeated value + int min_repeated_run_size = + 1 + static_cast(::arrow::bit_util::BytesForBits(bit_width)); + int repeated_max_size = num_runs * min_repeated_run_size; + + return std::max(literal_max_size, repeated_max_size); + } + + /// Encode value. Returns true if the value fits in buffer, false otherwise. + /// This value must be representable with bit_width_ bits. + bool Put(uint64_t value); + + /// Flushes any pending values to the underlying buffer. + /// Returns the total number of bytes written + int Flush(); + + /// Resets all the state in the encoder. + void Clear(); + + /// Returns pointer to underlying buffer + uint8_t* buffer() { return bit_writer_.buffer(); } + int32_t len() { return bit_writer_.bytes_written(); } + + private: + /// Flushes any buffered values. If this is part of a repeated run, this is largely + /// a no-op. + /// If it is part of a literal run, this will call FlushLiteralRun, which writes + /// out the buffered literal values. + /// If 'done' is true, the current run would be written even if it would normally + /// have been buffered more. 
This should only be called at the end, when the + /// encoder has received all values even if it would normally continue to be + /// buffered. + void FlushBufferedValues(bool done); + + /// Flushes literal values to the underlying buffer. If update_indicator_byte, + /// then the current literal run is complete and the indicator byte is updated. + void FlushLiteralRun(bool update_indicator_byte); + + /// Flushes a repeated run to the underlying buffer. + void FlushRepeatedRun(); + + /// Checks and sets buffer_full_. This must be called after flushing a run to + /// make sure there are enough bytes remaining to encode the next run. + void CheckBufferFull(); + + /// The maximum number of values in a single literal run + /// (number of groups encodable by a 1-byte indicator * 8) + static const int MAX_VALUES_PER_LITERAL_RUN = (1 << 6) * 8; + + /// Number of bits needed to encode the value. Must be between 0 and 64. + const int bit_width_; + + /// Underlying buffer. + ::arrow::bit_util::BitWriter bit_writer_; + + /// If true, the buffer is full and subsequent Put()'s will fail. + bool buffer_full_; + + /// The maximum byte size a single run can take. + int max_run_byte_size_; + + /// We need to buffer at most 8 values for literals. This happens when the + /// bit_width is 1 (so 8 values fit in one byte). + /// TODO: generalize this to other bit widths + int64_t buffered_values_[8]; + + /// Number of values in buffered_values_ + int num_buffered_values_; + + /// The current (also last) value that was written and the count of how + /// many times in a row that value has been seen. This is maintained even + /// if we are in a literal run. If the repeat_count_ get high enough, we switch + /// to encoding repeated runs. + uint64_t current_value_; + int repeat_count_; + + /// Number of literals in the current run. This does not include the literals + /// that might be in buffered_values_. 
Only after we've got a group big enough + /// can we decide if they should part of the literal_count_ or repeat_count_ + int literal_count_; + + /// Pointer to a byte in the underlying buffer that stores the indicator byte. + /// This is reserved as soon as we need a literal run but the value is written + /// when the literal run is complete. + uint8_t* literal_indicator_byte_; +}; + +template +inline bool RleDecoder::Get(T* val) { + return GetBatch(val, 1) == 1; +} + +template +inline int RleDecoder::GetBatch(T* values, int batch_size) { + DCHECK_GE(bit_width_, 0); + int values_read = 0; + + auto* out = values; + + while (values_read < batch_size) { + int remaining = batch_size - values_read; + + if (repeat_count_ > 0) { // Repeated value case. + int repeat_batch = std::min(remaining, repeat_count_); + std::fill(out, out + repeat_batch, static_cast(current_value_)); + + repeat_count_ -= repeat_batch; + values_read += repeat_batch; + out += repeat_batch; + } else if (literal_count_ > 0) { + int literal_batch = std::min(remaining, literal_count_); + int actual_read = bit_reader_.GetBatch(bit_width_, out, literal_batch); + if (actual_read != literal_batch) { + return values_read; + } + + literal_count_ -= literal_batch; + values_read += literal_batch; + out += literal_batch; + } else { + if (!NextCounts()) return values_read; + } + } + + return values_read; +} + +template +inline int RleDecoder::GetSpaced(Converter converter, int batch_size, int null_count, + const uint8_t* valid_bits, int64_t valid_bits_offset, + T* out) { + if (ARROW_PREDICT_FALSE(null_count == batch_size)) { + converter.FillZero(out, out + batch_size); + return batch_size; + } + + DCHECK_GE(bit_width_, 0); + int values_read = 0; + int values_remaining = batch_size - null_count; + + // Assume no bits to start. 
+ arrow::internal::BitRunReader bit_reader(valid_bits, valid_bits_offset, + /*length=*/batch_size); + arrow::internal::BitRun valid_run = bit_reader.NextRun(); + while (values_read < batch_size) { + if (ARROW_PREDICT_FALSE(valid_run.length == 0)) { + valid_run = bit_reader.NextRun(); + } + + DCHECK_GT(batch_size, 0); + DCHECK_GT(valid_run.length, 0); + + if (valid_run.set) { + if ((repeat_count_ == 0) && (literal_count_ == 0)) { + if (!NextCounts()) return values_read; + DCHECK((repeat_count_ > 0) ^ (literal_count_ > 0)); + } + + if (repeat_count_ > 0) { + int repeat_batch = 0; + // Consume the entire repeat counts incrementing repeat_batch to + // be the total of nulls + values consumed, we only need to + // get the total count because we can fill in the same value for + // nulls and non-nulls. This proves to be a big efficiency win. + while (repeat_count_ > 0 && (values_read + repeat_batch) < batch_size) { + DCHECK_GT(valid_run.length, 0); + if (valid_run.set) { + int update_size = std::min(static_cast(valid_run.length), repeat_count_); + repeat_count_ -= update_size; + repeat_batch += update_size; + valid_run.length -= update_size; + values_remaining -= update_size; + } else { + // We can consume all nulls here because we would do so on + // the next loop anyways. 
+ repeat_batch += static_cast(valid_run.length); + valid_run.length = 0; + } + if (valid_run.length == 0) { + valid_run = bit_reader.NextRun(); + } + } + RunType current_value = static_cast(current_value_); + if (ARROW_PREDICT_FALSE(!converter.IsValid(current_value))) { + return values_read; + } + converter.Fill(out, out + repeat_batch, current_value); + out += repeat_batch; + values_read += repeat_batch; + } else if (literal_count_ > 0) { + int literal_batch = std::min(values_remaining, literal_count_); + DCHECK_GT(literal_batch, 0); + + // Decode the literals + constexpr int kBufferSize = 1024; + RunType indices[kBufferSize]; + literal_batch = std::min(literal_batch, kBufferSize); + int actual_read = bit_reader_.GetBatch(bit_width_, indices, literal_batch); + if (ARROW_PREDICT_FALSE(actual_read != literal_batch)) { + return values_read; + } + if (!converter.IsValid(indices, /*length=*/actual_read)) { + return values_read; + } + int skipped = 0; + int literals_read = 0; + while (literals_read < literal_batch) { + if (valid_run.set) { + int update_size = std::min(literal_batch - literals_read, + static_cast(valid_run.length)); + converter.Copy(out, indices + literals_read, update_size); + literals_read += update_size; + out += update_size; + valid_run.length -= update_size; + } else { + converter.FillZero(out, out + valid_run.length); + out += valid_run.length; + skipped += static_cast(valid_run.length); + valid_run.length = 0; + } + if (valid_run.length == 0) { + valid_run = bit_reader.NextRun(); + } + } + literal_count_ -= literal_batch; + values_remaining -= literal_batch; + values_read += literal_batch + skipped; + } + } else { + converter.FillZero(out, out + valid_run.length); + out += valid_run.length; + values_read += static_cast(valid_run.length); + valid_run.length = 0; + } + } + DCHECK_EQ(valid_run.length, 0); + DCHECK_EQ(values_remaining, 0); + return values_read; +} + +// Converter for GetSpaced that handles runs that get returned +// directly as 
output. +template +struct PlainRleConverter { + T kZero = {}; + inline bool IsValid(const T& values) const { return true; } + inline bool IsValid(const T* values, int32_t length) const { return true; } + inline void Fill(T* begin, T* end, const T& run_value) const { + std::fill(begin, end, run_value); + } + inline void FillZero(T* begin, T* end) { std::fill(begin, end, kZero); } + inline void Copy(T* out, const T* values, int length) const { + std::memcpy(out, values, length * sizeof(T)); + } +}; + +template +inline int RleDecoder::GetBatchSpaced(int batch_size, int null_count, + const uint8_t* valid_bits, + int64_t valid_bits_offset, T* out) { + if (null_count == 0) { + return GetBatch(out, batch_size); + } + + PlainRleConverter converter; + arrow::internal::BitBlockCounter block_counter(valid_bits, valid_bits_offset, + batch_size); + + int total_processed = 0; + int processed = 0; + arrow::internal::BitBlockCount block; + + do { + block = block_counter.NextFourWords(); + if (block.length == 0) { + break; + } + if (block.AllSet()) { + processed = GetBatch(out, block.length); + } else if (block.NoneSet()) { + converter.FillZero(out, out + block.length); + processed = block.length; + } else { + processed = GetSpaced>( + converter, block.length, block.length - block.popcount, valid_bits, + valid_bits_offset, out); + } + total_processed += processed; + out += block.length; + valid_bits_offset += block.length; + } while (processed == block.length); + return total_processed; +} + +static inline bool IndexInRange(int32_t idx, int32_t dictionary_length) { + return idx >= 0 && idx < dictionary_length; +} + +// Converter for GetSpaced that handles runs of returned dictionary +// indices. 
+template +struct DictionaryConverter { + T kZero = {}; + const T* dictionary; + int32_t dictionary_length; + + inline bool IsValid(int32_t value) { return IndexInRange(value, dictionary_length); } + + inline bool IsValid(const int32_t* values, int32_t length) const { + using IndexType = int32_t; + IndexType min_index = std::numeric_limits::max(); + IndexType max_index = std::numeric_limits::min(); + for (int x = 0; x < length; x++) { + min_index = std::min(values[x], min_index); + max_index = std::max(values[x], max_index); + } + + return IndexInRange(min_index, dictionary_length) && + IndexInRange(max_index, dictionary_length); + } + inline void Fill(T* begin, T* end, const int32_t& run_value) const { + std::fill(begin, end, dictionary[run_value]); + } + inline void FillZero(T* begin, T* end) { std::fill(begin, end, kZero); } + + inline void Copy(T* out, const int32_t* values, int length) const { + for (int x = 0; x < length; x++) { + out[x] = dictionary[values[x]]; + } + } +}; + +template +inline int RleDecoder::GetBatchWithDict(const T* dictionary, int32_t dictionary_length, + T* values, int batch_size) { + // Per https://github.com/apache/parquet-format/blob/master/Encodings.md, + // the maximum dictionary index width in Parquet is 32 bits. 
+ using IndexType = int32_t; + DictionaryConverter converter; + converter.dictionary = dictionary; + converter.dictionary_length = dictionary_length; + + DCHECK_GE(bit_width_, 0); + int values_read = 0; + + auto* out = values; + + while (values_read < batch_size) { + int remaining = batch_size - values_read; + + if (repeat_count_ > 0) { + auto idx = static_cast(current_value_); + if (ARROW_PREDICT_FALSE(!IndexInRange(idx, dictionary_length))) { + return values_read; + } + T val = dictionary[idx]; + + int repeat_batch = std::min(remaining, repeat_count_); + std::fill(out, out + repeat_batch, val); + + /* Upkeep counters */ + repeat_count_ -= repeat_batch; + values_read += repeat_batch; + out += repeat_batch; + } else if (literal_count_ > 0) { + constexpr int kBufferSize = 1024; + IndexType indices[kBufferSize]; + + int literal_batch = std::min(remaining, literal_count_); + literal_batch = std::min(literal_batch, kBufferSize); + + int actual_read = bit_reader_.GetBatch(bit_width_, indices, literal_batch); + if (ARROW_PREDICT_FALSE(actual_read != literal_batch)) { + return values_read; + } + if (ARROW_PREDICT_FALSE(!converter.IsValid(indices, /*length=*/literal_batch))) { + return values_read; + } + converter.Copy(out, indices, literal_batch); + + /* Upkeep counters */ + literal_count_ -= literal_batch; + values_read += literal_batch; + out += literal_batch; + } else { + if (!NextCounts()) return values_read; + } + } + + return values_read; +} + +template +inline int RleDecoder::GetBatchWithDictSpaced(const T* dictionary, + int32_t dictionary_length, T* out, + int batch_size, int null_count, + const uint8_t* valid_bits, + int64_t valid_bits_offset) { + if (null_count == 0) { + return GetBatchWithDict(dictionary, dictionary_length, out, batch_size); + } + arrow::internal::BitBlockCounter block_counter(valid_bits, valid_bits_offset, + batch_size); + using IndexType = int32_t; + DictionaryConverter converter; + converter.dictionary = dictionary; + 
converter.dictionary_length = dictionary_length; + + int total_processed = 0; + int processed = 0; + arrow::internal::BitBlockCount block; + do { + block = block_counter.NextFourWords(); + if (block.length == 0) { + break; + } + if (block.AllSet()) { + processed = GetBatchWithDict(dictionary, dictionary_length, out, block.length); + } else if (block.NoneSet()) { + converter.FillZero(out, out + block.length); + processed = block.length; + } else { + processed = GetSpaced>( + converter, block.length, block.length - block.popcount, valid_bits, + valid_bits_offset, out); + } + total_processed += processed; + out += block.length; + valid_bits_offset += block.length; + } while (processed == block.length); + return total_processed; +} + +template +bool RleDecoder::NextCounts() { + // Read the next run's indicator int, it could be a literal or repeated run. + // The int is encoded as a vlq-encoded value. + uint32_t indicator_value = 0; + if (!bit_reader_.GetVlqInt(&indicator_value)) return false; + + // lsb indicates if it is a literal run or repeated run + bool is_literal = indicator_value & 1; + uint32_t count = indicator_value >> 1; + if (is_literal) { + if (ARROW_PREDICT_FALSE(count == 0 || count > static_cast(INT32_MAX) / 8)) { + return false; + } + literal_count_ = count * 8; + } else { + if (ARROW_PREDICT_FALSE(count == 0 || count > static_cast(INT32_MAX))) { + return false; + } + repeat_count_ = count; + T value = {}; + if (!bit_reader_.GetAligned( + static_cast(::arrow::bit_util::CeilDiv(bit_width_, 8)), &value)) { + return false; + } + current_value_ = static_cast(value); + } + return true; +} + +/// This function buffers input values 8 at a time. After seeing all 8 values, +/// it decides whether they should be encoded as a literal or repeated run. 
+inline bool RleEncoder::Put(uint64_t value) { + DCHECK(bit_width_ == 64 || value < (1ULL << bit_width_)); + if (ARROW_PREDICT_FALSE(buffer_full_)) return false; + + if (ARROW_PREDICT_TRUE(current_value_ == value)) { + ++repeat_count_; + if (repeat_count_ > 8) { + // This is just a continuation of the current run, no need to buffer the + // values. + // Note that this is the fast path for long repeated runs. + return true; + } + } else { + if (repeat_count_ >= 8) { + // We had a run that was long enough but it has ended. Flush the + // current repeated run. + DCHECK_EQ(literal_count_, 0); + FlushRepeatedRun(); + } + repeat_count_ = 1; + current_value_ = value; + } + + buffered_values_[num_buffered_values_] = value; + if (++num_buffered_values_ == 8) { + DCHECK_EQ(literal_count_ % 8, 0); + FlushBufferedValues(false); + } + return true; +} + +inline void RleEncoder::FlushLiteralRun(bool update_indicator_byte) { + if (literal_indicator_byte_ == NULL) { + // The literal indicator byte has not been reserved yet, get one now. + literal_indicator_byte_ = bit_writer_.GetNextBytePtr(); + DCHECK(literal_indicator_byte_ != NULL); + } + + // Write all the buffered values as bit packed literals + for (int i = 0; i < num_buffered_values_; ++i) { + bool success = bit_writer_.PutValue(buffered_values_[i], bit_width_); + DCHECK(success) << "There is a bug in using CheckBufferFull()"; + } + num_buffered_values_ = 0; + + if (update_indicator_byte) { + // At this point we need to write the indicator byte for the literal run. + // We only reserve one byte, to allow for streaming writes of literal values. + // The logic makes sure we flush literal runs often enough to not overrun + // the 1 byte. 
+ DCHECK_EQ(literal_count_ % 8, 0); + int num_groups = literal_count_ / 8; + int32_t indicator_value = (num_groups << 1) | 1; + DCHECK_EQ(indicator_value & 0xFFFFFF00, 0); + *literal_indicator_byte_ = static_cast(indicator_value); + literal_indicator_byte_ = NULL; + literal_count_ = 0; + CheckBufferFull(); + } +} + +inline void RleEncoder::FlushRepeatedRun() { + DCHECK_GT(repeat_count_, 0); + bool result = true; + // The lsb of 0 indicates this is a repeated run + int32_t indicator_value = repeat_count_ << 1 | 0; + result &= bit_writer_.PutVlqInt(static_cast(indicator_value)); + result &= bit_writer_.PutAligned( + current_value_, static_cast(::arrow::bit_util::CeilDiv(bit_width_, 8))); + DCHECK(result); + num_buffered_values_ = 0; + repeat_count_ = 0; + CheckBufferFull(); +} + +/// Flush the values that have been buffered. At this point we decide whether +/// we need to switch between the run types or continue the current one. +inline void RleEncoder::FlushBufferedValues(bool done) { + if (repeat_count_ >= 8) { + // Clear the buffered values. They are part of the repeated run now and we + // don't want to flush them out as literals. + num_buffered_values_ = 0; + if (literal_count_ != 0) { + // There was a current literal run. All the values in it have been flushed + // but we still need to update the indicator byte. + DCHECK_EQ(literal_count_ % 8, 0); + DCHECK_EQ(repeat_count_, 8); + FlushLiteralRun(true); + } + DCHECK_EQ(literal_count_, 0); + return; + } + + literal_count_ += num_buffered_values_; + DCHECK_EQ(literal_count_ % 8, 0); + int num_groups = literal_count_ / 8; + if (num_groups + 1 >= (1 << 6)) { + // We need to start a new literal run because the indicator byte we've reserved + // cannot store more values. 
+ DCHECK(literal_indicator_byte_ != NULL); + FlushLiteralRun(true); + } else { + FlushLiteralRun(done); + } + repeat_count_ = 0; +} + +inline int RleEncoder::Flush() { + if (literal_count_ > 0 || repeat_count_ > 0 || num_buffered_values_ > 0) { + bool all_repeat = literal_count_ == 0 && (repeat_count_ == num_buffered_values_ || + num_buffered_values_ == 0); + // There is something pending, figure out if it's a repeated or literal run + if (repeat_count_ > 0 && all_repeat) { + FlushRepeatedRun(); + } else { + DCHECK_EQ(literal_count_ % 8, 0); + // Buffer the last group of literals to 8 by padding with 0s. + for (; num_buffered_values_ != 0 && num_buffered_values_ < 8; + ++num_buffered_values_) { + buffered_values_[num_buffered_values_] = 0; + } + literal_count_ += num_buffered_values_; + FlushLiteralRun(true); + repeat_count_ = 0; + } + } + bit_writer_.Flush(); + DCHECK_EQ(num_buffered_values_, 0); + DCHECK_EQ(literal_count_, 0); + DCHECK_EQ(repeat_count_, 0); + + return bit_writer_.bytes_written(); +} + +inline void RleEncoder::CheckBufferFull() { + int bytes_written = bit_writer_.bytes_written(); + if (bytes_written + max_run_byte_size_ > bit_writer_.buffer_len()) { + buffer_full_ = true; + } +} + +inline void RleEncoder::Clear() { + buffer_full_ = false; + current_value_ = 0; + repeat_count_ = 0; + num_buffered_values_ = 0; + literal_count_ = 0; + literal_indicator_byte_ = NULL; + bit_writer_.Clear(); +} + +} // namespace util +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/simd.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/simd.h new file mode 100644 index 0000000000000000000000000000000000000000..ee9105d5f4beb431f155f8b47b7efdcc72452bc5 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/simd.h @@ -0,0 +1,44 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#ifdef _MSC_VER +// MSVC x86_64/arm64 + +#if defined(_M_AMD64) || defined(_M_X64) +#include +#endif + +#else +// gcc/clang (possibly others) + +#if defined(ARROW_HAVE_BMI2) +#include +#endif + +#if defined(ARROW_HAVE_AVX2) || defined(ARROW_HAVE_AVX512) +#include +#elif defined(ARROW_HAVE_SSE4_2) +#include +#endif + +#ifdef ARROW_HAVE_NEON +#include +#endif + +#endif diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h new file mode 100644 index 0000000000000000000000000000000000000000..cdffe0b2317e5ba555c37ec16e5294bc912a49d4 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace arrow { +namespace internal { + +template > +std::vector ArgSort(const std::vector& values, Cmp&& cmp = {}) { + std::vector indices(values.size()); + std::iota(indices.begin(), indices.end(), 0); + std::sort(indices.begin(), indices.end(), + [&](int64_t i, int64_t j) -> bool { return cmp(values[i], values[j]); }); + return indices; +} + +template +size_t Permute(const std::vector& indices, std::vector* values) { + if (indices.size() <= 1) { + return indices.size(); + } + + // mask indicating which of values are in the correct location + std::vector sorted(indices.size(), false); + + size_t cycle_count = 0; + + for (auto cycle_start = sorted.begin(); cycle_start != sorted.end(); + cycle_start = std::find(cycle_start, sorted.end(), false)) { + ++cycle_count; + + // position in which an element belongs WRT sort + auto sort_into = static_cast(cycle_start - sorted.begin()); + + if (indices[sort_into] == sort_into) { + // trivial cycle + sorted[sort_into] = true; + continue; + } + + // resolve this cycle + const auto end = sort_into; + for (int64_t take_from = indices[sort_into]; take_from != end; + take_from = indices[sort_into]) { + std::swap(values->at(sort_into), values->at(take_from)); + sorted[sort_into] = true; + sort_into = take_from; + } + sorted[sort_into] = true; + } + + return cycle_count; +} + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/string.h 
b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/string.h new file mode 100644 index 0000000000000000000000000000000000000000..d7e377773f62f810d330c40e565d5acda0aabd4c --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/string.h @@ -0,0 +1,173 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#if __has_include() +#include +#endif + +#include "arrow/result.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Status; + +ARROW_EXPORT std::string HexEncode(const uint8_t* data, size_t length); + +ARROW_EXPORT std::string Escape(const char* data, size_t length); + +ARROW_EXPORT std::string HexEncode(const char* data, size_t length); + +ARROW_EXPORT std::string HexEncode(std::string_view str); + +ARROW_EXPORT std::string Escape(std::string_view str); + +ARROW_EXPORT Status ParseHexValue(const char* hex_pair, uint8_t* out); + +ARROW_EXPORT Status ParseHexValues(std::string_view hex_string, uint8_t* out); + +namespace internal { + +/// Like std::string_view::starts_with in C++20 +inline bool StartsWith(std::string_view s, std::string_view prefix) { + return s.length() >= prefix.length() && + (s.empty() || s.substr(0, prefix.length()) == prefix); +} + +/// Like std::string_view::ends_with in C++20 +inline bool EndsWith(std::string_view s, std::string_view suffix) { + return s.length() >= suffix.length() && + (s.empty() || s.substr(s.length() - suffix.length()) == suffix); +} + +/// \brief Split a string with a delimiter +ARROW_EXPORT +std::vector SplitString(std::string_view v, char delim, + int64_t limit = 0); + +/// \brief Join strings with a delimiter +ARROW_EXPORT +std::string JoinStrings(const std::vector& strings, + std::string_view delimiter); + +/// \brief Join strings with a delimiter +ARROW_EXPORT +std::string JoinStrings(const std::vector& strings, + std::string_view delimiter); + +/// \brief Trim whitespace from left and right sides of string +ARROW_EXPORT +std::string TrimString(std::string value); + +ARROW_EXPORT +bool AsciiEqualsCaseInsensitive(std::string_view left, std::string_view right); + +ARROW_EXPORT +std::string AsciiToLower(std::string_view value); + +ARROW_EXPORT +std::string AsciiToUpper(std::string_view value); + +/// \brief Search 
for the first instance of a token and replace it or return nullopt if +/// the token is not found. +ARROW_EXPORT +std::optional Replace(std::string_view s, std::string_view token, + std::string_view replacement); + +/// \brief Get boolean value from string +/// +/// If "1", "true" (case-insensitive), returns true +/// If "0", "false" (case-insensitive), returns false +/// Otherwise, returns Status::Invalid +ARROW_EXPORT +arrow::Result ParseBoolean(std::string_view value); + +#if __has_include() + +namespace detail { +template +struct can_to_chars : public std::false_type {}; + +template +struct can_to_chars< + T, std::void_t(), std::declval(), + std::declval>()))>> + : public std::true_type {}; +} // namespace detail + +/// \brief Whether std::to_chars exists for the current value type. +/// +/// This is useful as some C++ libraries do not implement all specified overloads +/// for std::to_chars. +template +inline constexpr bool have_to_chars = detail::can_to_chars::value; + +/// \brief An ergonomic wrapper around std::to_chars, returning a std::string +/// +/// For most inputs, the std::string result will not incur any heap allocation +/// thanks to small string optimization. +/// +/// Compared to std::to_string, this function gives locale-agnostic results +/// and might also be faster. +template +std::string ToChars(T value, Args&&... args) { + if constexpr (!have_to_chars) { + // Some C++ standard libraries do not yet implement std::to_chars for all types, + // in which case we have to fallback to std::string. + return std::to_string(value); + } else { + // According to various sources, the GNU libstdc++ and Microsoft's C++ STL + // allow up to 15 bytes of small string optimization, while clang's libc++ + // goes up to 22 bytes. Choose the pessimistic value. 
+ std::string out(15, 0); + auto res = std::to_chars(&out.front(), &out.back(), value, args...); + while (res.ec != std::errc{}) { + assert(res.ec == std::errc::value_too_large); + out.resize(out.capacity() * 2); + res = std::to_chars(&out.front(), &out.back(), value, args...); + } + const auto length = res.ptr - out.data(); + assert(length <= static_cast(out.length())); + out.resize(length); + return out; + } +} + +#else // !__has_include() + +template +inline constexpr bool have_to_chars = false; + +template +std::string ToChars(T value, Args&&... args) { + return std::to_string(value); +} + +#endif + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/tdigest.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/tdigest.h new file mode 100644 index 0000000000000000000000000000000000000000..308df468840eb299ac35f1e308a643df4b8e0e4d --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/tdigest.h @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// approximate quantiles from arbitrary length dataset with O(1) space +// based on 'Computing Extremely Accurate Quantiles Using t-Digests' from Dunning & Ertl +// - https://arxiv.org/abs/1902.04023 +// - https://github.com/tdunning/t-digest + +#pragma once + +#include +#include +#include + +#include "arrow/util/logging.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Status; + +namespace internal { + +class ARROW_EXPORT TDigest { + public: + explicit TDigest(uint32_t delta = 100, uint32_t buffer_size = 500); + ~TDigest(); + TDigest(TDigest&&); + TDigest& operator=(TDigest&&); + + // reset and re-use this tdigest + void Reset(); + + // validate data integrity + Status Validate() const; + + // dump internal data, only for debug + void Dump() const; + + // buffer a single data point, consume internal buffer if full + // this function is intensively called and performance critical + // call it only if you are sure no NAN exists in input data + void Add(double value) { + DCHECK(!std::isnan(value)) << "cannot add NAN"; + if (ARROW_PREDICT_FALSE(input_.size() == input_.capacity())) { + MergeInput(); + } + input_.push_back(value); + } + + // skip NAN on adding + template + typename std::enable_if::value>::type NanAdd(T value) { + if (!std::isnan(value)) Add(value); + } + + template + typename std::enable_if::value>::type NanAdd(T value) { + Add(static_cast(value)); + } + + // merge with other t-digests, called infrequently + void Merge(const std::vector& others); + void Merge(const TDigest& other); + + // calculate quantile + double Quantile(double q) const; + + double Min() const { return Quantile(0); } + double Max() const { return Quantile(1); } + double Mean() const; + + // check if this tdigest contains no valid data points + bool is_empty() const; + + private: + // merge input data with current tdigest + void MergeInput() const; + + // input buffer, size = buffer_size * sizeof(double) + mutable std::vector 
input_; + + // hide other members with pimpl + class TDigestImpl; + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/test_common.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/test_common.h new file mode 100644 index 0000000000000000000000000000000000000000..511daed1ecaac688b6d444349bf1c63fb6c53ad6 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/test_common.h @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include "arrow/testing/gtest_util.h" +#include "arrow/util/iterator.h" + +namespace arrow { + +struct TestInt { + TestInt(); + TestInt(int i); // NOLINT runtime/explicit + int value; + + bool operator==(const TestInt& other) const; + + friend std::ostream& operator<<(std::ostream& os, const TestInt& v); +}; + +template <> +struct IterationTraits { + static TestInt End() { return TestInt(); } + static bool IsEnd(const TestInt& val) { return val == IterationTraits::End(); } +}; + +struct TestStr { + TestStr(); + TestStr(const std::string& s); // NOLINT runtime/explicit + TestStr(const char* s); // NOLINT runtime/explicit + explicit TestStr(const TestInt& test_int); + std::string value; + + bool operator==(const TestStr& other) const; + + friend std::ostream& operator<<(std::ostream& os, const TestStr& v); +}; + +template <> +struct IterationTraits { + static TestStr End() { return TestStr(); } + static bool IsEnd(const TestStr& val) { return val == IterationTraits::End(); } +}; + +std::vector RangeVector(unsigned int max, unsigned int step = 1); + +template +inline Iterator VectorIt(std::vector v) { + return MakeVectorIterator(std::move(v)); +} + +template +inline Iterator PossiblySlowVectorIt(std::vector v, bool slow = false) { + auto iterator = MakeVectorIterator(std::move(v)); + if (slow) { + return MakeTransformedIterator(std::move(iterator), + [](T item) -> Result> { + SleepABit(); + return TransformYield(item); + }); + } else { + return iterator; + } +} + +template +inline void AssertIteratorExhausted(Iterator& it) { + ASSERT_OK_AND_ASSIGN(T next, it.Next()); + ASSERT_TRUE(IsIterationEnd(next)); +} + +Transformer MakeFilter(std::function filter); + +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/thread_pool.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/thread_pool.h new file mode 100644 index 
0000000000000000000000000000000000000000..44b1e227b0e5fac7ed104df5c487bdc223e44f26 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/thread_pool.h @@ -0,0 +1,620 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/cancel.h" +#include "arrow/util/config.h" +#include "arrow/util/functional.h" +#include "arrow/util/future.h" +#include "arrow/util/iterator.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +#if defined(_MSC_VER) +// Disable harmless warning for decorated name length limit +#pragma warning(disable : 4503) +#endif + +namespace arrow { + +/// \brief Get the capacity of the global thread pool +/// +/// Return the number of worker threads in the thread pool to which +/// Arrow dispatches various CPU-bound tasks. This is an ideal number, +/// not necessarily the exact number of threads at a given point in time. +/// +/// You can change this number using SetCpuThreadPoolCapacity(). 
+ARROW_EXPORT int GetCpuThreadPoolCapacity(); + +/// \brief Set the capacity of the global thread pool +/// +/// Set the number of worker threads int the thread pool to which +/// Arrow dispatches various CPU-bound tasks. +/// +/// The current number is returned by GetCpuThreadPoolCapacity(). +ARROW_EXPORT Status SetCpuThreadPoolCapacity(int threads); + +namespace internal { + +// Hints about a task that may be used by an Executor. +// They are ignored by the provided ThreadPool implementation. +struct TaskHints { + // The lower, the more urgent + int32_t priority = 0; + // The IO transfer size in bytes + int64_t io_size = -1; + // The approximate CPU cost in number of instructions + int64_t cpu_cost = -1; + // An application-specific ID + int64_t external_id = -1; +}; + +class ARROW_EXPORT Executor { + public: + using StopCallback = internal::FnOnce; + + virtual ~Executor(); + + // Spawn a fire-and-forget task. + template + Status Spawn(Function&& func) { + return SpawnReal(TaskHints{}, std::forward(func), StopToken::Unstoppable(), + StopCallback{}); + } + template + Status Spawn(Function&& func, StopToken stop_token) { + return SpawnReal(TaskHints{}, std::forward(func), std::move(stop_token), + StopCallback{}); + } + template + Status Spawn(TaskHints hints, Function&& func) { + return SpawnReal(hints, std::forward(func), StopToken::Unstoppable(), + StopCallback{}); + } + template + Status Spawn(TaskHints hints, Function&& func, StopToken stop_token) { + return SpawnReal(hints, std::forward(func), std::move(stop_token), + StopCallback{}); + } + template + Status Spawn(TaskHints hints, Function&& func, StopToken stop_token, + StopCallback stop_callback) { + return SpawnReal(hints, std::forward(func), std::move(stop_token), + std::move(stop_callback)); + } + + // Transfers a future to this executor. Any continuations added to the + // returned future will run in this executor. Otherwise they would run + // on the same thread that called MarkFinished. 
+ // + // This is necessary when (for example) an I/O task is completing a future. + // The continuations of that future should run on the CPU thread pool keeping + // CPU heavy work off the I/O thread pool. So the I/O task should transfer + // the future to the CPU executor before returning. + // + // By default this method will only transfer if the future is not already completed. If + // the future is already completed then any callback would be run synchronously and so + // no transfer is typically necessary. However, in cases where you want to force a + // transfer (e.g. to help the scheduler break up units of work across multiple cores) + // then you can override this behavior with `always_transfer`. + template + Future Transfer(Future future) { + return DoTransfer(std::move(future), false); + } + + // Overload of Transfer which will always schedule callbacks on new threads even if the + // future is finished when the callback is added. + // + // This can be useful in cases where you want to ensure parallelism + template + Future TransferAlways(Future future) { + return DoTransfer(std::move(future), true); + } + + // Submit a callable and arguments for execution. Return a future that + // will return the callable's result value once. + // The callable's arguments are copied before execution. + template > + Result Submit(TaskHints hints, StopToken stop_token, Function&& func, + Args&&... 
args) { + using ValueType = typename FutureType::ValueType; + + auto future = FutureType::Make(); + auto task = std::bind(::arrow::detail::ContinueFuture{}, future, + std::forward(func), std::forward(args)...); + struct { + WeakFuture weak_fut; + + void operator()(const Status& st) { + auto fut = weak_fut.get(); + if (fut.is_valid()) { + fut.MarkFinished(st); + } + } + } stop_callback{WeakFuture(future)}; + ARROW_RETURN_NOT_OK(SpawnReal(hints, std::move(task), std::move(stop_token), + std::move(stop_callback))); + + return future; + } + + template > + Result Submit(StopToken stop_token, Function&& func, Args&&... args) { + return Submit(TaskHints{}, stop_token, std::forward(func), + std::forward(args)...); + } + + template > + Result Submit(TaskHints hints, Function&& func, Args&&... args) { + return Submit(std::move(hints), StopToken::Unstoppable(), + std::forward(func), std::forward(args)...); + } + + template > + Result Submit(Function&& func, Args&&... args) { + return Submit(TaskHints{}, StopToken::Unstoppable(), std::forward(func), + std::forward(args)...); + } + + // Return the level of parallelism (the number of tasks that may be executed + // concurrently). This may be an approximate number. + virtual int GetCapacity() = 0; + + // Return true if the thread from which this function is called is owned by this + // Executor. Returns false if this Executor does not support this property. + virtual bool OwnsThisThread() { return false; } + + // Return true if this is the current executor being called + // n.b. 
this defaults to just calling OwnsThisThread + // unless the threadpool is disabled + virtual bool IsCurrentExecutor() { return OwnsThisThread(); } + + /// \brief An interface to represent something with a custom destructor + /// + /// \see KeepAlive + class ARROW_EXPORT Resource { + public: + virtual ~Resource() = default; + }; + + /// \brief Keep a resource alive until all executor threads have terminated + /// + /// Executors may have static storage duration. In particular, the CPU and I/O + /// executors are currently implemented this way. These threads may access other + /// objects with static storage duration such as the OpenTelemetry runtime context + /// the default memory pool, or other static executors. + /// + /// The order in which these objects are destroyed is difficult to control. In order + /// to ensure those objects remain alive until all threads have finished those objects + /// should be wrapped in a Resource object and passed into this method. The given + /// shared_ptr will be kept alive until all threads have finished their worker loops. + virtual void KeepAlive(std::shared_ptr resource); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(Executor); + + Executor() = default; + + template , typename FTSync = typename FT::SyncType> + Future DoTransfer(Future future, bool always_transfer = false) { + auto transferred = Future::Make(); + if (always_transfer) { + CallbackOptions callback_options = CallbackOptions::Defaults(); + callback_options.should_schedule = ShouldSchedule::Always; + callback_options.executor = this; + auto sync_callback = [transferred](const FTSync& result) mutable { + transferred.MarkFinished(result); + }; + future.AddCallback(sync_callback, callback_options); + return transferred; + } + + // We could use AddCallback's ShouldSchedule::IfUnfinished but we can save a bit of + // work by doing the test here. 
+ auto callback = [this, transferred](const FTSync& result) mutable { + auto spawn_status = + Spawn([transferred, result]() mutable { transferred.MarkFinished(result); }); + if (!spawn_status.ok()) { + transferred.MarkFinished(spawn_status); + } + }; + auto callback_factory = [&callback]() { return callback; }; + if (future.TryAddCallback(callback_factory)) { + return transferred; + } + // If the future is already finished and we aren't going to force spawn a thread + // then we don't need to add another layer of callback and can return the original + // future + return future; + } + + // Subclassing API + virtual Status SpawnReal(TaskHints hints, FnOnce task, StopToken, + StopCallback&&) = 0; +}; + +/// \brief An executor implementation that runs all tasks on a single thread using an +/// event loop. +/// +/// Note: Any sort of nested parallelism will deadlock this executor. Blocking waits are +/// fine but if one task needs to wait for another task it must be expressed as an +/// asynchronous continuation. +class ARROW_EXPORT SerialExecutor : public Executor { + public: + template + using TopLevelTask = internal::FnOnce(Executor*)>; + + ~SerialExecutor() override; + + int GetCapacity() override { return 1; }; + bool OwnsThisThread() override; + Status SpawnReal(TaskHints hints, FnOnce task, StopToken, + StopCallback&&) override; + + // Return the number of tasks either running or in the queue. + int GetNumTasks(); + + /// \brief Runs the TopLevelTask and any scheduled tasks + /// + /// The TopLevelTask (or one of the tasks it schedules) must either return an invalid + /// status or call the finish signal. Failure to do this will result in a deadlock. For + /// this reason it is preferable (if possible) to use the helper methods (below) + /// RunSynchronously/RunSerially which delegates the responsibility onto a Future + /// producer's existing responsibility to always mark a future finished (which can + /// someday be aided by ARROW-12207). 
+ template , + typename FTSync = typename FT::SyncType> + static FTSync RunInSerialExecutor(TopLevelTask initial_task) { + Future fut = SerialExecutor().Run(std::move(initial_task)); + return FutureToSync(fut); + } + + /// \brief Transform an AsyncGenerator into an Iterator + /// + /// An event loop will be created and each call to Next will power the event loop with + /// the calling thread until the next item is ready to be delivered. + /// + /// Note: The iterator's destructor will run until the given generator is fully + /// exhausted. If you wish to abandon iteration before completion then the correct + /// approach is to use a stop token to cause the generator to exhaust early. + template + static Iterator IterateGenerator( + internal::FnOnce()>>(Executor*)> initial_task) { + auto serial_executor = std::unique_ptr(new SerialExecutor()); + auto maybe_generator = std::move(initial_task)(serial_executor.get()); + if (!maybe_generator.ok()) { + return MakeErrorIterator(maybe_generator.status()); + } + auto generator = maybe_generator.MoveValueUnsafe(); + struct SerialIterator { + SerialIterator(std::unique_ptr executor, + std::function()> generator) + : executor(std::move(executor)), generator(std::move(generator)) {} + ARROW_DISALLOW_COPY_AND_ASSIGN(SerialIterator); + ARROW_DEFAULT_MOVE_AND_ASSIGN(SerialIterator); + ~SerialIterator() { + // A serial iterator must be consumed before it can be destroyed. Allowing it to + // do otherwise would lead to resource leakage. There will likely be deadlocks at + // this spot in the future but these will be the result of other bugs and not the + // fact that we are forcing consumption here. + + // If a streaming API needs to support early abandonment then it should be done so + // with a cancellation token and not simply discarding the iterator and expecting + // the underlying work to clean up correctly. 
+ if (executor && !executor->IsFinished()) { + while (true) { + Result maybe_next = Next(); + if (!maybe_next.ok() || IsIterationEnd(*maybe_next)) { + break; + } + } + } + } + + Result Next() { + executor->Unpause(); + // This call may lead to tasks being scheduled in the serial executor + Future next_fut = generator(); + next_fut.AddCallback([this](const Result& res) { + // If we're done iterating we should drain the rest of the tasks in the executor + if (!res.ok() || IsIterationEnd(*res)) { + executor->Finish(); + return; + } + // Otherwise we will break out immediately, leaving the remaining tasks for + // the next call. + executor->Pause(); + }); +#ifdef ARROW_ENABLE_THREADING + // future must run on this thread + // Borrow this thread and run tasks until the future is finished + executor->RunLoop(); +#else + next_fut.Wait(); +#endif + if (!next_fut.is_finished()) { + // Not clear this is possible since RunLoop wouldn't generally exit + // unless we paused/finished which would imply next_fut has been + // finished. + return Status::Invalid( + "Serial executor terminated before next result computed"); + } + // At this point we may still have tasks in the executor, that is ok. + // We will run those tasks the next time through. + return next_fut.result(); + } + + std::unique_ptr executor; + std::function()> generator; + }; + return Iterator(SerialIterator{std::move(serial_executor), std::move(generator)}); + } + +#ifndef ARROW_ENABLE_THREADING + // run a pending task from loop + // returns true if any tasks were run in the last go round the loop (i.e. 
if it + // returns false, all executors are waiting) + static bool RunTasksOnAllExecutors(); + static SerialExecutor* GetCurrentExecutor(); + + bool IsCurrentExecutor() override; + +#endif + + protected: + virtual void RunLoop(); + + // State uses mutex + struct State; + std::shared_ptr state_; + + SerialExecutor(); + + // We mark the serial executor "finished" when there should be + // no more tasks scheduled on it. It's not strictly needed but + // can help catch bugs where we are trying to use the executor + // after we are done with it. + void Finish(); + bool IsFinished(); + // We pause the executor when we are running an async generator + // and we have received an item that we can deliver. + void Pause(); + void Unpause(); + + template ::SyncType> + Future Run(TopLevelTask initial_task) { + auto final_fut = std::move(initial_task)(this); + final_fut.AddCallback([this](const FTSync&) { Finish(); }); + RunLoop(); + return final_fut; + } + +#ifndef ARROW_ENABLE_THREADING + // we have to run tasks from all live executors + // during RunLoop if we don't have threading + static std::unordered_set all_executors; + // a pointer to the last one called by the loop + // so all tasks get spawned equally + // on multiple calls to RunTasksOnAllExecutors + static SerialExecutor* last_called_executor; + // without threading we can't tell which executor called the + // current process - so we set it in spawning the task + static SerialExecutor* current_executor; +#endif // ARROW_ENABLE_THREADING +}; + +#ifdef ARROW_ENABLE_THREADING + +/// An Executor implementation spawning tasks in FIFO manner on a fixed-size +/// pool of worker threads. +/// +/// Note: Any sort of nested parallelism will deadlock this executor. Blocking waits are +/// fine but if one task needs to wait for another task it must be expressed as an +/// asynchronous continuation. 
+class ARROW_EXPORT ThreadPool : public Executor { + public: + // Construct a thread pool with the given number of worker threads + static Result> Make(int threads); + + // Like Make(), but takes care that the returned ThreadPool is compatible + // with destruction late at process exit. + static Result> MakeEternal(int threads); + + // Destroy thread pool; the pool will first be shut down + ~ThreadPool() override; + + // Return the desired number of worker threads. + // The actual number of workers may lag a bit before being adjusted to + // match this value. + int GetCapacity() override; + + // Return the number of tasks either running or in the queue. + int GetNumTasks(); + + bool OwnsThisThread() override; + // Dynamically change the number of worker threads. + // + // This function always returns immediately. + // If fewer threads are running than this number, new threads are spawned + // on-demand when needed for task execution. + // If more threads are running than this number, excess threads are reaped + // as soon as possible. + Status SetCapacity(int threads); + + // Heuristic for the default capacity of a thread pool for CPU-bound tasks. + // This is exposed as a static method to help with testing. + static int DefaultCapacity(); + + // Shutdown the pool. Once the pool starts shutting down, new tasks + // cannot be submitted anymore. + // If "wait" is true, shutdown waits for all pending tasks to be finished. + // If "wait" is false, workers are stopped as soon as currently executing + // tasks are finished. 
+ Status Shutdown(bool wait = true); + + // Wait for the thread pool to become idle + // + // This is useful for sequencing tests + void WaitForIdle(); + + void KeepAlive(std::shared_ptr resource) override; + + struct State; + + protected: + FRIEND_TEST(TestThreadPool, SetCapacity); + FRIEND_TEST(TestGlobalThreadPool, Capacity); + ARROW_FRIEND_EXPORT friend ThreadPool* GetCpuThreadPool(); + + ThreadPool(); + + Status SpawnReal(TaskHints hints, FnOnce task, StopToken, + StopCallback&&) override; + + // Collect finished worker threads, making sure the OS threads have exited + void CollectFinishedWorkersUnlocked(); + // Launch a given number of additional workers + void LaunchWorkersUnlocked(int threads); + // Get the current actual capacity + int GetActualCapacity(); + + static std::shared_ptr MakeCpuThreadPool(); + + std::shared_ptr sp_state_; + State* state_; + bool shutdown_on_destroy_; +}; +#else // ARROW_ENABLE_THREADING +// an executor implementation which pretends to be a thread pool but runs everything +// on the main thread using a static queue (shared between all thread pools, otherwise +// cross-threadpool dependencies will break everything) +class ARROW_EXPORT ThreadPool : public SerialExecutor { + public: + ARROW_FRIEND_EXPORT friend ThreadPool* GetCpuThreadPool(); + + static Result> Make(int threads); + + // Like Make(), but takes care that the returned ThreadPool is compatible + // with destruction late at process exit. + static Result> MakeEternal(int threads); + + // Destroy thread pool; the pool will first be shut down + ~ThreadPool() override; + + // Return the desired number of worker threads. + // The actual number of workers may lag a bit before being adjusted to + // match this value. + int GetCapacity() override; + + virtual int GetActualCapacity(); + + bool OwnsThisThread() override { return true; } + + // Dynamically change the number of worker threads. 
+ // without threading this is equal to the + // number of tasks that can be running at once + // (inside each other) + Status SetCapacity(int threads); + + static int DefaultCapacity() { return 8; } + + // Shutdown the pool. Once the pool starts shutting down, new tasks + // cannot be submitted anymore. + // If "wait" is true, shutdown waits for all pending tasks to be finished. + // If "wait" is false, workers are stopped as soon as currently executing + // tasks are finished. + Status Shutdown(bool wait = true); + + // Wait for the thread pool to become idle + // + // This is useful for sequencing tests + void WaitForIdle(); + + protected: + static std::shared_ptr MakeCpuThreadPool(); + ThreadPool(); +}; + +#endif // ARROW_ENABLE_THREADING + +// Return the process-global thread pool for CPU-bound tasks. +ARROW_EXPORT ThreadPool* GetCpuThreadPool(); + +/// \brief Potentially run an async operation serially (if use_threads is false) +/// \see RunSerially +/// +/// If `use_threads` is true, the global CPU executor is used. +/// If `use_threads` is false, a temporary SerialExecutor is used. +/// `get_future` is called (from this thread) with the chosen executor and must +/// return a future that will eventually finish. This function returns once the +/// future has finished. +template +typename Fut::SyncType RunSynchronously(FnOnce get_future, + bool use_threads) { + if (use_threads) { + auto fut = std::move(get_future)(GetCpuThreadPool()); + return FutureToSync(fut); + } else { + return SerialExecutor::RunInSerialExecutor(std::move(get_future)); + } +} + +/// \brief Potentially iterate an async generator serially (if use_threads is false) +/// \see IterateGenerator +/// +/// If `use_threads` is true, the global CPU executor will be used. Each call to +/// the iterator will simply wait until the next item is available. Tasks may run in +/// the background between calls. +/// +/// If `use_threads` is false, the calling thread only will be used. 
Each call to +/// the iterator will use the calling thread to do enough work to generate one item. +/// Tasks will be left in a queue until the next call and no work will be done between +/// calls. +template +Iterator IterateSynchronously( + FnOnce()>>(Executor*)> get_gen, bool use_threads) { + if (use_threads) { + auto maybe_gen = std::move(get_gen)(GetCpuThreadPool()); + if (!maybe_gen.ok()) { + return MakeErrorIterator(maybe_gen.status()); + } + return MakeGeneratorIterator(*maybe_gen); + } else { + return SerialExecutor::IterateGenerator(std::move(get_gen)); + } +} + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/tracing.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/tracing.h new file mode 100644 index 0000000000000000000000000000000000000000..d7808256418eef0faaf54a189d11c6896583d68b --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/tracing.h @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { +namespace tracing { + +class ARROW_EXPORT SpanDetails { + public: + virtual ~SpanDetails() {} +}; + +class ARROW_EXPORT Span { + public: + Span() noexcept; + /// True if this span has been started with START_SPAN + bool valid() const; + /// End the span early + void reset(); + std::unique_ptr details; +}; + +} // namespace tracing +} // namespace util +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/trie.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/trie.h new file mode 100644 index 0000000000000000000000000000000000000000..7815d4d1ecc1d66ba20c45eddb6c626833aa54e2 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/trie.h @@ -0,0 +1,243 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +// A non-zero-terminated small string class. 
+// std::string usually has a small string optimization +// (see review at https://shaharmike.com/cpp/std-string/) +// but this one allows tight control and optimization of memory layout. +template +class SmallString { + public: + SmallString() : length_(0) {} + + template + SmallString(const T& v) { // NOLINT implicit constructor + *this = std::string_view(v); + } + + SmallString& operator=(const std::string_view s) { +#ifndef NDEBUG + CheckSize(s.size()); +#endif + length_ = static_cast(s.size()); + std::memcpy(data_, s.data(), length_); + return *this; + } + + SmallString& operator=(const std::string& s) { + *this = std::string_view(s); + return *this; + } + + SmallString& operator=(const char* s) { + *this = std::string_view(s); + return *this; + } + + explicit operator std::string_view() const { return std::string_view(data_, length_); } + + const char* data() const { return data_; } + size_t length() const { return length_; } + bool empty() const { return length_ == 0; } + char operator[](size_t pos) const { +#ifdef NDEBUG + assert(pos <= length_); +#endif + return data_[pos]; + } + + SmallString substr(size_t pos) const { + return SmallString(std::string_view(*this).substr(pos)); + } + + SmallString substr(size_t pos, size_t count) const { + return SmallString(std::string_view(*this).substr(pos, count)); + } + + template + bool operator==(T&& other) const { + return std::string_view(*this) == std::string_view(std::forward(other)); + } + + template + bool operator!=(T&& other) const { + return std::string_view(*this) != std::string_view(std::forward(other)); + } + + protected: + uint8_t length_; + char data_[N]; + + void CheckSize(size_t n) { assert(n <= N); } +}; + +template +std::ostream& operator<<(std::ostream& os, const SmallString& str) { + return os << std::string_view(str); +} + +// A trie class for byte strings, optimized for small sets of short strings. +// This class is immutable by design, use a TrieBuilder to construct it. 
+class ARROW_EXPORT Trie { + using index_type = int16_t; + using fast_index_type = int_fast16_t; + static constexpr auto kMaxIndex = std::numeric_limits::max(); + + public: + Trie() : size_(0) {} + Trie(Trie&&) = default; + Trie& operator=(Trie&&) = default; + + int32_t Find(std::string_view s) const { + const Node* node = &nodes_[0]; + fast_index_type pos = 0; + if (s.length() > static_cast(kMaxIndex)) { + return -1; + } + fast_index_type remaining = static_cast(s.length()); + + while (remaining > 0) { + auto substring_length = node->substring_length(); + if (substring_length > 0) { + auto substring_data = node->substring_data(); + if (remaining < substring_length) { + // Input too short + return -1; + } + for (fast_index_type i = 0; i < substring_length; ++i) { + if (s[pos++] != substring_data[i]) { + // Mismatching substring + return -1; + } + --remaining; + } + if (remaining == 0) { + // Matched node exactly + return node->found_index_; + } + } + // Lookup child using next input character + if (node->child_lookup_ == -1) { + // Input too long + return -1; + } + auto c = static_cast(s[pos++]); + --remaining; + auto child_index = lookup_table_[node->child_lookup_ * 256 + c]; + if (child_index == -1) { + // Child not found + return -1; + } + node = &nodes_[child_index]; + } + + // Input exhausted + if (node->substring_.empty()) { + // Matched node exactly + return node->found_index_; + } else { + return -1; + } + } + + Status Validate() const; + + void Dump() const; + + protected: + static constexpr size_t kNodeSize = 16; + static constexpr auto kMaxSubstringLength = + kNodeSize - 2 * sizeof(index_type) - sizeof(int8_t); + + struct Node { + // If this node is a valid end of string, index of found string, otherwise -1 + index_type found_index_; + // Base index for child lookup in lookup_table_ (-1 if no child nodes) + index_type child_lookup_; + // The substring for this node. 
+ SmallString substring_; + + fast_index_type substring_length() const { + return static_cast(substring_.length()); + } + const char* substring_data() const { return substring_.data(); } + }; + + static_assert(sizeof(Node) == kNodeSize, "Unexpected node size"); + + ARROW_DISALLOW_COPY_AND_ASSIGN(Trie); + + void Dump(const Node* node, const std::string& indent) const; + + // Node table: entry 0 is the root node + std::vector nodes_; + + // Indexed lookup structure: gives index in node table, or -1 if not found + std::vector lookup_table_; + + // Number of entries + index_type size_; + + friend class TrieBuilder; +}; + +class ARROW_EXPORT TrieBuilder { + using index_type = Trie::index_type; + using fast_index_type = Trie::fast_index_type; + + public: + TrieBuilder(); + Status Append(std::string_view s, bool allow_duplicate = false); + Trie Finish(); + + protected: + // Extend the lookup table by 256 entries, return the index of the new span + Status ExtendLookupTable(index_type* out_lookup_index); + // Split the node given by the index at the substring index `split_at` + Status SplitNode(fast_index_type node_index, fast_index_type split_at); + // Append an already constructed child node to the parent + Status AppendChildNode(Trie::Node* parent, uint8_t ch, Trie::Node&& node); + // Create a matching child node from this parent + Status CreateChildNode(Trie::Node* parent, uint8_t ch, std::string_view substring); + Status CreateChildNode(Trie::Node* parent, char ch, std::string_view substring); + + Trie trie_; + + static constexpr auto kMaxIndex = std::numeric_limits::max(); +}; + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..3174881f4d018c6193ff5c12a7d308e39ed75561 --- /dev/null +++ 
b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +namespace arrow { + +namespace internal { +struct Empty; +} // namespace internal + +template +class WeakFuture; +class FutureWaiter; + +class TimestampParser; + +namespace internal { + +class Executor; +class TaskGroup; +class ThreadPool; +class CpuInfo; + +namespace tracing { + +struct Scope; + +} // namespace tracing +} // namespace internal + +struct Compression { + /// \brief Compression algorithm + enum type { + UNCOMPRESSED, + SNAPPY, + GZIP, + BROTLI, + ZSTD, + LZ4, + LZ4_FRAME, + LZO, + BZ2, + LZ4_HADOOP + }; +}; + +namespace util { +class AsyncTaskScheduler; +class Compressor; +class Decompressor; +class Codec; +class Uri; +} // namespace util + +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_traits.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_traits.h new file mode 100644 index 0000000000000000000000000000000000000000..c1906152423c97e11ef9f577f46c7f4d4d124597 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_traits.h @@ -0,0 +1,46 
@@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +namespace arrow { +namespace internal { + +/// \brief Metafunction to allow checking if a type matches any of another set of types +template +struct IsOneOf : std::false_type {}; /// Base case: nothing has matched + +template +struct IsOneOf { + /// Recursive case: T == U or T matches any other types provided (not including U). 
+ static constexpr bool value = std::is_same::value || IsOneOf::value; +}; + +/// \brief Shorthand for using IsOneOf + std::enable_if +template +using EnableIfIsOneOf = typename std::enable_if::value, T>::type; + +/// \brief is_null_pointer from C++17 +template +struct is_null_pointer : std::is_same::type> { +}; + +} // namespace internal +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/unreachable.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/unreachable.h new file mode 100644 index 0000000000000000000000000000000000000000..d2e383e714b3eb8e0a0b6a23b1086913093a5c29 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/unreachable.h @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/util/visibility.h" + +#include + +namespace arrow { + +[[noreturn]] ARROW_EXPORT void Unreachable(const char* message = "Unreachable"); + +[[noreturn]] ARROW_EXPORT void Unreachable(std::string_view message); + +} // namespace arrow diff --git a/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h new file mode 100644 index 0000000000000000000000000000000000000000..ca93fab5b9f4e1f43d451689f0e75cb5572ce983 --- /dev/null +++ b/valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h @@ -0,0 +1,59 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +// Convert a UTF8 string to a wstring (either UTF16 or UTF32, depending +// on the wchar_t width). +ARROW_EXPORT Result UTF8ToWideString(std::string_view source); + +// Similarly, convert a wstring to a UTF8 string. +ARROW_EXPORT Result WideStringToUTF8(const std::wstring& source); + +// Convert UTF8 string to a UTF16 string. 
+ARROW_EXPORT Result UTF8StringToUTF16(std::string_view source); + +// Convert UTF16 string to a UTF8 string. +ARROW_EXPORT Result UTF16StringToUTF8(std::u16string_view source); + +// This function needs to be called before doing UTF8 validation. +ARROW_EXPORT void InitializeUTF8(); + +ARROW_EXPORT bool ValidateUTF8(const uint8_t* data, int64_t size); + +ARROW_EXPORT bool ValidateUTF8(std::string_view str); + +// Skip UTF8 byte order mark, if any. +ARROW_EXPORT +Result SkipUTF8BOM(const uint8_t* data, int64_t size); + +static constexpr uint32_t kMaxUnicodeCodepoint = 0x110000; + +} // namespace util +} // namespace arrow