ZTWHHH commited on
Commit
fc13052
·
verified ·
1 Parent(s): d3c3a7f

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. parrot/lib/python3.10/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-310.pyc +0 -0
  2. parrot/lib/python3.10/site-packages/numpy/_core/__pycache__/strings.cpython-310.pyc +0 -0
  3. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h +146 -0
  4. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/async.h +60 -0
  5. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h +36 -0
  6. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h +112 -0
  7. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h +85 -0
  8. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h +64 -0
  9. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib.h +83 -0
  10. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib_api.h +201 -0
  11. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h +103 -0
  12. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h +72 -0
  13. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h +132 -0
  14. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h +89 -0
  15. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h +19 -0
  16. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h +80 -0
  17. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/type_traits.h +350 -0
  18. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/udf.h +81 -0
  19. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/vendored/pythoncapi_compat.h +1519 -0
  20. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h +33 -0
  21. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h +221 -0
  22. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h +2058 -0
  23. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h +71 -0
  24. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h +35 -0
  25. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h +115 -0
  26. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h +515 -0
  27. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h +369 -0
  28. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h +466 -0
  29. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h +43 -0
  30. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h +112 -0
  31. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h +88 -0
  32. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h +89 -0
  33. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h +34 -0
  34. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h +28 -0
  35. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h +28 -0
  36. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h +88 -0
  37. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h +241 -0
  38. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h +411 -0
  39. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h +60 -0
  40. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h +114 -0
  41. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h +535 -0
  42. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h +115 -0
  43. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/float16.h +209 -0
  44. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/future.h +882 -0
  45. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/hash_util.h +66 -0
  46. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h +944 -0
  47. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h +137 -0
  48. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h +452 -0
  49. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h +575 -0
  50. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/list_util.h +55 -0
parrot/lib/python3.10/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-310.pyc ADDED
Binary file (11.3 kB). View file
 
parrot/lib/python3.10/site-packages/numpy/_core/__pycache__/strings.cpython-310.pyc ADDED
Binary file (41 kB). View file
 
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between pandas's NumPy-based data representation
19
+ // and Arrow data structures
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/python/platform.h"
24
+
25
+ #include <memory>
26
+ #include <string>
27
+ #include <unordered_set>
28
+
29
+ #include "arrow/memory_pool.h"
30
+ #include "arrow/python/visibility.h"
31
+
32
+ namespace arrow {
33
+
34
+ class Array;
35
+ class ChunkedArray;
36
+ class Column;
37
+ class DataType;
38
+ class MemoryPool;
39
+ class Status;
40
+ class Table;
41
+
42
+ namespace py {
43
+
44
+ enum class MapConversionType {
45
+ DEFAULT, // convert arrow maps to assoc lists (list of kev-value tuples) in Pandas
46
+ LOSSY, // report warnings when lossiness is encountered due to duplicate keys
47
+ STRICT_, // raise a Python exception when lossiness is encountered due to duplicate
48
+ // keys
49
+ };
50
+
51
+ struct PandasOptions {
52
+ /// arrow::MemoryPool to use for memory allocations
53
+ MemoryPool* pool = default_memory_pool();
54
+
55
+ /// If true, we will convert all string columns to categoricals
56
+ bool strings_to_categorical = false;
57
+ bool zero_copy_only = false;
58
+ bool integer_object_nulls = false;
59
+ bool date_as_object = false;
60
+ bool timestamp_as_object = false;
61
+ bool use_threads = false;
62
+
63
+ /// Coerce all date and timestamp to datetime64[ns]
64
+ bool coerce_temporal_nanoseconds = false;
65
+
66
+ /// Used to maintain backwards compatibility for
67
+ /// timezone bugs (see ARROW-9528). Should be removed
68
+ /// after Arrow 2.0 release.
69
+ bool ignore_timezone = false;
70
+
71
+ /// \brief If true, do not create duplicate PyObject versions of equal
72
+ /// objects. This only applies to immutable objects like strings or datetime
73
+ /// objects
74
+ bool deduplicate_objects = false;
75
+
76
+ /// \brief For certain data types, a cast is needed in order to store the
77
+ /// data in a pandas DataFrame or Series (e.g. timestamps are always stored
78
+ /// as nanoseconds in pandas). This option controls whether it is a safe
79
+ /// cast or not.
80
+ bool safe_cast = true;
81
+
82
+ /// \brief If true, create one block per column rather than consolidated
83
+ /// blocks (1 per data type). Do zero-copy wrapping when there are no
84
+ /// nulls. pandas currently will consolidate the blocks on its own, causing
85
+ /// increased memory use, so keep this in mind if you are working on a
86
+ /// memory-constrained situation.
87
+ bool split_blocks = false;
88
+
89
+ /// \brief If true, allow non-writable zero-copy views to be created for
90
+ /// single column blocks. This option is also used to provide zero copy for
91
+ /// Series data
92
+ bool allow_zero_copy_blocks = false;
93
+
94
+ /// \brief If true, attempt to deallocate buffers in passed Arrow object if
95
+ /// it is the only remaining shared_ptr copy of it. See ARROW-3789 for
96
+ /// original context for this feature. Only currently implemented for Table
97
+ /// conversions
98
+ bool self_destruct = false;
99
+
100
+ /// \brief The default behavior (DEFAULT), is to convert Arrow Map arrays to
101
+ /// Python association lists (list-of-tuples) in the same order as the Arrow
102
+ /// Map, as in [(key1, value1), (key2, value2), ...]
103
+ /// If LOSSY or STRICT, convert Arrow Map arrays to native Python dicts.
104
+ /// This can change the ordering of (key, value) pairs, and will deduplicate
105
+ /// multiple keys, resulting in a possible loss of data.
106
+ /// If 'lossy', this key deduplication results in a warning printed
107
+ /// when detected. If 'strict', this instead results in an exception
108
+ /// being raised when detected.
109
+ MapConversionType maps_as_pydicts = MapConversionType::DEFAULT;
110
+
111
+ // Used internally for nested arrays.
112
+ bool decode_dictionaries = false;
113
+
114
+ // Columns that should be casted to categorical
115
+ std::unordered_set<std::string> categorical_columns;
116
+
117
+ // Columns that should be passed through to be converted to
118
+ // ExtensionArray/Block
119
+ std::unordered_set<std::string> extension_columns;
120
+
121
+ // Used internally to decipher between to_numpy() and to_pandas() when
122
+ // the expected output differs
123
+ bool to_numpy = false;
124
+ };
125
+
126
+ ARROW_PYTHON_EXPORT
127
+ Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr<Array> arr,
128
+ PyObject* py_ref, PyObject** out);
129
+
130
+ ARROW_PYTHON_EXPORT
131
+ Status ConvertChunkedArrayToPandas(const PandasOptions& options,
132
+ std::shared_ptr<ChunkedArray> col, PyObject* py_ref,
133
+ PyObject** out);
134
+
135
+ // Convert a whole table as efficiently as possible to a pandas.DataFrame.
136
+ //
137
+ // The returned Python object is a list of tuples consisting of the exact 2D
138
+ // BlockManager structure of the pandas.DataFrame used as of pandas 0.19.x.
139
+ //
140
+ // tuple item: (indices: ndarray[int32], block: ndarray[TYPE, ndim=2])
141
+ ARROW_PYTHON_EXPORT
142
+ Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr<Table> table,
143
+ PyObject** out);
144
+
145
+ } // namespace py
146
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/async.h ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <utility>
21
+
22
+ #include "arrow/python/common.h"
23
+ #include "arrow/status.h"
24
+ #include "arrow/util/future.h"
25
+
26
+ namespace arrow::py {
27
+
28
+ /// \brief Bind a Python callback to an arrow::Future.
29
+ ///
30
+ /// If the Future finishes successfully, py_wrapper is called with its
31
+ /// result value and should return a PyObject*. If py_wrapper is successful,
32
+ /// py_cb is called with its return value.
33
+ ///
34
+ /// If either the Future or py_wrapper fails, py_cb is called with the
35
+ /// associated Python exception.
36
+ ///
37
+ /// \param future The future to bind to.
38
+ /// \param py_cb The Python callback function. Will be passed the result of
39
+ /// py_wrapper, or a Python exception if the future failed or one was
40
+ /// raised by py_wrapper.
41
+ /// \param py_wrapper A function (likely defined in Cython) to convert the C++
42
+ /// result of the future to a Python object.
43
+ template <typename T, typename PyWrapper = PyObject* (*)(T)>
44
+ void BindFuture(Future<T> future, PyObject* py_cb, PyWrapper py_wrapper) {
45
+ Py_INCREF(py_cb);
46
+ OwnedRefNoGIL cb_ref(py_cb);
47
+
48
+ auto future_cb = [cb_ref = std::move(cb_ref),
49
+ py_wrapper = std::move(py_wrapper)](Result<T> result) {
50
+ SafeCallIntoPythonVoid([&]() {
51
+ OwnedRef py_value_or_exc{WrapResult(std::move(result), std::move(py_wrapper))};
52
+ Py_XDECREF(
53
+ PyObject_CallFunctionObjArgs(cb_ref.obj(), py_value_or_exc.obj(), NULLPTR));
54
+ ARROW_WARN_NOT_OK(CheckPyError(), "Internal error in async call");
55
+ });
56
+ };
57
+ future.AddCallback(std::move(future_cb));
58
+ }
59
+
60
+ } // namespace arrow::py
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/platform.h"
21
+
22
+ #include "arrow/python/visibility.h"
23
+
24
+ namespace arrow {
25
+ namespace py {
26
+ namespace benchmark {
27
+
28
+ // Micro-benchmark routines for use from ASV
29
+
30
+ // Run PandasObjectIsNull() once over every object in *list*
31
+ ARROW_PYTHON_EXPORT
32
+ void Benchmark_PandasObjectIsNull(PyObject* list);
33
+
34
+ } // namespace benchmark
35
+ } // namespace py
36
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <vector>
23
+
24
+ #include "arrow/python/serialize.h"
25
+ #include "arrow/python/visibility.h"
26
+ #include "arrow/status.h"
27
+ #include "arrow/util/macros.h"
28
+
29
+ namespace arrow {
30
+
31
+ class RecordBatch;
32
+ class Tensor;
33
+
34
+ namespace io {
35
+
36
+ class RandomAccessFile;
37
+
38
+ } // namespace io
39
+
40
+ namespace py {
41
+
42
+ struct ARROW_PYTHON_EXPORT SparseTensorCounts {
43
+ int coo;
44
+ int csr;
45
+ int csc;
46
+ int csf;
47
+ int ndim_csf;
48
+
49
+ int num_total_tensors() const { return coo + csr + csc + csf; }
50
+ int num_total_buffers() const {
51
+ return coo * 3 + csr * 4 + csc * 4 + 2 * ndim_csf + csf;
52
+ }
53
+ };
54
+
55
+ /// \brief Read serialized Python sequence from file interface using Arrow IPC
56
+ /// \param[in] src a RandomAccessFile
57
+ /// \param[out] out the reconstructed data
58
+ /// \return Status
59
+ ARROW_DEPRECATED("Deprecated in 18.0.0. Will be removed in 20.0.0")
60
+ ARROW_PYTHON_EXPORT
61
+ Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out);
62
+
63
+ /// \brief Reconstruct SerializedPyObject from representation produced by
64
+ /// SerializedPyObject::GetComponents.
65
+ ///
66
+ /// \param[in] num_tensors number of tensors in the object
67
+ /// \param[in] num_sparse_tensors number of sparse tensors in the object
68
+ /// \param[in] num_ndarrays number of numpy Ndarrays in the object
69
+ /// \param[in] num_buffers number of buffers in the object
70
+ /// \param[in] data a list containing pyarrow.Buffer instances. It must be 1 +
71
+ /// num_tensors * 2 + num_coo_tensors * 3 + num_csr_tensors * 4 + num_csc_tensors * 4 +
72
+ /// num_csf_tensors * (2 * ndim_csf + 3) + num_buffers in length
73
+ /// \param[out] out the reconstructed object
74
+ /// \return Status
75
+ ARROW_DEPRECATED("Deprecated in 18.0.0. Will be removed in 20.0.0")
76
+ ARROW_PYTHON_EXPORT
77
+ Status GetSerializedFromComponents(int num_tensors,
78
+ const SparseTensorCounts& num_sparse_tensors,
79
+ int num_ndarrays, int num_buffers, PyObject* data,
80
+ SerializedPyObject* out);
81
+
82
+ /// \brief Reconstruct Python object from Arrow-serialized representation
83
+ /// \param[in] context Serialization context which contains custom serialization
84
+ /// and deserialization callbacks. Can be any Python object with a
85
+ /// _serialize_callback method for serialization and a _deserialize_callback
86
+ /// method for deserialization. If context is None, no custom serialization
87
+ /// will be attempted.
88
+ /// \param[in] object Object to deserialize
89
+ /// \param[in] base a Python object holding the underlying data that any NumPy
90
+ /// arrays will reference, to avoid premature deallocation
91
+ /// \param[out] out The returned object
92
+ /// \return Status
93
+ /// This acquires the GIL
94
+ ARROW_DEPRECATED("Deprecated in 18.0.0. Will be removed in 20.0.0")
95
+ ARROW_PYTHON_EXPORT
96
+ Status DeserializeObject(PyObject* context, const SerializedPyObject& object,
97
+ PyObject* base, PyObject** out);
98
+
99
+ /// \brief Reconstruct Ndarray from Arrow-serialized representation
100
+ /// \param[in] object Object to deserialize
101
+ /// \param[out] out The deserialized tensor
102
+ /// \return Status
103
+ ARROW_DEPRECATED("Deprecated in 18.0.0. Will be removed in 20.0.0")
104
+ ARROW_PYTHON_EXPORT
105
+ Status DeserializeNdarray(const SerializedPyObject& object, std::shared_ptr<Tensor>* out);
106
+
107
+ ARROW_DEPRECATED("Deprecated in 18.0.0. Will be removed in 20.0.0")
108
+ ARROW_PYTHON_EXPORT
109
+ Status NdarrayFromBuffer(std::shared_ptr<Buffer> src, std::shared_ptr<Tensor>* out);
110
+
111
+ } // namespace py
112
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string>
22
+
23
+ #include "arrow/extension_type.h"
24
+ #include "arrow/python/common.h"
25
+ #include "arrow/python/visibility.h"
26
+ #include "arrow/util/macros.h"
27
+
28
+ namespace arrow {
29
+ namespace py {
30
+
31
+ class ARROW_PYTHON_EXPORT PyExtensionType : public ExtensionType {
32
+ public:
33
+ // Implement extensionType API
34
+ std::string extension_name() const override { return extension_name_; }
35
+
36
+ std::string ToString(bool show_metadata = false) const override;
37
+
38
+ bool ExtensionEquals(const ExtensionType& other) const override;
39
+
40
+ std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const override;
41
+
42
+ Result<std::shared_ptr<DataType>> Deserialize(
43
+ std::shared_ptr<DataType> storage_type,
44
+ const std::string& serialized) const override;
45
+
46
+ std::string Serialize() const override;
47
+
48
+ // For use from Cython
49
+ // Assumes that `typ` is borrowed
50
+ static Status FromClass(const std::shared_ptr<DataType> storage_type,
51
+ const std::string extension_name, PyObject* typ,
52
+ std::shared_ptr<ExtensionType>* out);
53
+
54
+ // Return new ref
55
+ PyObject* GetInstance() const;
56
+ Status SetInstance(PyObject*) const;
57
+
58
+ protected:
59
+ PyExtensionType(std::shared_ptr<DataType> storage_type, PyObject* typ,
60
+ PyObject* inst = NULLPTR);
61
+ PyExtensionType(std::shared_ptr<DataType> storage_type, std::string extension_name,
62
+ PyObject* typ, PyObject* inst = NULLPTR);
63
+
64
+ std::string extension_name_;
65
+
66
+ // These fields are mutable because of two-step initialization.
67
+ mutable OwnedRefNoGIL type_class_;
68
+ // A weakref or null. Storing a strong reference to the Python extension type
69
+ // instance would create an unreclaimable reference cycle between Python and C++
70
+ // (the Python instance has to keep a strong reference to the C++ ExtensionType
71
+ // in other direction). Instead, we store a weakref to the instance.
72
+ // If the weakref is dead, we reconstruct the instance from its serialized form.
73
+ mutable OwnedRefNoGIL type_instance_;
74
+ // Empty if type_instance_ is null
75
+ mutable std::string serialized_;
76
+ };
77
+
78
+ ARROW_PYTHON_EXPORT std::string PyExtensionName();
79
+
80
+ ARROW_PYTHON_EXPORT Status RegisterPyExtensionType(const std::shared_ptr<DataType>&);
81
+
82
+ ARROW_PYTHON_EXPORT Status UnregisterPyExtensionType(const std::string& type_name);
83
+
84
+ } // namespace py
85
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between CPython built-in data structures and Arrow
19
+ // data structures
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/python/platform.h"
24
+
25
+ #include <memory>
26
+
27
+ #include "arrow/python/visibility.h"
28
+ #include "arrow/type.h"
29
+ #include "arrow/util/macros.h"
30
+
31
+ #include "common.h"
32
+
33
+ namespace arrow {
34
+
35
+ class Array;
36
+ class Status;
37
+
38
+ namespace py {
39
+
40
+ // These functions take a sequence input, not arbitrary iterables
41
+
42
+ /// \brief Infer Arrow type from a Python sequence
43
+ /// \param[in] obj the sequence of values
44
+ /// \param[in] mask an optional mask where True values are null. May
45
+ /// be nullptr
46
+ /// \param[in] pandas_null_sentinels use pandas's null value markers
47
+ ARROW_PYTHON_EXPORT
48
+ Result<std::shared_ptr<arrow::DataType>> InferArrowType(PyObject* obj, PyObject* mask,
49
+ bool pandas_null_sentinels);
50
+
51
+ /// Checks whether the passed Python object is a boolean scalar
52
+ ARROW_PYTHON_EXPORT
53
+ bool IsPyBool(PyObject* obj);
54
+
55
+ /// Checks whether the passed Python object is an integer scalar
56
+ ARROW_PYTHON_EXPORT
57
+ bool IsPyInt(PyObject* obj);
58
+
59
+ /// Checks whether the passed Python object is a float scalar
60
+ ARROW_PYTHON_EXPORT
61
+ bool IsPyFloat(PyObject* obj);
62
+
63
+ } // namespace py
64
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib.h ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Generated by Cython 3.0.11 */
2
+
3
+ #ifndef __PYX_HAVE__pyarrow__lib
4
+ #define __PYX_HAVE__pyarrow__lib
5
+
6
+ #include "Python.h"
7
+
8
+ #ifndef __PYX_HAVE_API__pyarrow__lib
9
+
10
+ #ifdef CYTHON_EXTERN_C
11
+ #undef __PYX_EXTERN_C
12
+ #define __PYX_EXTERN_C CYTHON_EXTERN_C
13
+ #elif defined(__PYX_EXTERN_C)
14
+ #ifdef _MSC_VER
15
+ #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.")
16
+ #else
17
+ #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.
18
+ #endif
19
+ #else
20
+ #define __PYX_EXTERN_C extern "C++"
21
+ #endif
22
+
23
+ #ifndef DL_IMPORT
24
+ #define DL_IMPORT(_T) _T
25
+ #endif
26
+
27
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_buffer(std::shared_ptr< arrow::Buffer> const &);
28
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_resizable_buffer(std::shared_ptr< arrow::ResizableBuffer> const &);
29
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_data_type(std::shared_ptr< arrow::DataType> const &);
30
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_field(std::shared_ptr< arrow::Field> const &);
31
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_schema(std::shared_ptr< arrow::Schema> const &);
32
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_scalar(std::shared_ptr< arrow::Scalar> const &);
33
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_array(std::shared_ptr< arrow::Array> const &);
34
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_chunked_array(std::shared_ptr< arrow::ChunkedArray> const &);
35
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_coo_tensor(std::shared_ptr< arrow::SparseCOOTensor> const &);
36
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csc_matrix(std::shared_ptr< arrow::SparseCSCMatrix> const &);
37
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csf_tensor(std::shared_ptr< arrow::SparseCSFTensor> const &);
38
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csr_matrix(std::shared_ptr< arrow::SparseCSRMatrix> const &);
39
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_tensor(std::shared_ptr< arrow::Tensor> const &);
40
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_batch(std::shared_ptr< arrow::RecordBatch> const &);
41
+ __PYX_EXTERN_C PyObject *pyarrow_wrap_table(std::shared_ptr< arrow::Table> const &);
42
+ __PYX_EXTERN_C std::shared_ptr< arrow::Buffer> pyarrow_unwrap_buffer(PyObject *);
43
+ __PYX_EXTERN_C std::shared_ptr< arrow::DataType> pyarrow_unwrap_data_type(PyObject *);
44
+ __PYX_EXTERN_C std::shared_ptr< arrow::Field> pyarrow_unwrap_field(PyObject *);
45
+ __PYX_EXTERN_C std::shared_ptr< arrow::Schema> pyarrow_unwrap_schema(PyObject *);
46
+ __PYX_EXTERN_C std::shared_ptr< arrow::Scalar> pyarrow_unwrap_scalar(PyObject *);
47
+ __PYX_EXTERN_C std::shared_ptr< arrow::Array> pyarrow_unwrap_array(PyObject *);
48
+ __PYX_EXTERN_C std::shared_ptr< arrow::ChunkedArray> pyarrow_unwrap_chunked_array(PyObject *);
49
+ __PYX_EXTERN_C std::shared_ptr< arrow::SparseCOOTensor> pyarrow_unwrap_sparse_coo_tensor(PyObject *);
50
+ __PYX_EXTERN_C std::shared_ptr< arrow::SparseCSCMatrix> pyarrow_unwrap_sparse_csc_matrix(PyObject *);
51
+ __PYX_EXTERN_C std::shared_ptr< arrow::SparseCSFTensor> pyarrow_unwrap_sparse_csf_tensor(PyObject *);
52
+ __PYX_EXTERN_C std::shared_ptr< arrow::SparseCSRMatrix> pyarrow_unwrap_sparse_csr_matrix(PyObject *);
53
+ __PYX_EXTERN_C std::shared_ptr< arrow::Tensor> pyarrow_unwrap_tensor(PyObject *);
54
+ __PYX_EXTERN_C std::shared_ptr< arrow::RecordBatch> pyarrow_unwrap_batch(PyObject *);
55
+ __PYX_EXTERN_C std::shared_ptr< arrow::Table> pyarrow_unwrap_table(PyObject *);
56
+
57
+ #endif /* !__PYX_HAVE_API__pyarrow__lib */
58
+
59
+ /* WARNING: the interface of the module init function changed in CPython 3.5. */
60
+ /* It now returns a PyModuleDef instance instead of a PyModule instance. */
61
+
62
+ #if PY_MAJOR_VERSION < 3
63
+ PyMODINIT_FUNC initlib(void);
64
+ #else
65
+ /* WARNING: Use PyImport_AppendInittab("lib", PyInit_lib) instead of calling PyInit_lib directly from Python 3.5 */
66
+ PyMODINIT_FUNC PyInit_lib(void);
67
+
68
+ #if PY_VERSION_HEX >= 0x03050000 && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 201402L))
69
+ #if defined(__cplusplus) && __cplusplus >= 201402L
70
+ [[deprecated("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly.")]] inline
71
+ #elif defined(__GNUC__) || defined(__clang__)
72
+ __attribute__ ((__deprecated__("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly."), __unused__)) __inline__
73
+ #elif defined(_MSC_VER)
74
+ __declspec(deprecated("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly.")) __inline
75
+ #endif
76
+ static PyObject* __PYX_WARN_IF_PyInit_lib_INIT_CALLED(PyObject* res) {
77
+ return res;
78
+ }
79
+ #define PyInit_lib() __PYX_WARN_IF_PyInit_lib_INIT_CALLED(PyInit_lib())
80
+ #endif
81
+ #endif
82
+
83
+ #endif /* !__PYX_HAVE__pyarrow__lib */
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib_api.h ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Generated by Cython 3.0.11 */
2
+
3
+ #ifndef __PYX_HAVE_API__pyarrow__lib
4
+ #define __PYX_HAVE_API__pyarrow__lib
5
+ #ifdef __MINGW64__
6
+ #define MS_WIN64
7
+ #endif
8
+ #include "Python.h"
9
+ #include "lib.h"
10
+
11
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_box_memory_pool)( arrow::MemoryPool *) = 0;
12
+ #define box_memory_pool __pyx_api_f_7pyarrow_3lib_box_memory_pool
13
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer)(std::shared_ptr< arrow::Buffer> const &) = 0;
14
+ #define pyarrow_wrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer
15
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer)(std::shared_ptr< arrow::ResizableBuffer> const &) = 0;
16
+ #define pyarrow_wrap_resizable_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer
17
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type)(std::shared_ptr< arrow::DataType> const &) = 0;
18
+ #define pyarrow_wrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type
19
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field)(std::shared_ptr< arrow::Field> const &) = 0;
20
+ #define pyarrow_wrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field
21
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema)(std::shared_ptr< arrow::Schema> const &) = 0;
22
+ #define pyarrow_wrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema
23
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar)(std::shared_ptr< arrow::Scalar> const &) = 0;
24
+ #define pyarrow_wrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar
25
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array)(std::shared_ptr< arrow::Array> const &) = 0;
26
+ #define pyarrow_wrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array
27
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array)(std::shared_ptr< arrow::ChunkedArray> const &) = 0;
28
+ #define pyarrow_wrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array
29
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor)(std::shared_ptr< arrow::SparseCOOTensor> const &) = 0;
30
+ #define pyarrow_wrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor
31
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix)(std::shared_ptr< arrow::SparseCSCMatrix> const &) = 0;
32
+ #define pyarrow_wrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix
33
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor)(std::shared_ptr< arrow::SparseCSFTensor> const &) = 0;
34
+ #define pyarrow_wrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor
35
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix)(std::shared_ptr< arrow::SparseCSRMatrix> const &) = 0;
36
+ #define pyarrow_wrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix
37
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor)(std::shared_ptr< arrow::Tensor> const &) = 0;
38
+ #define pyarrow_wrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor
39
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch)(std::shared_ptr< arrow::RecordBatch> const &) = 0;
40
+ #define pyarrow_wrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch
41
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table)(std::shared_ptr< arrow::Table> const &) = 0;
42
+ #define pyarrow_wrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table
43
+ static std::shared_ptr< arrow::Buffer> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer)(PyObject *) = 0;
44
+ #define pyarrow_unwrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer
45
+ static std::shared_ptr< arrow::DataType> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type)(PyObject *) = 0;
46
+ #define pyarrow_unwrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type
47
+ static std::shared_ptr< arrow::Field> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field)(PyObject *) = 0;
48
+ #define pyarrow_unwrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field
49
+ static std::shared_ptr< arrow::Schema> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema)(PyObject *) = 0;
50
+ #define pyarrow_unwrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema
51
+ static std::shared_ptr< arrow::Scalar> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar)(PyObject *) = 0;
52
+ #define pyarrow_unwrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar
53
+ static std::shared_ptr< arrow::Array> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array)(PyObject *) = 0;
54
+ #define pyarrow_unwrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array
55
+ static std::shared_ptr< arrow::ChunkedArray> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array)(PyObject *) = 0;
56
+ #define pyarrow_unwrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array
57
+ static std::shared_ptr< arrow::SparseCOOTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor)(PyObject *) = 0;
58
+ #define pyarrow_unwrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor
59
+ static std::shared_ptr< arrow::SparseCSCMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix)(PyObject *) = 0;
60
+ #define pyarrow_unwrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix
61
+ static std::shared_ptr< arrow::SparseCSFTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor)(PyObject *) = 0;
62
+ #define pyarrow_unwrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor
63
+ static std::shared_ptr< arrow::SparseCSRMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix)(PyObject *) = 0;
64
+ #define pyarrow_unwrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix
65
+ static std::shared_ptr< arrow::Tensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor)(PyObject *) = 0;
66
+ #define pyarrow_unwrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor
67
+ static std::shared_ptr< arrow::RecordBatch> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch)(PyObject *) = 0;
68
+ #define pyarrow_unwrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch
69
+ static std::shared_ptr< arrow::Table> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table)(PyObject *) = 0;
70
+ #define pyarrow_unwrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table
71
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status)(arrow::Status const &) = 0;
72
+ #define pyarrow_internal_check_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status
73
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status)(arrow::Status const &) = 0;
74
+ #define pyarrow_internal_convert_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status
75
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer)(PyObject *) = 0;
76
+ #define pyarrow_is_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer
77
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type)(PyObject *) = 0;
78
+ #define pyarrow_is_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type
79
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata)(PyObject *) = 0;
80
+ #define pyarrow_is_metadata __pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata
81
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_field)(PyObject *) = 0;
82
+ #define pyarrow_is_field __pyx_api_f_7pyarrow_3lib_pyarrow_is_field
83
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema)(PyObject *) = 0;
84
+ #define pyarrow_is_schema __pyx_api_f_7pyarrow_3lib_pyarrow_is_schema
85
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_array)(PyObject *) = 0;
86
+ #define pyarrow_is_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_array
87
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array)(PyObject *) = 0;
88
+ #define pyarrow_is_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array
89
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar)(PyObject *) = 0;
90
+ #define pyarrow_is_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar
91
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor)(PyObject *) = 0;
92
+ #define pyarrow_is_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor
93
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor)(PyObject *) = 0;
94
+ #define pyarrow_is_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor
95
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix)(PyObject *) = 0;
96
+ #define pyarrow_is_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix
97
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix)(PyObject *) = 0;
98
+ #define pyarrow_is_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix
99
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor)(PyObject *) = 0;
100
+ #define pyarrow_is_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor
101
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_table)(PyObject *) = 0;
102
+ #define pyarrow_is_table __pyx_api_f_7pyarrow_3lib_pyarrow_is_table
103
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch)(PyObject *) = 0;
104
+ #define pyarrow_is_batch __pyx_api_f_7pyarrow_3lib_pyarrow_is_batch
105
+ #ifndef __PYX_HAVE_RT_ImportFunction_3_0_11
106
+ #define __PYX_HAVE_RT_ImportFunction_3_0_11
107
+ static int __Pyx_ImportFunction_3_0_11(PyObject *module, const char *funcname, void (**f)(void), const char *sig) {
108
+ PyObject *d = 0;
109
+ PyObject *cobj = 0;
110
+ union {
111
+ void (*fp)(void);
112
+ void *p;
113
+ } tmp;
114
+ d = PyObject_GetAttrString(module, (char *)"__pyx_capi__");
115
+ if (!d)
116
+ goto bad;
117
+ cobj = PyDict_GetItemString(d, funcname);
118
+ if (!cobj) {
119
+ PyErr_Format(PyExc_ImportError,
120
+ "%.200s does not export expected C function %.200s",
121
+ PyModule_GetName(module), funcname);
122
+ goto bad;
123
+ }
124
+ if (!PyCapsule_IsValid(cobj, sig)) {
125
+ PyErr_Format(PyExc_TypeError,
126
+ "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
127
+ PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj));
128
+ goto bad;
129
+ }
130
+ tmp.p = PyCapsule_GetPointer(cobj, sig);
131
+ *f = tmp.fp;
132
+ if (!(*f))
133
+ goto bad;
134
+ Py_DECREF(d);
135
+ return 0;
136
+ bad:
137
+ Py_XDECREF(d);
138
+ return -1;
139
+ }
140
+ #endif
141
+
142
+
143
+ static int import_pyarrow__lib(void) {
144
+ PyObject *module = 0;
145
+ module = PyImport_ImportModule("pyarrow.lib");
146
+ if (!module) goto bad;
147
+ if (__Pyx_ImportFunction_3_0_11(module, "box_memory_pool", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_box_memory_pool, "PyObject *( arrow::MemoryPool *)") < 0) goto bad;
148
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer, "PyObject *(std::shared_ptr< arrow::Buffer> const &)") < 0) goto bad;
149
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_resizable_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer, "PyObject *(std::shared_ptr< arrow::ResizableBuffer> const &)") < 0) goto bad;
150
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type, "PyObject *(std::shared_ptr< arrow::DataType> const &)") < 0) goto bad;
151
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field, "PyObject *(std::shared_ptr< arrow::Field> const &)") < 0) goto bad;
152
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema, "PyObject *(std::shared_ptr< arrow::Schema> const &)") < 0) goto bad;
153
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar, "PyObject *(std::shared_ptr< arrow::Scalar> const &)") < 0) goto bad;
154
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array, "PyObject *(std::shared_ptr< arrow::Array> const &)") < 0) goto bad;
155
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array, "PyObject *(std::shared_ptr< arrow::ChunkedArray> const &)") < 0) goto bad;
156
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor, "PyObject *(std::shared_ptr< arrow::SparseCOOTensor> const &)") < 0) goto bad;
157
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSCMatrix> const &)") < 0) goto bad;
158
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor, "PyObject *(std::shared_ptr< arrow::SparseCSFTensor> const &)") < 0) goto bad;
159
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSRMatrix> const &)") < 0) goto bad;
160
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor, "PyObject *(std::shared_ptr< arrow::Tensor> const &)") < 0) goto bad;
161
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch, "PyObject *(std::shared_ptr< arrow::RecordBatch> const &)") < 0) goto bad;
162
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_wrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table, "PyObject *(std::shared_ptr< arrow::Table> const &)") < 0) goto bad;
163
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer, "std::shared_ptr< arrow::Buffer> (PyObject *)") < 0) goto bad;
164
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type, "std::shared_ptr< arrow::DataType> (PyObject *)") < 0) goto bad;
165
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field, "std::shared_ptr< arrow::Field> (PyObject *)") < 0) goto bad;
166
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema, "std::shared_ptr< arrow::Schema> (PyObject *)") < 0) goto bad;
167
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar, "std::shared_ptr< arrow::Scalar> (PyObject *)") < 0) goto bad;
168
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array, "std::shared_ptr< arrow::Array> (PyObject *)") < 0) goto bad;
169
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array, "std::shared_ptr< arrow::ChunkedArray> (PyObject *)") < 0) goto bad;
170
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor, "std::shared_ptr< arrow::SparseCOOTensor> (PyObject *)") < 0) goto bad;
171
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix, "std::shared_ptr< arrow::SparseCSCMatrix> (PyObject *)") < 0) goto bad;
172
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor, "std::shared_ptr< arrow::SparseCSFTensor> (PyObject *)") < 0) goto bad;
173
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix, "std::shared_ptr< arrow::SparseCSRMatrix> (PyObject *)") < 0) goto bad;
174
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor, "std::shared_ptr< arrow::Tensor> (PyObject *)") < 0) goto bad;
175
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch, "std::shared_ptr< arrow::RecordBatch> (PyObject *)") < 0) goto bad;
176
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_unwrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table, "std::shared_ptr< arrow::Table> (PyObject *)") < 0) goto bad;
177
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_internal_check_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status, "int (arrow::Status const &)") < 0) goto bad;
178
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_internal_convert_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status, "PyObject *(arrow::Status const &)") < 0) goto bad;
179
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer, "int (PyObject *)") < 0) goto bad;
180
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type, "int (PyObject *)") < 0) goto bad;
181
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_metadata", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata, "int (PyObject *)") < 0) goto bad;
182
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_field, "int (PyObject *)") < 0) goto bad;
183
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema, "int (PyObject *)") < 0) goto bad;
184
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_array, "int (PyObject *)") < 0) goto bad;
185
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array, "int (PyObject *)") < 0) goto bad;
186
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar, "int (PyObject *)") < 0) goto bad;
187
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor, "int (PyObject *)") < 0) goto bad;
188
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor, "int (PyObject *)") < 0) goto bad;
189
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix, "int (PyObject *)") < 0) goto bad;
190
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix, "int (PyObject *)") < 0) goto bad;
191
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor, "int (PyObject *)") < 0) goto bad;
192
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_table, "int (PyObject *)") < 0) goto bad;
193
+ if (__Pyx_ImportFunction_3_0_11(module, "pyarrow_is_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch, "int (PyObject *)") < 0) goto bad;
194
+ Py_DECREF(module); module = 0;
195
+ return 0;
196
+ bad:
197
+ Py_XDECREF(module);
198
+ return -1;
199
+ }
200
+
201
+ #endif /* !__PYX_HAVE_API__pyarrow__lib */
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/platform.h" // IWYU pragma: export
21
+
22
+ #include <numpy/numpyconfig.h> // IWYU pragma: export
23
+
24
+ // Don't use the deprecated Numpy functions
25
+ #ifdef NPY_1_7_API_VERSION
26
+ # define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
27
+ #else
28
+ # define NPY_ARRAY_NOTSWAPPED NPY_NOTSWAPPED
29
+ # define NPY_ARRAY_ALIGNED NPY_ALIGNED
30
+ # define NPY_ARRAY_WRITEABLE NPY_WRITEABLE
31
+ # define NPY_ARRAY_UPDATEIFCOPY NPY_UPDATEIFCOPY
32
+ #endif
33
+
34
+ // This is required to be able to access the NumPy C API properly in C++ files
35
+ // other than init.cc.
36
+ #define PY_ARRAY_UNIQUE_SYMBOL arrow_ARRAY_API
37
+ #ifndef NUMPY_IMPORT_ARRAY
38
+ # define NO_IMPORT_ARRAY
39
+ #endif
40
+
41
+ #include <numpy/arrayobject.h> // IWYU pragma: export
42
+ #include <numpy/arrayscalars.h> // IWYU pragma: export
43
+ #include <numpy/ufuncobject.h> // IWYU pragma: export
44
+
45
+ // A bit subtle. Numpy has 5 canonical integer types:
46
+ // (or, rather, type pairs: signed and unsigned)
47
+ // NPY_BYTE, NPY_SHORT, NPY_INT, NPY_LONG, NPY_LONGLONG
48
+ // It also has 4 fixed-width integer aliases.
49
+ // When mapping Arrow integer types to these 4 fixed-width aliases,
50
+ // we always miss one of the canonical types (even though it may
51
+ // have the same width as one of the aliases).
52
+ // Which one depends on the platform...
53
+ // On a LP64 system, NPY_INT64 maps to NPY_LONG and
54
+ // NPY_LONGLONG needs to be handled separately.
55
+ // On a LLP64 system, NPY_INT32 maps to NPY_LONG and
56
+ // NPY_INT needs to be handled separately.
57
+
58
+ #if NPY_BITSOF_LONG == 32 && NPY_BITSOF_LONGLONG == 64
59
+ # define NPY_INT64_IS_LONG_LONG 1
60
+ #else
61
+ # define NPY_INT64_IS_LONG_LONG 0
62
+ #endif
63
+
64
+ #if NPY_BITSOF_INT == 32 && NPY_BITSOF_LONG == 64
65
+ # define NPY_INT32_IS_INT 1
66
+ #else
67
+ # define NPY_INT32_IS_INT 0
68
+ #endif
69
+
70
+ // Backported NumPy 2 API (can be removed if numpy 2 is required)
71
+ #if NPY_ABI_VERSION < 0x02000000
72
+ # define PyDataType_ELSIZE(descr) ((descr)->elsize)
73
+ # define PyDataType_C_METADATA(descr) ((descr)->c_metadata)
74
+ # define PyDataType_FIELDS(descr) ((descr)->fields)
75
+ #endif
76
+
77
+ namespace arrow {
78
+ namespace py {
79
+
80
+ inline int import_numpy() {
81
+ #ifdef NUMPY_IMPORT_ARRAY
82
+ import_array1(-1);
83
+ import_umath1(-1);
84
+ #endif
85
+
86
+ return 0;
87
+ }
88
+
89
+ // See above about the missing Numpy integer type numbers
90
+ inline int fix_numpy_type_num(int type_num) {
91
+ #if !NPY_INT32_IS_INT && NPY_BITSOF_INT == 32
92
+ if (type_num == NPY_INT) return NPY_INT32;
93
+ if (type_num == NPY_UINT) return NPY_UINT32;
94
+ #endif
95
+ #if !NPY_INT64_IS_LONG_LONG && NPY_BITSOF_LONGLONG == 64
96
+ if (type_num == NPY_LONGLONG) return NPY_INT64;
97
+ if (type_num == NPY_ULONGLONG) return NPY_UINT64;
98
+ #endif
99
+ return type_num;
100
+ }
101
+
102
+ } // namespace py
103
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Converting from pandas memory representation to Arrow data structures
19
+
20
+ #pragma once
21
+
22
+ #include "arrow/python/platform.h"
23
+
24
+ #include <memory>
25
+
26
+ #include "arrow/compute/api.h"
27
+ #include "arrow/python/visibility.h"
28
+
29
+ namespace arrow {
30
+
31
+ class Array;
32
+ class ChunkedArray;
33
+ class DataType;
34
+ class MemoryPool;
35
+ class Status;
36
+
37
+ namespace py {
38
+
39
+ /// Convert NumPy arrays to Arrow. If target data type is not known, pass a
40
+ /// type with null
41
+ ///
42
+ /// \param[in] pool Memory pool for any memory allocations
43
+ /// \param[in] ao an ndarray with the array data
44
+ /// \param[in] mo an ndarray with a null mask (True is null), optional
45
+ /// \param[in] from_pandas If true, use pandas's null sentinels to determine
46
+ /// whether values are null
47
+ /// \param[in] type a specific type to cast to, may be null
48
+ /// \param[in] cast_options casting options
49
+ /// \param[out] out a ChunkedArray, to accommodate chunked output
50
+ ARROW_PYTHON_EXPORT
51
+ Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas,
52
+ const std::shared_ptr<DataType>& type,
53
+ const compute::CastOptions& cast_options,
54
+ std::shared_ptr<ChunkedArray>* out);
55
+
56
+ /// Safely convert NumPy arrays to Arrow. If target data type is not known,
57
+ /// pass a type with null.
58
+ ///
59
+ /// \param[in] pool Memory pool for any memory allocations
60
+ /// \param[in] ao an ndarray with the array data
61
+ /// \param[in] mo an ndarray with a null mask (True is null), optional
62
+ /// \param[in] from_pandas If true, use pandas's null sentinels to determine
63
+ /// whether values are null
64
+ /// \param[in] type a specific type to cast to, may be null
65
+ /// \param[out] out a ChunkedArray, to accommodate chunked output
66
+ ARROW_PYTHON_EXPORT
67
+ Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas,
68
+ const std::shared_ptr<DataType>& type,
69
+ std::shared_ptr<ChunkedArray>* out);
70
+
71
+ } // namespace py
72
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+
22
+ #include "arrow/python/common.h"
23
+ #include "arrow/python/visibility.h"
24
+ #include "arrow/util/macros.h"
25
+ #include "parquet/encryption/crypto_factory.h"
26
+ #include "parquet/encryption/kms_client.h"
27
+ #include "parquet/encryption/kms_client_factory.h"
28
+
29
+ #if defined(_WIN32) || defined(__CYGWIN__) // Windows
30
+ # if defined(_MSC_VER)
31
+ # pragma warning(disable : 4251)
32
+ # else
33
+ # pragma GCC diagnostic ignored "-Wattributes"
34
+ # endif
35
+
36
+ # ifdef ARROW_PYTHON_STATIC
37
+ # define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT
38
+ # elif defined(ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORTING)
39
+ # define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __declspec(dllexport)
40
+ # else
41
+ # define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __declspec(dllimport)
42
+ # endif
43
+
44
+ #else // Not Windows
45
+ # ifndef ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT
46
+ # define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __attribute__((visibility("default")))
47
+ # endif
48
+ #endif // Non-Windows
49
+
50
+ namespace arrow {
51
+ namespace py {
52
+ namespace parquet {
53
+ namespace encryption {
54
+
55
+ /// \brief A table of function pointers for calling from C++ into
56
+ /// Python.
57
+ class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientVtable {
58
+ public:
59
+ std::function<void(PyObject*, const std::string& key_bytes,
60
+ const std::string& master_key_identifier, std::string* out)>
61
+ wrap_key;
62
+ std::function<void(PyObject*, const std::string& wrapped_key,
63
+ const std::string& master_key_identifier, std::string* out)>
64
+ unwrap_key;
65
+ };
66
+
67
+ /// \brief A helper for KmsClient implementation in Python.
68
+ class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClient
69
+ : public ::parquet::encryption::KmsClient {
70
+ public:
71
+ PyKmsClient(PyObject* handler, PyKmsClientVtable vtable);
72
+ ~PyKmsClient() override;
73
+
74
+ std::string WrapKey(const std::string& key_bytes,
75
+ const std::string& master_key_identifier) override;
76
+
77
+ std::string UnwrapKey(const std::string& wrapped_key,
78
+ const std::string& master_key_identifier) override;
79
+
80
+ private:
81
+ OwnedRefNoGIL handler_;
82
+ PyKmsClientVtable vtable_;
83
+ };
84
+
85
+ /// \brief A table of function pointers for calling from C++ into
86
+ /// Python.
87
+ class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientFactoryVtable {
88
+ public:
89
+ std::function<void(
90
+ PyObject*, const ::parquet::encryption::KmsConnectionConfig& kms_connection_config,
91
+ std::shared_ptr<::parquet::encryption::KmsClient>* out)>
92
+ create_kms_client;
93
+ };
94
+
95
+ /// \brief A helper for KmsClientFactory implementation in Python.
96
+ class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientFactory
97
+ : public ::parquet::encryption::KmsClientFactory {
98
+ public:
99
+ PyKmsClientFactory(PyObject* handler, PyKmsClientFactoryVtable vtable);
100
+ ~PyKmsClientFactory() override;
101
+
102
+ std::shared_ptr<::parquet::encryption::KmsClient> CreateKmsClient(
103
+ const ::parquet::encryption::KmsConnectionConfig& kms_connection_config) override;
104
+
105
+ private:
106
+ OwnedRefNoGIL handler_;
107
+ PyKmsClientFactoryVtable vtable_;
108
+ };
109
+
110
+ /// \brief A CryptoFactory that returns Results instead of throwing exceptions.
111
+ class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyCryptoFactory
112
+ : public ::parquet::encryption::CryptoFactory {
113
+ public:
114
+ arrow::Result<std::shared_ptr<::parquet::FileEncryptionProperties>>
115
+ SafeGetFileEncryptionProperties(
116
+ const ::parquet::encryption::KmsConnectionConfig& kms_connection_config,
117
+ const ::parquet::encryption::EncryptionConfiguration& encryption_config);
118
+
119
+ /// The returned FileDecryptionProperties object will use the cache inside this
120
+ /// CryptoFactory object, so please keep this
121
+ /// CryptoFactory object alive along with the returned
122
+ /// FileDecryptionProperties object.
123
+ arrow::Result<std::shared_ptr<::parquet::FileDecryptionProperties>>
124
+ SafeGetFileDecryptionProperties(
125
+ const ::parquet::encryption::KmsConnectionConfig& kms_connection_config,
126
+ const ::parquet::encryption::DecryptionConfiguration& decryption_config);
127
+ };
128
+
129
+ } // namespace encryption
130
+ } // namespace parquet
131
+ } // namespace py
132
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/platform.h"
21
+
22
+ #include <memory>
23
+
24
+ #include "arrow/python/visibility.h"
25
+
26
+ #include "arrow/sparse_tensor.h"
27
+
28
// Work around ARROW-2317 (C linkage warning from Cython)
extern "C++" {

namespace arrow {

// Forward declarations only: keeps this public bridge header independent of
// the full Arrow type headers.
class Array;
class Buffer;
class DataType;
class Field;
class RecordBatch;
class Schema;
class Status;
class Table;
class Tensor;

namespace py {

// Initialize the C++ <-> Python bridge (imports the pyarrow module).
// Presumably must be called before using the wrap/unwrap helpers below,
// following the standard Cython capsule-import pattern -- confirm with callers.
// Returns 0 on success, -1 on error.
ARROW_PYTHON_EXPORT int import_pyarrow();

// For each (FUNC_SUFFIX, TYPE_NAME) pair this declares three helpers:
//   is_<suffix>(PyObject*)     -> bool
//   unwrap_<suffix>(PyObject*) -> Result<shared_ptr<TYPE_NAME>>
//   wrap_<suffix>(shared_ptr)  -> PyObject*
#define DECLARE_WRAP_FUNCTIONS(FUNC_SUFFIX, TYPE_NAME)                          \
  ARROW_PYTHON_EXPORT bool is_##FUNC_SUFFIX(PyObject*);                         \
  ARROW_PYTHON_EXPORT Result<std::shared_ptr<TYPE_NAME>> unwrap_##FUNC_SUFFIX(  \
      PyObject*);                                                               \
  ARROW_PYTHON_EXPORT PyObject* wrap_##FUNC_SUFFIX(const std::shared_ptr<TYPE_NAME>&);

DECLARE_WRAP_FUNCTIONS(buffer, Buffer)

DECLARE_WRAP_FUNCTIONS(data_type, DataType)
DECLARE_WRAP_FUNCTIONS(field, Field)
DECLARE_WRAP_FUNCTIONS(schema, Schema)

DECLARE_WRAP_FUNCTIONS(scalar, Scalar)

DECLARE_WRAP_FUNCTIONS(array, Array)
DECLARE_WRAP_FUNCTIONS(chunked_array, ChunkedArray)

DECLARE_WRAP_FUNCTIONS(sparse_coo_tensor, SparseCOOTensor)
DECLARE_WRAP_FUNCTIONS(sparse_csc_matrix, SparseCSCMatrix)
DECLARE_WRAP_FUNCTIONS(sparse_csf_tensor, SparseCSFTensor)
DECLARE_WRAP_FUNCTIONS(sparse_csr_matrix, SparseCSRMatrix)
DECLARE_WRAP_FUNCTIONS(tensor, Tensor)

DECLARE_WRAP_FUNCTIONS(batch, RecordBatch)
DECLARE_WRAP_FUNCTIONS(table, Table)

#undef DECLARE_WRAP_FUNCTIONS

namespace internal {

// If status is ok, return 0.
// If status is not ok, set Python error indicator and return -1.
ARROW_PYTHON_EXPORT int check_status(const Status& status);

// Convert status to a Python exception object. Status must not be ok.
ARROW_PYTHON_EXPORT PyObject* convert_status(const Status& status);

}  // namespace internal
}  // namespace py
}  // namespace arrow

}  // extern "C++"
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // For backward compatibility.
19
+ #include "arrow/python/lib.h"
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between CPython built-in data structures and Arrow
19
+ // data structures
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/python/platform.h"
24
+
25
+ #include <cstdint>
26
+ #include <memory>
27
+
28
+ #include "arrow/python/visibility.h"
29
+ #include "arrow/type.h"
30
+ #include "arrow/util/macros.h"
31
+
32
+ #include "arrow/python/common.h"
33
+
34
+ namespace arrow {
35
+
36
+ class Array;
37
+ class Status;
38
+
39
+ namespace py {
40
+
41
/// \brief Options controlling conversion of a Python sequence to Arrow data.
struct PyConversionOptions {
  PyConversionOptions() = default;

  // NOTE(review): the `pool` parameter is accepted but intentionally not
  // stored -- the memory pool is passed separately to the conversion
  // function (see ConvertPySequence); confirm before removing.
  PyConversionOptions(const std::shared_ptr<DataType>& type, int64_t size,
                      MemoryPool* pool, bool from_pandas)
      : type(type), size(size), from_pandas(from_pandas) {}

  // Set to null if to be inferred
  std::shared_ptr<DataType> type;

  // Default is -1, which indicates the size should be the same as the input sequence
  int64_t size = -1;

  // Apply pandas conversion semantics (presumably e.g. NaN treated as null)
  // -- TODO confirm against the converter implementation.
  bool from_pandas = false;

  /// Used to maintain backwards compatibility for
  /// timezone bugs (see ARROW-9528). Should be removed
  /// after Arrow 2.0 release.
  bool ignore_timezone = false;

  // If true, conversion is stricter about accepted inputs (exact semantics
  // live in the converter implementation -- confirm there).
  bool strict = false;
};
63
+
64
/// \brief Convert sequence (list, generator, NumPy array with dtype object) of
/// Python objects.
/// \param[in] obj the sequence to convert
/// \param[in] mask a NumPy array of true/false values to indicate whether
/// values in the sequence are null (true) or not null (false). This parameter
/// may be null
/// \param[in] options various conversion options
/// \param[in] pool MemoryPool to use for allocations
/// \return the converted data as a ChunkedArray (wrapped in Result)
ARROW_PYTHON_EXPORT
Result<std::shared_ptr<ChunkedArray>> ConvertPySequence(
    PyObject* obj, PyObject* mask, PyConversionOptions options,
    MemoryPool* pool = default_memory_pool());
77
+
78
+ } // namespace py
79
+
80
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/type_traits.h ADDED
@@ -0,0 +1,350 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Internal header
19
+
20
+ #pragma once
21
+
22
+ #include "arrow/python/platform.h"
23
+
24
+ #include <cstdint>
25
+ #include <limits>
26
+
27
+ #include "arrow/python/numpy_interop.h"
28
+
29
+ #include <numpy/halffloat.h>
30
+
31
+ #include "arrow/type_fwd.h"
32
+ #include "arrow/util/logging.h"
33
+
34
+ namespace arrow {
35
+ namespace py {
36
+
37
+ static constexpr int64_t kPandasTimestampNull = std::numeric_limits<int64_t>::min();
38
+ constexpr int64_t kNanosecondsInDay = 86400000000000LL;
39
+
40
+ namespace internal {
41
+
42
//
// Type traits for Numpy -> Arrow equivalence
//
// Each specialization of npy_traits maps a NumPy type number (NPY_*) to:
//  - value_type:   the C storage type of a single NumPy element
//  - TypeClass:    the equivalent Arrow DataType class
//  - BuilderClass: the Arrow builder used when converting such values
//  - supports_nulls / isnull(): whether the NumPy representation can encode
//    a missing value in-band, and how to detect one.
template <int TYPE>
struct npy_traits {};

template <>
struct npy_traits<NPY_BOOL> {
  typedef uint8_t value_type;
  using TypeClass = BooleanType;
  using BuilderClass = BooleanBuilder;

  // NumPy booleans have no in-band null representation.
  static constexpr bool supports_nulls = false;
  static inline bool isnull(uint8_t v) { return false; }
};

// Declares npy_traits for one integer type. NumPy integers have no
// in-band null representation, so supports_nulls is false.
#define NPY_INT_DECL(TYPE, CapType, T)               \
  template <>                                        \
  struct npy_traits<NPY_##TYPE> {                    \
    typedef T value_type;                            \
    using TypeClass = CapType##Type;                 \
    using BuilderClass = CapType##Builder;           \
                                                     \
    static constexpr bool supports_nulls = false;    \
    static inline bool isnull(T v) { return false; } \
  };

NPY_INT_DECL(INT8, Int8, int8_t);
NPY_INT_DECL(INT16, Int16, int16_t);
NPY_INT_DECL(INT32, Int32, int32_t);
NPY_INT_DECL(INT64, Int64, int64_t);

NPY_INT_DECL(UINT8, UInt8, uint8_t);
NPY_INT_DECL(UINT16, UInt16, uint16_t);
NPY_INT_DECL(UINT32, UInt32, uint32_t);
NPY_INT_DECL(UINT64, UInt64, uint64_t);

// On some platforms NPY_INT / NPY_LONGLONG (and the unsigned variants) are
// distinct type numbers from the fixed-width aliases declared above, so
// they need their own specializations.
#if !NPY_INT32_IS_INT && NPY_BITSOF_INT == 32
NPY_INT_DECL(INT, Int32, int32_t);
NPY_INT_DECL(UINT, UInt32, uint32_t);
#endif
#if !NPY_INT64_IS_LONG_LONG && NPY_BITSOF_LONGLONG == 64
NPY_INT_DECL(LONGLONG, Int64, int64_t);
NPY_INT_DECL(ULONGLONG, UInt64, uint64_t);
#endif

template <>
struct npy_traits<NPY_FLOAT16> {
  typedef npy_half value_type;
  using TypeClass = HalfFloatType;
  using BuilderClass = HalfFloatBuilder;

  static constexpr npy_half na_sentinel = NPY_HALF_NAN;

  static constexpr bool supports_nulls = true;

  // NOTE(review): this compares raw half bit patterns, so only the canonical
  // NPY_HALF_NAN encoding (the sentinel above) is treated as null, not every
  // possible half-precision NaN -- confirm this matches the callers' intent.
  static inline bool isnull(npy_half v) { return v == NPY_HALF_NAN; }
};

template <>
struct npy_traits<NPY_FLOAT32> {
  typedef float value_type;
  using TypeClass = FloatType;
  using BuilderClass = FloatBuilder;

  // We need to use quiet_NaN here instead of the NAN macro as on Windows
  // the NAN macro leads to "division-by-zero" compile-time error with clang.
  static constexpr float na_sentinel = std::numeric_limits<float>::quiet_NaN();

  static constexpr bool supports_nulls = true;

  // v != v is the standard NaN test: any NaN counts as null.
  static inline bool isnull(float v) { return v != v; }
};

template <>
struct npy_traits<NPY_FLOAT64> {
  typedef double value_type;
  using TypeClass = DoubleType;
  using BuilderClass = DoubleBuilder;

  static constexpr double na_sentinel = std::numeric_limits<double>::quiet_NaN();

  static constexpr bool supports_nulls = true;

  // v != v is the standard NaN test: any NaN counts as null.
  static inline bool isnull(double v) { return v != v; }
};

template <>
struct npy_traits<NPY_DATETIME> {
  typedef int64_t value_type;
  using TypeClass = TimestampType;
  using BuilderClass = TimestampBuilder;

  static constexpr bool supports_nulls = true;

  static inline bool isnull(int64_t v) {
    // NaT = -2**63
    //     = -0x8000000000000000
    //     = -9223372036854775808;
    //     = std::numeric_limits<int64_t>::min()
    return v == std::numeric_limits<int64_t>::min();
  }
};

template <>
struct npy_traits<NPY_TIMEDELTA> {
  typedef int64_t value_type;
  using TypeClass = DurationType;
  using BuilderClass = DurationBuilder;

  static constexpr bool supports_nulls = true;

  static inline bool isnull(int64_t v) {
    // NaT = -2**63 = std::numeric_limits<int64_t>::min()
    return v == std::numeric_limits<int64_t>::min();
  }
};

template <>
struct npy_traits<NPY_OBJECT> {
  typedef PyObject* value_type;
  static constexpr bool supports_nulls = true;

  // Only the None singleton is treated as null at this level.
  static inline bool isnull(PyObject* v) { return v == Py_None; }
};
167
+
168
//
// Type traits for Arrow -> Numpy equivalence
// Note *supports_nulls* means the equivalent Numpy type supports nulls
//
// Each specialization maps an Arrow Type::type id to:
//  - npy_type:  the NumPy type number used for the converted array
//  - na_value:  the in-band sentinel written for null entries (if any)
//  - npy_shift: scale factor between the stored unit and the NumPy unit
//               (presumably -- see usage in the pandas converters)
//  - T:         the element storage type
template <int TYPE>
struct arrow_traits {};

template <>
struct arrow_traits<Type::BOOL> {
  static constexpr int npy_type = NPY_BOOL;
  static constexpr bool supports_nulls = false;
  typedef typename npy_traits<NPY_BOOL>::value_type T;
};

// NOTE(review): na_value is a double NaN even though supports_nulls is
// false -- presumably used when a null-bearing integer column is promoted
// to float64 during conversion; confirm against the converters.
#define INT_DECL(TYPE)                                                          \
  template <>                                                                   \
  struct arrow_traits<Type::TYPE> {                                             \
    static constexpr int npy_type = NPY_##TYPE;                                 \
    static constexpr bool supports_nulls = false;                               \
    static constexpr double na_value = std::numeric_limits<double>::quiet_NaN(); \
    typedef typename npy_traits<NPY_##TYPE>::value_type T;                      \
  };

INT_DECL(INT8);
INT_DECL(INT16);
INT_DECL(INT32);
INT_DECL(INT64);
INT_DECL(UINT8);
INT_DECL(UINT16);
INT_DECL(UINT32);
INT_DECL(UINT64);

template <>
struct arrow_traits<Type::HALF_FLOAT> {
  static constexpr int npy_type = NPY_FLOAT16;
  static constexpr bool supports_nulls = true;
  // Raw bit pattern of the half-precision NaN sentinel.
  static constexpr uint16_t na_value = NPY_HALF_NAN;
  typedef typename npy_traits<NPY_FLOAT16>::value_type T;
};

template <>
struct arrow_traits<Type::FLOAT> {
  static constexpr int npy_type = NPY_FLOAT32;
  static constexpr bool supports_nulls = true;
  static constexpr float na_value = std::numeric_limits<float>::quiet_NaN();
  typedef typename npy_traits<NPY_FLOAT32>::value_type T;
};

template <>
struct arrow_traits<Type::DOUBLE> {
  static constexpr int npy_type = NPY_FLOAT64;
  static constexpr bool supports_nulls = true;
  static constexpr double na_value = std::numeric_limits<double>::quiet_NaN();
  typedef typename npy_traits<NPY_FLOAT64>::value_type T;
};

template <>
struct arrow_traits<Type::TIMESTAMP> {
  static constexpr int npy_type = NPY_DATETIME;
  static constexpr int64_t npy_shift = 1;

  static constexpr bool supports_nulls = true;
  static constexpr int64_t na_value = kPandasTimestampNull;
  typedef typename npy_traits<NPY_DATETIME>::value_type T;
};

template <>
struct arrow_traits<Type::DURATION> {
  static constexpr int npy_type = NPY_TIMEDELTA;
  static constexpr int64_t npy_shift = 1;

  static constexpr bool supports_nulls = true;
  static constexpr int64_t na_value = kPandasTimestampNull;
  typedef typename npy_traits<NPY_TIMEDELTA>::value_type T;
};

template <>
struct arrow_traits<Type::DATE32> {
  // Data stores as FR_D day unit
  static constexpr int npy_type = NPY_DATETIME;
  static constexpr int64_t npy_shift = 1;

  static constexpr bool supports_nulls = true;
  typedef typename npy_traits<NPY_DATETIME>::value_type T;

  static constexpr int64_t na_value = kPandasTimestampNull;
  static inline bool isnull(int64_t v) { return npy_traits<NPY_DATETIME>::isnull(v); }
};

template <>
struct arrow_traits<Type::DATE64> {
  // Data stores as FR_D day unit
  static constexpr int npy_type = NPY_DATETIME;

  // There are 1000 * 60 * 60 * 24 = 86400000ms in a day
  static constexpr int64_t npy_shift = 86400000;

  static constexpr bool supports_nulls = true;
  typedef typename npy_traits<NPY_DATETIME>::value_type T;

  static constexpr int64_t na_value = kPandasTimestampNull;
  static inline bool isnull(int64_t v) { return npy_traits<NPY_DATETIME>::isnull(v); }
};

template <>
struct arrow_traits<Type::TIME32> {
  // Times are converted to Python objects rather than a native NumPy dtype.
  static constexpr int npy_type = NPY_OBJECT;
  static constexpr bool supports_nulls = true;
  static constexpr int64_t na_value = kPandasTimestampNull;
  typedef typename npy_traits<NPY_DATETIME>::value_type T;
};

template <>
struct arrow_traits<Type::TIME64> {
  static constexpr int npy_type = NPY_OBJECT;
  static constexpr bool supports_nulls = true;
  typedef typename npy_traits<NPY_DATETIME>::value_type T;
};

template <>
struct arrow_traits<Type::STRING> {
  static constexpr int npy_type = NPY_OBJECT;
  static constexpr bool supports_nulls = true;
};

template <>
struct arrow_traits<Type::BINARY> {
  static constexpr int npy_type = NPY_OBJECT;
  static constexpr bool supports_nulls = true;
};
298
+
299
+ static inline NPY_DATETIMEUNIT NumPyFrequency(TimeUnit::type unit) {
300
+ switch (unit) {
301
+ case TimestampType::Unit::SECOND:
302
+ return NPY_FR_s;
303
+ case TimestampType::Unit::MILLI:
304
+ return NPY_FR_ms;
305
+ break;
306
+ case TimestampType::Unit::MICRO:
307
+ return NPY_FR_us;
308
+ default:
309
+ // NANO
310
+ return NPY_FR_ns;
311
+ }
312
+ }
313
+
314
// Return the size in bytes of one element of the given NumPy type number.
// Unhandled type numbers trigger a fatal ARROW_CHECK.
static inline int NumPyTypeSize(int npy_type) {
  // Presumably canonicalizes platform-dependent aliases (e.g. NPY_INT vs
  // NPY_INT32) to the fixed-width type numbers matched below -- see
  // numpy_interop.h for fix_numpy_type_num.
  npy_type = fix_numpy_type_num(npy_type);

  switch (npy_type) {
    case NPY_BOOL:
    case NPY_INT8:
    case NPY_UINT8:
      return 1;
    case NPY_INT16:
    case NPY_UINT16:
      return 2;
    case NPY_INT32:
    case NPY_UINT32:
      return 4;
    case NPY_INT64:
    case NPY_UINT64:
      return 8;
    case NPY_FLOAT16:
      return 2;
    case NPY_FLOAT32:
      return 4;
    case NPY_FLOAT64:
      return 8;
    case NPY_DATETIME:
      // datetime64 values are stored as int64.
      return 8;
    case NPY_OBJECT:
      // Object arrays store PyObject* pointers.
      return sizeof(void*);
    default:
      ARROW_CHECK(false) << "unhandled numpy type";
      break;
  }
  // Unreachable in practice: ARROW_CHECK above aborts first.
  return -1;
}
347
+
348
+ } // namespace internal
349
+ } // namespace py
350
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/udf.h ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/compute/exec.h"
21
+ #include "arrow/compute/function.h"
22
+ #include "arrow/compute/registry.h"
23
+ #include "arrow/python/platform.h"
24
+ #include "arrow/record_batch.h"
25
+ #include "arrow/util/iterator.h"
26
+
27
+ #include "arrow/python/common.h"
28
+ #include "arrow/python/pyarrow.h"
29
+ #include "arrow/python/visibility.h"
30
+
31
+ namespace arrow {
32
+
33
+ namespace py {
34
+
35
// TODO(ARROW-16041): UDF Options are not exposed to the Python
// users. This feature will be included when extending to provide advanced
// options for the users.
struct ARROW_PYTHON_EXPORT UdfOptions {
  std::string func_name;                               // name to register the function under
  compute::Arity arity;                                // accepted argument count
  compute::FunctionDoc func_doc;                       // documentation for the registry
  std::vector<std::shared_ptr<DataType>> input_types;  // expected argument types
  std::shared_ptr<DataType> output_type;               // declared result type
};
45
+
46
/// \brief A context passed as the first argument of UDF functions.
struct ARROW_PYTHON_EXPORT UdfContext {
  MemoryPool* pool;      // memory pool the UDF should allocate from
  int64_t batch_length;  // length of the batch being processed (per the field name)
};
51
+
52
// C++-side trampoline that invokes the user's Python callable: it receives
// the callable, the UDF context, and the inputs object, and returns the
// resulting Python object.
using UdfWrapperCallback = std::function<PyObject*(
    PyObject* user_function, const UdfContext& context, PyObject* inputs)>;

// For all Register* functions below, a NULLPTR registry presumably selects
// the process-default compute function registry -- confirm against
// arrow::compute::FunctionRegistry usage.

/// \brief register a Scalar user-defined-function from Python
Status ARROW_PYTHON_EXPORT RegisterScalarFunction(
    PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options,
    compute::FunctionRegistry* registry = NULLPTR);

/// \brief register a Table user-defined-function from Python
Status ARROW_PYTHON_EXPORT RegisterTabularFunction(
    PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options,
    compute::FunctionRegistry* registry = NULLPTR);

/// \brief register an Aggregate user-defined-function from Python
Status ARROW_PYTHON_EXPORT RegisterAggregateFunction(
    PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options,
    compute::FunctionRegistry* registry = NULLPTR);

/// \brief register a Vector user-defined-function from Python
Status ARROW_PYTHON_EXPORT RegisterVectorFunction(
    PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options,
    compute::FunctionRegistry* registry = NULLPTR);

/// \brief invoke a previously registered tabular function by name and
/// obtain its output as a RecordBatchReader.
Result<std::shared_ptr<RecordBatchReader>> ARROW_PYTHON_EXPORT
CallTabularFunction(const std::string& func_name, const std::vector<Datum>& args,
                    compute::FunctionRegistry* registry = NULLPTR);
78
+
79
+ } // namespace py
80
+
81
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/vendored/pythoncapi_compat.h ADDED
@@ -0,0 +1,1519 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Header file providing new C API functions to old Python versions.
2
+ //
3
+ // File distributed under the Zero Clause BSD (0BSD) license.
4
+ // Copyright Contributors to the pythoncapi_compat project.
5
+ //
6
+ // Homepage:
7
+ // https://github.com/python/pythoncapi_compat
8
+ //
9
+ // Latest version:
10
+ // https://raw.githubusercontent.com/python/pythoncapi_compat/master/pythoncapi_compat.h
11
+ //
12
+ // Vendored from git revision:
13
+ // 39e2663e6acc0b68d5dd75bdaad0af33152552ae
14
+ // https://raw.githubusercontent.com/python/pythoncapi-compat/39e2663e6acc0b68d5dd75bdaad0af33152552ae/pythoncapi_compat.h
15
+ //
16
+ // SPDX-License-Identifier: 0BSD
17
+
18
+ /* clang-format off */
19
+
20
+ #ifndef PYTHONCAPI_COMPAT
21
+ #define PYTHONCAPI_COMPAT
22
+
23
+ #ifdef __cplusplus
24
+ extern "C" {
25
+ #endif
26
+
27
+ #include <Python.h>
28
+
29
+ // Python 3.11.0b4 added PyFrame_Back() to Python.h
30
+ #if PY_VERSION_HEX < 0x030b00B4 && !defined(PYPY_VERSION)
31
+ # include "frameobject.h" // PyFrameObject, PyFrame_GetBack()
32
+ #endif
33
+
34
+
35
+ #ifndef _Py_CAST
36
+ # define _Py_CAST(type, expr) ((type)(expr))
37
+ #endif
38
+
39
+ // Static inline functions should use _Py_NULL rather than using directly NULL
40
+ // to prevent C++ compiler warnings. On C23 and newer and on C++11 and newer,
41
+ // _Py_NULL is defined as nullptr.
42
+ #if (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L) \
43
+ || (defined(__cplusplus) && __cplusplus >= 201103)
44
+ # define _Py_NULL nullptr
45
+ #else
46
+ # define _Py_NULL NULL
47
+ #endif
48
+
49
+ // Cast argument to PyObject* type.
50
+ #ifndef _PyObject_CAST
51
+ # define _PyObject_CAST(op) _Py_CAST(PyObject*, op)
52
+ #endif
53
+
54
+
55
// bpo-42262 added Py_NewRef() to Python 3.10.0a3
// (vendored compatibility shim -- keep in sync with upstream pythoncapi-compat)
#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_NewRef)
static inline PyObject* _Py_NewRef(PyObject *obj)
{
    Py_INCREF(obj);
    return obj;
}
// The macro casts through _PyObject_CAST so any PyObject-like pointer type
// is accepted, matching the real API.
#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj))
#endif


// bpo-42262 added Py_XNewRef() to Python 3.10.0a3
#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_XNewRef)
static inline PyObject* _Py_XNewRef(PyObject *obj)
{
    // NULL-tolerant variant: Py_XINCREF is a no-op on NULL.
    Py_XINCREF(obj);
    return obj;
}
#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj))
#endif


// bpo-39573 added Py_SET_REFCNT() to Python 3.9.0a4
#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_REFCNT)
static inline void _Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt)
{
    ob->ob_refcnt = refcnt;
}
#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT(_PyObject_CAST(ob), refcnt)
#endif
85
+
86
+
87
// Py_SETREF() and Py_XSETREF() were added to Python 3.5.2.
// It is excluded from the limited C API.
// Note the ordering: the destination slot is overwritten with the new value
// *before* the old value is decref'ed, so the old object's destructor cannot
// observe `dst` pointing at a freed object.
#if (PY_VERSION_HEX < 0x03050200 && !defined(Py_SETREF)) && !defined(Py_LIMITED_API)
#define Py_SETREF(dst, src)                                     \
    do {                                                        \
        PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \
        PyObject *_tmp_dst = (*_tmp_dst_ptr);                   \
        *_tmp_dst_ptr = _PyObject_CAST(src);                    \
        Py_DECREF(_tmp_dst);                                    \
    } while (0)

// Same as Py_SETREF() but tolerates a NULL old value (Py_XDECREF).
#define Py_XSETREF(dst, src)                                    \
    do {                                                        \
        PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \
        PyObject *_tmp_dst = (*_tmp_dst_ptr);                   \
        *_tmp_dst_ptr = _PyObject_CAST(src);                    \
        Py_XDECREF(_tmp_dst);                                   \
    } while (0)
#endif


// bpo-43753 added Py_Is(), Py_IsNone(), Py_IsTrue() and Py_IsFalse()
// to Python 3.10.0b1. Identity comparisons, expressed as pointer equality.
#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_Is)
#  define Py_Is(x, y) ((x) == (y))
#endif
#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_IsNone)
#  define Py_IsNone(x) Py_Is(x, Py_None)
#endif
#if (PY_VERSION_HEX < 0x030A00B1 || defined(PYPY_VERSION)) && !defined(Py_IsTrue)
#  define Py_IsTrue(x) Py_Is(x, Py_True)
#endif
#if (PY_VERSION_HEX < 0x030A00B1 || defined(PYPY_VERSION)) && !defined(Py_IsFalse)
#  define Py_IsFalse(x) Py_Is(x, Py_False)
#endif
122
+
123
+
124
// bpo-39573 added Py_SET_TYPE() to Python 3.9.0a4
#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE)
static inline void _Py_SET_TYPE(PyObject *ob, PyTypeObject *type)
{
    ob->ob_type = type;
}
#define Py_SET_TYPE(ob, type) _Py_SET_TYPE(_PyObject_CAST(ob), type)
#endif


// bpo-39573 added Py_SET_SIZE() to Python 3.9.0a4
#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_SIZE)
static inline void _Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size)
{
    ob->ob_size = size;
}
#define Py_SET_SIZE(ob, size) _Py_SET_SIZE((PyVarObject*)(ob), size)
#endif
142
+
143
+
144
// bpo-40421 added PyFrame_GetCode() to Python 3.9.0b1
#if PY_VERSION_HEX < 0x030900B1 || defined(PYPY_VERSION)
static inline PyCodeObject* PyFrame_GetCode(PyFrameObject *frame)
{
    assert(frame != _Py_NULL);
    assert(frame->f_code != _Py_NULL);
    // Returns a NEW reference, matching the 3.9+ API.
    return _Py_CAST(PyCodeObject*, Py_NewRef(frame->f_code));
}
#endif

// Borrowed-reference convenience wrapper: drops the new reference created
// by PyFrame_GetCode() while the frame itself still owns one.
static inline PyCodeObject* _PyFrame_GetCodeBorrow(PyFrameObject *frame)
{
    PyCodeObject *code = PyFrame_GetCode(frame);
    Py_DECREF(code);
    return code;
}


// bpo-40421 added PyFrame_GetBack() to Python 3.9.0b1
#if PY_VERSION_HEX < 0x030900B1 && !defined(PYPY_VERSION)
static inline PyFrameObject* PyFrame_GetBack(PyFrameObject *frame)
{
    assert(frame != _Py_NULL);
    // f_back may be NULL for the outermost frame; Py_XNewRef handles that.
    return _Py_CAST(PyFrameObject*, Py_XNewRef(frame->f_back));
}
#endif

#if !defined(PYPY_VERSION)
// Borrowed-reference convenience wrapper for PyFrame_GetBack().
static inline PyFrameObject* _PyFrame_GetBackBorrow(PyFrameObject *frame)
{
    PyFrameObject *back = PyFrame_GetBack(frame);
    Py_XDECREF(back);
    return back;
}
#endif


// bpo-40421 added PyFrame_GetLocals() to Python 3.11.0a7
#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION)
static inline PyObject* PyFrame_GetLocals(PyFrameObject *frame)
{
#if PY_VERSION_HEX >= 0x030400B1
    // Materialize fast locals into f_locals before exposing them.
    if (PyFrame_FastToLocalsWithError(frame) < 0) {
        return NULL;
    }
#else
    PyFrame_FastToLocals(frame);
#endif
    return Py_NewRef(frame->f_locals);
}
#endif


// bpo-40421 added PyFrame_GetGlobals() to Python 3.11.0a7
#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION)
static inline PyObject* PyFrame_GetGlobals(PyFrameObject *frame)
{
    return Py_NewRef(frame->f_globals);
}
#endif


// bpo-40421 added PyFrame_GetBuiltins() to Python 3.11.0a7
#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION)
static inline PyObject* PyFrame_GetBuiltins(PyFrameObject *frame)
{
    return Py_NewRef(frame->f_builtins);
}
#endif


// bpo-40421 added PyFrame_GetLasti() to Python 3.11.0b1
#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION)
static inline int PyFrame_GetLasti(PyFrameObject *frame)
{
#if PY_VERSION_HEX >= 0x030A00A7
    // bpo-27129: Since Python 3.10.0a7, f_lasti is an instruction offset,
    // not a bytes offset anymore. Python uses 16-bit "wordcode" (2 bytes)
    // instructions.
    if (frame->f_lasti < 0) {
        return -1;
    }
    return frame->f_lasti * 2;
#else
    return frame->f_lasti;
#endif
}
#endif
232
+
233
+
234
+ // gh-91248 added PyFrame_GetVar() to Python 3.12.0a2
235
+ #if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION)
236
+ static inline PyObject* PyFrame_GetVar(PyFrameObject *frame, PyObject *name)
237
+ {
238
+ PyObject *locals, *value;
239
+
240
+ locals = PyFrame_GetLocals(frame);
241
+ if (locals == NULL) {
242
+ return NULL;
243
+ }
244
+ #if PY_VERSION_HEX >= 0x03000000
245
+ value = PyDict_GetItemWithError(locals, name);
246
+ #else
247
+ value = _PyDict_GetItemWithError(locals, name);
248
+ #endif
249
+ Py_DECREF(locals);
250
+
251
+ if (value == NULL) {
252
+ if (PyErr_Occurred()) {
253
+ return NULL;
254
+ }
255
+ #if PY_VERSION_HEX >= 0x03000000
256
+ PyErr_Format(PyExc_NameError, "variable %R does not exist", name);
257
+ #else
258
+ PyErr_SetString(PyExc_NameError, "variable does not exist");
259
+ #endif
260
+ return NULL;
261
+ }
262
+ return Py_NewRef(value);
263
+ }
264
+ #endif
265
+
266
+
267
+ // gh-91248 added PyFrame_GetVarString() to Python 3.12.0a2
268
+ #if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION)
269
+ static inline PyObject*
270
+ PyFrame_GetVarString(PyFrameObject *frame, const char *name)
271
+ {
272
+ PyObject *name_obj, *value;
273
+ #if PY_VERSION_HEX >= 0x03000000
274
+ name_obj = PyUnicode_FromString(name);
275
+ #else
276
+ name_obj = PyString_FromString(name);
277
+ #endif
278
+ if (name_obj == NULL) {
279
+ return NULL;
280
+ }
281
+ value = PyFrame_GetVar(frame, name_obj);
282
+ Py_DECREF(name_obj);
283
+ return value;
284
+ }
285
+ #endif
286
+
287
+
288
+ // bpo-39947 added PyThreadState_GetInterpreter() to Python 3.9.0a5
289
+ #if PY_VERSION_HEX < 0x030900A5 || defined(PYPY_VERSION)
290
+ static inline PyInterpreterState *
291
+ PyThreadState_GetInterpreter(PyThreadState *tstate)
292
+ {
293
+ assert(tstate != _Py_NULL);
294
+ return tstate->interp;
295
+ }
296
+ #endif
297
+
298
+
299
+ // bpo-40429 added PyThreadState_GetFrame() to Python 3.9.0b1
300
+ #if PY_VERSION_HEX < 0x030900B1 && !defined(PYPY_VERSION)
301
+ static inline PyFrameObject* PyThreadState_GetFrame(PyThreadState *tstate)
302
+ {
303
+ assert(tstate != _Py_NULL);
304
+ return _Py_CAST(PyFrameObject *, Py_XNewRef(tstate->frame));
305
+ }
306
+ #endif
307
+
308
+ #if !defined(PYPY_VERSION)
309
+ static inline PyFrameObject*
310
+ _PyThreadState_GetFrameBorrow(PyThreadState *tstate)
311
+ {
312
+ PyFrameObject *frame = PyThreadState_GetFrame(tstate);
313
+ Py_XDECREF(frame);
314
+ return frame;
315
+ }
316
+ #endif
317
+
318
+
319
+ // bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a5
320
+ #if PY_VERSION_HEX < 0x030900A5 || defined(PYPY_VERSION)
321
+ static inline PyInterpreterState* PyInterpreterState_Get(void)
322
+ {
323
+ PyThreadState *tstate;
324
+ PyInterpreterState *interp;
325
+
326
+ tstate = PyThreadState_GET();
327
+ if (tstate == _Py_NULL) {
328
+ Py_FatalError("GIL released (tstate is NULL)");
329
+ }
330
+ interp = tstate->interp;
331
+ if (interp == _Py_NULL) {
332
+ Py_FatalError("no current interpreter");
333
+ }
334
+ return interp;
335
+ }
336
+ #endif
337
+
338
+
339
+ // bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a6
340
+ #if 0x030700A1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030900A6 && !defined(PYPY_VERSION)
341
+ static inline uint64_t PyThreadState_GetID(PyThreadState *tstate)
342
+ {
343
+ assert(tstate != _Py_NULL);
344
+ return tstate->id;
345
+ }
346
+ #endif
347
+
348
+ // bpo-43760 added PyThreadState_EnterTracing() to Python 3.11.0a2
349
+ #if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION)
350
+ static inline void PyThreadState_EnterTracing(PyThreadState *tstate)
351
+ {
352
+ tstate->tracing++;
353
+ #if PY_VERSION_HEX >= 0x030A00A1
354
+ tstate->cframe->use_tracing = 0;
355
+ #else
356
+ tstate->use_tracing = 0;
357
+ #endif
358
+ }
359
+ #endif
360
+
361
+ // bpo-43760 added PyThreadState_LeaveTracing() to Python 3.11.0a2
362
+ #if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION)
363
+ static inline void PyThreadState_LeaveTracing(PyThreadState *tstate)
364
+ {
365
+ int use_tracing = (tstate->c_tracefunc != _Py_NULL
366
+ || tstate->c_profilefunc != _Py_NULL);
367
+ tstate->tracing--;
368
+ #if PY_VERSION_HEX >= 0x030A00A1
369
+ tstate->cframe->use_tracing = use_tracing;
370
+ #else
371
+ tstate->use_tracing = use_tracing;
372
+ #endif
373
+ }
374
+ #endif
375
+
376
+
377
+ // bpo-37194 added PyObject_CallNoArgs() to Python 3.9.0a1
378
+ // PyObject_CallNoArgs() added to PyPy 3.9.16-v7.3.11
379
+ #if !defined(PyObject_CallNoArgs) && PY_VERSION_HEX < 0x030900A1
380
+ static inline PyObject* PyObject_CallNoArgs(PyObject *func)
381
+ {
382
+ return PyObject_CallFunctionObjArgs(func, NULL);
383
+ }
384
+ #endif
385
+
386
+
387
+ // bpo-39245 made PyObject_CallOneArg() public (previously called
388
+ // _PyObject_CallOneArg) in Python 3.9.0a4
389
+ // PyObject_CallOneArg() added to PyPy 3.9.16-v7.3.11
390
+ #if !defined(PyObject_CallOneArg) && PY_VERSION_HEX < 0x030900A4
391
+ static inline PyObject* PyObject_CallOneArg(PyObject *func, PyObject *arg)
392
+ {
393
+ return PyObject_CallFunctionObjArgs(func, arg, NULL);
394
+ }
395
+ #endif
396
+
397
+
398
+ // bpo-1635741 added PyModule_AddObjectRef() to Python 3.10.0a3
399
+ #if PY_VERSION_HEX < 0x030A00A3
400
+ static inline int
401
+ PyModule_AddObjectRef(PyObject *module, const char *name, PyObject *value)
402
+ {
403
+ int res;
404
+
405
+ if (!value && !PyErr_Occurred()) {
406
+ // PyModule_AddObject() raises TypeError in this case
407
+ PyErr_SetString(PyExc_SystemError,
408
+ "PyModule_AddObjectRef() must be called "
409
+ "with an exception raised if value is NULL");
410
+ return -1;
411
+ }
412
+
413
+ Py_XINCREF(value);
414
+ res = PyModule_AddObject(module, name, value);
415
+ if (res < 0) {
416
+ Py_XDECREF(value);
417
+ }
418
+ return res;
419
+ }
420
+ #endif
421
+
422
+
423
+ // bpo-40024 added PyModule_AddType() to Python 3.9.0a5
424
+ #if PY_VERSION_HEX < 0x030900A5
425
+ static inline int PyModule_AddType(PyObject *module, PyTypeObject *type)
426
+ {
427
+ const char *name, *dot;
428
+
429
+ if (PyType_Ready(type) < 0) {
430
+ return -1;
431
+ }
432
+
433
+ // inline _PyType_Name()
434
+ name = type->tp_name;
435
+ assert(name != _Py_NULL);
436
+ dot = strrchr(name, '.');
437
+ if (dot != _Py_NULL) {
438
+ name = dot + 1;
439
+ }
440
+
441
+ return PyModule_AddObjectRef(module, name, _PyObject_CAST(type));
442
+ }
443
+ #endif
444
+
445
+
446
+ // bpo-40241 added PyObject_GC_IsTracked() to Python 3.9.0a6.
447
+ // bpo-4688 added _PyObject_GC_IS_TRACKED() to Python 2.7.0a2.
448
+ #if PY_VERSION_HEX < 0x030900A6 && !defined(PYPY_VERSION)
449
+ static inline int PyObject_GC_IsTracked(PyObject* obj)
450
+ {
451
+ return (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj));
452
+ }
453
+ #endif
454
+
455
+ // bpo-40241 added PyObject_GC_IsFinalized() to Python 3.9.0a6.
456
+ // bpo-18112 added _PyGCHead_FINALIZED() to Python 3.4.0 final.
457
+ #if PY_VERSION_HEX < 0x030900A6 && PY_VERSION_HEX >= 0x030400F0 && !defined(PYPY_VERSION)
458
+ static inline int PyObject_GC_IsFinalized(PyObject *obj)
459
+ {
460
+ PyGC_Head *gc = _Py_CAST(PyGC_Head*, obj) - 1;
461
+ return (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED(gc));
462
+ }
463
+ #endif
464
+
465
+
466
+ // bpo-39573 added Py_IS_TYPE() to Python 3.9.0a4
467
+ #if PY_VERSION_HEX < 0x030900A4 && !defined(Py_IS_TYPE)
468
+ static inline int _Py_IS_TYPE(PyObject *ob, PyTypeObject *type) {
469
+ return Py_TYPE(ob) == type;
470
+ }
471
+ #define Py_IS_TYPE(ob, type) _Py_IS_TYPE(_PyObject_CAST(ob), type)
472
+ #endif
473
+
474
+
475
+ // bpo-46906 added PyFloat_Pack2() and PyFloat_Unpack2() to Python 3.11a7.
476
+ // bpo-11734 added _PyFloat_Pack2() and _PyFloat_Unpack2() to Python 3.6.0b1.
477
+ // Python 3.11a2 moved _PyFloat_Pack2() and _PyFloat_Unpack2() to the internal
478
+ // C API: Python 3.11a2-3.11a6 versions are not supported.
479
+ #if 0x030600B1 <= PY_VERSION_HEX && PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION)
480
+ static inline int PyFloat_Pack2(double x, char *p, int le)
481
+ { return _PyFloat_Pack2(x, (unsigned char*)p, le); }
482
+
483
+ static inline double PyFloat_Unpack2(const char *p, int le)
484
+ { return _PyFloat_Unpack2((const unsigned char *)p, le); }
485
+ #endif
486
+
487
+
488
+ // bpo-46906 added PyFloat_Pack4(), PyFloat_Pack8(), PyFloat_Unpack4() and
489
+ // PyFloat_Unpack8() to Python 3.11a7.
490
+ // Python 3.11a2 moved _PyFloat_Pack4(), _PyFloat_Pack8(), _PyFloat_Unpack4()
491
+ // and _PyFloat_Unpack8() to the internal C API: Python 3.11a2-3.11a6 versions
492
+ // are not supported.
493
+ #if PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION)
494
+ static inline int PyFloat_Pack4(double x, char *p, int le)
495
+ { return _PyFloat_Pack4(x, (unsigned char*)p, le); }
496
+
497
+ static inline int PyFloat_Pack8(double x, char *p, int le)
498
+ { return _PyFloat_Pack8(x, (unsigned char*)p, le); }
499
+
500
+ static inline double PyFloat_Unpack4(const char *p, int le)
501
+ { return _PyFloat_Unpack4((const unsigned char *)p, le); }
502
+
503
+ static inline double PyFloat_Unpack8(const char *p, int le)
504
+ { return _PyFloat_Unpack8((const unsigned char *)p, le); }
505
+ #endif
506
+
507
+
508
+ // gh-92154 added PyCode_GetCode() to Python 3.11.0b1
509
+ #if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION)
510
+ static inline PyObject* PyCode_GetCode(PyCodeObject *code)
511
+ {
512
+ return Py_NewRef(code->co_code);
513
+ }
514
+ #endif
515
+
516
+
517
+ // gh-95008 added PyCode_GetVarnames() to Python 3.11.0rc1
518
+ #if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION)
519
+ static inline PyObject* PyCode_GetVarnames(PyCodeObject *code)
520
+ {
521
+ return Py_NewRef(code->co_varnames);
522
+ }
523
+ #endif
524
+
525
+ // gh-95008 added PyCode_GetFreevars() to Python 3.11.0rc1
526
+ #if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION)
527
+ static inline PyObject* PyCode_GetFreevars(PyCodeObject *code)
528
+ {
529
+ return Py_NewRef(code->co_freevars);
530
+ }
531
+ #endif
532
+
533
+ // gh-95008 added PyCode_GetCellvars() to Python 3.11.0rc1
534
+ #if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION)
535
+ static inline PyObject* PyCode_GetCellvars(PyCodeObject *code)
536
+ {
537
+ return Py_NewRef(code->co_cellvars);
538
+ }
539
+ #endif
540
+
541
+
542
+ // Py_UNUSED() was added to Python 3.4.0b2.
543
+ #if PY_VERSION_HEX < 0x030400B2 && !defined(Py_UNUSED)
544
+ # if defined(__GNUC__) || defined(__clang__)
545
+ # define Py_UNUSED(name) _unused_ ## name __attribute__((unused))
546
+ # else
547
+ # define Py_UNUSED(name) _unused_ ## name
548
+ # endif
549
+ #endif
550
+
551
+
552
+ // gh-105922 added PyImport_AddModuleRef() to Python 3.13.0a1
553
+ #if PY_VERSION_HEX < 0x030D00A0
554
+ static inline PyObject* PyImport_AddModuleRef(const char *name)
555
+ {
556
+ return Py_XNewRef(PyImport_AddModule(name));
557
+ }
558
+ #endif
559
+
560
+
561
+ // gh-105927 added PyWeakref_GetRef() to Python 3.13.0a1
562
+ #if PY_VERSION_HEX < 0x030D0000
563
+ static inline int PyWeakref_GetRef(PyObject *ref, PyObject **pobj)
564
+ {
565
+ PyObject *obj;
566
+ if (ref != NULL && !PyWeakref_Check(ref)) {
567
+ *pobj = NULL;
568
+ PyErr_SetString(PyExc_TypeError, "expected a weakref");
569
+ return -1;
570
+ }
571
+ obj = PyWeakref_GetObject(ref);
572
+ if (obj == NULL) {
573
+ // SystemError if ref is NULL
574
+ *pobj = NULL;
575
+ return -1;
576
+ }
577
+ if (obj == Py_None) {
578
+ *pobj = NULL;
579
+ return 0;
580
+ }
581
+ *pobj = Py_NewRef(obj);
582
+ return (*pobj != NULL);
583
+ }
584
+ #endif
585
+
586
+
587
+ // bpo-36974 added PY_VECTORCALL_ARGUMENTS_OFFSET to Python 3.8b1
588
+ #ifndef PY_VECTORCALL_ARGUMENTS_OFFSET
589
+ # define PY_VECTORCALL_ARGUMENTS_OFFSET (_Py_CAST(size_t, 1) << (8 * sizeof(size_t) - 1))
590
+ #endif
591
+
592
+ // bpo-36974 added PyVectorcall_NARGS() to Python 3.8b1
593
+ #if PY_VERSION_HEX < 0x030800B1
594
+ static inline Py_ssize_t PyVectorcall_NARGS(size_t n)
595
+ {
596
+ return n & ~PY_VECTORCALL_ARGUMENTS_OFFSET;
597
+ }
598
+ #endif
599
+
600
+
601
+ // gh-105922 added PyObject_Vectorcall() to Python 3.9.0a4
602
+ #if PY_VERSION_HEX < 0x030900A4
603
+ static inline PyObject*
604
+ PyObject_Vectorcall(PyObject *callable, PyObject *const *args,
605
+ size_t nargsf, PyObject *kwnames)
606
+ {
607
+ #if PY_VERSION_HEX >= 0x030800B1 && !defined(PYPY_VERSION)
608
+ // bpo-36974 added _PyObject_Vectorcall() to Python 3.8.0b1
609
+ return _PyObject_Vectorcall(callable, args, nargsf, kwnames);
610
+ #else
611
+ PyObject *posargs = NULL, *kwargs = NULL;
612
+ PyObject *res;
613
+ Py_ssize_t nposargs, nkwargs, i;
614
+
615
+ if (nargsf != 0 && args == NULL) {
616
+ PyErr_BadInternalCall();
617
+ goto error;
618
+ }
619
+ if (kwnames != NULL && !PyTuple_Check(kwnames)) {
620
+ PyErr_BadInternalCall();
621
+ goto error;
622
+ }
623
+
624
+ nposargs = (Py_ssize_t)PyVectorcall_NARGS(nargsf);
625
+ if (kwnames) {
626
+ nkwargs = PyTuple_GET_SIZE(kwnames);
627
+ }
628
+ else {
629
+ nkwargs = 0;
630
+ }
631
+
632
+ posargs = PyTuple_New(nposargs);
633
+ if (posargs == NULL) {
634
+ goto error;
635
+ }
636
+ if (nposargs) {
637
+ for (i=0; i < nposargs; i++) {
638
+ PyTuple_SET_ITEM(posargs, i, Py_NewRef(*args));
639
+ args++;
640
+ }
641
+ }
642
+
643
+ if (nkwargs) {
644
+ kwargs = PyDict_New();
645
+ if (kwargs == NULL) {
646
+ goto error;
647
+ }
648
+
649
+ for (i = 0; i < nkwargs; i++) {
650
+ PyObject *key = PyTuple_GET_ITEM(kwnames, i);
651
+ PyObject *value = *args;
652
+ args++;
653
+ if (PyDict_SetItem(kwargs, key, value) < 0) {
654
+ goto error;
655
+ }
656
+ }
657
+ }
658
+ else {
659
+ kwargs = NULL;
660
+ }
661
+
662
+ res = PyObject_Call(callable, posargs, kwargs);
663
+ Py_DECREF(posargs);
664
+ Py_XDECREF(kwargs);
665
+ return res;
666
+
667
+ error:
668
+ Py_DECREF(posargs);
669
+ Py_XDECREF(kwargs);
670
+ return NULL;
671
+ #endif
672
+ }
673
+ #endif
674
+
675
+
676
+ // gh-106521 added PyObject_GetOptionalAttr() and
677
+ // PyObject_GetOptionalAttrString() to Python 3.13.0a1
678
+ #if PY_VERSION_HEX < 0x030D00A1
679
+ static inline int
680
+ PyObject_GetOptionalAttr(PyObject *obj, PyObject *attr_name, PyObject **result)
681
+ {
682
+ // bpo-32571 added _PyObject_LookupAttr() to Python 3.7.0b1
683
+ #if PY_VERSION_HEX >= 0x030700B1 && !defined(PYPY_VERSION)
684
+ return _PyObject_LookupAttr(obj, attr_name, result);
685
+ #else
686
+ *result = PyObject_GetAttr(obj, attr_name);
687
+ if (*result != NULL) {
688
+ return 1;
689
+ }
690
+ if (!PyErr_Occurred()) {
691
+ return 0;
692
+ }
693
+ if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
694
+ PyErr_Clear();
695
+ return 0;
696
+ }
697
+ return -1;
698
+ #endif
699
+ }
700
+
701
+ static inline int
702
+ PyObject_GetOptionalAttrString(PyObject *obj, const char *attr_name, PyObject **result)
703
+ {
704
+ PyObject *name_obj;
705
+ int rc;
706
+ #if PY_VERSION_HEX >= 0x03000000
707
+ name_obj = PyUnicode_FromString(attr_name);
708
+ #else
709
+ name_obj = PyString_FromString(attr_name);
710
+ #endif
711
+ if (name_obj == NULL) {
712
+ *result = NULL;
713
+ return -1;
714
+ }
715
+ rc = PyObject_GetOptionalAttr(obj, name_obj, result);
716
+ Py_DECREF(name_obj);
717
+ return rc;
718
+ }
719
+ #endif
720
+
721
+
722
+ // gh-106307 added PyObject_GetOptionalAttr() and
723
+ // PyMapping_GetOptionalItemString() to Python 3.13.0a1
724
+ #if PY_VERSION_HEX < 0x030D00A1
725
+ static inline int
726
+ PyMapping_GetOptionalItem(PyObject *obj, PyObject *key, PyObject **result)
727
+ {
728
+ *result = PyObject_GetItem(obj, key);
729
+ if (*result) {
730
+ return 1;
731
+ }
732
+ if (!PyErr_ExceptionMatches(PyExc_KeyError)) {
733
+ return -1;
734
+ }
735
+ PyErr_Clear();
736
+ return 0;
737
+ }
738
+
739
+ static inline int
740
+ PyMapping_GetOptionalItemString(PyObject *obj, const char *key, PyObject **result)
741
+ {
742
+ PyObject *key_obj;
743
+ int rc;
744
+ #if PY_VERSION_HEX >= 0x03000000
745
+ key_obj = PyUnicode_FromString(key);
746
+ #else
747
+ key_obj = PyString_FromString(key);
748
+ #endif
749
+ if (key_obj == NULL) {
750
+ *result = NULL;
751
+ return -1;
752
+ }
753
+ rc = PyMapping_GetOptionalItem(obj, key_obj, result);
754
+ Py_DECREF(key_obj);
755
+ return rc;
756
+ }
757
+ #endif
758
+
759
+ // gh-108511 added PyMapping_HasKeyWithError() and
760
+ // PyMapping_HasKeyStringWithError() to Python 3.13.0a1
761
+ #if PY_VERSION_HEX < 0x030D00A1
762
+ static inline int
763
+ PyMapping_HasKeyWithError(PyObject *obj, PyObject *key)
764
+ {
765
+ PyObject *res;
766
+ int rc = PyMapping_GetOptionalItem(obj, key, &res);
767
+ Py_XDECREF(res);
768
+ return rc;
769
+ }
770
+
771
+ static inline int
772
+ PyMapping_HasKeyStringWithError(PyObject *obj, const char *key)
773
+ {
774
+ PyObject *res;
775
+ int rc = PyMapping_GetOptionalItemString(obj, key, &res);
776
+ Py_XDECREF(res);
777
+ return rc;
778
+ }
779
+ #endif
780
+
781
+
782
+ // gh-108511 added PyObject_HasAttrWithError() and
783
+ // PyObject_HasAttrStringWithError() to Python 3.13.0a1
784
+ #if PY_VERSION_HEX < 0x030D00A1
785
+ static inline int
786
+ PyObject_HasAttrWithError(PyObject *obj, PyObject *attr)
787
+ {
788
+ PyObject *res;
789
+ int rc = PyObject_GetOptionalAttr(obj, attr, &res);
790
+ Py_XDECREF(res);
791
+ return rc;
792
+ }
793
+
794
+ static inline int
795
+ PyObject_HasAttrStringWithError(PyObject *obj, const char *attr)
796
+ {
797
+ PyObject *res;
798
+ int rc = PyObject_GetOptionalAttrString(obj, attr, &res);
799
+ Py_XDECREF(res);
800
+ return rc;
801
+ }
802
+ #endif
803
+
804
+
805
+ // gh-106004 added PyDict_GetItemRef() and PyDict_GetItemStringRef()
806
+ // to Python 3.13.0a1
807
+ #if PY_VERSION_HEX < 0x030D00A1
808
+ static inline int
809
+ PyDict_GetItemRef(PyObject *mp, PyObject *key, PyObject **result)
810
+ {
811
+ #if PY_VERSION_HEX >= 0x03000000
812
+ PyObject *item = PyDict_GetItemWithError(mp, key);
813
+ #else
814
+ PyObject *item = _PyDict_GetItemWithError(mp, key);
815
+ #endif
816
+ if (item != NULL) {
817
+ *result = Py_NewRef(item);
818
+ return 1; // found
819
+ }
820
+ if (!PyErr_Occurred()) {
821
+ *result = NULL;
822
+ return 0; // not found
823
+ }
824
+ *result = NULL;
825
+ return -1;
826
+ }
827
+
828
+ static inline int
829
+ PyDict_GetItemStringRef(PyObject *mp, const char *key, PyObject **result)
830
+ {
831
+ int res;
832
+ #if PY_VERSION_HEX >= 0x03000000
833
+ PyObject *key_obj = PyUnicode_FromString(key);
834
+ #else
835
+ PyObject *key_obj = PyString_FromString(key);
836
+ #endif
837
+ if (key_obj == NULL) {
838
+ *result = NULL;
839
+ return -1;
840
+ }
841
+ res = PyDict_GetItemRef(mp, key_obj, result);
842
+ Py_DECREF(key_obj);
843
+ return res;
844
+ }
845
+ #endif
846
+
847
+
848
+ // gh-106307 added PyModule_Add() to Python 3.13.0a1
849
+ #if PY_VERSION_HEX < 0x030D00A1
850
+ static inline int
851
+ PyModule_Add(PyObject *mod, const char *name, PyObject *value)
852
+ {
853
+ int res = PyModule_AddObjectRef(mod, name, value);
854
+ Py_XDECREF(value);
855
+ return res;
856
+ }
857
+ #endif
858
+
859
+
860
+ // gh-108014 added Py_IsFinalizing() to Python 3.13.0a1
861
+ // bpo-1856 added _Py_Finalizing to Python 3.2.1b1.
862
+ // _Py_IsFinalizing() was added to PyPy 7.3.0.
863
+ #if (0x030201B1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030D00A1) \
864
+ && (!defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM >= 0x7030000)
865
+ static inline int Py_IsFinalizing(void)
866
+ {
867
+ #if PY_VERSION_HEX >= 0x030700A1
868
+ // _Py_IsFinalizing() was added to Python 3.7.0a1.
869
+ return _Py_IsFinalizing();
870
+ #else
871
+ return (_Py_Finalizing != NULL);
872
+ #endif
873
+ }
874
+ #endif
875
+
876
+
877
+ // gh-108323 added PyDict_ContainsString() to Python 3.13.0a1
878
+ #if PY_VERSION_HEX < 0x030D00A1
879
+ static inline int PyDict_ContainsString(PyObject *op, const char *key)
880
+ {
881
+ PyObject *key_obj = PyUnicode_FromString(key);
882
+ if (key_obj == NULL) {
883
+ return -1;
884
+ }
885
+ int res = PyDict_Contains(op, key_obj);
886
+ Py_DECREF(key_obj);
887
+ return res;
888
+ }
889
+ #endif
890
+
891
+
892
+ // gh-108445 added PyLong_AsInt() to Python 3.13.0a1
893
+ #if PY_VERSION_HEX < 0x030D00A1
894
+ static inline int PyLong_AsInt(PyObject *obj)
895
+ {
896
+ #ifdef PYPY_VERSION
897
+ long value = PyLong_AsLong(obj);
898
+ if (value == -1 && PyErr_Occurred()) {
899
+ return -1;
900
+ }
901
+ if (value < (long)INT_MIN || (long)INT_MAX < value) {
902
+ PyErr_SetString(PyExc_OverflowError,
903
+ "Python int too large to convert to C int");
904
+ return -1;
905
+ }
906
+ return (int)value;
907
+ #else
908
+ return _PyLong_AsInt(obj);
909
+ #endif
910
+ }
911
+ #endif
912
+
913
+
914
+ // gh-107073 added PyObject_VisitManagedDict() to Python 3.13.0a1
915
+ #if PY_VERSION_HEX < 0x030D00A1
916
+ static inline int
917
+ PyObject_VisitManagedDict(PyObject *obj, visitproc visit, void *arg)
918
+ {
919
+ PyObject **dict = _PyObject_GetDictPtr(obj);
920
+ if (*dict == NULL) {
921
+ return -1;
922
+ }
923
+ Py_VISIT(*dict);
924
+ return 0;
925
+ }
926
+
927
+ static inline void
928
+ PyObject_ClearManagedDict(PyObject *obj)
929
+ {
930
+ PyObject **dict = _PyObject_GetDictPtr(obj);
931
+ if (*dict == NULL) {
932
+ return;
933
+ }
934
+ Py_CLEAR(*dict);
935
+ }
936
+ #endif
937
+
938
+ // gh-108867 added PyThreadState_GetUnchecked() to Python 3.13.0a1
939
+ // Python 3.5.2 added _PyThreadState_UncheckedGet().
940
+ #if PY_VERSION_HEX >= 0x03050200 && PY_VERSION_HEX < 0x030D00A1
941
+ static inline PyThreadState*
942
+ PyThreadState_GetUnchecked(void)
943
+ {
944
+ return _PyThreadState_UncheckedGet();
945
+ }
946
+ #endif
947
+
948
+ // gh-110289 added PyUnicode_EqualToUTF8() and PyUnicode_EqualToUTF8AndSize()
949
+ // to Python 3.13.0a1
950
+ #if PY_VERSION_HEX < 0x030D00A1
951
+ static inline int
952
+ PyUnicode_EqualToUTF8AndSize(PyObject *unicode, const char *str, Py_ssize_t str_len)
953
+ {
954
+ Py_ssize_t len;
955
+ const void *utf8;
956
+ PyObject *exc_type, *exc_value, *exc_tb;
957
+ int res;
958
+
959
+ // API cannot report errors so save/restore the exception
960
+ PyErr_Fetch(&exc_type, &exc_value, &exc_tb);
961
+
962
+ // Python 3.3.0a1 added PyUnicode_AsUTF8AndSize()
963
+ #if PY_VERSION_HEX >= 0x030300A1
964
+ if (PyUnicode_IS_ASCII(unicode)) {
965
+ utf8 = PyUnicode_DATA(unicode);
966
+ len = PyUnicode_GET_LENGTH(unicode);
967
+ }
968
+ else {
969
+ utf8 = PyUnicode_AsUTF8AndSize(unicode, &len);
970
+ if (utf8 == NULL) {
971
+ // Memory allocation failure. The API cannot report error,
972
+ // so ignore the exception and return 0.
973
+ res = 0;
974
+ goto done;
975
+ }
976
+ }
977
+
978
+ if (len != str_len) {
979
+ res = 0;
980
+ goto done;
981
+ }
982
+ res = (memcmp(utf8, str, (size_t)len) == 0);
983
+ #else
984
+ PyObject *bytes = PyUnicode_AsUTF8String(unicode);
985
+ if (bytes == NULL) {
986
+ // Memory allocation failure. The API cannot report error,
987
+ // so ignore the exception and return 0.
988
+ res = 0;
989
+ goto done;
990
+ }
991
+
992
+ #if PY_VERSION_HEX >= 0x03000000
993
+ len = PyBytes_GET_SIZE(bytes);
994
+ utf8 = PyBytes_AS_STRING(bytes);
995
+ #else
996
+ len = PyString_GET_SIZE(bytes);
997
+ utf8 = PyString_AS_STRING(bytes);
998
+ #endif
999
+ if (len != str_len) {
1000
+ Py_DECREF(bytes);
1001
+ res = 0;
1002
+ goto done;
1003
+ }
1004
+
1005
+ res = (memcmp(utf8, str, (size_t)len) == 0);
1006
+ Py_DECREF(bytes);
1007
+ #endif
1008
+
1009
+ done:
1010
+ PyErr_Restore(exc_type, exc_value, exc_tb);
1011
+ return res;
1012
+ }
1013
+
1014
+ static inline int
1015
+ PyUnicode_EqualToUTF8(PyObject *unicode, const char *str)
1016
+ {
1017
+ return PyUnicode_EqualToUTF8AndSize(unicode, str, (Py_ssize_t)strlen(str));
1018
+ }
1019
+ #endif
1020
+
1021
+
1022
+ // gh-111138 added PyList_Extend() and PyList_Clear() to Python 3.13.0a2
1023
+ #if PY_VERSION_HEX < 0x030D00A2
1024
+ static inline int
1025
+ PyList_Extend(PyObject *list, PyObject *iterable)
1026
+ {
1027
+ return PyList_SetSlice(list, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, iterable);
1028
+ }
1029
+
1030
+ static inline int
1031
+ PyList_Clear(PyObject *list)
1032
+ {
1033
+ return PyList_SetSlice(list, 0, PY_SSIZE_T_MAX, NULL);
1034
+ }
1035
+ #endif
1036
+
1037
+ // gh-111262 added PyDict_Pop() and PyDict_PopString() to Python 3.13.0a2
1038
+ #if PY_VERSION_HEX < 0x030D00A2
1039
+ static inline int
1040
+ PyDict_Pop(PyObject *dict, PyObject *key, PyObject **result)
1041
+ {
1042
+ PyObject *value;
1043
+
1044
+ if (!PyDict_Check(dict)) {
1045
+ PyErr_BadInternalCall();
1046
+ if (result) {
1047
+ *result = NULL;
1048
+ }
1049
+ return -1;
1050
+ }
1051
+
1052
+ // bpo-16991 added _PyDict_Pop() to Python 3.5.0b2.
1053
+ // Python 3.6.0b3 changed _PyDict_Pop() first argument type to PyObject*.
1054
+ // Python 3.13.0a1 removed _PyDict_Pop().
1055
+ #if defined(PYPY_VERSION) || PY_VERSION_HEX < 0x030500b2 || PY_VERSION_HEX >= 0x030D0000
1056
+ value = PyObject_CallMethod(dict, "pop", "O", key);
1057
+ #elif PY_VERSION_HEX < 0x030600b3
1058
+ value = _PyDict_Pop(_Py_CAST(PyDictObject*, dict), key, NULL);
1059
+ #else
1060
+ value = _PyDict_Pop(dict, key, NULL);
1061
+ #endif
1062
+ if (value == NULL) {
1063
+ if (result) {
1064
+ *result = NULL;
1065
+ }
1066
+ if (PyErr_Occurred() && !PyErr_ExceptionMatches(PyExc_KeyError)) {
1067
+ return -1;
1068
+ }
1069
+ PyErr_Clear();
1070
+ return 0;
1071
+ }
1072
+ if (result) {
1073
+ *result = value;
1074
+ }
1075
+ else {
1076
+ Py_DECREF(value);
1077
+ }
1078
+ return 1;
1079
+ }
1080
+
1081
+ static inline int
1082
+ PyDict_PopString(PyObject *dict, const char *key, PyObject **result)
1083
+ {
1084
+ PyObject *key_obj = PyUnicode_FromString(key);
1085
+ if (key_obj == NULL) {
1086
+ if (result != NULL) {
1087
+ *result = NULL;
1088
+ }
1089
+ return -1;
1090
+ }
1091
+
1092
+ int res = PyDict_Pop(dict, key_obj, result);
1093
+ Py_DECREF(key_obj);
1094
+ return res;
1095
+ }
1096
+ #endif
1097
+
1098
+
1099
+ #if PY_VERSION_HEX < 0x030200A4
1100
+ // Python 3.2.0a4 added Py_hash_t type
1101
+ typedef Py_ssize_t Py_hash_t;
1102
+ #endif
1103
+
1104
+
1105
+ // gh-111545 added Py_HashPointer() to Python 3.13.0a3
1106
+ #if PY_VERSION_HEX < 0x030D00A3
1107
+ static inline Py_hash_t Py_HashPointer(const void *ptr)
1108
+ {
1109
+ #if PY_VERSION_HEX >= 0x030900A4 && !defined(PYPY_VERSION)
1110
+ return _Py_HashPointer(ptr);
1111
+ #else
1112
+ return _Py_HashPointer(_Py_CAST(void*, ptr));
1113
+ #endif
1114
+ }
1115
+ #endif
1116
+
1117
+
1118
+ // Python 3.13a4 added a PyTime API.
1119
+ // Use the private API added to Python 3.5.
1120
+ #if PY_VERSION_HEX < 0x030D00A4 && PY_VERSION_HEX >= 0x03050000
1121
+ typedef _PyTime_t PyTime_t;
1122
+ #define PyTime_MIN _PyTime_MIN
1123
+ #define PyTime_MAX _PyTime_MAX
1124
+
1125
+ static inline double PyTime_AsSecondsDouble(PyTime_t t)
1126
+ { return _PyTime_AsSecondsDouble(t); }
1127
+
1128
+ static inline int PyTime_Monotonic(PyTime_t *result)
1129
+ { return _PyTime_GetMonotonicClockWithInfo(result, NULL); }
1130
+
1131
+ static inline int PyTime_Time(PyTime_t *result)
1132
+ { return _PyTime_GetSystemClockWithInfo(result, NULL); }
1133
+
1134
+ static inline int PyTime_PerfCounter(PyTime_t *result)
1135
+ {
1136
+ #if PY_VERSION_HEX >= 0x03070000 && !defined(PYPY_VERSION)
1137
+ return _PyTime_GetPerfCounterWithInfo(result, NULL);
1138
+ #elif PY_VERSION_HEX >= 0x03070000
1139
+ // Call time.perf_counter_ns() and convert Python int object to PyTime_t.
1140
+ // Cache time.perf_counter_ns() function for best performance.
1141
+ static PyObject *func = NULL;
1142
+ if (func == NULL) {
1143
+ PyObject *mod = PyImport_ImportModule("time");
1144
+ if (mod == NULL) {
1145
+ return -1;
1146
+ }
1147
+
1148
+ func = PyObject_GetAttrString(mod, "perf_counter_ns");
1149
+ Py_DECREF(mod);
1150
+ if (func == NULL) {
1151
+ return -1;
1152
+ }
1153
+ }
1154
+
1155
+ PyObject *res = PyObject_CallNoArgs(func);
1156
+ if (res == NULL) {
1157
+ return -1;
1158
+ }
1159
+ long long value = PyLong_AsLongLong(res);
1160
+ Py_DECREF(res);
1161
+
1162
+ if (value == -1 && PyErr_Occurred()) {
1163
+ return -1;
1164
+ }
1165
+
1166
+ Py_BUILD_ASSERT(sizeof(value) >= sizeof(PyTime_t));
1167
+ *result = (PyTime_t)value;
1168
+ return 0;
1169
+ #else
1170
+ // Call time.perf_counter() and convert C double to PyTime_t.
1171
+ // Cache time.perf_counter() function for best performance.
1172
+ static PyObject *func = NULL;
1173
+ if (func == NULL) {
1174
+ PyObject *mod = PyImport_ImportModule("time");
1175
+ if (mod == NULL) {
1176
+ return -1;
1177
+ }
1178
+
1179
+ func = PyObject_GetAttrString(mod, "perf_counter");
1180
+ Py_DECREF(mod);
1181
+ if (func == NULL) {
1182
+ return -1;
1183
+ }
1184
+ }
1185
+
1186
+ PyObject *res = PyObject_CallNoArgs(func);
1187
+ if (res == NULL) {
1188
+ return -1;
1189
+ }
1190
+ double d = PyFloat_AsDouble(res);
1191
+ Py_DECREF(res);
1192
+
1193
+ if (d == -1.0 && PyErr_Occurred()) {
1194
+ return -1;
1195
+ }
1196
+
1197
+ // Avoid floor() to avoid having to link to libm
1198
+ *result = (PyTime_t)(d * 1e9);
1199
+ return 0;
1200
+ #endif
1201
+ }
1202
+
1203
+ #endif
1204
+
1205
+ // gh-111389 added hash constants to Python 3.13.0a5. These constants were
1206
+ // added first as private macros to Python 3.4.0b1 and PyPy 7.3.9.
1207
+ #if (!defined(PyHASH_BITS) \
1208
+ && ((!defined(PYPY_VERSION) && PY_VERSION_HEX >= 0x030400B1) \
1209
+ || (defined(PYPY_VERSION) && PY_VERSION_HEX >= 0x03070000 \
1210
+ && PYPY_VERSION_NUM >= 0x07090000)))
1211
+ # define PyHASH_BITS _PyHASH_BITS
1212
+ # define PyHASH_MODULUS _PyHASH_MODULUS
1213
+ # define PyHASH_INF _PyHASH_INF
1214
+ # define PyHASH_IMAG _PyHASH_IMAG
1215
+ #endif
1216
+
1217
+
1218
+ // gh-111545 added Py_GetConstant() and Py_GetConstantBorrowed()
1219
+ // to Python 3.13.0a6
1220
+ #if PY_VERSION_HEX < 0x030D00A6 && !defined(Py_CONSTANT_NONE)
1221
+
1222
+ #define Py_CONSTANT_NONE 0
1223
+ #define Py_CONSTANT_FALSE 1
1224
+ #define Py_CONSTANT_TRUE 2
1225
+ #define Py_CONSTANT_ELLIPSIS 3
1226
+ #define Py_CONSTANT_NOT_IMPLEMENTED 4
1227
+ #define Py_CONSTANT_ZERO 5
1228
+ #define Py_CONSTANT_ONE 6
1229
+ #define Py_CONSTANT_EMPTY_STR 7
1230
+ #define Py_CONSTANT_EMPTY_BYTES 8
1231
+ #define Py_CONSTANT_EMPTY_TUPLE 9
1232
+
1233
+ static inline PyObject* Py_GetConstant(unsigned int constant_id)
1234
+ {
1235
+ static PyObject* constants[Py_CONSTANT_EMPTY_TUPLE + 1] = {NULL};
1236
+
1237
+ if (constants[Py_CONSTANT_NONE] == NULL) {
1238
+ constants[Py_CONSTANT_NONE] = Py_None;
1239
+ constants[Py_CONSTANT_FALSE] = Py_False;
1240
+ constants[Py_CONSTANT_TRUE] = Py_True;
1241
+ constants[Py_CONSTANT_ELLIPSIS] = Py_Ellipsis;
1242
+ constants[Py_CONSTANT_NOT_IMPLEMENTED] = Py_NotImplemented;
1243
+
1244
+ constants[Py_CONSTANT_ZERO] = PyLong_FromLong(0);
1245
+ if (constants[Py_CONSTANT_ZERO] == NULL) {
1246
+ goto fatal_error;
1247
+ }
1248
+
1249
+ constants[Py_CONSTANT_ONE] = PyLong_FromLong(1);
1250
+ if (constants[Py_CONSTANT_ONE] == NULL) {
1251
+ goto fatal_error;
1252
+ }
1253
+
1254
+ constants[Py_CONSTANT_EMPTY_STR] = PyUnicode_FromStringAndSize("", 0);
1255
+ if (constants[Py_CONSTANT_EMPTY_STR] == NULL) {
1256
+ goto fatal_error;
1257
+ }
1258
+
1259
+ constants[Py_CONSTANT_EMPTY_BYTES] = PyBytes_FromStringAndSize("", 0);
1260
+ if (constants[Py_CONSTANT_EMPTY_BYTES] == NULL) {
1261
+ goto fatal_error;
1262
+ }
1263
+
1264
+ constants[Py_CONSTANT_EMPTY_TUPLE] = PyTuple_New(0);
1265
+ if (constants[Py_CONSTANT_EMPTY_TUPLE] == NULL) {
1266
+ goto fatal_error;
1267
+ }
1268
+ // goto dance to avoid compiler warnings about Py_FatalError()
1269
+ goto init_done;
1270
+
1271
+ fatal_error:
1272
+ // This case should never happen
1273
+ Py_FatalError("Py_GetConstant() failed to get constants");
1274
+ }
1275
+
1276
+ init_done:
1277
+ if (constant_id <= Py_CONSTANT_EMPTY_TUPLE) {
1278
+ return Py_NewRef(constants[constant_id]);
1279
+ }
1280
+ else {
1281
+ PyErr_BadInternalCall();
1282
+ return NULL;
1283
+ }
1284
+ }
1285
+
1286
+ static inline PyObject* Py_GetConstantBorrowed(unsigned int constant_id)
1287
+ {
1288
+ PyObject *obj = Py_GetConstant(constant_id);
1289
+ Py_XDECREF(obj);
1290
+ return obj;
1291
+ }
1292
+ #endif
1293
+
1294
+
1295
+ // gh-114329 added PyList_GetItemRef() to Python 3.13.0a4
1296
+ #if PY_VERSION_HEX < 0x030D00A4
1297
+ static inline PyObject *
1298
+ PyList_GetItemRef(PyObject *op, Py_ssize_t index)
1299
+ {
1300
+ PyObject *item = PyList_GetItem(op, index);
1301
+ Py_XINCREF(item);
1302
+ return item;
1303
+ }
1304
+ #endif
1305
+
1306
+
1307
+ // gh-114329 added PyList_GetItemRef() to Python 3.13.0a4
1308
+ #if PY_VERSION_HEX < 0x030D00A4
1309
+ static inline int
1310
+ PyDict_SetDefaultRef(PyObject *d, PyObject *key, PyObject *default_value,
1311
+ PyObject **result)
1312
+ {
1313
+ PyObject *value;
1314
+ if (PyDict_GetItemRef(d, key, &value) < 0) {
1315
+ // get error
1316
+ if (result) {
1317
+ *result = NULL;
1318
+ }
1319
+ return -1;
1320
+ }
1321
+ if (value != NULL) {
1322
+ // present
1323
+ if (result) {
1324
+ *result = value;
1325
+ }
1326
+ else {
1327
+ Py_DECREF(value);
1328
+ }
1329
+ return 1;
1330
+ }
1331
+
1332
+ // missing: set the item
1333
+ if (PyDict_SetItem(d, key, default_value) < 0) {
1334
+ // set error
1335
+ if (result) {
1336
+ *result = NULL;
1337
+ }
1338
+ return -1;
1339
+ }
1340
+ if (result) {
1341
+ *result = Py_NewRef(default_value);
1342
+ }
1343
+ return 0;
1344
+ }
1345
+ #endif
1346
+
1347
+ #if PY_VERSION_HEX < 0x030E0000 && PY_VERSION_HEX >= 0x03060000 && !defined(PYPY_VERSION)
1348
+ typedef struct PyUnicodeWriter PyUnicodeWriter;
1349
+
1350
+ static inline void PyUnicodeWriter_Discard(PyUnicodeWriter *writer)
1351
+ {
1352
+ _PyUnicodeWriter_Dealloc((_PyUnicodeWriter*)writer);
1353
+ PyMem_Free(writer);
1354
+ }
1355
+
1356
+ static inline PyUnicodeWriter* PyUnicodeWriter_Create(Py_ssize_t length)
1357
+ {
1358
+ if (length < 0) {
1359
+ PyErr_SetString(PyExc_ValueError,
1360
+ "length must be positive");
1361
+ return NULL;
1362
+ }
1363
+
1364
+ const size_t size = sizeof(_PyUnicodeWriter);
1365
+ PyUnicodeWriter *pub_writer = (PyUnicodeWriter *)PyMem_Malloc(size);
1366
+ if (pub_writer == _Py_NULL) {
1367
+ PyErr_NoMemory();
1368
+ return _Py_NULL;
1369
+ }
1370
+ _PyUnicodeWriter *writer = (_PyUnicodeWriter *)pub_writer;
1371
+
1372
+ _PyUnicodeWriter_Init(writer);
1373
+ if (_PyUnicodeWriter_Prepare(writer, length, 127) < 0) {
1374
+ PyUnicodeWriter_Discard(pub_writer);
1375
+ return NULL;
1376
+ }
1377
+ writer->overallocate = 1;
1378
+ return pub_writer;
1379
+ }
1380
+
1381
+ static inline PyObject* PyUnicodeWriter_Finish(PyUnicodeWriter *writer)
1382
+ {
1383
+ PyObject *str = _PyUnicodeWriter_Finish((_PyUnicodeWriter*)writer);
1384
+ assert(((_PyUnicodeWriter*)writer)->buffer == NULL);
1385
+ PyMem_Free(writer);
1386
+ return str;
1387
+ }
1388
+
1389
+ static inline int
1390
+ PyUnicodeWriter_WriteChar(PyUnicodeWriter *writer, Py_UCS4 ch)
1391
+ {
1392
+ if (ch > 0x10ffff) {
1393
+ PyErr_SetString(PyExc_ValueError,
1394
+ "character must be in range(0x110000)");
1395
+ return -1;
1396
+ }
1397
+
1398
+ return _PyUnicodeWriter_WriteChar((_PyUnicodeWriter*)writer, ch);
1399
+ }
1400
+
1401
+ static inline int
1402
+ PyUnicodeWriter_WriteStr(PyUnicodeWriter *writer, PyObject *obj)
1403
+ {
1404
+ PyObject *str = PyObject_Str(obj);
1405
+ if (str == NULL) {
1406
+ return -1;
1407
+ }
1408
+
1409
+ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str);
1410
+ Py_DECREF(str);
1411
+ return res;
1412
+ }
1413
+
1414
+ static inline int
1415
+ PyUnicodeWriter_WriteRepr(PyUnicodeWriter *writer, PyObject *obj)
1416
+ {
1417
+ PyObject *str = PyObject_Repr(obj);
1418
+ if (str == NULL) {
1419
+ return -1;
1420
+ }
1421
+
1422
+ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str);
1423
+ Py_DECREF(str);
1424
+ return res;
1425
+ }
1426
+
1427
+ static inline int
1428
+ PyUnicodeWriter_WriteUTF8(PyUnicodeWriter *writer,
1429
+ const char *str, Py_ssize_t size)
1430
+ {
1431
+ if (size < 0) {
1432
+ size = (Py_ssize_t)strlen(str);
1433
+ }
1434
+
1435
+ PyObject *str_obj = PyUnicode_FromStringAndSize(str, size);
1436
+ if (str_obj == _Py_NULL) {
1437
+ return -1;
1438
+ }
1439
+
1440
+ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str_obj);
1441
+ Py_DECREF(str_obj);
1442
+ return res;
1443
+ }
1444
+
1445
+ static inline int
1446
+ PyUnicodeWriter_WriteWideChar(PyUnicodeWriter *writer,
1447
+ const wchar_t *str, Py_ssize_t size)
1448
+ {
1449
+ if (size < 0) {
1450
+ size = (Py_ssize_t)wcslen(str);
1451
+ }
1452
+
1453
+ PyObject *str_obj = PyUnicode_FromWideChar(str, size);
1454
+ if (str_obj == _Py_NULL) {
1455
+ return -1;
1456
+ }
1457
+
1458
+ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str_obj);
1459
+ Py_DECREF(str_obj);
1460
+ return res;
1461
+ }
1462
+
1463
+ static inline int
1464
+ PyUnicodeWriter_WriteSubstring(PyUnicodeWriter *writer, PyObject *str,
1465
+ Py_ssize_t start, Py_ssize_t end)
1466
+ {
1467
+ if (!PyUnicode_Check(str)) {
1468
+ PyErr_Format(PyExc_TypeError, "expect str, not %T", str);
1469
+ return -1;
1470
+ }
1471
+ if (start < 0 || start > end) {
1472
+ PyErr_Format(PyExc_ValueError, "invalid start argument");
1473
+ return -1;
1474
+ }
1475
+ if (end > PyUnicode_GET_LENGTH(str)) {
1476
+ PyErr_Format(PyExc_ValueError, "invalid end argument");
1477
+ return -1;
1478
+ }
1479
+
1480
+ return _PyUnicodeWriter_WriteSubstring((_PyUnicodeWriter*)writer, str,
1481
+ start, end);
1482
+ }
1483
+
1484
+ static inline int
1485
+ PyUnicodeWriter_Format(PyUnicodeWriter *writer, const char *format, ...)
1486
+ {
1487
+ va_list vargs;
1488
+ va_start(vargs, format);
1489
+ PyObject *str = PyUnicode_FromFormatV(format, vargs);
1490
+ va_end(vargs);
1491
+ if (str == _Py_NULL) {
1492
+ return -1;
1493
+ }
1494
+
1495
+ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str);
1496
+ Py_DECREF(str);
1497
+ return res;
1498
+ }
1499
+ #endif // PY_VERSION_HEX < 0x030E0000
1500
+
1501
+ // gh-116560 added PyLong_GetSign() to Python 3.14.0a0
1502
+ #if PY_VERSION_HEX < 0x030E00A0
1503
+ static inline int PyLong_GetSign(PyObject *obj, int *sign)
1504
+ {
1505
+ if (!PyLong_Check(obj)) {
1506
+ PyErr_Format(PyExc_TypeError, "expect int, got %s", Py_TYPE(obj)->tp_name);
1507
+ return -1;
1508
+ }
1509
+
1510
+ *sign = _PyLong_Sign(obj);
1511
+ return 0;
1512
+ }
1513
+ #endif
1514
+
1515
+
1516
+ #ifdef __cplusplus
1517
+ }
1518
+ #endif
1519
+ #endif // PYTHONCAPI_COMPAT
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/result.h"
21
+
22
+ namespace arrow {
23
+
24
+ template <typename InputIterator, typename OutputIterator, typename UnaryOperation>
25
+ Status MaybeTransform(InputIterator first, InputIterator last, OutputIterator out,
26
+ UnaryOperation unary_op) {
27
+ for (; first != last; ++first, (void)++out) {
28
+ ARROW_ASSIGN_OR_RAISE(*out, unary_op(*first));
29
+ }
30
+ return Status::OK();
31
+ }
32
+
33
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+
22
+ #include "arrow/memory_pool.h"
23
+ #include "arrow/type_fwd.h"
24
+ #include "arrow/util/bit_util.h"
25
+
26
+ namespace arrow {
27
+ namespace internal {
28
+
29
struct BitmapWordAlignParams {
  // Number of bits before the word-aligned region.
  int64_t leading_bits;
  // Number of bits after the word-aligned region.
  int64_t trailing_bits;
  // Bit offset (relative to the original data pointer) of the trailing area.
  int64_t trailing_bit_offset;
  // First byte of the word-aligned region.
  const uint8_t* aligned_start;
  // Number of bits covered by the word-aligned region.
  int64_t aligned_bits;
  // Number of whole aligned words making up `aligned_bits`.
  int64_t aligned_words;
};
37
+
38
+ // Compute parameters for accessing a bitmap using aligned word instructions.
39
+ // The returned parameters describe:
40
+ // - a leading area of size `leading_bits` before the aligned words
41
+ // - a word-aligned area of size `aligned_bits`
42
+ // - a trailing area of size `trailing_bits` after the aligned words
43
+ template <uint64_t ALIGN_IN_BYTES>
44
+ inline BitmapWordAlignParams BitmapWordAlign(const uint8_t* data, int64_t bit_offset,
45
+ int64_t length) {
46
+ static_assert(bit_util::IsPowerOf2(ALIGN_IN_BYTES),
47
+ "ALIGN_IN_BYTES should be a positive power of two");
48
+ constexpr uint64_t ALIGN_IN_BITS = ALIGN_IN_BYTES * 8;
49
+
50
+ BitmapWordAlignParams p;
51
+
52
+ // Compute a "bit address" that we can align up to ALIGN_IN_BITS.
53
+ // We don't care about losing the upper bits since we are only interested in the
54
+ // difference between both addresses.
55
+ const uint64_t bit_addr =
56
+ reinterpret_cast<size_t>(data) * 8 + static_cast<uint64_t>(bit_offset);
57
+ const uint64_t aligned_bit_addr = bit_util::RoundUpToPowerOf2(bit_addr, ALIGN_IN_BITS);
58
+
59
+ p.leading_bits = std::min<int64_t>(length, aligned_bit_addr - bit_addr);
60
+ p.aligned_words = (length - p.leading_bits) / ALIGN_IN_BITS;
61
+ p.aligned_bits = p.aligned_words * ALIGN_IN_BITS;
62
+ p.trailing_bits = length - p.leading_bits - p.aligned_bits;
63
+ p.trailing_bit_offset = bit_offset + p.leading_bits + p.aligned_bits;
64
+
65
+ p.aligned_start = data + (bit_offset + p.leading_bits) / 8;
66
+ return p;
67
+ }
68
+ } // namespace internal
69
+
70
+ namespace util {
71
+
72
+ // Functions to check if the provided Arrow object is aligned by the specified alignment
73
+
74
+ /// \brief Special alignment value to use data type-specific alignment
75
+ ///
76
+ /// If this is passed as the `alignment` in one of the CheckAlignment or EnsureAlignment
77
+ /// functions, then the function will ensure each buffer is suitably aligned
78
+ /// for the data type of the array. For example, given an int32 buffer the values
79
+ /// buffer's address must be a multiple of 4. Given a large_string buffer the offsets
80
+ /// buffer's address must be a multiple of 8.
81
+ constexpr int64_t kValueAlignment = -3;
82
+
83
+ /// \brief Calculate if the buffer's address is a multiple of `alignment`
84
+ ///
85
+ /// If `alignment` is less than or equal to 0 then this method will always return true
86
+ /// \param buffer the buffer to check
87
+ /// \param alignment the alignment (in bytes) to check for
88
+ ARROW_EXPORT bool CheckAlignment(const Buffer& buffer, int64_t alignment);
89
+ /// \brief Calculate if all buffers in the array data are aligned
90
+ ///
91
+ /// This will also check the buffers in the dictionary and any children
92
+ /// \param array the array data to check
93
+ /// \param alignment the alignment (in bytes) to check for
94
+ ARROW_EXPORT bool CheckAlignment(const ArrayData& array, int64_t alignment);
95
+ /// \brief Calculate if all buffers in the array are aligned
96
+ ///
97
+ /// This will also check the buffers in the dictionary and any children
98
+ /// \param array the array to check
99
+ /// \param alignment the alignment (in bytes) to check for
100
+ ARROW_EXPORT bool CheckAlignment(const Array& array, int64_t alignment);
101
+
102
+ // Following functions require an additional boolean vector which stores the
103
+ // alignment check bits of the constituent objects.
104
+ // For example, needs_alignment vector for a ChunkedArray will contain the
105
+ // check bits of the constituent Arrays.
106
+ // The boolean vector check was introduced to minimize the repetitive checks
107
+ // of the constituent objects during the EnsureAlignment function where certain
108
+ // objects can be ignored for further checking if we already know that they are
109
+ // completely aligned.
110
+
111
+ /// \brief Calculate which (if any) chunks in a chunked array are unaligned
112
+ /// \param array the array to check
113
+ /// \param alignment the alignment (in bytes) to check for
114
+ /// \param needs_alignment an output vector that will store the results of the check
115
+ /// it must be set to a valid vector. Extra elements will be added to the end
116
+ /// of the vector for each chunk that is checked. `true` will be stored if
117
+ /// the chunk is unaligned.
118
+ /// \param offset the index of the chunk to start checking
119
+ /// \return true if all chunks (starting at `offset`) are aligned, false otherwise
120
+ ARROW_EXPORT bool CheckAlignment(const ChunkedArray& array, int64_t alignment,
121
+ std::vector<bool>* needs_alignment, int offset = 0);
122
+
123
+ /// \brief calculate which (if any) columns in a record batch are unaligned
124
+ /// \param batch the batch to check
125
+ /// \param alignment the alignment (in bytes) to check for
126
+ /// \param needs_alignment an output vector that will store the results of the
127
+ /// check. It must be set to a valid vector. Extra elements will be added
128
+ /// to the end of the vector for each column that is checked. `true` will be
129
+ /// stored if the column is unaligned.
130
+ ARROW_EXPORT bool CheckAlignment(const RecordBatch& batch, int64_t alignment,
131
+ std::vector<bool>* needs_alignment);
132
+
133
+ /// \brief calculate which (if any) columns in a table are unaligned
134
+ /// \param table the table to check
135
+ /// \param alignment the alignment (in bytes) to check for
136
+ /// \param needs_alignment an output vector that will store the results of the
137
+ /// check. It must be set to a valid vector. Extra elements will be added
138
+ /// to the end of the vector for each column that is checked. `true` will be
139
+ /// stored if the column is unaligned.
140
+ ARROW_EXPORT bool CheckAlignment(const Table& table, int64_t alignment,
141
+ std::vector<bool>* needs_alignment);
142
+
143
+ /// \brief return a buffer that has the given alignment and the same data as the input
144
+ /// buffer
145
+ ///
146
+ /// If the input buffer is already aligned then this method will return the input buffer
147
+ /// If the input buffer is not already aligned then this method will allocate a new
148
+ /// buffer. The alignment of the new buffer will have at least
149
+ /// max(kDefaultBufferAlignment, alignment) bytes of alignment.
150
+ ///
151
+ /// \param buffer the buffer to check
152
+ /// \param alignment the alignment (in bytes) to check for
153
+ /// \param memory_pool a memory pool that will be used to allocate a new buffer if the
154
+ /// input buffer is not sufficiently aligned
155
+ ARROW_EXPORT Result<std::shared_ptr<Buffer>> EnsureAlignment(
156
+ std::shared_ptr<Buffer> buffer, int64_t alignment, MemoryPool* memory_pool);
157
+
158
+ /// \brief return an array data where all buffers are aligned by the given alignment
159
+ ///
160
+ /// If any input buffer is already aligned then this method will reuse that same input
161
+ /// buffer.
162
+ ///
163
+ /// \param array_data the array data to check
164
+ /// \param alignment the alignment (in bytes) to check for
165
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
166
+ /// input buffer is not sufficiently aligned
167
+ ARROW_EXPORT Result<std::shared_ptr<ArrayData>> EnsureAlignment(
168
+ std::shared_ptr<ArrayData> array_data, int64_t alignment, MemoryPool* memory_pool);
169
+
170
+ /// \brief return an array where all buffers are aligned by the given alignment
171
+ ///
172
+ /// If any input buffer is already aligned then this method will reuse that same input
173
+ /// buffer.
174
+ ///
175
+ /// \param array the array to check
176
+ /// \param alignment the alignment (in bytes) to check for
177
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
178
+ /// input buffer is not sufficiently aligned
179
+ ARROW_EXPORT Result<std::shared_ptr<Array>> EnsureAlignment(std::shared_ptr<Array> array,
180
+ int64_t alignment,
181
+ MemoryPool* memory_pool);
182
+
183
+ /// \brief return a chunked array where all buffers are aligned by the given alignment
184
+ ///
185
+ /// If any input buffer is already aligned then this method will reuse that same input
186
+ /// buffer.
187
+ ///
188
+ /// \param array the chunked array to check
189
+ /// \param alignment the alignment (in bytes) to check for
190
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
191
+ /// input buffer is not sufficiently aligned
192
+ ARROW_EXPORT Result<std::shared_ptr<ChunkedArray>> EnsureAlignment(
193
+ std::shared_ptr<ChunkedArray> array, int64_t alignment, MemoryPool* memory_pool);
194
+
195
+ /// \brief return a record batch where all buffers are aligned by the given alignment
196
+ ///
197
+ /// If any input buffer is already aligned then this method will reuse that same input
198
+ /// buffer.
199
+ ///
200
+ /// \param batch the batch to check
201
+ /// \param alignment the alignment (in bytes) to check for
202
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
203
+ /// input buffer is not sufficiently aligned
204
+ ARROW_EXPORT Result<std::shared_ptr<RecordBatch>> EnsureAlignment(
205
+ std::shared_ptr<RecordBatch> batch, int64_t alignment, MemoryPool* memory_pool);
206
+
207
+ /// \brief return a table where all buffers are aligned by the given alignment
208
+ ///
209
+ /// If any input buffer is already aligned then this method will reuse that same input
210
+ /// buffer.
211
+ ///
212
+ /// \param table the table to check
213
+ /// \param alignment the alignment (in bytes) to check for
214
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
215
+ /// input buffer is not sufficiently aligned
216
+ ARROW_EXPORT Result<std::shared_ptr<Table>> EnsureAlignment(std::shared_ptr<Table> table,
217
+ int64_t alignment,
218
+ MemoryPool* memory_pool);
219
+
220
+ } // namespace util
221
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h ADDED
@@ -0,0 +1,2058 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cassert>
22
+ #include <cstring>
23
+ #include <deque>
24
+ #include <limits>
25
+ #include <optional>
26
+ #include <queue>
27
+
28
+ #include "arrow/util/async_generator_fwd.h"
29
+ #include "arrow/util/async_util.h"
30
+ #include "arrow/util/functional.h"
31
+ #include "arrow/util/future.h"
32
+ #include "arrow/util/io_util.h"
33
+ #include "arrow/util/iterator.h"
34
+ #include "arrow/util/mutex.h"
35
+ #include "arrow/util/queue.h"
36
+ #include "arrow/util/thread_pool.h"
37
+
38
+ namespace arrow {
39
+
40
+ // The methods in this file create, modify, and utilize AsyncGenerator which is an
41
+ // iterator of futures. This allows an asynchronous source (like file input) to be run
42
+ // through a pipeline in the same way that iterators can be used to create pipelined
43
+ // workflows.
44
+ //
45
+ // In order to support pipeline parallelism we introduce the concept of asynchronous
46
+ // reentrancy. This is different than synchronous reentrancy. With synchronous code a
47
+ // function is reentrant if the function can be called again while a previous call to that
48
+ // function is still running. Unless otherwise specified none of these generators are
49
+ // synchronously reentrant. Care should be taken to avoid calling them in such a way (and
50
+ // the utilities Visit/Collect/Await take care to do this).
51
+ //
52
+ // Asynchronous reentrancy on the other hand means the function is called again before the
53
+ // future returned by the function is marked finished (but after the call to get the
54
+ // future returns). Some of these generators are async-reentrant while others (e.g.
55
+ // those that depend on ordered processing like decompression) are not. Read the MakeXYZ
56
+ // function comments to determine which generators support async reentrancy.
57
+ //
58
+ // Note: Generators that are not asynchronously reentrant can still support readahead
59
+ // (\see MakeSerialReadaheadGenerator).
60
+ //
61
+ // Readahead operators, and some other operators, may introduce queueing. Any operators
62
+ // that introduce buffering should detail the amount of buffering they introduce in their
63
+ // MakeXYZ function comments.
64
+ //
65
+ // A generator should always be fully consumed before it is destroyed.
66
+ // A generator should not mark a future complete with an error status or a terminal value
67
+ // until all outstanding futures have completed. Generators that spawn multiple
68
+ // concurrent futures may need to hold onto an error while other concurrent futures wrap
69
+ // up.
70
+ template <typename T>
71
+ struct IterationTraits<AsyncGenerator<T>> {
72
+ /// \brief by default when iterating through a sequence of AsyncGenerator<T>,
73
+ /// an empty function indicates the end of iteration.
74
+ static AsyncGenerator<T> End() { return AsyncGenerator<T>(); }
75
+
76
+ static bool IsEnd(const AsyncGenerator<T>& val) { return !val; }
77
+ };
78
+
79
+ template <typename T>
80
+ Future<T> AsyncGeneratorEnd() {
81
+ return Future<T>::MakeFinished(IterationTraits<T>::End());
82
+ }
83
+
84
+ /// returning a future that completes when all have been visited
85
+ template <typename T, typename Visitor>
86
+ Future<> VisitAsyncGenerator(AsyncGenerator<T> generator, Visitor visitor) {
87
+ struct LoopBody {
88
+ struct Callback {
89
+ Result<ControlFlow<>> operator()(const T& next) {
90
+ if (IsIterationEnd(next)) {
91
+ return Break();
92
+ } else {
93
+ auto visited = visitor(next);
94
+ if (visited.ok()) {
95
+ return Continue();
96
+ } else {
97
+ return visited;
98
+ }
99
+ }
100
+ }
101
+
102
+ Visitor visitor;
103
+ };
104
+
105
+ Future<ControlFlow<>> operator()() {
106
+ Callback callback{visitor};
107
+ auto next = generator();
108
+ return next.Then(std::move(callback));
109
+ }
110
+
111
+ AsyncGenerator<T> generator;
112
+ Visitor visitor;
113
+ };
114
+
115
+ return Loop(LoopBody{std::move(generator), std::move(visitor)});
116
+ }
117
+
118
+ /// \brief Wait for an async generator to complete, discarding results.
119
+ template <typename T>
120
+ Future<> DiscardAllFromAsyncGenerator(AsyncGenerator<T> generator) {
121
+ std::function<Status(T)> visitor = [](const T&) { return Status::OK(); };
122
+ return VisitAsyncGenerator(generator, visitor);
123
+ }
124
+
125
+ /// \brief Collect the results of an async generator into a vector
126
+ template <typename T>
127
+ Future<std::vector<T>> CollectAsyncGenerator(AsyncGenerator<T> generator) {
128
+ auto vec = std::make_shared<std::vector<T>>();
129
+ auto loop_body = [generator = std::move(generator),
130
+ vec = std::move(vec)]() -> Future<ControlFlow<std::vector<T>>> {
131
+ auto next = generator();
132
+ return next.Then([vec](const T& result) -> Result<ControlFlow<std::vector<T>>> {
133
+ if (IsIterationEnd(result)) {
134
+ return Break(*vec);
135
+ } else {
136
+ vec->push_back(result);
137
+ return Continue();
138
+ }
139
+ });
140
+ };
141
+ return Loop(std::move(loop_body));
142
+ }
143
+
144
+ /// \see MakeMappedGenerator
145
+ template <typename T, typename V>
146
+ class MappingGenerator {
147
+ public:
148
+ MappingGenerator(AsyncGenerator<T> source, std::function<Future<V>(const T&)> map)
149
+ : state_(std::make_shared<State>(std::move(source), std::move(map))) {}
150
+
151
+ Future<V> operator()() {
152
+ auto future = Future<V>::Make();
153
+ bool should_trigger;
154
+ {
155
+ auto guard = state_->mutex.Lock();
156
+ if (state_->finished) {
157
+ return AsyncGeneratorEnd<V>();
158
+ }
159
+ should_trigger = state_->waiting_jobs.empty();
160
+ state_->waiting_jobs.push_back(future);
161
+ }
162
+ if (should_trigger) {
163
+ state_->source().AddCallback(Callback{state_});
164
+ }
165
+ return future;
166
+ }
167
+
168
+ private:
169
+ struct State {
170
+ State(AsyncGenerator<T> source, std::function<Future<V>(const T&)> map)
171
+ : source(std::move(source)),
172
+ map(std::move(map)),
173
+ waiting_jobs(),
174
+ mutex(),
175
+ finished(false) {}
176
+
177
+ void Purge() {
178
+ // This might be called by an original callback (if the source iterator fails or
179
+ // ends) or by a mapped callback (if the map function fails or ends prematurely).
180
+ // Either way it should only be called once and after finished is set so there is no
181
+ // need to guard access to `waiting_jobs`.
182
+ while (!waiting_jobs.empty()) {
183
+ waiting_jobs.front().MarkFinished(IterationTraits<V>::End());
184
+ waiting_jobs.pop_front();
185
+ }
186
+ }
187
+
188
+ AsyncGenerator<T> source;
189
+ std::function<Future<V>(const T&)> map;
190
+ std::deque<Future<V>> waiting_jobs;
191
+ util::Mutex mutex;
192
+ bool finished;
193
+ };
194
+
195
+ struct Callback;
196
+
197
+ struct MappedCallback {
198
+ void operator()(const Result<V>& maybe_next) {
199
+ bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next);
200
+ bool should_purge = false;
201
+ if (end) {
202
+ {
203
+ auto guard = state->mutex.Lock();
204
+ should_purge = !state->finished;
205
+ state->finished = true;
206
+ }
207
+ }
208
+ sink.MarkFinished(maybe_next);
209
+ if (should_purge) {
210
+ state->Purge();
211
+ }
212
+ }
213
+ std::shared_ptr<State> state;
214
+ Future<V> sink;
215
+ };
216
+
217
+ struct Callback {
218
+ void operator()(const Result<T>& maybe_next) {
219
+ Future<V> sink;
220
+ bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next);
221
+ bool should_purge = false;
222
+ bool should_trigger;
223
+ {
224
+ auto guard = state->mutex.Lock();
225
+ // A MappedCallback may have purged or be purging the queue;
226
+ // we shouldn't do anything here.
227
+ if (state->finished) return;
228
+ if (end) {
229
+ should_purge = !state->finished;
230
+ state->finished = true;
231
+ }
232
+ sink = state->waiting_jobs.front();
233
+ state->waiting_jobs.pop_front();
234
+ should_trigger = !end && !state->waiting_jobs.empty();
235
+ }
236
+ if (should_purge) {
237
+ state->Purge();
238
+ }
239
+ if (should_trigger) {
240
+ state->source().AddCallback(Callback{state});
241
+ }
242
+ if (maybe_next.ok()) {
243
+ const T& val = maybe_next.ValueUnsafe();
244
+ if (IsIterationEnd(val)) {
245
+ sink.MarkFinished(IterationTraits<V>::End());
246
+ } else {
247
+ Future<V> mapped_fut = state->map(val);
248
+ mapped_fut.AddCallback(MappedCallback{std::move(state), std::move(sink)});
249
+ }
250
+ } else {
251
+ sink.MarkFinished(maybe_next.status());
252
+ }
253
+ }
254
+
255
+ std::shared_ptr<State> state;
256
+ };
257
+
258
+ std::shared_ptr<State> state_;
259
+ };
260
+
261
+ /// \brief Create a generator that will apply the map function to each element of
262
+ /// source. The map function is not called on the end token.
263
+ ///
264
+ /// Note: This function makes a copy of `map` for each item
265
+ /// Note: Errors returned from the `map` function will be propagated
266
+ ///
267
+ /// If the source generator is async-reentrant then this generator will be also
268
+ template <typename T, typename MapFn,
269
+ typename Mapped = detail::result_of_t<MapFn(const T&)>,
270
+ typename V = typename EnsureFuture<Mapped>::type::ValueType>
271
+ AsyncGenerator<V> MakeMappedGenerator(AsyncGenerator<T> source_generator, MapFn map) {
272
+ auto map_callback = [map = std::move(map)](const T& val) mutable -> Future<V> {
273
+ return ToFuture(map(val));
274
+ };
275
+ return MappingGenerator<T, V>(std::move(source_generator), std::move(map_callback));
276
+ }
277
+
278
+ /// \brief Create a generator that will apply the map function to
279
+ /// each element of source. The map function is not called on the end
280
+ /// token. The result of the map function should be another
281
+ /// generator; all these generators will then be flattened to produce
282
+ /// a single stream of items.
283
+ ///
284
+ /// Note: This function makes a copy of `map` for each item
285
+ /// Note: Errors returned from the `map` function will be propagated
286
+ ///
287
+ /// If the source generator is async-reentrant then this generator will be also
288
+ template <typename T, typename MapFn,
289
+ typename Mapped = detail::result_of_t<MapFn(const T&)>,
290
+ typename V = typename EnsureFuture<Mapped>::type::ValueType>
291
+ AsyncGenerator<T> MakeFlatMappedGenerator(AsyncGenerator<T> source_generator, MapFn map) {
292
+ return MakeConcatenatedGenerator(
293
+ MakeMappedGenerator(std::move(source_generator), std::move(map)));
294
+ }
295
+
296
+ /// \see MakeSequencingGenerator
297
+ template <typename T, typename ComesAfter, typename IsNext>
298
+ class SequencingGenerator {
299
+ public:
300
+ SequencingGenerator(AsyncGenerator<T> source, ComesAfter compare, IsNext is_next,
301
+ T initial_value)
302
+ : state_(std::make_shared<State>(std::move(source), std::move(compare),
303
+ std::move(is_next), std::move(initial_value))) {}
304
+
305
+ Future<T> operator()() {
306
+ {
307
+ auto guard = state_->mutex.Lock();
308
+ // We can send a result immediately if the top of the queue is either an
309
+ // error or the next item
310
+ if (!state_->queue.empty() &&
311
+ (!state_->queue.top().ok() ||
312
+ state_->is_next(state_->previous_value, *state_->queue.top()))) {
313
+ auto result = std::move(state_->queue.top());
314
+ if (result.ok()) {
315
+ state_->previous_value = *result;
316
+ }
317
+ state_->queue.pop();
318
+ return Future<T>::MakeFinished(result);
319
+ }
320
+ if (state_->finished) {
321
+ return AsyncGeneratorEnd<T>();
322
+ }
323
+ // The next item is not in the queue so we will need to wait
324
+ auto new_waiting_fut = Future<T>::Make();
325
+ state_->waiting_future = new_waiting_fut;
326
+ guard.Unlock();
327
+ state_->source().AddCallback(Callback{state_});
328
+ return new_waiting_fut;
329
+ }
330
+ }
331
+
332
+ private:
333
+ struct WrappedComesAfter {
334
+ bool operator()(const Result<T>& left, const Result<T>& right) {
335
+ if (!left.ok() || !right.ok()) {
336
+ // Should never happen
337
+ return false;
338
+ }
339
+ return compare(*left, *right);
340
+ }
341
+ ComesAfter compare;
342
+ };
343
+
344
+ struct State {
345
+ State(AsyncGenerator<T> source, ComesAfter compare, IsNext is_next, T initial_value)
346
+ : source(std::move(source)),
347
+ is_next(std::move(is_next)),
348
+ previous_value(std::move(initial_value)),
349
+ waiting_future(),
350
+ queue(WrappedComesAfter{compare}),
351
+ finished(false),
352
+ mutex() {}
353
+
354
+ AsyncGenerator<T> source;
355
+ IsNext is_next;
356
+ T previous_value;
357
+ Future<T> waiting_future;
358
+ std::priority_queue<Result<T>, std::vector<Result<T>>, WrappedComesAfter> queue;
359
+ bool finished;
360
+ util::Mutex mutex;
361
+ };
362
+
363
+ class Callback {
364
+ public:
365
+ explicit Callback(std::shared_ptr<State> state) : state_(std::move(state)) {}
366
+
367
+ void operator()(const Result<T> result) {
368
+ Future<T> to_deliver;
369
+ bool finished;
370
+ {
371
+ auto guard = state_->mutex.Lock();
372
+ bool ready_to_deliver = false;
373
+ if (!result.ok()) {
374
+ // Clear any cached results
375
+ while (!state_->queue.empty()) {
376
+ state_->queue.pop();
377
+ }
378
+ ready_to_deliver = true;
379
+ state_->finished = true;
380
+ } else if (IsIterationEnd<T>(result.ValueUnsafe())) {
381
+ ready_to_deliver = state_->queue.empty();
382
+ state_->finished = true;
383
+ } else {
384
+ ready_to_deliver = state_->is_next(state_->previous_value, *result);
385
+ }
386
+
387
+ if (ready_to_deliver && state_->waiting_future.is_valid()) {
388
+ to_deliver = state_->waiting_future;
389
+ if (result.ok()) {
390
+ state_->previous_value = *result;
391
+ }
392
+ } else {
393
+ state_->queue.push(result);
394
+ }
395
+ // Capture state_->finished so we can access it outside the mutex
396
+ finished = state_->finished;
397
+ }
398
+ // Must deliver result outside of the mutex
399
+ if (to_deliver.is_valid()) {
400
+ to_deliver.MarkFinished(result);
401
+ } else {
402
+ // Otherwise, if we didn't get the next item (or a terminal item), we
403
+ // need to keep looking
404
+ if (!finished) {
405
+ state_->source().AddCallback(Callback{state_});
406
+ }
407
+ }
408
+ }
409
+
410
+ private:
411
+ const std::shared_ptr<State> state_;
412
+ };
413
+
414
+ const std::shared_ptr<State> state_;
415
+ };
416
+
417
+ /// \brief Buffer an AsyncGenerator to return values in sequence order ComesAfter
418
+ /// and IsNext determine the sequence order.
419
+ ///
420
+ /// ComesAfter should be a BinaryPredicate that only returns true if a comes after b
421
+ ///
422
+ /// IsNext should be a BinaryPredicate that returns true, given `a` and `b`, only if
423
+ /// `b` follows immediately after `a`. It should return true given `initial_value` and
424
+ /// `b` if `b` is the first item in the sequence.
425
+ ///
426
+ /// This operator will queue unboundedly while waiting for the next item. It is intended
427
+ /// for jittery sources that might scatter an ordered sequence. It is NOT intended to
428
+ /// sort. Using it to try and sort could result in excessive RAM usage. This generator
429
+ /// will queue up to N blocks where N is the max "out of order"ness of the source.
430
+ ///
431
+ /// For example, if the source is 1,6,2,5,4,3 it will queue 3 blocks because 3 is 3
432
+ /// blocks beyond where it belongs.
433
+ ///
434
+ /// This generator is not async-reentrant but it consists only of a simple log(n)
435
+ /// insertion into a priority queue.
436
+ template <typename T, typename ComesAfter, typename IsNext>
437
+ AsyncGenerator<T> MakeSequencingGenerator(AsyncGenerator<T> source_generator,
438
+ ComesAfter compare, IsNext is_next,
439
+ T initial_value) {
440
+ return SequencingGenerator<T, ComesAfter, IsNext>(
441
+ std::move(source_generator), std::move(compare), std::move(is_next),
442
+ std::move(initial_value));
443
+ }
444
+
445
+ /// \see MakeTransformedGenerator
446
+ template <typename T, typename V>
447
+ class TransformingGenerator {
448
+ // The transforming generator state will be referenced as an async generator but will
449
+ // also be referenced via callback to various futures. If the async generator owner
450
+ // moves it around we need the state to be consistent for future callbacks.
451
+ struct TransformingGeneratorState
452
+ : std::enable_shared_from_this<TransformingGeneratorState> {
453
+ TransformingGeneratorState(AsyncGenerator<T> generator, Transformer<T, V> transformer)
454
+ : generator_(std::move(generator)),
455
+ transformer_(std::move(transformer)),
456
+ last_value_(),
457
+ finished_() {}
458
+
459
+ Future<V> operator()() {
460
+ while (true) {
461
+ auto maybe_next_result = Pump();
462
+ if (!maybe_next_result.ok()) {
463
+ return Future<V>::MakeFinished(maybe_next_result.status());
464
+ }
465
+ auto maybe_next = std::move(maybe_next_result).ValueUnsafe();
466
+ if (maybe_next.has_value()) {
467
+ return Future<V>::MakeFinished(*std::move(maybe_next));
468
+ }
469
+
470
+ auto next_fut = generator_();
471
+ // If finished already, process results immediately inside the loop to avoid
472
+ // stack overflow
473
+ if (next_fut.is_finished()) {
474
+ auto next_result = next_fut.result();
475
+ if (next_result.ok()) {
476
+ last_value_ = *next_result;
477
+ } else {
478
+ return Future<V>::MakeFinished(next_result.status());
479
+ }
480
+ // Otherwise, if not finished immediately, add callback to process results
481
+ } else {
482
+ auto self = this->shared_from_this();
483
+ return next_fut.Then([self](const T& next_result) {
484
+ self->last_value_ = next_result;
485
+ return (*self)();
486
+ });
487
+ }
488
+ }
489
+ }
490
+
491
+ // See comment on TransformingIterator::Pump
492
+ Result<std::optional<V>> Pump() {
493
+ if (!finished_ && last_value_.has_value()) {
494
+ ARROW_ASSIGN_OR_RAISE(TransformFlow<V> next, transformer_(*last_value_));
495
+ if (next.ReadyForNext()) {
496
+ if (IsIterationEnd(*last_value_)) {
497
+ finished_ = true;
498
+ }
499
+ last_value_.reset();
500
+ }
501
+ if (next.Finished()) {
502
+ finished_ = true;
503
+ }
504
+ if (next.HasValue()) {
505
+ return next.Value();
506
+ }
507
+ }
508
+ if (finished_) {
509
+ return IterationTraits<V>::End();
510
+ }
511
+ return std::nullopt;
512
+ }
513
+
514
+ AsyncGenerator<T> generator_;
515
+ Transformer<T, V> transformer_;
516
+ std::optional<T> last_value_;
517
+ bool finished_;
518
+ };
519
+
520
+ public:
521
+ explicit TransformingGenerator(AsyncGenerator<T> generator,
522
+ Transformer<T, V> transformer)
523
+ : state_(std::make_shared<TransformingGeneratorState>(std::move(generator),
524
+ std::move(transformer))) {}
525
+
526
+ Future<V> operator()() { return (*state_)(); }
527
+
528
+ protected:
529
+ std::shared_ptr<TransformingGeneratorState> state_;
530
+ };
531
+
532
+ /// \brief Transform an async generator using a transformer function returning a new
533
+ /// AsyncGenerator
534
+ ///
535
+ /// The transform function here behaves exactly the same as the transform function in
536
+ /// MakeTransformedIterator and you can safely use the same transform function to
537
+ /// transform both synchronous and asynchronous streams.
538
+ ///
539
+ /// This generator is not async-reentrant
540
+ ///
541
+ /// This generator may queue up to 1 instance of T but will not delay
542
+ template <typename T, typename V>
543
+ AsyncGenerator<V> MakeTransformedGenerator(AsyncGenerator<T> generator,
544
+ Transformer<T, V> transformer) {
545
+ return TransformingGenerator<T, V>(generator, transformer);
546
+ }
547
+
548
+ /// \see MakeSerialReadaheadGenerator
549
+ template <typename T>
550
+ class SerialReadaheadGenerator {
551
+ public:
552
+ SerialReadaheadGenerator(AsyncGenerator<T> source_generator, int max_readahead)
553
+ : state_(std::make_shared<State>(std::move(source_generator), max_readahead)) {}
554
+
555
+ Future<T> operator()() {
556
+ if (state_->first_) {
557
+ // Lazy generator, need to wait for the first ask to prime the pump
558
+ state_->first_ = false;
559
+ auto next = state_->source_();
560
+ return next.Then(Callback{state_}, ErrCallback{state_});
561
+ }
562
+
563
+ // This generator is not async-reentrant. We won't be called until the last
564
+ // future finished so we know there is something in the queue
565
+ auto finished = state_->finished_.load();
566
+ if (finished && state_->readahead_queue_.IsEmpty()) {
567
+ return AsyncGeneratorEnd<T>();
568
+ }
569
+
570
+ std::shared_ptr<Future<T>> next;
571
+ if (!state_->readahead_queue_.Read(next)) {
572
+ return Status::UnknownError("Could not read from readahead_queue");
573
+ }
574
+
575
+ auto last_available = state_->spaces_available_.fetch_add(1);
576
+ if (last_available == 0 && !finished) {
577
+ // Reader idled out, we need to restart it
578
+ ARROW_RETURN_NOT_OK(state_->Pump(state_));
579
+ }
580
+ return *next;
581
+ }
582
+
583
+ private:
584
+ struct State {
585
+ State(AsyncGenerator<T> source, int max_readahead)
586
+ : first_(true),
587
+ source_(std::move(source)),
588
+ finished_(false),
589
+ // There is one extra "space" for the in-flight request
590
+ spaces_available_(max_readahead + 1),
591
+ // The SPSC queue has size-1 "usable" slots so we need to overallocate 1
592
+ readahead_queue_(max_readahead + 1) {}
593
+
594
+ Status Pump(const std::shared_ptr<State>& self) {
595
+ // Can't do readahead_queue.write(source().Then(...)) because then the
596
+ // callback might run immediately and add itself to the queue before this gets added
597
+ // to the queue messing up the order.
598
+ auto next_slot = std::make_shared<Future<T>>();
599
+ auto written = readahead_queue_.Write(next_slot);
600
+ if (!written) {
601
+ return Status::UnknownError("Could not write to readahead_queue");
602
+ }
603
+ // If this Pump is being called from a callback it is possible for the source to
604
+ // poll and read from the queue between the Write and this spot where we fill the
605
+ // value in. However, it is not possible for the future to read this value we are
606
+ // writing. That is because this callback (the callback for future X) must be
607
+ // finished before future X is marked complete and this source is not pulled
608
+ // reentrantly so it will not poll for future X+1 until this callback has completed.
609
+ *next_slot = source_().Then(Callback{self}, ErrCallback{self});
610
+ return Status::OK();
611
+ }
612
+
613
+ // Only accessed by the consumer end
614
+ bool first_;
615
+ // Accessed by both threads
616
+ AsyncGenerator<T> source_;
617
+ std::atomic<bool> finished_;
618
+ // The queue has a size but it is not atomic. We keep track of how many spaces are
619
+ // left in the queue here so we know if we've just written the last value and we need
620
+ // to stop reading ahead or if we've just read from a full queue and we need to
621
+ // restart reading ahead
622
+ std::atomic<uint32_t> spaces_available_;
623
+ // Needs to be a queue of shared_ptr and not Future because we set the value of the
624
+ // future after we add it to the queue
625
+ util::SpscQueue<std::shared_ptr<Future<T>>> readahead_queue_;
626
+ };
627
+
628
+ struct Callback {
629
+ Result<T> operator()(const T& next) {
630
+ if (IsIterationEnd(next)) {
631
+ state_->finished_.store(true);
632
+ return next;
633
+ }
634
+ auto last_available = state_->spaces_available_.fetch_sub(1);
635
+ if (last_available > 1) {
636
+ ARROW_RETURN_NOT_OK(state_->Pump(state_));
637
+ }
638
+ return next;
639
+ }
640
+
641
+ std::shared_ptr<State> state_;
642
+ };
643
+
644
+ struct ErrCallback {
645
+ Result<T> operator()(const Status& st) {
646
+ state_->finished_.store(true);
647
+ return st;
648
+ }
649
+
650
+ std::shared_ptr<State> state_;
651
+ };
652
+
653
+ std::shared_ptr<State> state_;
654
+ };
655
+
656
+ /// \see MakeFromFuture
657
+ template <typename T>
658
+ class FutureFirstGenerator {
659
+ public:
660
+ explicit FutureFirstGenerator(Future<AsyncGenerator<T>> future)
661
+ : state_(std::make_shared<State>(std::move(future))) {}
662
+
663
+ Future<T> operator()() {
664
+ if (state_->source_) {
665
+ return state_->source_();
666
+ } else {
667
+ auto state = state_;
668
+ return state_->future_.Then([state](const AsyncGenerator<T>& source) {
669
+ state->source_ = source;
670
+ return state->source_();
671
+ });
672
+ }
673
+ }
674
+
675
+ private:
676
+ struct State {
677
+ explicit State(Future<AsyncGenerator<T>> future) : future_(future), source_() {}
678
+
679
+ Future<AsyncGenerator<T>> future_;
680
+ AsyncGenerator<T> source_;
681
+ };
682
+
683
+ std::shared_ptr<State> state_;
684
+ };
685
+
686
+ /// \brief Transform a Future<AsyncGenerator<T>> into an AsyncGenerator<T>
687
+ /// that waits for the future to complete as part of the first item.
688
+ ///
689
+ /// This generator is not async-reentrant (even if the generator yielded by future is)
690
+ ///
691
+ /// This generator does not queue
692
+ template <typename T>
693
+ AsyncGenerator<T> MakeFromFuture(Future<AsyncGenerator<T>> future) {
694
+ return FutureFirstGenerator<T>(std::move(future));
695
+ }
696
+
697
+ /// \brief Create a generator that will pull from the source into a queue. Unlike
698
+ /// MakeReadaheadGenerator this will not pull reentrantly from the source.
699
+ ///
700
+ /// The source generator does not need to be async-reentrant
701
+ ///
702
+ /// This generator is not async-reentrant (even if the source is)
703
+ ///
704
+ /// This generator may queue up to max_readahead additional instances of T
705
+ template <typename T>
706
+ AsyncGenerator<T> MakeSerialReadaheadGenerator(AsyncGenerator<T> source_generator,
707
+ int max_readahead) {
708
+ return SerialReadaheadGenerator<T>(std::move(source_generator), max_readahead);
709
+ }
710
+
711
+ /// \brief Create a generator that immediately pulls from the source
712
+ ///
713
+ /// Typical generators do not pull from their source until they themselves
714
+ /// are pulled. This generator does not follow that convention and will call
715
+ /// generator() once before it returns. The returned generator will otherwise
716
+ /// mirror the source.
717
+ ///
718
+ /// This generator forwards async-reentrant pressure to the source
719
+ /// This generator buffers one item (the first result) until it is delivered.
720
+ template <typename T>
721
+ AsyncGenerator<T> MakeAutoStartingGenerator(AsyncGenerator<T> generator) {
722
+ struct AutostartGenerator {
723
+ Future<T> operator()() {
724
+ if (first_future->is_valid()) {
725
+ Future<T> result = *first_future;
726
+ *first_future = Future<T>();
727
+ return result;
728
+ }
729
+ return source();
730
+ }
731
+
732
+ std::shared_ptr<Future<T>> first_future;
733
+ AsyncGenerator<T> source;
734
+ };
735
+
736
+ std::shared_ptr<Future<T>> first_future = std::make_shared<Future<T>>(generator());
737
+ return AutostartGenerator{std::move(first_future), std::move(generator)};
738
+ }
739
+
740
+ /// \see MakeReadaheadGenerator
741
+ template <typename T>
742
+ class ReadaheadGenerator {
743
+ public:
744
+ ReadaheadGenerator(AsyncGenerator<T> source_generator, int max_readahead)
745
+ : state_(std::make_shared<State>(std::move(source_generator), max_readahead)) {}
746
+
747
+ Future<T> AddMarkFinishedContinuation(Future<T> fut) {
748
+ auto state = state_;
749
+ return fut.Then(
750
+ [state](const T& result) -> Future<T> {
751
+ state->MarkFinishedIfDone(result);
752
+ if (state->finished.load()) {
753
+ if (state->num_running.fetch_sub(1) == 1) {
754
+ state->final_future.MarkFinished();
755
+ }
756
+ } else {
757
+ state->num_running.fetch_sub(1);
758
+ }
759
+ return result;
760
+ },
761
+ [state](const Status& err) -> Future<T> {
762
+ // If there is an error we need to make sure all running
763
+ // tasks finish before we return the error.
764
+ state->finished.store(true);
765
+ if (state->num_running.fetch_sub(1) == 1) {
766
+ state->final_future.MarkFinished();
767
+ }
768
+ return state->final_future.Then([err]() -> Result<T> { return err; });
769
+ });
770
+ }
771
+
772
+ Future<T> operator()() {
773
+ if (state_->readahead_queue.empty()) {
774
+ // This is the first request, let's pump the underlying queue
775
+ state_->num_running.store(state_->max_readahead);
776
+ for (int i = 0; i < state_->max_readahead; i++) {
777
+ auto next = state_->source_generator();
778
+ auto next_after_check = AddMarkFinishedContinuation(std::move(next));
779
+ state_->readahead_queue.push(std::move(next_after_check));
780
+ }
781
+ }
782
+ // Pop one and add one
783
+ auto result = state_->readahead_queue.front();
784
+ state_->readahead_queue.pop();
785
+ if (state_->finished.load()) {
786
+ state_->readahead_queue.push(AsyncGeneratorEnd<T>());
787
+ } else {
788
+ state_->num_running.fetch_add(1);
789
+ auto back_of_queue = state_->source_generator();
790
+ auto back_of_queue_after_check =
791
+ AddMarkFinishedContinuation(std::move(back_of_queue));
792
+ state_->readahead_queue.push(std::move(back_of_queue_after_check));
793
+ }
794
+ return result;
795
+ }
796
+
797
+ private:
798
+ struct State {
799
+ State(AsyncGenerator<T> source_generator, int max_readahead)
800
+ : source_generator(std::move(source_generator)), max_readahead(max_readahead) {}
801
+
802
+ void MarkFinishedIfDone(const T& next_result) {
803
+ if (IsIterationEnd(next_result)) {
804
+ finished.store(true);
805
+ }
806
+ }
807
+
808
+ AsyncGenerator<T> source_generator;
809
+ int max_readahead;
810
+ Future<> final_future = Future<>::Make();
811
+ std::atomic<int> num_running{0};
812
+ std::atomic<bool> finished{false};
813
+ std::queue<Future<T>> readahead_queue;
814
+ };
815
+
816
+ std::shared_ptr<State> state_;
817
+ };
818
+
819
+ /// \brief A generator where the producer pushes items on a queue.
820
+ ///
821
+ /// No back-pressure is applied, so this generator is mostly useful when
822
+ /// producing the values is neither CPU- nor memory-expensive (e.g. fetching
823
+ /// filesystem metadata).
824
+ ///
825
+ /// This generator is not async-reentrant.
826
+ template <typename T>
827
+ class PushGenerator {
828
+ struct State {
829
+ State() {}
830
+
831
+ util::Mutex mutex;
832
+ std::deque<Result<T>> result_q;
833
+ std::optional<Future<T>> consumer_fut;
834
+ bool finished = false;
835
+ };
836
+
837
+ public:
838
+ /// Producer API for PushGenerator
839
+ class Producer {
840
+ public:
841
+ explicit Producer(const std::shared_ptr<State>& state) : weak_state_(state) {}
842
+
843
+ /// \brief Push a value on the queue
844
+ ///
845
+ /// True is returned if the value was pushed, false if the generator is
846
+ /// already closed or destroyed. If the latter, it is recommended to stop
847
+ /// producing any further values.
848
+ bool Push(Result<T> result) {
849
+ auto state = weak_state_.lock();
850
+ if (!state) {
851
+ // Generator was destroyed
852
+ return false;
853
+ }
854
+ auto lock = state->mutex.Lock();
855
+ if (state->finished) {
856
+ // Closed early
857
+ return false;
858
+ }
859
+ if (state->consumer_fut.has_value()) {
860
+ auto fut = std::move(state->consumer_fut.value());
861
+ state->consumer_fut.reset();
862
+ lock.Unlock(); // unlock before potentially invoking a callback
863
+ fut.MarkFinished(std::move(result));
864
+ } else {
865
+ state->result_q.push_back(std::move(result));
866
+ }
867
+ return true;
868
+ }
869
+
870
+ /// \brief Tell the consumer we have finished producing
871
+ ///
872
+ /// It is allowed to call this and later call Push() again ("early close").
873
+ /// In this case, calls to Push() after the queue is closed are silently
874
+ /// ignored. This can help implementing non-trivial cancellation cases.
875
+ ///
876
+ /// True is returned on success, false if the generator is already closed
877
+ /// or destroyed.
878
+ bool Close() {
879
+ auto state = weak_state_.lock();
880
+ if (!state) {
881
+ // Generator was destroyed
882
+ return false;
883
+ }
884
+ auto lock = state->mutex.Lock();
885
+ if (state->finished) {
886
+ // Already closed
887
+ return false;
888
+ }
889
+ state->finished = true;
890
+ if (state->consumer_fut.has_value()) {
891
+ auto fut = std::move(state->consumer_fut.value());
892
+ state->consumer_fut.reset();
893
+ lock.Unlock(); // unlock before potentially invoking a callback
894
+ fut.MarkFinished(IterationTraits<T>::End());
895
+ }
896
+ return true;
897
+ }
898
+
899
+ /// Return whether the generator was closed or destroyed.
900
+ bool is_closed() const {
901
+ auto state = weak_state_.lock();
902
+ if (!state) {
903
+ // Generator was destroyed
904
+ return true;
905
+ }
906
+ auto lock = state->mutex.Lock();
907
+ return state->finished;
908
+ }
909
+
910
+ private:
911
+ const std::weak_ptr<State> weak_state_;
912
+ };
913
+
914
+ PushGenerator() : state_(std::make_shared<State>()) {}
915
+
916
+ /// Read an item from the queue
917
+ Future<T> operator()() const {
918
+ auto lock = state_->mutex.Lock();
919
+ assert(!state_->consumer_fut.has_value()); // Non-reentrant
920
+ if (!state_->result_q.empty()) {
921
+ auto fut = Future<T>::MakeFinished(std::move(state_->result_q.front()));
922
+ state_->result_q.pop_front();
923
+ return fut;
924
+ }
925
+ if (state_->finished) {
926
+ return AsyncGeneratorEnd<T>();
927
+ }
928
+ auto fut = Future<T>::Make();
929
+ state_->consumer_fut = fut;
930
+ return fut;
931
+ }
932
+
933
+ /// \brief Return producer-side interface
934
+ ///
935
+ /// The returned object must be used by the producer to push values on the queue.
936
+ /// Only a single Producer object should be instantiated.
937
+ Producer producer() { return Producer{state_}; }
938
+
939
+ private:
940
+ const std::shared_ptr<State> state_;
941
+ };
942
+
943
+ /// \brief Create a generator that pulls reentrantly from a source
944
+ /// This generator will pull reentrantly from a source, ensuring that max_readahead
945
+ /// requests are active at any given time.
946
+ ///
947
+ /// The source generator must be async-reentrant
948
+ ///
949
+ /// This generator itself is async-reentrant.
950
+ ///
951
+ /// This generator may queue up to max_readahead instances of T
952
+ template <typename T>
953
+ AsyncGenerator<T> MakeReadaheadGenerator(AsyncGenerator<T> source_generator,
954
+ int max_readahead) {
955
+ return ReadaheadGenerator<T>(std::move(source_generator), max_readahead);
956
+ }
957
+
958
+ /// \brief Creates a generator that will yield finished futures from a vector
959
+ ///
960
+ /// This generator is async-reentrant
961
+ template <typename T>
962
+ AsyncGenerator<T> MakeVectorGenerator(std::vector<T> vec) {
963
+ struct State {
964
+ explicit State(std::vector<T> vec_) : vec(std::move(vec_)), vec_idx(0) {}
965
+
966
+ std::vector<T> vec;
967
+ std::atomic<std::size_t> vec_idx;
968
+ };
969
+
970
+ auto state = std::make_shared<State>(std::move(vec));
971
+ return [state]() {
972
+ auto idx = state->vec_idx.fetch_add(1);
973
+ if (idx >= state->vec.size()) {
974
+ // Eagerly return memory
975
+ state->vec.clear();
976
+ return AsyncGeneratorEnd<T>();
977
+ }
978
+ return Future<T>::MakeFinished(state->vec[idx]);
979
+ };
980
+ }
981
+
982
+ /// \see MakeMergedGenerator
983
+ template <typename T>
984
+ class MergedGenerator {
985
+ // Note, the implementation of this class is quite complex at the moment (PRs to
986
+ // simplify are always welcome)
987
+ //
988
+ // Terminology is borrowed from rxjs. This is a pull based implementation of the
989
+ // mergeAll operator. The "outer subscription" refers to the async
990
+ // generator that the caller provided when creating this. The outer subscription
991
+ // yields generators.
992
+ //
993
+ // Each of these generators is then subscribed to (up to max_subscriptions) and these
994
+ // are referred to as "inner subscriptions".
995
+ //
996
+ // As soon as we start we try and establish `max_subscriptions` inner subscriptions. For
997
+ // each inner subscription we will cache up to 1 value. This means we may have more
998
+ // values than we have been asked for. In our example, if a caller asks for one record
999
+ // batch we will start scanning `max_subscriptions` different files. For each file we
1000
+ // will only queue up to 1 batch (so a separate readahead is needed on the file if batch
1001
+ // readahead is desired).
1002
+ //
1003
+ // If the caller is slow we may accumulate ready-to-deliver items. These are stored
1004
+ // in `delivered_jobs`.
1005
+ //
1006
+ // If the caller is very quick we may accumulate requests. These are stored in
1007
+ // `waiting_jobs`.
1008
+ //
1009
+ // It may be helpful to consider an example, in the scanner the outer subscription
1010
+ // is some kind of asynchronous directory listing. The inner subscription is
1011
+ // then a scan on a file yielded by the directory listing.
1012
+ //
1013
+ // An "outstanding" request is when we have polled either the inner or outer
1014
+ // subscription but that future hasn't completed yet.
1015
+ //
1016
+ // There are three possible "events" that can happen.
1017
+ // * A caller could request the next future
1018
+ // * An outer callback occurs when the next subscription is ready (e.g. the directory
1019
+ // listing has produced a new file)
1020
+ // * An inner callback occurs when one of the inner subscriptions emits a value (e.g.
1021
+ // a file scan emits a record batch)
1022
+ //
1023
+ // Any time an event happens the logic is broken into two phases. First, we grab the
1024
+ // lock and modify the shared state. While doing this we figure out what callbacks we
1025
+ // will need to execute. Then, we give up the lock and execute these callbacks. It is
1026
+ // important to execute these callbacks without the lock to avoid deadlock.
1027
 public:
  /// Construct a merged generator that pulls from up to `max_subscriptions`
  /// inner generators (produced by `source`) concurrently.  All bookkeeping
  /// lives in a shared State so in-flight callbacks can outlive this object.
  explicit MergedGenerator(AsyncGenerator<AsyncGenerator<T>> source,
                           int max_subscriptions)
      : state_(std::make_shared<State>(std::move(source), max_subscriptions)) {}
1031
+
1032
  /// Request the next item from the merged stream.
  ///
  /// Phase 1 (under the mutex): either claim an already-delivered item, detect
  /// the broken/exhausted terminal condition, or enqueue a waiting future.
  /// Phase 2 (outside the mutex): run whatever follow-up actions phase 1
  /// decided on (mark completion, restart the delivering subscription, or
  /// kick off the initial batch of subscriptions on the first call).
  Future<T> operator()() {
    // A caller has requested a future
    Future<T> waiting_future;
    std::shared_ptr<DeliveredJob> delivered_job;
    bool mark_generator_complete = false;
    {
      auto guard = state_->mutex.Lock();
      if (!state_->delivered_jobs.empty()) {
        // If we have a job sitting around we can deliver it
        delivered_job = std::move(state_->delivered_jobs.front());
        state_->delivered_jobs.pop_front();
        if (state_->IsCompleteUnlocked(guard)) {
          // It's possible this waiting job was the only thing left to handle and
          // we have now completed the generator.
          mark_generator_complete = true;
        } else {
          // Since we had a job sitting around we also had an inner subscription
          // that had paused. We are going to restart this inner subscription and
          // so there will be a new outstanding request.
          state_->outstanding_requests++;
        }
      } else if (state_->broken ||
                 (!state_->first && state_->num_running_subscriptions == 0)) {
        // If we are broken or exhausted then prepare a terminal item but
        // we won't complete it until we've finished.
        Result<T> end_res = IterationEnd<T>();
        if (!state_->final_error.ok()) {
          // Deliver the stashed error exactly once, then reset it so later
          // pulls see a clean end-of-stream.
          end_res = state_->final_error;
          state_->final_error = Status::OK();
        }
        return state_->all_finished.Then([end_res]() -> Result<T> { return end_res; });
      } else {
        // Otherwise we just queue the request and it will be completed when one of the
        // ongoing inner subscriptions delivers a result
        waiting_future = Future<T>::Make();
        state_->waiting_jobs.push_back(std::make_shared<Future<T>>(waiting_future));
      }
      if (state_->first) {
        // On the first request we are going to try and immediately fill our queue
        // of subscriptions. We assume we are going to be able to start them all.
        state_->outstanding_requests +=
            static_cast<int>(state_->active_subscriptions.size());
        state_->num_running_subscriptions +=
            static_cast<int>(state_->active_subscriptions.size());
      }
    }
    // If we grabbed a finished item from the delivered_jobs queue then we may need
    // to mark the generator finished or issue a request for a new item to fill in
    // the spot we just vacated. Notice that we issue that request to the same
    // subscription that delivered it (deliverer).
    if (delivered_job) {
      if (mark_generator_complete) {
        state_->all_finished.MarkFinished();
      } else {
        delivered_job->deliverer().AddCallback(
            InnerCallback(state_, delivered_job->index));
      }
      return std::move(delivered_job->value);
    }
    // On the first call we try and fill up our subscriptions. It's possible the outer
    // generator only has a few items and we can't fill up to what we were hoping. In
    // that case we have to bail early.
    if (state_->first) {
      state_->first = false;
      mark_generator_complete = false;
      for (int i = 0; i < static_cast<int>(state_->active_subscriptions.size()); i++) {
        state_->PullSource().AddCallback(
            OuterCallback{state_, static_cast<std::size_t>(i)});
        // If we have to bail early then we need to update the shared state again so
        // we need to reacquire the lock.
        auto guard = state_->mutex.Lock();
        if (state_->source_exhausted) {
          // Undo the optimistic accounting done above for slots we never filled.
          int excess_requests =
              static_cast<int>(state_->active_subscriptions.size()) - i - 1;
          state_->outstanding_requests -= excess_requests;
          state_->num_running_subscriptions -= excess_requests;
          if (excess_requests > 0) {
            // It's possible that we are completing the generator by reducing the number
            // of outstanding requests (e.g. this happens when the outer subscription and
            // all inner subscriptions are synchronous)
            mark_generator_complete = state_->IsCompleteUnlocked(guard);
          }
          break;
        }
      }
      if (mark_generator_complete) {
        state_->MarkFinishedAndPurge();
      }
    }
    return waiting_future;
  }
1123
+
1124
+ private:
1125
+ struct DeliveredJob {
1126
+ explicit DeliveredJob(AsyncGenerator<T> deliverer_, Result<T> value_,
1127
+ std::size_t index_)
1128
+ : deliverer(deliverer_), value(std::move(value_)), index(index_) {}
1129
+
1130
+ // The generator that delivered this result, we will request another item
1131
+ // from this generator once the result is delivered
1132
+ AsyncGenerator<T> deliverer;
1133
+ // The result we received from the generator
1134
+ Result<T> value;
1135
+ // The index of the generator (in active_subscriptions) that delivered this
1136
+ // result. This is used if we need to replace a finished generator.
1137
+ std::size_t index;
1138
+ };
1139
+
1140
+ struct State {
1141
+ State(AsyncGenerator<AsyncGenerator<T>> source, int max_subscriptions)
1142
+ : source(std::move(source)),
1143
+ active_subscriptions(max_subscriptions),
1144
+ delivered_jobs(),
1145
+ waiting_jobs(),
1146
+ mutex(),
1147
+ first(true),
1148
+ broken(false),
1149
+ source_exhausted(false),
1150
+ outstanding_requests(0),
1151
+ num_running_subscriptions(0),
1152
+ final_error(Status::OK()) {}
1153
+
1154
+ Future<AsyncGenerator<T>> PullSource() {
1155
+ // Need to guard access to source() so we don't pull sync-reentrantly which
1156
+ // is never valid.
1157
+ auto lock = mutex.Lock();
1158
+ return source();
1159
+ }
1160
+
1161
+ void SignalErrorUnlocked(const util::Mutex::Guard& guard) {
1162
+ broken = true;
1163
+ // Empty any results that have arrived but not asked for.
1164
+ while (!delivered_jobs.empty()) {
1165
+ delivered_jobs.pop_front();
1166
+ }
1167
+ }
1168
+
1169
+ // This function is called outside the mutex but it will only ever be
1170
+ // called once
1171
+ void MarkFinishedAndPurge() {
1172
+ all_finished.MarkFinished();
1173
+ while (!waiting_jobs.empty()) {
1174
+ waiting_jobs.front()->MarkFinished(IterationEnd<T>());
1175
+ waiting_jobs.pop_front();
1176
+ }
1177
+ }
1178
+
1179
+ // This is called outside the mutex but it is only ever called
1180
+ // once and Future<>::AddCallback is thread-safe
1181
+ void MarkFinalError(const Status& err, Future<T> maybe_sink) {
1182
+ if (maybe_sink.is_valid()) {
1183
+ // Someone is waiting for this error so lets mark it complete when
1184
+ // all the work is done
1185
+ all_finished.AddCallback([maybe_sink, err](const Status& status) mutable {
1186
+ maybe_sink.MarkFinished(err);
1187
+ });
1188
+ } else {
1189
+ // No one is waiting for this error right now so it will be delivered
1190
+ // next.
1191
+ final_error = err;
1192
+ }
1193
+ }
1194
+
1195
+ bool IsCompleteUnlocked(const util::Mutex::Guard& guard) {
1196
+ return outstanding_requests == 0 &&
1197
+ (broken || (source_exhausted && num_running_subscriptions == 0 &&
1198
+ delivered_jobs.empty()));
1199
+ }
1200
+
1201
+ bool MarkTaskFinishedUnlocked(const util::Mutex::Guard& guard) {
1202
+ --outstanding_requests;
1203
+ return IsCompleteUnlocked(guard);
1204
+ }
1205
+
1206
+ // The outer generator. Each item we pull from this will be its own generator
1207
+ // and become an inner subscription
1208
+ AsyncGenerator<AsyncGenerator<T>> source;
1209
+ // active_subscriptions and delivered_jobs will be bounded by max_subscriptions
1210
+ std::vector<AsyncGenerator<T>> active_subscriptions;
1211
+ // Results delivered by the inner subscriptions that weren't yet asked for by the
1212
+ // caller
1213
+ std::deque<std::shared_ptr<DeliveredJob>> delivered_jobs;
1214
+ // waiting_jobs is unbounded, reentrant pulls (e.g. AddReadahead) will provide the
1215
+ // backpressure
1216
+ std::deque<std::shared_ptr<Future<T>>> waiting_jobs;
1217
+ // A future that will be marked complete when the terminal item has arrived and all
1218
+ // outstanding futures have completed. It is used to hold off emission of an error
1219
+ // until all outstanding work is done.
1220
+ Future<> all_finished = Future<>::Make();
1221
+ util::Mutex mutex;
1222
+ // A flag cleared when the caller firsts asks for a future. Used to start polling.
1223
+ bool first;
1224
+ // A flag set when an error arrives, prevents us from issuing new requests.
1225
+ bool broken;
1226
+ // A flag set when the outer subscription has been exhausted. Prevents us from
1227
+ // pulling it further (even though it would be generally harmless) and lets us know we
1228
+ // are finishing up.
1229
+ bool source_exhausted;
1230
+ // The number of futures that we have requested from either the outer or inner
1231
+ // subscriptions that have not yet completed. We cannot mark all_finished until this
1232
+ // reaches 0. This will never be greater than max_subscriptions
1233
+ int outstanding_requests;
1234
+ // The number of running subscriptions. We ramp this up to `max_subscriptions` as
1235
+ // soon as the first item is requested and then it stays at that level (each exhausted
1236
+ // inner subscription is replaced by a new inner subscription) until the outer
1237
+ // subscription is exhausted at which point this descends to 0 (and source_exhausted)
1238
+ // is then set to true.
1239
+ int num_running_subscriptions;
1240
+ // If an error arrives, and the caller hasn't asked for that item, we store the error
1241
+ // here. It is analagous to delivered_jobs but for errors instead of finished
1242
+ // results.
1243
+ Status final_error;
1244
+ };
1245
+
1246
  /// Callback attached to an inner subscription's future.  Invoked when one of
  /// the inner generators produces an item (or its end token, or an error).
  /// Loops instead of recursing when futures complete synchronously, to avoid
  /// unbounded stack growth.
  struct InnerCallback {
    InnerCallback(std::shared_ptr<State> state, std::size_t index, bool recursive = false)
        : state(std::move(state)), index(index), recursive(recursive) {}

    void operator()(const Result<T>& maybe_next_ref) {
      // An item has been delivered by one of the inner subscriptions
      Future<T> next_fut;
      const Result<T>* maybe_next = &maybe_next_ref;

      // When an item is delivered (and the caller has asked for it) we grab the
      // next item from the inner subscription. To avoid this behavior leading to an
      // infinite loop (this can happen if the caller's callback asks for the next item)
      // we use a while loop.
      while (true) {
        Future<T> sink;
        bool sub_finished = maybe_next->ok() && IsIterationEnd(**maybe_next);
        bool pull_next_sub = false;
        bool was_broken = false;
        bool should_mark_gen_complete = false;
        bool should_mark_final_error = false;
        {
          // Phase 1: update shared state under the lock and record which
          // follow-up actions to take; no callbacks run while locked.
          auto guard = state->mutex.Lock();
          if (state->broken) {
            // We've errored out previously so ignore the result. If anyone was waiting
            // for this they will get IterationEnd when we purge
            was_broken = true;
          } else {
            if (!sub_finished) {
              // There is a result to deliver. Either we can deliver it now or we will
              // queue it up
              if (state->waiting_jobs.empty()) {
                state->delivered_jobs.push_back(std::make_shared<DeliveredJob>(
                    state->active_subscriptions[index], *maybe_next, index));
              } else {
                sink = std::move(*state->waiting_jobs.front());
                state->waiting_jobs.pop_front();
              }
            }

            // If this is the first error then we transition the state to a broken state
            if (!maybe_next->ok()) {
              should_mark_final_error = true;
              state->SignalErrorUnlocked(guard);
            }
          }

          // If we finished this inner subscription then we need to grab a new inner
          // subscription to take its spot. If we can't (because we're broken or
          // exhausted) then we aren't going to be starting any new futures and so
          // the number of running subscriptions drops.
          pull_next_sub = sub_finished && !state->source_exhausted && !was_broken;
          if (sub_finished && !pull_next_sub) {
            state->num_running_subscriptions--;
          }
          // There are three situations we won't pull again. If an error occurred or we
          // are already finished or if no one was waiting for our result and so we queued
          // it up. We will decrement outstanding_requests and possibly mark the
          // generator completed.
          if (state->broken || (!sink.is_valid() && !sub_finished) ||
              (sub_finished && state->source_exhausted)) {
            if (state->MarkTaskFinishedUnlocked(guard)) {
              should_mark_gen_complete = true;
            }
          }
        }

        // Now we have given up the lock and we can take all the actions we decided we
        // need to take.
        if (should_mark_final_error) {
          state->MarkFinalError(maybe_next->status(), std::move(sink));
        }

        if (should_mark_gen_complete) {
          state->MarkFinishedAndPurge();
        }

        // An error occurred elsewhere so there is no need to mark any future
        // finished (will happen during the purge) or pull from anything
        if (was_broken) {
          return;
        }

        if (pull_next_sub) {
          if (recursive) {
            // Invoked synchronously from OuterCallback: report emptiness back to
            // it (via was_empty) instead of chaining another outer pull here.
            was_empty = true;
            return;
          }
          // We pulled an end token so we need to start a new subscription
          // in our spot
          state->PullSource().AddCallback(OuterCallback{state, index});
        } else if (sink.is_valid()) {
          // We pulled a valid result and there was someone waiting for it
          // so lets fetch the next result from our subscription
          sink.MarkFinished(*maybe_next);
          next_fut = state->active_subscriptions[index]();
          if (next_fut.TryAddCallback([this]() { return InnerCallback(state, index); })) {
            return;
          }
          // Already completed. Avoid very deep recursion by looping
          // here instead of relying on the callback.
          maybe_next = &next_fut.result();
          continue;
        }
        // else: We pulled a valid result but no one was waiting for it so
        // we can just stop.
        return;
      }
    }
    std::shared_ptr<State> state;
    std::size_t index;
    // True when invoked synchronously from OuterCallback's drain loop.
    bool recursive;
    // Set (only in recursive mode) when the subscription was immediately empty.
    bool was_empty = false;
  };
1359
+
1360
  /// Callback attached to the outer subscription's future.  Invoked when the
  /// source yields a new inner generator (or its end token, or an error).
  /// Installs the new generator at slot `index` and starts pulling from it,
  /// looping synchronously to avoid stack overflow on immediately-ready chains.
  struct OuterCallback {
    void operator()(const Result<AsyncGenerator<T>>& initial_maybe_next) {
      Result<AsyncGenerator<T>> maybe_next = initial_maybe_next;
      while (true) {
        // We have been given a new inner subscription
        bool should_continue = false;
        bool should_mark_gen_complete = false;
        bool should_deliver_error = false;
        bool source_exhausted = maybe_next.ok() && IsIterationEnd(*maybe_next);
        Future<T> error_sink;
        {
          // Phase 1: decide what to do under the lock; run callbacks after.
          auto guard = state->mutex.Lock();
          if (!maybe_next.ok() || source_exhausted || state->broken) {
            // If here then we will not pull any more from the outer source
            if (!state->broken && !maybe_next.ok()) {
              state->SignalErrorUnlocked(guard);
              // If here then we are the first error so we need to deliver it
              should_deliver_error = true;
              if (!state->waiting_jobs.empty()) {
                error_sink = std::move(*state->waiting_jobs.front());
                state->waiting_jobs.pop_front();
              }
            }
            if (source_exhausted) {
              state->source_exhausted = true;
              state->num_running_subscriptions--;
            }
            if (state->MarkTaskFinishedUnlocked(guard)) {
              should_mark_gen_complete = true;
            }
          } else {
            state->active_subscriptions[index] = *maybe_next;
            should_continue = true;
          }
        }
        if (should_deliver_error) {
          state->MarkFinalError(maybe_next.status(), std::move(error_sink));
        }
        if (should_mark_gen_complete) {
          state->MarkFinishedAndPurge();
        }
        if (should_continue) {
          // There is a possibility that a large sequence of immediately available inner
          // callbacks could lead to a stack overflow. To avoid this we need to
          // synchronously loop through inner/outer callbacks until we either find an
          // unfinished future or we find an actual item to deliver.
          Future<T> next_item = (*maybe_next)();
          if (!next_item.TryAddCallback([this] { return InnerCallback(state, index); })) {
            // By setting recursive to true we signal to the inner callback that, if it is
            // empty, instead of adding a new outer callback, it should just immediately
            // return, flagging was_empty so that we know we need to check the next
            // subscription.
            InnerCallback immediate_inner(state, index, /*recursive=*/true);
            immediate_inner(next_item.result());
            if (immediate_inner.was_empty) {
              Future<AsyncGenerator<T>> next_source = state->PullSource();
              if (next_source.TryAddCallback([this] {
                    return OuterCallback{state, index};
                  })) {
                // We hit an unfinished future so we can stop looping
                return;
              }
              // The current subscription was immediately and synchronously empty
              // and we were able to synchronously pull the next subscription so we
              // can keep looping.
              maybe_next = next_source.result();
              continue;
            }
          }
        }
        return;
      }
    }
    std::shared_ptr<State> state;
    std::size_t index;
  };
1436
+
1437
+ std::shared_ptr<State> state_;
1438
+ };
1439
+
1440
/// \brief Create a generator that takes in a stream of generators and pulls from up to
/// max_subscriptions at a time
///
/// Note: This may deliver items out of sequence. For example, items from the third
/// AsyncGenerator generated by the source may be emitted before some items from the first
/// AsyncGenerator generated by the source.
///
/// This generator will pull from source async-reentrantly unless max_subscriptions is 1
/// This generator will not pull from the individual subscriptions reentrantly. Add
/// readahead to the individual subscriptions if that is desired.
/// This generator is async-reentrant
///
/// This generator may queue up to max_subscriptions instances of T
///
/// \param source stream of inner generators to merge
/// \param max_subscriptions maximum number of inner generators pulled concurrently
template <typename T>
AsyncGenerator<T> MakeMergedGenerator(AsyncGenerator<AsyncGenerator<T>> source,
                                      int max_subscriptions) {
  return MergedGenerator<T>(std::move(source), max_subscriptions);
}
1458
+
1459
+ template <typename T>
1460
+ Result<AsyncGenerator<T>> MakeSequencedMergedGenerator(
1461
+ AsyncGenerator<AsyncGenerator<T>> source, int max_subscriptions) {
1462
+ if (max_subscriptions < 0) {
1463
+ return Status::Invalid("max_subscriptions must be a positive integer");
1464
+ }
1465
+ if (max_subscriptions == 1) {
1466
+ return Status::Invalid("Use MakeConcatenatedGenerator if max_subscriptions is 1");
1467
+ }
1468
+ AsyncGenerator<AsyncGenerator<T>> autostarting_source = MakeMappedGenerator(
1469
+ std::move(source),
1470
+ [](const AsyncGenerator<T>& sub) { return MakeAutoStartingGenerator(sub); });
1471
+ AsyncGenerator<AsyncGenerator<T>> sub_readahead =
1472
+ MakeSerialReadaheadGenerator(std::move(autostarting_source), max_subscriptions - 1);
1473
+ return MakeConcatenatedGenerator(std::move(sub_readahead));
1474
+ }
1475
+
1476
/// \brief Create a generator that takes in a stream of generators and pulls from each
/// one in sequence.
///
/// This generator is async-reentrant but will never pull from source reentrantly and
/// will never pull from any subscription reentrantly.
///
/// This generator may queue 1 instance of T
///
/// Implemented as a MergedGenerator limited to a single subscription, which
/// degenerates to in-order concatenation.
///
/// TODO: Could potentially make a bespoke implementation instead of MergedGenerator that
/// forwards async-reentrant requests instead of buffering them (which is what
/// MergedGenerator does)
template <typename T>
AsyncGenerator<T> MakeConcatenatedGenerator(AsyncGenerator<AsyncGenerator<T>> source) {
  return MergedGenerator<T>(std::move(source), 1);
}
1491
+
1492
/// An item paired with its position in the stream that produced it.
template <typename T>
struct Enumerated {
  // The wrapped value.
  T value;
  // Zero-based position of the value in the stream; -1 marks the end token.
  int index;
  // True if this is the final value of the stream.
  bool last;
};
1498
+
1499
/// IterationTraits specialization: an Enumerated end token is encoded with a
/// negative index (the wrapped value itself is the inner type's end token).
template <typename T>
struct IterationTraits<Enumerated<T>> {
  static Enumerated<T> End() { return Enumerated<T>{IterationEnd<T>(), -1, false}; }
  static bool IsEnd(const Enumerated<T>& val) { return val.index < 0; }
};
1504
+
1505
+ /// \see MakeEnumeratedGenerator
1506
+ template <typename T>
1507
+ class EnumeratingGenerator {
1508
+ public:
1509
+ EnumeratingGenerator(AsyncGenerator<T> source, T initial_value)
1510
+ : state_(std::make_shared<State>(std::move(source), std::move(initial_value))) {}
1511
+
1512
+ Future<Enumerated<T>> operator()() {
1513
+ if (state_->finished) {
1514
+ return AsyncGeneratorEnd<Enumerated<T>>();
1515
+ } else {
1516
+ auto state = state_;
1517
+ return state->source().Then([state](const T& next) {
1518
+ auto finished = IsIterationEnd<T>(next);
1519
+ auto prev = Enumerated<T>{state->prev_value, state->prev_index, finished};
1520
+ state->prev_value = next;
1521
+ state->prev_index++;
1522
+ state->finished = finished;
1523
+ return prev;
1524
+ });
1525
+ }
1526
+ }
1527
+
1528
+ private:
1529
+ struct State {
1530
+ State(AsyncGenerator<T> source, T initial_value)
1531
+ : source(std::move(source)), prev_value(std::move(initial_value)), prev_index(0) {
1532
+ finished = IsIterationEnd<T>(prev_value);
1533
+ }
1534
+
1535
+ AsyncGenerator<T> source;
1536
+ T prev_value;
1537
+ int prev_index;
1538
+ bool finished;
1539
+ };
1540
+
1541
+ std::shared_ptr<State> state_;
1542
+ };
1543
+
1544
/// Wrap items from a source generator with positional information
///
/// When used with MakeMergedGenerator and MakeSequencingGenerator this allows items to be
/// processed in a "first-available" fashion and later resequenced which can reduce the
/// impact of sources with erratic performance (e.g. a filesystem where some items may
/// take longer to read than others).
///
/// TODO(ARROW-12371) Would require this generator be async-reentrant
///
/// \see MakeSequencingGenerator for an example of putting items back in order
///
/// This generator is not async-reentrant
///
/// This generator buffers one item (so it knows which item is the last item)
///
/// The first item is pulled eagerly here so EnumeratingGenerator always has a
/// cached "previous" value; FutureFirstGenerator bridges the resulting
/// Future<AsyncGenerator> back to a plain generator.
template <typename T>
AsyncGenerator<Enumerated<T>> MakeEnumeratedGenerator(AsyncGenerator<T> source) {
  return FutureFirstGenerator<Enumerated<T>>(
      source().Then([source](const T& initial_value) -> AsyncGenerator<Enumerated<T>> {
        return EnumeratingGenerator<T>(std::move(source), initial_value);
      }));
}
1565
+
1566
+ /// \see MakeTransferredGenerator
1567
+ template <typename T>
1568
+ class TransferringGenerator {
1569
+ public:
1570
+ explicit TransferringGenerator(AsyncGenerator<T> source, internal::Executor* executor)
1571
+ : source_(std::move(source)), executor_(executor) {}
1572
+
1573
+ Future<T> operator()() { return executor_->Transfer(source_()); }
1574
+
1575
+ private:
1576
+ AsyncGenerator<T> source_;
1577
+ internal::Executor* executor_;
1578
+ };
1579
+
1580
/// \brief Transfer a future to an underlying executor.
///
/// Continuations run on the returned future will be run on the given executor
/// if they cannot be run synchronously.
///
/// This is often needed to move computation off I/O threads or other external
/// completion sources and back on to the CPU executor so the I/O thread can
/// stay busy and focused on I/O
///
/// Keep in mind that continuations called on an already completed future will
/// always be run synchronously and so no transfer will happen in that case.
///
/// This generator is async reentrant if the source is
///
/// This generator will not queue
///
/// \param source the generator whose futures should be transferred
/// \param executor the executor continuations should run on
template <typename T>
AsyncGenerator<T> MakeTransferredGenerator(AsyncGenerator<T> source,
                                           internal::Executor* executor) {
  return TransferringGenerator<T>(std::move(source), executor);
}
1600
+
1601
/// \see MakeBackgroundGenerator
///
/// Adapts a synchronous Iterator<T> into an AsyncGenerator<T> by running the
/// iterator on `io_executor`.  The background task fills a queue of up to
/// `max_q` items, stops when full, and is restarted once the consumer drains
/// the queue to `q_restart`.
template <typename T>
class BackgroundGenerator {
 public:
  explicit BackgroundGenerator(Iterator<T> it, internal::Executor* io_executor, int max_q,
                               int q_restart)
      : state_(std::make_shared<State>(io_executor, std::move(it), max_q, q_restart)),
        cleanup_(std::make_shared<Cleanup>(state_.get())) {}

  /// Deliver the next queued item, or register a waiting future if the queue
  /// is empty.  Restarts the background task if the queue has drained enough.
  Future<T> operator()() {
    auto guard = state_->mutex.Lock();
    Future<T> waiting_future;
    if (state_->queue.empty()) {
      if (state_->finished) {
        return AsyncGeneratorEnd<T>();
      } else {
        waiting_future = Future<T>::Make();
        state_->waiting_future = waiting_future;
      }
    } else {
      auto next = Future<T>::MakeFinished(std::move(state_->queue.front()));
      state_->queue.pop();
      if (state_->NeedsRestart()) {
        return state_->RestartTask(state_, std::move(guard), std::move(next));
      }
      return next;
    }
    // This should only trigger the very first time this method is called
    if (state_->NeedsRestart()) {
      return state_->RestartTask(state_, std::move(guard), std::move(waiting_future));
    }
    return waiting_future;
  }

 protected:
  // Sentinel meaning "no worker thread is currently running".
  static constexpr uint64_t kUnlikelyThreadId{std::numeric_limits<uint64_t>::max()};

  /// Shared state between the consumer-facing generator, the background worker
  /// task, and the Cleanup guard.  All mutable fields are guarded by `mutex`.
  struct State {
    State(internal::Executor* io_executor, Iterator<T> it, int max_q, int q_restart)
        : io_executor(io_executor),
          max_q(max_q),
          q_restart(q_restart),
          it(std::move(it)),
          reading(false),
          finished(false),
          should_shutdown(false) {}

    void ClearQueue() {
      while (!queue.empty()) {
        queue.pop();
      }
    }

    bool TaskIsRunning() const { return task_finished.is_valid(); }

    bool NeedsRestart() const {
      return !finished && !reading && static_cast<int>(queue.size()) <= q_restart;
    }

    // Spawn a fresh worker task.  Must be called with `guard` held.
    void DoRestartTask(std::shared_ptr<State> state, util::Mutex::Guard guard) {
      // If we get here we are actually going to start a new task so let's create a
      // task_finished future for it
      state->task_finished = Future<>::Make();
      state->reading = true;
      auto spawn_status = io_executor->Spawn(
          [state]() { BackgroundGenerator::WorkerTask(std::move(state)); });
      if (!spawn_status.ok()) {
        // If we can't spawn a new task then send an error to the consumer (either via a
        // waiting future or the queue) and mark ourselves finished
        state->finished = true;
        state->task_finished = Future<>();
        if (waiting_future.has_value()) {
          auto to_deliver = std::move(waiting_future.value());
          waiting_future.reset();
          // Release the lock before completing the consumer's future so its
          // continuations don't run while we hold the mutex.
          guard.Unlock();
          to_deliver.MarkFinished(spawn_status);
        } else {
          ClearQueue();
          queue.push(spawn_status);
        }
      }
    }

    Future<T> RestartTask(std::shared_ptr<State> state, util::Mutex::Guard guard,
                          Future<T> next) {
      if (TaskIsRunning()) {
        // If the task is still cleaning up we need to wait for it to finish before
        // restarting. We also want to block the consumer until we've restarted the
        // reader to avoid multiple restarts
        return task_finished.Then([state, next]() {
          // This may appear dangerous (recursive mutex) but we should be guaranteed the
          // outer guard has been released by this point. We know...
          // * task_finished is not already finished (it would be invalid in that case)
          // * task_finished will not be marked complete until we've given up the mutex
          auto guard_ = state->mutex.Lock();
          state->DoRestartTask(state, std::move(guard_));
          return next;
        });
      }
      // Otherwise we can restart immediately
      DoRestartTask(std::move(state), std::move(guard));
      return next;
    }

    internal::Executor* io_executor;
    const int max_q;
    const int q_restart;
    Iterator<T> it;
    std::atomic<uint64_t> worker_thread_id{kUnlikelyThreadId};

    // If true, the task is actively pumping items from the queue and does not need a
    // restart
    bool reading;
    // Set to true when a terminal item arrives
    bool finished;
    // Signal to the background task to end early because consumers have given up on it
    bool should_shutdown;
    // If the queue is empty, the consumer will create a waiting future and wait for it
    std::queue<Result<T>> queue;
    std::optional<Future<T>> waiting_future;
    // Every background task is given a future to complete when it is entirely finished
    // processing and ready for the next task to start or for State to be destroyed
    Future<> task_finished;
    util::Mutex mutex;
  };

  // Cleanup task that will be run when all consumer references to the generator are lost
  struct Cleanup {
    explicit Cleanup(State* state) : state(state) {}
    ~Cleanup() {
      /// TODO: Once ARROW-13109 is available then we can force consumers to spawn and
      /// there is no need to perform this check.
      ///
      /// It's a deadlock if we enter cleanup from
      /// the worker thread but it can happen if the consumer doesn't transfer away
      assert(state->worker_thread_id.load() != ::arrow::internal::GetThreadId());
      Future<> finish_fut;
      {
        auto lock = state->mutex.Lock();
        if (!state->TaskIsRunning()) {
          return;
        }
        // Signal the current task to stop and wait for it to finish
        state->should_shutdown = true;
        finish_fut = state->task_finished;
      }
      // Using future as a condition variable here
      Status st = finish_fut.status();
      ARROW_UNUSED(st);
    }
    State* state;
  };

  /// Body of the background task: pump items from the iterator into the queue
  /// (or straight into a waiting future) until the queue fills, the iterator
  /// ends/errors, or shutdown is requested.
  static void WorkerTask(std::shared_ptr<State> state) {
    state->worker_thread_id.store(::arrow::internal::GetThreadId());
    // We need to capture the state to read while outside the mutex
    bool reading = true;
    while (reading) {
      auto next = state->it.Next();
      // Need to capture state->waiting_future inside the mutex to mark finished outside
      Future<T> waiting_future;
      {
        auto guard = state->mutex.Lock();

        if (state->should_shutdown) {
          state->finished = true;
          break;
        }

        if (!next.ok() || IsIterationEnd<T>(*next)) {
          // Terminal item. Mark finished to true, send this last item, and quit
          state->finished = true;
          if (!next.ok()) {
            state->ClearQueue();
          }
        }
        // At this point we are going to send an item. Either we will add it to the
        // queue or deliver it to a waiting future.
        if (state->waiting_future.has_value()) {
          waiting_future = std::move(state->waiting_future.value());
          state->waiting_future.reset();
        } else {
          state->queue.push(std::move(next));
          // We just filled up the queue so it is time to quit. We may need to notify
          // a cleanup task so we transition to Quitting
          if (static_cast<int>(state->queue.size()) >= state->max_q) {
            state->reading = false;
          }
        }
        reading = state->reading && !state->finished;
      }
      // This should happen outside the mutex. Presumably there is a
      // transferring generator on the other end that will quickly transfer any
      // callbacks off of this thread so we can continue looping. Still, best not to
      // rely on that
      if (waiting_future.is_valid()) {
        waiting_future.MarkFinished(next);
      }
    }
    // Once we've sent our last item we can notify any waiters that we are done and so
    // either state can be cleaned up or a new background task can be started
    Future<> task_finished;
    {
      auto guard = state->mutex.Lock();
      // After we give up the mutex state can be safely deleted. We will no longer
      // reference it. We can safely transition to idle now.
      task_finished = state->task_finished;
      state->task_finished = Future<>();
      state->worker_thread_id.store(kUnlikelyThreadId);
    }
    task_finished.MarkFinished();
  }

  std::shared_ptr<State> state_;
  // state_ is held by both the generator and the background thread so it won't be cleaned
  // up when all consumer references are relinquished. cleanup_ is only held by the
  // generator so it will be destructed when the last consumer reference is gone. We use
  // this to cleanup / stop the background generator in case the consuming end stops
  // listening (e.g. due to a downstream error)
  std::shared_ptr<Cleanup> cleanup_;
};
1822
+
1823
+ constexpr int kDefaultBackgroundMaxQ = 32;
1824
+ constexpr int kDefaultBackgroundQRestart = 16;
1825
+
1826
+ /// \brief Create an AsyncGenerator<T> by iterating over an Iterator<T> on a background
1827
+ /// thread
1828
+ ///
1829
+ /// The parameter max_q and q_restart control queue size and background thread task
1830
+ /// management. If the background task is fast you typically don't want it creating a
1831
+ /// thread task for every item. Instead the background thread will run until it fills
1832
+ /// up a readahead queue.
1833
+ ///
1834
+ /// Once the queue has filled up the background thread task will terminate (allowing other
1835
+ /// I/O tasks to use the thread). Once the queue has been drained enough (specified by
1836
+ /// q_restart) then the background thread task will be restarted. If q_restart is too low
1837
+ /// then you may exhaust the queue waiting for the background thread task to start running
1838
+ /// again. If it is too high then it will be constantly stopping and restarting the
1839
+ /// background queue task
1840
+ ///
1841
+ /// The "background thread" is a logical thread and will run as tasks on the io_executor.
1842
+ /// This thread may stop and start when the queue fills up but there will only be one
1843
+ /// active background thread task at any given time. You MUST transfer away from this
1844
+ /// background generator. Otherwise there could be a race condition if a callback on the
1845
+ /// background thread deletes the last consumer reference to the background generator. You
1846
+ /// can transfer onto the same executor as the background thread, it is only necessary to
1847
+ /// create a new thread task, not to switch executors.
1848
+ ///
1849
+ /// This generator is not async-reentrant
1850
+ ///
1851
+ /// This generator will queue up to max_q blocks
1852
+ template <typename T>
1853
+ static Result<AsyncGenerator<T>> MakeBackgroundGenerator(
1854
+ Iterator<T> iterator, internal::Executor* io_executor,
1855
+ int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart) {
1856
+ if (max_q < q_restart) {
1857
+ return Status::Invalid("max_q must be >= q_restart");
1858
+ }
1859
+ return BackgroundGenerator<T>(std::move(iterator), io_executor, max_q, q_restart);
1860
+ }
1861
+
1862
+ /// \brief Create an AsyncGenerator<T> by iterating over an Iterator<T> synchronously
1863
+ ///
1864
+ /// This should only be used if you know the source iterator does not involve any
1865
+ /// I/O (or other blocking calls). Otherwise a CPU thread will be blocked and, depending
1866
+ /// on the complexity of the iterator, it may lead to deadlock.
1867
+ ///
1868
+ /// If you are not certain if there will be I/O then it is better to use
1869
+ /// MakeBackgroundGenerator. If helpful you can think of this as the AsyncGenerator
1870
+ /// equivalent of Future::MakeFinished
1871
+ ///
1872
+ /// It is impossible to call this in an async-reentrant manner since the returned
1873
+ /// future will be completed by the time it is polled.
1874
+ ///
1875
+ /// This generator does not queue
1876
+ template <typename T>
1877
+ static Result<AsyncGenerator<T>> MakeBlockingGenerator(
1878
+ std::shared_ptr<Iterator<T>> iterator) {
1879
+ return [it = std::move(iterator)]() mutable -> Future<T> {
1880
+ return Future<T>::MakeFinished(it->Next());
1881
+ };
1882
+ }
1883
+
1884
+ template <typename T>
1885
+ static Result<AsyncGenerator<T>> MakeBlockingGenerator(Iterator<T> iterator) {
1886
+ return MakeBlockingGenerator(std::make_shared<Iterator<T>>(std::move(iterator)));
1887
+ }
1888
+
1889
+ /// \see MakeGeneratorIterator
1890
+ template <typename T>
1891
+ class GeneratorIterator {
1892
+ public:
1893
+ explicit GeneratorIterator(AsyncGenerator<T> source) : source_(std::move(source)) {}
1894
+
1895
+ Result<T> Next() { return source_().result(); }
1896
+
1897
+ private:
1898
+ AsyncGenerator<T> source_;
1899
+ };
1900
+
1901
+ /// \brief Convert an AsyncGenerator<T> to an Iterator<T> which blocks until each future
1902
+ /// is finished
1903
+ template <typename T>
1904
+ Iterator<T> MakeGeneratorIterator(AsyncGenerator<T> source) {
1905
+ return Iterator<T>(GeneratorIterator<T>(std::move(source)));
1906
+ }
1907
+
1908
+ /// \brief Add readahead to an iterator using a background thread.
1909
+ ///
1910
+ /// Under the hood this is converting the iterator to a generator using
1911
+ /// MakeBackgroundGenerator, adding readahead to the converted generator with
1912
+ /// MakeReadaheadGenerator, and then converting back to an iterator using
1913
+ /// MakeGeneratorIterator.
1914
+ template <typename T>
1915
+ Result<Iterator<T>> MakeReadaheadIterator(Iterator<T> it, int readahead_queue_size) {
1916
+ ARROW_ASSIGN_OR_RAISE(auto io_executor, internal::ThreadPool::Make(1));
1917
+ auto max_q = readahead_queue_size;
1918
+ auto q_restart = std::max(1, max_q / 2);
1919
+ ARROW_ASSIGN_OR_RAISE(
1920
+ auto background_generator,
1921
+ MakeBackgroundGenerator(std::move(it), io_executor.get(), max_q, q_restart));
1922
+ // Capture io_executor to keep it alive as long as owned_bg_generator is still
1923
+ // referenced
1924
+ AsyncGenerator<T> owned_bg_generator = [io_executor, background_generator]() {
1925
+ return background_generator();
1926
+ };
1927
+ return MakeGeneratorIterator(std::move(owned_bg_generator));
1928
+ }
1929
+
1930
+ /// \brief Make a generator that returns a single pre-generated future
1931
+ ///
1932
+ /// This generator is async-reentrant.
1933
+ template <typename T>
1934
+ std::function<Future<T>()> MakeSingleFutureGenerator(Future<T> future) {
1935
+ assert(future.is_valid());
1936
+ auto state = std::make_shared<Future<T>>(std::move(future));
1937
+ return [state]() -> Future<T> {
1938
+ auto fut = std::move(*state);
1939
+ if (fut.is_valid()) {
1940
+ return fut;
1941
+ } else {
1942
+ return AsyncGeneratorEnd<T>();
1943
+ }
1944
+ };
1945
+ }
1946
+
1947
+ /// \brief Make a generator that immediately ends.
1948
+ ///
1949
+ /// This generator is async-reentrant.
1950
+ template <typename T>
1951
+ std::function<Future<T>()> MakeEmptyGenerator() {
1952
+ return []() -> Future<T> { return AsyncGeneratorEnd<T>(); };
1953
+ }
1954
+
1955
+ /// \brief Make a generator that always fails with a given error
1956
+ ///
1957
+ /// This generator is async-reentrant.
1958
+ template <typename T>
1959
+ AsyncGenerator<T> MakeFailingGenerator(Status st) {
1960
+ assert(!st.ok());
1961
+ auto state = std::make_shared<Status>(std::move(st));
1962
+ return [state]() -> Future<T> {
1963
+ auto st = std::move(*state);
1964
+ if (!st.ok()) {
1965
+ return st;
1966
+ } else {
1967
+ return AsyncGeneratorEnd<T>();
1968
+ }
1969
+ };
1970
+ }
1971
+
1972
+ /// \brief Make a generator that always fails with a given error
1973
+ ///
1974
+ /// This overload allows inferring the return type from the argument.
1975
+ template <typename T>
1976
+ AsyncGenerator<T> MakeFailingGenerator(const Result<T>& result) {
1977
+ return MakeFailingGenerator<T>(result.status());
1978
+ }
1979
+
1980
+ /// \brief Prepend initial_values onto a generator
1981
+ ///
1982
+ /// This generator is async-reentrant but will buffer requests and will not
1983
+ /// pull from following_values async-reentrantly.
1984
+ template <typename T>
1985
+ AsyncGenerator<T> MakeGeneratorStartsWith(std::vector<T> initial_values,
1986
+ AsyncGenerator<T> following_values) {
1987
+ auto initial_values_vec_gen = MakeVectorGenerator(std::move(initial_values));
1988
+ auto gen_gen = MakeVectorGenerator<AsyncGenerator<T>>(
1989
+ {std::move(initial_values_vec_gen), std::move(following_values)});
1990
+ return MakeConcatenatedGenerator(std::move(gen_gen));
1991
+ }
1992
+
1993
+ template <typename T>
1994
+ struct CancellableGenerator {
1995
+ Future<T> operator()() {
1996
+ if (stop_token.IsStopRequested()) {
1997
+ return stop_token.Poll();
1998
+ }
1999
+ return source();
2000
+ }
2001
+
2002
+ AsyncGenerator<T> source;
2003
+ StopToken stop_token;
2004
+ };
2005
+
2006
+ /// \brief Allow an async generator to be cancelled
2007
+ ///
2008
+ /// This generator is async-reentrant
2009
+ template <typename T>
2010
+ AsyncGenerator<T> MakeCancellable(AsyncGenerator<T> source, StopToken stop_token) {
2011
+ return CancellableGenerator<T>{std::move(source), std::move(stop_token)};
2012
+ }
2013
+
2014
+ template <typename T>
2015
+ class DefaultIfEmptyGenerator {
2016
+ public:
2017
+ DefaultIfEmptyGenerator(AsyncGenerator<T> source, T or_value)
2018
+ : state_(std::make_shared<State>(std::move(source), std::move(or_value))) {}
2019
+
2020
+ Future<T> operator()() {
2021
+ if (state_->first) {
2022
+ state_->first = false;
2023
+ struct {
2024
+ T or_value;
2025
+
2026
+ Result<T> operator()(const T& value) {
2027
+ if (IterationTraits<T>::IsEnd(value)) {
2028
+ return std::move(or_value);
2029
+ }
2030
+ return value;
2031
+ }
2032
+ } Continuation;
2033
+ Continuation.or_value = std::move(state_->or_value);
2034
+ return state_->source().Then(std::move(Continuation));
2035
+ }
2036
+ return state_->source();
2037
+ }
2038
+
2039
+ private:
2040
+ struct State {
2041
+ AsyncGenerator<T> source;
2042
+ T or_value;
2043
+ bool first;
2044
+ State(AsyncGenerator<T> source_, T or_value_)
2045
+ : source(std::move(source_)), or_value(std::move(or_value_)), first(true) {}
2046
+ };
2047
+ std::shared_ptr<State> state_;
2048
+ };
2049
+
2050
+ /// \brief If the generator is empty, return the given value, else
2051
+ /// forward the values from the generator.
2052
+ ///
2053
+ /// This generator is async-reentrant.
2054
+ template <typename T>
2055
+ AsyncGenerator<T> MakeDefaultIfEmptyGenerator(AsyncGenerator<T> source, T or_value) {
2056
+ return DefaultIfEmptyGenerator<T>(std::move(source), std::move(or_value));
2057
+ }
2058
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+
22
+ #include "arrow/type_fwd.h"
23
+
24
+ namespace arrow {
25
+
26
+ template <typename T>
27
+ using AsyncGenerator = std::function<Future<T>()>;
28
+
29
+ template <typename T, typename V>
30
+ class MappingGenerator;
31
+
32
+ template <typename T, typename ComesAfter, typename IsNext>
33
+ class SequencingGenerator;
34
+
35
+ template <typename T, typename V>
36
+ class TransformingGenerator;
37
+
38
+ template <typename T>
39
+ class SerialReadaheadGenerator;
40
+
41
+ template <typename T>
42
+ class ReadaheadGenerator;
43
+
44
+ template <typename T>
45
+ class PushGenerator;
46
+
47
+ template <typename T>
48
+ class MergedGenerator;
49
+
50
+ template <typename T>
51
+ struct Enumerated;
52
+
53
+ template <typename T>
54
+ class EnumeratingGenerator;
55
+
56
+ template <typename T>
57
+ class TransferringGenerator;
58
+
59
+ template <typename T>
60
+ class BackgroundGenerator;
61
+
62
+ template <typename T>
63
+ class GeneratorIterator;
64
+
65
+ template <typename T>
66
+ struct CancellableGenerator;
67
+
68
+ template <typename T>
69
+ class DefaultIfEmptyGenerator;
70
+
71
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+ #include <string_view>
22
+
23
+ #include "arrow/util/visibility.h"
24
+
25
+ namespace arrow {
26
+ namespace util {
27
+
28
+ ARROW_EXPORT
29
+ std::string base64_encode(std::string_view s);
30
+
31
+ ARROW_EXPORT
32
+ std::string base64_decode(std::string_view s);
33
+
34
+ } // namespace util
35
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string_view>
21
+ #include <utility>
22
+
23
+ #include "arrow/type.h"
24
+ #include "arrow/util/span.h"
25
+
26
+ namespace arrow::util {
27
+
28
+ inline BinaryViewType::c_type ToInlineBinaryView(const void* data, int32_t size) {
29
+ assert(size <= BinaryViewType::kInlineSize);
30
+ // Small string: inlined. Bytes beyond size are zeroed
31
+ BinaryViewType::c_type out;
32
+ out.inlined = {size, {}};
33
+ memcpy(&out.inlined.data, data, size);
34
+ return out;
35
+ }
36
+
37
+ inline BinaryViewType::c_type ToInlineBinaryView(std::string_view v) {
38
+ assert(v.size() <= BinaryViewType::kInlineSize);
39
+ return ToInlineBinaryView(v.data(), static_cast<int32_t>(v.size()));
40
+ }
41
+
42
+ inline BinaryViewType::c_type ToNonInlineBinaryView(const void* data, int32_t size,
43
+ int32_t buffer_index,
44
+ int32_t offset) {
45
+ // Large string: store index/offset.
46
+ BinaryViewType::c_type out;
47
+ out.ref = {size, {}, buffer_index, offset};
48
+ memcpy(&out.ref.prefix, data, sizeof(out.ref.prefix));
49
+ return out;
50
+ }
51
+
52
+ inline BinaryViewType::c_type ToBinaryView(const void* data, int32_t size,
53
+ int32_t buffer_index, int32_t offset) {
54
+ if (size <= BinaryViewType::kInlineSize) {
55
+ return ToInlineBinaryView(data, size);
56
+ }
57
+ return ToNonInlineBinaryView(data, size, buffer_index, offset);
58
+ }
59
+
60
+ inline BinaryViewType::c_type ToBinaryView(std::string_view v, int32_t buffer_index,
61
+ int32_t offset) {
62
+ return ToBinaryView(v.data(), static_cast<int32_t>(v.size()), buffer_index, offset);
63
+ }
64
+
65
+ template <typename BufferPtr>
66
+ std::string_view FromBinaryView(const BinaryViewType::c_type& v,
67
+ const BufferPtr* data_buffers) {
68
+ auto* data = v.is_inline() ? v.inlined.data.data()
69
+ : data_buffers[v.ref.buffer_index]->data() + v.ref.offset;
70
+ return {reinterpret_cast<const char*>(data), static_cast<size_t>(v.size())};
71
+ }
72
+ template <typename BufferPtr>
73
+ std::string_view FromBinaryView(BinaryViewType::c_type&&, const BufferPtr*) = delete;
74
+
75
/// \brief Compare two binary views for byte equality.
///
/// The views may live in different arrays, so each gets its own data-buffer
/// vector to resolve out-of-line references.
template <typename BufferPtr>
bool EqualBinaryView(BinaryViewType::c_type l, BinaryViewType::c_type r,
                     const BufferPtr* l_buffers, const BufferPtr* r_buffers) {
  // The first 8 bytes of a view hold the size and the 4-byte prefix, so a
  // single word comparison checks both at once.
  int64_t l_size_and_prefix, r_size_and_prefix;
  memcpy(&l_size_and_prefix, &l, sizeof(l_size_and_prefix));
  memcpy(&r_size_and_prefix, &r, sizeof(r_size_and_prefix));

  if (l_size_and_prefix != r_size_and_prefix) return false;

  // Sizes are now known equal, so if one view is inline, both are.
  if (l.is_inline()) {
    // The columnar spec mandates that the inlined part be zero-padded, so we can compare
    // a word at a time regardless of the exact size.
    int64_t l_inlined, r_inlined;
    memcpy(&l_inlined, l.inline_data() + BinaryViewType::kPrefixSize, sizeof(l_inlined));
    memcpy(&r_inlined, r.inline_data() + BinaryViewType::kPrefixSize, sizeof(r_inlined));
    return l_inlined == r_inlined;
  }

  // Sizes are equal and this is not inline, therefore both are out
  // of line and have kPrefixSize first in common.
  const uint8_t* l_data = l_buffers[l.ref.buffer_index]->data() + l.ref.offset;
  const uint8_t* r_data = r_buffers[r.ref.buffer_index]->data() + r.ref.offset;
  // Only the bytes after the (already-compared) prefix need checking.
  return memcmp(l_data + BinaryViewType::kPrefixSize,
                r_data + BinaryViewType::kPrefixSize,
                l.size() - BinaryViewType::kPrefixSize) == 0;
}
102
+ /// \brief Compute the total size of a list of binary views including null
103
+ /// views.
104
+ ///
105
+ /// This is useful when calculating the necessary memory to store all the string
106
+ /// data from the views.
107
+ inline int64_t SumOfBinaryViewSizes(const BinaryViewType::c_type* views, int64_t length) {
108
+ int64_t total = 0;
109
+ for (int64_t i = 0; i < length; ++i) {
110
+ total += views[i].size();
111
+ }
112
+ return total;
113
+ }
114
+
115
+ } // namespace arrow::util
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h ADDED
@@ -0,0 +1,515 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cassert>
21
+ #include <cstdint>
22
+ #include <cstring>
23
+ #include <string>
24
+
25
+ #include "arrow/util/bit_util.h"
26
+ #include "arrow/util/bitmap_reader.h"
27
+ #include "arrow/util/endian.h"
28
+ #include "arrow/util/macros.h"
29
+ #include "arrow/util/visibility.h"
30
+
31
+ namespace arrow {
32
+ namespace internal {
33
+
34
// A run of consecutive identical bits in a bitmap.
struct BitRun {
  int64_t length;
  // Whether bits are set at this point.
  bool set;

  std::string ToString() const {
    std::string repr = "{Length: ";
    repr += std::to_string(length);
    repr += ", set=";
    repr += std::to_string(set);
    repr += "}";
    return repr;
  }
};

inline bool operator==(const BitRun& lhs, const BitRun& rhs) {
  if (lhs.length != rhs.length) return false;
  return lhs.set == rhs.set;
}

inline bool operator!=(const BitRun& lhs, const BitRun& rhs) { return !(lhs == rhs); }
52
+
53
+ class BitRunReaderLinear {
54
+ public:
55
+ BitRunReaderLinear(const uint8_t* bitmap, int64_t start_offset, int64_t length)
56
+ : reader_(bitmap, start_offset, length) {}
57
+
58
+ BitRun NextRun() {
59
+ BitRun rl = {/*length=*/0, reader_.IsSet()};
60
+ // Advance while the values are equal and not at the end of list.
61
+ while (reader_.position() < reader_.length() && reader_.IsSet() == rl.set) {
62
+ rl.length++;
63
+ reader_.Next();
64
+ }
65
+ return rl;
66
+ }
67
+
68
+ private:
69
+ BitmapReader reader_;
70
+ };
71
+
72
#if ARROW_LITTLE_ENDIAN
/// A convenience class for counting the number of contiguous set/unset bits
/// in a bitmap.
class ARROW_EXPORT BitRunReader {
 public:
  /// \brief Constructs new BitRunReader.
  ///
  /// \param[in] bitmap source data
  /// \param[in] start_offset bit offset into the source data
  /// \param[in] length number of bits to copy
  BitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length);

  /// Returns a new BitRun containing the number of contiguous
  /// bits with the same value.  length == 0 indicates the
  /// end of the bitmap.
  BitRun NextRun() {
    if (ARROW_PREDICT_FALSE(position_ >= length_)) {
      // Exhausted: report an empty run.
      return {/*length=*/0, false};
    }
    // This implementation relies on an efficient implementation of
    // CountTrailingZeros and assumes that runs are common.  The logic is to
    // incrementally find the next bit change from the current position.  This
    // is done by zeroing all bits in word_ up to position_ and using
    // CountTrailingZeros to find the index of the next set bit.

    // The runs alternate on each call, so flip the bit.
    current_run_bit_set_ = !current_run_bit_set_;

    int64_t start_position = position_;
    int64_t start_bit_offset = start_position & 63;
    // Invert the word so the current run's bits become zeros, then clear the
    // already-consumed low bits so CountTrailingZeros can do its magic.
    word_ = ~word_ & ~bit_util::LeastSignificantBitMask(start_bit_offset);

    // Go forward until the next change from unset to set.
    int64_t new_bits = bit_util::CountTrailingZeros(word_) - start_bit_offset;
    position_ += new_bits;

    if (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) &&
        ARROW_PREDICT_TRUE(position_ < length_)) {
      // The run reached the end of the current word; keep extending it a whole
      // word at a time (updates position_ accordingly).
      AdvanceUntilChange();
    }

    return {/*length=*/position_ - start_position, current_run_bit_set_};
  }

 private:
  // Extend the current run across whole 64-bit words until a bit change or the
  // end of the bitmap is found.
  void AdvanceUntilChange() {
    int64_t new_bits = 0;
    do {
      // Advance the position of the bitmap for loading.
      bitmap_ += sizeof(uint64_t);
      LoadNextWord();
      new_bits = bit_util::CountTrailingZeros(word_);
      // Continue calculating run length.
      position_ += new_bits;
    } while (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) &&
             ARROW_PREDICT_TRUE(position_ < length_) && new_bits > 0);
  }

  void LoadNextWord() { return LoadWord(length_ - position_); }

  // Helper method for loading the next word.  word_ is normalized so that the
  // bits of the current run appear as trailing zeros.
  void LoadWord(int64_t bits_remaining) {
    word_ = 0;
    // we need at least an extra byte in this case.
    if (ARROW_PREDICT_TRUE(bits_remaining >= 64)) {
      std::memcpy(&word_, bitmap_, 8);
    } else {
      int64_t bytes_to_load = bit_util::BytesForBits(bits_remaining);
      auto word_ptr = reinterpret_cast<uint8_t*>(&word_);
      std::memcpy(word_ptr, bitmap_, bytes_to_load);
      // Ensure stoppage at last bit in bitmap by reversing the next higher
      // order bit.
      bit_util::SetBitTo(word_ptr, bits_remaining,
                         !bit_util::GetBit(word_ptr, bits_remaining - 1));
    }

    // Two cases:
    //   1. For unset, CountTrailingZeros works naturally so we don't
    //   invert the word.
    //   2. Otherwise invert so we can use CountTrailingZeros.
    if (current_run_bit_set_) {
      word_ = ~word_;
    }
  }
  const uint8_t* bitmap_;       // next word to load (advances by 8 bytes)
  int64_t position_;            // current bit position within [0, length_)
  int64_t length_;              // total number of bits to read
  uint64_t word_;               // current (normalized) 64-bit window
  bool current_run_bit_set_;    // value of the bits in the run being emitted
};
#else
// Big-endian: fall back to the portable bit-at-a-time implementation.
using BitRunReader = BitRunReaderLinear;
#endif
170
+
171
// A run of consecutive set bits: its starting position and length.
// length == 0 signals the end of iteration.
struct SetBitRun {
  int64_t position;
  int64_t length;

  bool AtEnd() const { return length == 0; }

  std::string ToString() const {
    std::string repr = "{pos=";
    repr += std::to_string(position);
    repr += ", len=";
    repr += std::to_string(length);
    repr += "}";
    return repr;
  }

  bool operator==(const SetBitRun& other) const {
    if (position != other.position) return false;
    return length == other.length;
  }
  bool operator!=(const SetBitRun& other) const { return !(*this == other); }
};
189
+
190
+ template <bool Reverse>
191
+ class BaseSetBitRunReader {
192
+ public:
193
+ /// \brief Constructs new SetBitRunReader.
194
+ ///
195
+ /// \param[in] bitmap source data
196
+ /// \param[in] start_offset bit offset into the source data
197
+ /// \param[in] length number of bits to copy
198
+ ARROW_NOINLINE
199
+ BaseSetBitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length)
200
+ : bitmap_(util::MakeNonNull(bitmap)),
201
+ length_(length),
202
+ remaining_(length_),
203
+ current_word_(0),
204
+ current_num_bits_(0) {
205
+ if (Reverse) {
206
+ bitmap_ += (start_offset + length) / 8;
207
+ const int8_t end_bit_offset = static_cast<int8_t>((start_offset + length) % 8);
208
+ if (length > 0 && end_bit_offset) {
209
+ // Get LSBs from last byte
210
+ ++bitmap_;
211
+ current_num_bits_ =
212
+ std::min(static_cast<int32_t>(length), static_cast<int32_t>(end_bit_offset));
213
+ current_word_ = LoadPartialWord(8 - end_bit_offset, current_num_bits_);
214
+ }
215
+ } else {
216
+ bitmap_ += start_offset / 8;
217
+ const int8_t bit_offset = static_cast<int8_t>(start_offset % 8);
218
+ if (length > 0 && bit_offset) {
219
+ // Get MSBs from first byte
220
+ current_num_bits_ =
221
+ std::min(static_cast<int32_t>(length), static_cast<int32_t>(8 - bit_offset));
222
+ current_word_ = LoadPartialWord(bit_offset, current_num_bits_);
223
+ }
224
+ }
225
+ }
226
+
227
+ ARROW_NOINLINE
228
+ SetBitRun NextRun() {
229
+ int64_t pos = 0;
230
+ int64_t len = 0;
231
+ if (current_num_bits_) {
232
+ const auto run = FindCurrentRun();
233
+ assert(remaining_ >= 0);
234
+ if (run.length && current_num_bits_) {
235
+ // The run ends in current_word_
236
+ return AdjustRun(run);
237
+ }
238
+ pos = run.position;
239
+ len = run.length;
240
+ }
241
+ if (!len) {
242
+ // We didn't get any ones in current_word_, so we can skip any zeros
243
+ // in the following words
244
+ SkipNextZeros();
245
+ if (remaining_ == 0) {
246
+ return {0, 0};
247
+ }
248
+ assert(current_num_bits_);
249
+ pos = position();
250
+ } else if (!current_num_bits_) {
251
+ if (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
252
+ current_word_ = LoadFullWord();
253
+ current_num_bits_ = 64;
254
+ } else if (remaining_ > 0) {
255
+ current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
256
+ current_num_bits_ = static_cast<int32_t>(remaining_);
257
+ } else {
258
+ // No bits remaining, perhaps we found a run?
259
+ return AdjustRun({pos, len});
260
+ }
261
+ // If current word starts with a zero, we got a full run
262
+ if (!(current_word_ & kFirstBit)) {
263
+ return AdjustRun({pos, len});
264
+ }
265
+ }
266
+ // Current word should now start with a set bit
267
+ len += CountNextOnes();
268
+ return AdjustRun({pos, len});
269
+ }
270
+
271
+ protected:
272
+ int64_t position() const {
273
+ if (Reverse) {
274
+ return remaining_;
275
+ } else {
276
+ return length_ - remaining_;
277
+ }
278
+ }
279
+
280
+ SetBitRun AdjustRun(SetBitRun run) {
281
+ if (Reverse) {
282
+ assert(run.position >= run.length);
283
+ run.position -= run.length;
284
+ }
285
+ return run;
286
+ }
287
+
288
+ uint64_t LoadFullWord() {
289
+ uint64_t word;
290
+ if (Reverse) {
291
+ bitmap_ -= 8;
292
+ }
293
+ memcpy(&word, bitmap_, 8);
294
+ if (!Reverse) {
295
+ bitmap_ += 8;
296
+ }
297
+ return bit_util::ToLittleEndian(word);
298
+ }
299
+
300
+ uint64_t LoadPartialWord(int8_t bit_offset, int64_t num_bits) {
301
+ assert(num_bits > 0);
302
+ uint64_t word = 0;
303
+ const int64_t num_bytes = bit_util::BytesForBits(num_bits);
304
+ if (Reverse) {
305
+ // Read in the most significant bytes of the word
306
+ bitmap_ -= num_bytes;
307
+ memcpy(reinterpret_cast<char*>(&word) + 8 - num_bytes, bitmap_, num_bytes);
308
+ // XXX MostSignificantBitmask
309
+ return (bit_util::ToLittleEndian(word) << bit_offset) &
310
+ ~bit_util::LeastSignificantBitMask(64 - num_bits);
311
+ } else {
312
+ memcpy(&word, bitmap_, num_bytes);
313
+ bitmap_ += num_bytes;
314
+ return (bit_util::ToLittleEndian(word) >> bit_offset) &
315
+ bit_util::LeastSignificantBitMask(num_bits);
316
+ }
317
+ }
318
+
319
  // Skip the (possibly empty) run of zero bits at the current position,
  // loading new words from the bitmap as needed. On return, either
  // `remaining_` is 0 or `current_word_` starts with a set bit.
  void SkipNextZeros() {
    assert(current_num_bits_ == 0);
    while (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
      current_word_ = LoadFullWord();
      const auto num_zeros = CountFirstZeros(current_word_);
      if (num_zeros < 64) {
        // Run of zeros ends here
        current_word_ = ConsumeBits(current_word_, num_zeros);
        current_num_bits_ = 64 - num_zeros;
        remaining_ -= num_zeros;
        assert(remaining_ >= 0);
        assert(current_num_bits_ >= 0);
        return;
      }
      remaining_ -= 64;
    }
    // Run of zeros continues in last bitmap word
    if (remaining_ > 0) {
      current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
      current_num_bits_ = static_cast<int32_t>(remaining_);
      // The partial word's padding bits are zero, so clamp the zero count to
      // the number of valid bits actually loaded.
      const auto num_zeros =
          std::min<int32_t>(current_num_bits_, CountFirstZeros(current_word_));
      current_word_ = ConsumeBits(current_word_, num_zeros);
      current_num_bits_ -= num_zeros;
      remaining_ -= num_zeros;
      assert(remaining_ >= 0);
      assert(current_num_bits_ >= 0);
    }
  }
348
+
349
  // Count and consume the run of set bits starting at the current position,
  // continuing across word boundaries. Precondition: `current_word_` starts
  // with a set bit. Returns the run length.
  int64_t CountNextOnes() {
    assert(current_word_ & kFirstBit);

    int64_t len;
    if (~current_word_) {
      // There is at least one zero in the current word.
      const auto num_ones = CountFirstZeros(~current_word_);
      assert(num_ones <= current_num_bits_);
      assert(num_ones <= remaining_);
      remaining_ -= num_ones;
      current_word_ = ConsumeBits(current_word_, num_ones);
      current_num_bits_ -= num_ones;
      if (current_num_bits_) {
        // Run of ones ends here
        return num_ones;
      }
      len = num_ones;
    } else {
      // current_word_ is all ones
      remaining_ -= 64;
      current_num_bits_ = 0;
      len = 64;
    }

    // The run reached the end of the current word; keep consuming full words.
    while (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
      current_word_ = LoadFullWord();
      const auto num_ones = CountFirstZeros(~current_word_);
      len += num_ones;
      remaining_ -= num_ones;
      if (num_ones < 64) {
        // Run of ones ends here
        current_word_ = ConsumeBits(current_word_, num_ones);
        current_num_bits_ = 64 - num_ones;
        return len;
      }
    }
    // Run of ones continues in last bitmap word
    if (remaining_ > 0) {
      current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
      current_num_bits_ = static_cast<int32_t>(remaining_);
      const auto num_ones = CountFirstZeros(~current_word_);
      assert(num_ones <= current_num_bits_);
      assert(num_ones <= remaining_);
      current_word_ = ConsumeBits(current_word_, num_ones);
      current_num_bits_ -= num_ones;
      remaining_ -= num_ones;
      len += num_ones;
    }
    return len;
  }
398
+
399
  // Find a run of set bits entirely within the already-loaded `current_word_`:
  // skip leading zeros, then count the following ones. Returns {0, 0} if the
  // rest of the word is all zeros (caller then refills from the bitmap).
  SetBitRun FindCurrentRun() {
    // Skip any pending zeros
    const auto num_zeros = CountFirstZeros(current_word_);
    if (num_zeros >= current_num_bits_) {
      // Word exhausted without finding a set bit.
      remaining_ -= current_num_bits_;
      current_word_ = 0;
      current_num_bits_ = 0;
      return {0, 0};
    }
    assert(num_zeros <= remaining_);
    current_word_ = ConsumeBits(current_word_, num_zeros);
    current_num_bits_ -= num_zeros;
    remaining_ -= num_zeros;
    const int64_t pos = position();
    // Count any ones
    const auto num_ones = CountFirstZeros(~current_word_);
    assert(num_ones <= current_num_bits_);
    assert(num_ones <= remaining_);
    current_word_ = ConsumeBits(current_word_, num_ones);
    current_num_bits_ -= num_ones;
    remaining_ -= num_ones;
    return {pos, num_ones};
  }
422
+
423
+ inline int CountFirstZeros(uint64_t word);
424
+ inline uint64_t ConsumeBits(uint64_t word, int32_t num_bits);
425
+
426
+ const uint8_t* bitmap_;
427
+ const int64_t length_;
428
+ int64_t remaining_;
429
+ uint64_t current_word_;
430
+ int32_t current_num_bits_;
431
+
432
+ static constexpr uint64_t kFirstBit = Reverse ? 0x8000000000000000ULL : 1;
433
+ };
434
+
435
// Direction-specific primitives: a forward reader treats the least significant
// bit as the "first" bit of a word, a reverse reader the most significant bit.

template <>
inline int BaseSetBitRunReader<false>::CountFirstZeros(uint64_t word) {
  return bit_util::CountTrailingZeros(word);
}

template <>
inline int BaseSetBitRunReader<true>::CountFirstZeros(uint64_t word) {
  return bit_util::CountLeadingZeros(word);
}

// Discard `num_bits` already-processed bits from the "first" end of `word`.
template <>
inline uint64_t BaseSetBitRunReader<false>::ConsumeBits(uint64_t word, int32_t num_bits) {
  return word >> num_bits;
}

template <>
inline uint64_t BaseSetBitRunReader<true>::ConsumeBits(uint64_t word, int32_t num_bits) {
  return word << num_bits;
}
454
+
455
+ using SetBitRunReader = BaseSetBitRunReader</*Reverse=*/false>;
456
+ using ReverseSetBitRunReader = BaseSetBitRunReader</*Reverse=*/true>;
457
+
458
+ // Functional-style bit run visitors.
459
+
460
+ // XXX: Try to make this function small so the compiler can inline and optimize
461
+ // the `visit` function, which is normally a hot loop with vectorizable code.
462
+ // - don't inline SetBitRunReader constructor, it doesn't hurt performance
463
+ // - un-inline NextRun hurts 'many null' cases a bit, but improves normal cases
464
+ template <typename Visit>
465
+ inline Status VisitSetBitRuns(const uint8_t* bitmap, int64_t offset, int64_t length,
466
+ Visit&& visit) {
467
+ if (bitmap == NULLPTR) {
468
+ // Assuming all set (as in a null bitmap)
469
+ return visit(static_cast<int64_t>(0), static_cast<int64_t>(length));
470
+ }
471
+ SetBitRunReader reader(bitmap, offset, length);
472
+ while (true) {
473
+ const auto run = reader.NextRun();
474
+ if (run.length == 0) {
475
+ break;
476
+ }
477
+ ARROW_RETURN_NOT_OK(visit(run.position, run.length));
478
+ }
479
+ return Status::OK();
480
+ }
481
+
482
+ template <typename Visit>
483
+ inline void VisitSetBitRunsVoid(const uint8_t* bitmap, int64_t offset, int64_t length,
484
+ Visit&& visit) {
485
+ if (bitmap == NULLPTR) {
486
+ // Assuming all set (as in a null bitmap)
487
+ visit(static_cast<int64_t>(0), static_cast<int64_t>(length));
488
+ return;
489
+ }
490
+ SetBitRunReader reader(bitmap, offset, length);
491
+ while (true) {
492
+ const auto run = reader.NextRun();
493
+ if (run.length == 0) {
494
+ break;
495
+ }
496
+ visit(run.position, run.length);
497
+ }
498
+ }
499
+
500
+ template <typename Visit>
501
+ inline Status VisitSetBitRuns(const std::shared_ptr<Buffer>& bitmap, int64_t offset,
502
+ int64_t length, Visit&& visit) {
503
+ return VisitSetBitRuns(bitmap ? bitmap->data() : NULLPTR, offset, length,
504
+ std::forward<Visit>(visit));
505
+ }
506
+
507
+ template <typename Visit>
508
+ inline void VisitSetBitRunsVoid(const std::shared_ptr<Buffer>& bitmap, int64_t offset,
509
+ int64_t length, Visit&& visit) {
510
+ VisitSetBitRunsVoid(bitmap ? bitmap->data() : NULLPTR, offset, length,
511
+ std::forward<Visit>(visit));
512
+ }
513
+
514
+ } // namespace internal
515
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h ADDED
@@ -0,0 +1,369 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #if defined(_MSC_VER)
21
+ # if defined(_M_AMD64) || defined(_M_X64)
22
+ # include <intrin.h> // IWYU pragma: keep
23
+ # endif
24
+
25
+ # pragma intrinsic(_BitScanReverse)
26
+ # pragma intrinsic(_BitScanForward)
27
+ # define ARROW_POPCOUNT64 __popcnt64
28
+ # define ARROW_POPCOUNT32 __popcnt
29
+ #else
30
+ # define ARROW_POPCOUNT64 __builtin_popcountll
31
+ # define ARROW_POPCOUNT32 __builtin_popcount
32
+ #endif
33
+
34
+ #include <cstdint>
35
+ #include <type_traits>
36
+
37
+ #include "arrow/util/macros.h"
38
+ #include "arrow/util/visibility.h"
39
+
40
+ namespace arrow {
41
+ namespace detail {
42
+
43
// Reinterpret an integer value as its unsigned counterpart of the same width.
template <typename Integer>
typename std::make_unsigned<Integer>::type as_unsigned(Integer x) {
  using Unsigned = typename std::make_unsigned<Integer>::type;
  return static_cast<Unsigned>(x);
}
47
+
48
+ } // namespace detail
49
+
50
+ namespace bit_util {
51
+
52
+ // The number of set bits in a given unsigned byte value, pre-computed
53
+ //
54
+ // Generated with the following Python code
55
+ // output = 'static constexpr uint8_t kBytePopcount[] = {{{0}}};'
56
+ // popcounts = [str(bin(i).count('1')) for i in range(0, 256)]
57
+ // print(output.format(', '.join(popcounts)))
58
+ static constexpr uint8_t kBytePopcount[] = {
59
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3,
60
+ 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4,
61
+ 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4,
62
+ 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5,
63
+ 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2,
64
+ 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5,
65
+ 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4,
66
+ 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6,
67
+ 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8};
68
+
69
// Number of set bits in a word, via compiler intrinsic (see ARROW_POPCOUNT*).
static inline uint64_t PopCount(uint64_t bitmap) { return ARROW_POPCOUNT64(bitmap); }
static inline uint32_t PopCount(uint32_t bitmap) { return ARROW_POPCOUNT32(bitmap); }
71
+
72
+ //
73
+ // Bit-related computations on integer values
74
+ //
75
+
76
// Returns ceil(value / divisor) for non-negative value and positive divisor.
constexpr int64_t CeilDiv(int64_t value, int64_t divisor) {
  if (value == 0) {
    return 0;
  }
  return (value - 1) / divisor + 1;
}
80
+
81
// Return the number of bytes needed to fit the given number of bits.
// Written as shift-and-test (instead of `(bits + 7) / 8`) so very large
// `bits` values cannot overflow.
constexpr int64_t BytesForBits(int64_t bits) {
  const int64_t whole_bytes = bits >> 3;
  const int64_t trailing_byte = (bits & 7) != 0 ? 1 : 0;
  return whole_bytes + trailing_byte;
}
86
+
87
// True iff `value` is a strictly positive power of two.
// The `value > 0` guard must be evaluated first: it rules out zero and, for
// the signed overload, negative inputs before `value - 1` is computed.
constexpr bool IsPowerOf2(int64_t value) {
  return value > 0 && (value & (value - 1)) == 0;
}

constexpr bool IsPowerOf2(uint64_t value) {
  return value > 0 && (value & (value - 1)) == 0;
}
94
+
95
// Returns the smallest power of two that contains v. If v is already a
// power of two, it is returned as is.
static inline int64_t NextPower2(int64_t n) {
  // Bit-smearing trick from
  // http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
  // After the loop every bit below the highest set bit is set; adding one
  // then yields the next power of two.
  n--;
  for (int shift = 1; shift < 64; shift *= 2) {
    n |= n >> shift;
  }
  return n + 1;
}
110
+
111
// True iff `n` is an exact (signed) multiple of 64 / of 8.
constexpr bool IsMultipleOf64(int64_t n) { return (n % 64) == 0; }

constexpr bool IsMultipleOf8(int64_t n) { return (n % 8) == 0; }
114
+
115
// Returns a mask for the bit_index lower order bits.
// Only valid for bit_index in the range [0, 64): a 64-bit shift by 64 is
// undefined behavior, so callers must handle the all-ones case themselves.
constexpr uint64_t LeastSignificantBitMask(int64_t bit_index) {
  return (static_cast<uint64_t>(1) << bit_index) - 1;
}
120
+
121
// Returns 'value' rounded up to the nearest multiple of 'factor'
// (for non-negative value and positive factor; see CeilDiv).
constexpr int64_t RoundUp(int64_t value, int64_t factor) {
  return CeilDiv(value, factor) * factor;
}

// Returns 'value' rounded down to the nearest multiple of 'factor'
constexpr int64_t RoundDown(int64_t value, int64_t factor) {
  return (value / factor) * factor;
}
130
+
131
// Returns 'value' rounded up to the nearest multiple of 'factor' when factor
// is a power of two.
// The result is undefined on overflow, i.e. if `value > 2**64 - factor`,
// since we cannot return the correct result which would be 2**64.
constexpr int64_t RoundUpToPowerOf2(int64_t value, int64_t factor) {
  // DCHECK(value >= 0);
  // DCHECK(IsPowerOf2(factor));
  return (value + (factor - 1)) & ~(factor - 1);
}

constexpr uint64_t RoundUpToPowerOf2(uint64_t value, uint64_t factor) {
  // DCHECK(IsPowerOf2(factor));
  return (value + (factor - 1)) & ~(factor - 1);
}

// Convenience wrappers for the common byte- and word-alignment cases.
constexpr int64_t RoundUpToMultipleOf8(int64_t num) { return RoundUpToPowerOf2(num, 8); }

constexpr int64_t RoundUpToMultipleOf64(int64_t num) {
  return RoundUpToPowerOf2(num, 64);
}
151
+
152
// Returns the number of bytes covering a sliced bitmap. Find the length
// rounded to cover full bytes on both extremities.
//
// The following example represents a slice (offset=10, length=9)
//
// 0 8 16 24
// |-------|-------|------|
// [ ] (slice)
// [ ] (same slice aligned to bytes bounds, length=16)
//
// The covering bytes is the length (in bytes) of this new aligned slice.
// NOTE(review): assumes offset >= 0 and length >= 0 — confirm at call sites.
constexpr int64_t CoveringBytes(int64_t offset, int64_t length) {
  return (bit_util::RoundUp(length + offset, 8) - bit_util::RoundDown(offset, 8)) / 8;
}
166
+
167
// Returns the 'num_bits' least-significant bits of 'v'.
// num_bits == 0 yields 0; num_bits >= 64 returns v unchanged (both edge
// cases are handled explicitly to avoid undefined 64-bit shifts).
static inline uint64_t TrailingBits(uint64_t v, int num_bits) {
  if (ARROW_PREDICT_FALSE(num_bits == 0)) return 0;
  if (ARROW_PREDICT_FALSE(num_bits >= 64)) return v;
  int n = 64 - num_bits;
  return (v << n) >> n;
}
174
+
175
/// \brief Count the number of leading zeros in an unsigned integer.
/// Returns 32 for value == 0.
static inline int CountLeadingZeros(uint32_t value) {
#if defined(__clang__) || defined(__GNUC__)
  // __builtin_clz is undefined for 0, hence the explicit check.
  if (value == 0) return 32;
  return static_cast<int>(__builtin_clz(value));
#elif defined(_MSC_VER)
  unsigned long index;                                                // NOLINT
  if (_BitScanReverse(&index, static_cast<unsigned long>(value))) {   // NOLINT
    return 31 - static_cast<int>(index);
  } else {
    return 32;
  }
#else
  // Portable fallback: shift the value right until it is exhausted.
  int bitpos = 0;
  while (value != 0) {
    value >>= 1;
    ++bitpos;
  }
  return 32 - bitpos;
#endif
}
196
+
197
// 64-bit variant; returns 64 for value == 0.
static inline int CountLeadingZeros(uint64_t value) {
#if defined(__clang__) || defined(__GNUC__)
  // __builtin_clzll is undefined for 0, hence the explicit check.
  if (value == 0) return 64;
  return static_cast<int>(__builtin_clzll(value));
#elif defined(_MSC_VER)
  unsigned long index;                     // NOLINT
  if (_BitScanReverse64(&index, value)) {  // NOLINT
    return 63 - static_cast<int>(index);
  } else {
    return 64;
  }
#else
  // Portable fallback: shift the value right until it is exhausted.
  int bitpos = 0;
  while (value != 0) {
    value >>= 1;
    ++bitpos;
  }
  return 64 - bitpos;
#endif
}
217
+
218
/// \brief Count the number of trailing zeros in an unsigned integer.
/// Returns 32 for value == 0.
static inline int CountTrailingZeros(uint32_t value) {
#if defined(__clang__) || defined(__GNUC__)
  // __builtin_ctzl is undefined for 0, hence the explicit check.
  if (value == 0) return 32;
  return static_cast<int>(__builtin_ctzl(value));
#elif defined(_MSC_VER)
  unsigned long index;  // NOLINT
  if (_BitScanForward(&index, value)) {
    return static_cast<int>(index);
  } else {
    return 32;
  }
#else
  // Portable fallback: count low zero bits one at a time.
  int bitpos = 0;
  if (value) {
    // BUGFIX: was `value & 1 == 0`, which parses as `value & (1 == 0)` ==
    // `value & 0` (always false), so the loop never ran and the function
    // returned 0 for every nonzero input on this code path.
    while ((value & 1) == 0) {
      value >>= 1;
      ++bitpos;
    }
  } else {
    bitpos = 32;
  }
  return bitpos;
#endif
}
242
+
243
// 64-bit variant; returns 64 for value == 0.
static inline int CountTrailingZeros(uint64_t value) {
#if defined(__clang__) || defined(__GNUC__)
  // __builtin_ctzll is undefined for 0, hence the explicit check.
  if (value == 0) return 64;
  return static_cast<int>(__builtin_ctzll(value));
#elif defined(_MSC_VER)
  unsigned long index;  // NOLINT
  if (_BitScanForward64(&index, value)) {
    return static_cast<int>(index);
  } else {
    return 64;
  }
#else
  // Portable fallback: count low zero bits one at a time.
  int bitpos = 0;
  if (value) {
    // BUGFIX: was `value & 1 == 0`, which parses as `value & (1 == 0)` ==
    // `value & 0` (always false), so the loop never ran and the function
    // returned 0 for every nonzero input on this code path.
    while ((value & 1) == 0) {
      value >>= 1;
      ++bitpos;
    }
  } else {
    bitpos = 64;
  }
  return bitpos;
#endif
}
267
+
268
// Returns the minimum number of bits needed to represent an unsigned value
// (0 for x == 0).
static inline int NumRequiredBits(uint64_t x) { return 64 - CountLeadingZeros(x); }

// Returns ceil(log2(x)).
static inline int Log2(uint64_t x) {
  // DCHECK_GT(x, 0);
  // NOTE(review): x == 0 wraps `x - 1` to UINT64_MAX; callers must pass x > 0.
  return NumRequiredBits(x - 1);
}
276
+
277
+ //
278
+ // Utilities for reading and writing individual bits by their index
279
+ // in a memory area.
280
+ //
281
+
282
+ // Bitmask selecting the k-th bit in a byte
283
+ static constexpr uint8_t kBitmask[] = {1, 2, 4, 8, 16, 32, 64, 128};
284
+
285
+ // the bitwise complement version of kBitmask
286
+ static constexpr uint8_t kFlippedBitmask[] = {254, 253, 251, 247, 239, 223, 191, 127};
287
+
288
+ // Bitmask selecting the (k - 1) preceding bits in a byte
289
+ static constexpr uint8_t kPrecedingBitmask[] = {0, 1, 3, 7, 15, 31, 63, 127};
290
+ static constexpr uint8_t kPrecedingWrappingBitmask[] = {255, 1, 3, 7, 15, 31, 63, 127};
291
+
292
+ // the bitwise complement version of kPrecedingBitmask
293
+ static constexpr uint8_t kTrailingBitmask[] = {255, 254, 252, 248, 240, 224, 192, 128};
294
+
295
// Read bit `i` of the bitmap at `bits`; bit 0 is the least significant bit
// of the first byte.
static constexpr bool GetBit(const uint8_t* bits, uint64_t i) {
  return (bits[i / 8] >> (i % 8)) & 1;
}
298
+
299
// Gets the i-th bit from a byte. Should only be used with i <= 7.
static constexpr bool GetBitFromByte(uint8_t byte, uint8_t i) {
  return (byte >> i) & 1;
}
303
+
304
// Clear (zero) bit `i` of the bitmap at `bits`.
static inline void ClearBit(uint8_t* bits, int64_t i) {
  bits[i / 8] &= static_cast<uint8_t>(~(static_cast<uint8_t>(1) << (i % 8)));
}
307
+
308
// Set bit `i` of the bitmap at `bits` to 1.
static inline void SetBit(uint8_t* bits, int64_t i) {
  bits[i / 8] |= static_cast<uint8_t>(static_cast<uint8_t>(1) << (i % 8));
}
309
+
310
// Set bit `i` to `bit_is_set` using an XOR trick, deliberately branch-free.
static inline void SetBitTo(uint8_t* bits, int64_t i, bool bit_is_set) {
  // https://graphics.stanford.edu/~seander/bithacks.html
  // "Conditionally set or clear bits without branching"
  // NOTE: this seems to confuse Valgrind as it reads from potentially
  // uninitialized memory
  bits[i / 8] ^= static_cast<uint8_t>(-static_cast<uint8_t>(bit_is_set) ^ bits[i / 8]) &
                 kBitmask[i % 8];
}
318
+
319
+ /// \brief set or clear a range of bits quickly
320
+ ARROW_EXPORT
321
+ void SetBitsTo(uint8_t* bits, int64_t start_offset, int64_t length, bool bits_are_set);
322
+
323
+ /// \brief Sets all bits in the bitmap to true
324
+ ARROW_EXPORT
325
+ void SetBitmap(uint8_t* data, int64_t offset, int64_t length);
326
+
327
+ /// \brief Clears all bits in the bitmap (set to false)
328
+ ARROW_EXPORT
329
+ void ClearBitmap(uint8_t* data, int64_t offset, int64_t length);
330
+
331
/// Returns a mask with lower i bits set to 1. If i >= sizeof(Word)*8, all-ones will be
/// returned
/// ex:
/// ref: https://stackoverflow.com/a/59523400
template <typename Word>
constexpr Word PrecedingWordBitmask(unsigned int const i) {
  // For in-range i this is (1 << i) - 1. When i >= bit-width, the
  // (i < width) test makes the shifted operand 0, and the `- 1` wraps the
  // Word around to all-ones, avoiding an out-of-range (undefined) shift.
  return static_cast<Word>(static_cast<Word>(i < sizeof(Word) * 8)
                               << (i & (sizeof(Word) * 8 - 1))) -
         1;
}
static_assert(PrecedingWordBitmask<uint8_t>(0) == 0x00, "");
static_assert(PrecedingWordBitmask<uint8_t>(4) == 0x0f, "");
static_assert(PrecedingWordBitmask<uint8_t>(8) == 0xff, "");
static_assert(PrecedingWordBitmask<uint16_t>(8) == 0x00ff, "");
345
+
346
/// \brief Create a word with low `n` bits from `low` and high `sizeof(Word)-n` bits
/// from `high`.
/// Word ret
/// for (i = 0; i < sizeof(Word)*8; i++){
///   ret[i]= i < n ? low[i]: high[i];
/// }
template <typename Word>
constexpr Word SpliceWord(int n, Word low, Word high) {
  // PrecedingWordBitmask(n) selects the low n bits; its complement selects
  // the rest (and handles n >= bit-width by selecting everything from low).
  return (high & ~PrecedingWordBitmask<Word>(n)) | (low & PrecedingWordBitmask<Word>(n));
}
356
+
357
/// \brief Pack integers into a bitmap in batches of 8
///
/// Each input value contributes its least significant bit; value j of every
/// group of 8 becomes bit j of the corresponding output byte.
template <int batch_size>
void PackBits(const uint32_t* values, uint8_t* out) {
  for (int byte_index = 0; byte_index < batch_size / 8; ++byte_index) {
    uint8_t packed = 0;
    for (int bit = 0; bit < 8; ++bit) {
      packed |= static_cast<uint8_t>(values[bit] << bit);
    }
    *out++ = packed;
    values += 8;
  }
}
367
+
368
+ } // namespace bit_util
369
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h ADDED
@@ -0,0 +1,466 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+ #include <array>
22
+ #include <bitset>
23
+ #include <cassert>
24
+ #include <cstdint>
25
+ #include <cstring>
26
+ #include <memory>
27
+ #include <string>
28
+ #include <string_view>
29
+ #include <utility>
30
+
31
+ #include "arrow/buffer.h"
32
+ #include "arrow/util/bit_util.h"
33
+ #include "arrow/util/bitmap_ops.h"
34
+ #include "arrow/util/bitmap_reader.h"
35
+ #include "arrow/util/bitmap_writer.h"
36
+ #include "arrow/util/compare.h"
37
+ #include "arrow/util/endian.h"
38
+ #include "arrow/util/functional.h"
39
+ #include "arrow/util/span.h"
40
+ #include "arrow/util/string_builder.h"
41
+ #include "arrow/util/visibility.h"
42
+
43
+ namespace arrow {
44
+
45
+ class BooleanArray;
46
+
47
+ namespace internal {
48
+
49
+ class ARROW_EXPORT Bitmap : public util::ToStringOstreamable<Bitmap>,
50
+ public util::EqualityComparable<Bitmap> {
51
+ public:
52
  // Empty (null) bitmap view.
  Bitmap() = default;

  // View `buffer` as a bitmap of `length` bits starting at bit `offset`.
  // The view is writable only if the buffer itself is mutable.
  Bitmap(const std::shared_ptr<Buffer>& buffer, int64_t offset, int64_t length)
      : data_(buffer->data()), offset_(offset), length_(length) {
    if (buffer->is_mutable()) {
      mutable_data_ = buffer->mutable_data();
    }
  }

  // Read-only view over raw memory.
  Bitmap(const void* data, int64_t offset, int64_t length)
      : data_(reinterpret_cast<const uint8_t*>(data)), offset_(offset), length_(length) {}

  // Mutable view over raw memory.
  Bitmap(void* data, int64_t offset, int64_t length)
      : data_(reinterpret_cast<const uint8_t*>(data)),
        mutable_data_(reinterpret_cast<uint8_t*>(data)),
        offset_(offset),
        length_(length) {}
69
+
70
+ Bitmap Slice(int64_t offset) const {
71
+ if (mutable_data_ != NULLPTR) {
72
+ return {mutable_data_, offset_ + offset, length_ - offset};
73
+ } else {
74
+ return {data_, offset_ + offset, length_ - offset};
75
+ }
76
+ }
77
+
78
+ Bitmap Slice(int64_t offset, int64_t length) const {
79
+ if (mutable_data_ != NULLPTR) {
80
+ return {mutable_data_, offset_ + offset, length};
81
+ } else {
82
+ return {data_, offset_ + offset, length};
83
+ }
84
+ }
85
+
86
+ std::string ToString() const;
87
+
88
+ bool Equals(const Bitmap& other) const;
89
+
90
+ std::string Diff(const Bitmap& other) const;
91
+
92
  // Read bit `i`, relative to this bitmap's offset.
  bool GetBit(int64_t i) const { return bit_util::GetBit(data_, i + offset_); }

  bool operator[](int64_t i) const { return GetBit(i); }

  // Write bit `i`; valid only when the bitmap wraps mutable memory.
  void SetBitTo(int64_t i, bool v) const {
    bit_util::SetBitTo(mutable_data_, i + offset_, v);
  }

  // Set all bits of the view to `v`; valid only with mutable memory.
  void SetBitsTo(bool v) { bit_util::SetBitsTo(mutable_data_, offset_, length_, v); }
101
+
102
+ void CopyFrom(const Bitmap& other);
103
+ void CopyFromInverted(const Bitmap& other);
104
+
105
  /// \brief Visit bits from each bitmap as bitset<N>
  ///
  /// All bitmaps must have identical length.
  /// Simple bit-at-a-time visitation; for large bitmaps prefer the word-wise
  /// VisitWords below.
  template <size_t N, typename Visitor>
  static void VisitBits(const Bitmap (&bitmaps)[N], Visitor&& visitor) {
    int64_t bit_length = BitLength(bitmaps, N);
    std::bitset<N> bits;
    for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) {
      for (size_t i = 0; i < N; ++i) {
        bits[i] = bitmaps[i].GetBit(bit_i);
      }
      visitor(bits);
    }
  }
119
+
120
  /// \brief Visit bits from each bitmap as bitset<N>
  ///
  /// All bitmaps must have identical length.
  /// std::array overload of the raw-array version above.
  template <size_t N, typename Visitor>
  static void VisitBits(const std::array<Bitmap, N>& bitmaps, Visitor&& visitor) {
    int64_t bit_length = BitLength(bitmaps);
    std::bitset<N> bits;
    for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) {
      for (size_t i = 0; i < N; ++i) {
        bits[i] = bitmaps[i].GetBit(bit_i);
      }
      visitor(bits);
    }
  }
134
+
135
+ /// \brief Visit words of bits from each bitmap as array<Word, N>
136
+ ///
137
+ /// All bitmaps must have identical length. The first bit in a visited bitmap
138
+ /// may be offset within the first visited word, but words will otherwise contain
139
+ /// densely packed bits loaded from the bitmap. That offset within the first word is
140
+ /// returned.
141
+ ///
142
+ /// TODO(bkietz) allow for early termination
143
+ // NOTE: this function is efficient on 3+ sufficiently large bitmaps.
144
+ // It also has a large prolog / epilog overhead and should be used
145
+ // carefully in other cases.
146
+ // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid
147
+ // and BitmapUInt64Reader.
148
  template <size_t N, typename Visitor,
            typename Word = typename std::decay<
                internal::call_traits::argument_type<0, Visitor&&>>::type::value_type>
  static int64_t VisitWords(const Bitmap (&bitmaps_arg)[N], Visitor&& visitor) {
    constexpr int64_t kBitWidth = sizeof(Word) * 8;

    // local, mutable variables which will be sliced/decremented to represent consumption:
    Bitmap bitmaps[N];
    int64_t offsets[N];
    int64_t bit_length = BitLength(bitmaps_arg, N);
    util::span<const Word> words[N];
    for (size_t i = 0; i < N; ++i) {
      bitmaps[i] = bitmaps_arg[i];
      offsets[i] = bitmaps[i].template word_offset<Word>();
      assert(offsets[i] >= 0 && offsets[i] < kBitWidth);
      words[i] = bitmaps[i].template words<Word>();
    }

    // Advance every local bitmap view past `consumed_bits` and refresh the
    // cached word spans/offsets to match.
    auto consume = [&](int64_t consumed_bits) {
      for (size_t i = 0; i < N; ++i) {
        bitmaps[i] = bitmaps[i].Slice(consumed_bits, bit_length - consumed_bits);
        offsets[i] = bitmaps[i].template word_offset<Word>();
        assert(offsets[i] >= 0 && offsets[i] < kBitWidth);
        words[i] = bitmaps[i].template words<Word>();
      }
      bit_length -= consumed_bits;
    };

    std::array<Word, N> visited_words;
    visited_words.fill(0);

    if (bit_length <= kBitWidth * 2) {
      // bitmaps fit into one or two words so don't bother with optimization
      while (bit_length > 0) {
        auto leading_bits = std::min(bit_length, kBitWidth);
        SafeLoadWords(bitmaps, 0, leading_bits, false, &visited_words);
        visitor(visited_words);
        consume(leading_bits);
      }
      return 0;
    }

    int64_t max_offset = *std::max_element(offsets, offsets + N);
    int64_t min_offset = *std::min_element(offsets, offsets + N);
    if (max_offset > 0) {
      // consume leading bits
      auto leading_bits = kBitWidth - min_offset;
      SafeLoadWords(bitmaps, 0, leading_bits, true, &visited_words);
      visitor(visited_words);
      consume(leading_bits);
    }
    assert(*std::min_element(offsets, offsets + N) == 0);

    int64_t whole_word_count = bit_length / kBitWidth;
    assert(whole_word_count >= 1);

    if (min_offset == max_offset) {
      // all offsets were identical, all leading bits have been consumed
      assert(
          std::all_of(offsets, offsets + N, [](int64_t offset) { return offset == 0; }));

      // Fast path: every bitmap is word-aligned, load words directly.
      for (int64_t word_i = 0; word_i < whole_word_count; ++word_i) {
        for (size_t i = 0; i < N; ++i) {
          visited_words[i] = words[i][word_i];
        }
        visitor(visited_words);
      }
      consume(whole_word_count * kBitWidth);
    } else {
      // leading bits from potentially incomplete words have been consumed

      // word_i such that words[i][word_i] and words[i][word_i + 1] lie entirely
      // within the bitmap for all i
      for (int64_t word_i = 0; word_i < whole_word_count - 1; ++word_i) {
        for (size_t i = 0; i < N; ++i) {
          if (offsets[i] == 0) {
            visited_words[i] = words[i][word_i];
          } else {
            // Stitch each visited word together from two adjacent stored
            // words (little-endian bit order).
            auto words0 = bit_util::ToLittleEndian(words[i][word_i]);
            auto words1 = bit_util::ToLittleEndian(words[i][word_i + 1]);
            visited_words[i] = bit_util::FromLittleEndian(
                (words0 >> offsets[i]) | (words1 << (kBitWidth - offsets[i])));
          }
        }
        visitor(visited_words);
      }
      consume((whole_word_count - 1) * kBitWidth);

      // The last whole word may not have a full successor; load it safely.
      SafeLoadWords(bitmaps, 0, kBitWidth, false, &visited_words);

      visitor(visited_words);
      consume(kBitWidth);
    }

    // load remaining bits
    if (bit_length > 0) {
      SafeLoadWords(bitmaps, 0, bit_length, false, &visited_words);
      visitor(visited_words);
    }

    return min_offset;
  }
250
+
251
  // Drive `visitor` over N word readers and write its M outputs through the
  // given writers: first all whole words, then the trailing partial bytes.
  template <size_t N, size_t M, typename ReaderT, typename WriterT, typename Visitor,
            typename Word = typename std::decay<
                internal::call_traits::argument_type<0, Visitor&&>>::type::value_type>
  static void RunVisitWordsAndWriteLoop(int64_t bit_length,
                                        std::array<ReaderT, N>& readers,
                                        std::array<WriterT, M>& writers,
                                        Visitor&& visitor) {
    constexpr int64_t kBitWidth = sizeof(Word) * 8;

    std::array<Word, N> visited_words;
    std::array<Word, M> output_words;

    // every reader will have same number of words, since they are same length'ed
    // TODO($JIRA) this will be inefficient in some cases. When there are offsets beyond
    // Word boundary, every Word would have to be created from 2 adjoining Words
    auto n_words = readers[0].words();
    bit_length -= n_words * kBitWidth;
    while (n_words--) {
      // first collect all words to visited_words array
      for (size_t i = 0; i < N; i++) {
        visited_words[i] = readers[i].NextWord();
      }
      visitor(visited_words, &output_words);
      for (size_t i = 0; i < M; i++) {
        writers[i].PutNextWord(output_words[i]);
      }
    }

    // every reader will have same number of trailing bytes, because of the above reason
    // tailing portion could be more than one word! (ref: BitmapWordReader constructor)
    // remaining full/ partial words to write

    if (bit_length) {
      // convert the word visitor lambda to a byte_visitor
      auto byte_visitor = [&](const std::array<uint8_t, N>& in,
                              std::array<uint8_t, M>* out) {
        std::array<Word, N> in_words;
        std::array<Word, M> out_words;
        std::copy(in.begin(), in.end(), in_words.begin());
        visitor(in_words, &out_words);
        for (size_t i = 0; i < M; i++) {
          out->at(i) = static_cast<uint8_t>(out_words[i]);
        }
      };

      std::array<uint8_t, N> visited_bytes;
      std::array<uint8_t, M> output_bytes;
      int n_bytes = readers[0].trailing_bytes();
      while (n_bytes--) {
        visited_bytes.fill(0);
        output_bytes.fill(0);
        // Set by NextTrailingByte below; tells the writer how many bits of
        // the final byte are valid.
        int valid_bits;
        for (size_t i = 0; i < N; i++) {
          visited_bytes[i] = readers[i].NextTrailingByte(valid_bits);
        }
        byte_visitor(visited_bytes, &output_bytes);
        for (size_t i = 0; i < M; i++) {
          writers[i].PutNextTrailingByte(output_bytes[i], valid_bits);
        }
      }
    }
  }
313
+
314
  /// \brief Visit words of bits from each input bitmap as array<Word, N> and collects
  /// outputs to an array<Word, M>, to be written into the output bitmaps accordingly.
  ///
  /// All bitmaps must have identical length. The first bit in a visited bitmap
  /// may be offset within the first visited word, but words will otherwise contain
  /// densely packed bits loaded from the bitmap.
  /// Visitor is expected to have the following signature
  /// [](const std::array<Word, N>& in_words, std::array<Word, M>* out_words){...}
  ///
  // NOTE: this function is efficient on 3+ sufficiently large bitmaps.
  // It also has a large prolog / epilog overhead and should be used
  // carefully in other cases.
  // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid
  // and BitmapUInt64Reader.
  template <size_t N, size_t M, typename Visitor,
            typename Word = typename std::decay<
                internal::call_traits::argument_type<0, Visitor&&>>::type::value_type>
  static void VisitWordsAndWrite(const std::array<Bitmap, N>& bitmaps_arg,
                                 std::array<Bitmap, M>* out_bitmaps_arg,
                                 Visitor&& visitor) {
    int64_t bit_length = BitLength(bitmaps_arg);
    assert(bit_length == BitLength(*out_bitmaps_arg));

    // if both input and output bitmaps have no byte offset, then use special template
    // (the may_have_byte_offset=false readers/writers can skip per-word shifting)
    if (std::all_of(bitmaps_arg.begin(), bitmaps_arg.end(),
                    [](const Bitmap& b) { return b.offset_ % 8 == 0; }) &&
        std::all_of(out_bitmaps_arg->begin(), out_bitmaps_arg->end(),
                    [](const Bitmap& b) { return b.offset_ % 8 == 0; })) {
      std::array<BitmapWordReader<Word, /*may_have_byte_offset=*/false>, N> readers;
      for (size_t i = 0; i < N; ++i) {
        const Bitmap& in_bitmap = bitmaps_arg[i];
        readers[i] = BitmapWordReader<Word, /*may_have_byte_offset=*/false>(
            in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_);
      }

      std::array<BitmapWordWriter<Word, /*may_have_byte_offset=*/false>, M> writers;
      for (size_t i = 0; i < M; ++i) {
        const Bitmap& out_bitmap = out_bitmaps_arg->at(i);
        writers[i] = BitmapWordWriter<Word, /*may_have_byte_offset=*/false>(
            out_bitmap.mutable_data_, out_bitmap.offset_, out_bitmap.length_);
      }

      RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor);
    } else {
      // general case: readers/writers handle arbitrary bit offsets
      std::array<BitmapWordReader<Word>, N> readers;
      for (size_t i = 0; i < N; ++i) {
        const Bitmap& in_bitmap = bitmaps_arg[i];
        readers[i] =
            BitmapWordReader<Word>(in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_);
      }

      std::array<BitmapWordWriter<Word>, M> writers;
      for (size_t i = 0; i < M; ++i) {
        const Bitmap& out_bitmap = out_bitmaps_arg->at(i);
        writers[i] = BitmapWordWriter<Word>(out_bitmap.mutable_data_, out_bitmap.offset_,
                                            out_bitmap.length_);
      }

      RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor);
    }
  }
376
+
377
  /// raw pointer to the underlying buffer (not adjusted for offset())
  const uint8_t* data() const { return data_; }
  /// mutable counterpart of data(); NOTE(review): presumably null when this
  /// Bitmap wraps an immutable buffer -- confirm at the construction sites
  uint8_t* mutable_data() { return mutable_data_; }

  /// offset of first bit relative to buffer().data()
  int64_t offset() const { return offset_; }

  /// number of bits in this Bitmap
  int64_t length() const { return length_; }

  /// span of all bytes which contain any bit in this Bitmap
  util::span<const uint8_t> bytes() const {
    auto byte_offset = offset_ / 8;
    // CeilDiv rounds up so that a final, partially covered byte is included
    auto byte_count = bit_util::CeilDiv(offset_ + length_, 8) - byte_offset;
    return {data_ + byte_offset, static_cast<size_t>(byte_count)};
  }
392
+
393
 private:
  /// span of all Words which contain any bit in this Bitmap
  ///
  /// For example, given Word=uint16_t and a bitmap spanning bits [20, 36)
  /// words() would span bits [16, 48).
  ///
  /// 0       16      32     48     64
  /// |-------|-------|------|------|  (buffer)
  ///      [               ]           (bitmap)
  ///         |-------|------|         (returned words)
  ///
  /// \warning The words may contain bytes which lie outside the buffer or are
  /// uninitialized.
  template <typename Word>
  util::span<const Word> words() const {
    // Round the byte span's start address down to a Word boundary...
    auto bytes_addr = reinterpret_cast<intptr_t>(bytes().data());
    auto words_addr = bytes_addr - bytes_addr % sizeof(Word);
    // ...and round its end address up, so whole Words cover every byte that
    // holds a bit of this bitmap.
    auto word_byte_count =
        bit_util::RoundUpToPowerOf2(static_cast<int64_t>(bytes_addr + bytes().size()),
                                    static_cast<int64_t>(sizeof(Word))) -
        words_addr;
    return {reinterpret_cast<const Word*>(words_addr),
            static_cast<size_t>(word_byte_count / sizeof(Word))};
  }
417
+
418
  /// offset of first bit relative to words<Word>().data()
  ///
  /// May exceed offset_ because words<Word>() aligns its start address down to
  /// a Word boundary; the extra bytes are converted to bits and added here.
  template <typename Word>
  int64_t word_offset() const {
    return offset_ + 8 * (reinterpret_cast<intptr_t>(data_) -
                          reinterpret_cast<intptr_t>(words<Word>().data()));
  }
424
+
425
  /// load words from bitmaps bitwise
  ///
  /// Copies `out_length` bits starting at `offset` from each input bitmap into
  /// the corresponding Word of *out, one bit at a time (safe near buffer ends,
  /// unlike a direct word load). When `set_trailing_bits` is true the bits are
  /// packed at the high end of each output word instead of the low end.
  template <size_t N, typename Word>
  static void SafeLoadWords(const Bitmap (&bitmaps)[N], int64_t offset,
                            int64_t out_length, bool set_trailing_bits,
                            std::array<Word, N>* out) {
    out->fill(0);

    int64_t out_offset = set_trailing_bits ? sizeof(Word) * 8 - out_length : 0;

    // Wrap each output word in a Bitmap so the generic bit-visiting machinery
    // can copy into it.
    Bitmap slices[N], out_bitmaps[N];
    for (size_t i = 0; i < N; ++i) {
      slices[i] = bitmaps[i].Slice(offset, out_length);
      out_bitmaps[i] = Bitmap(&out->at(i), out_offset, out_length);
    }

    // Copy the sliced input bits into the output words in lockstep.
    int64_t bit_i = 0;
    Bitmap::VisitBits(slices, [&](std::bitset<N> bits) {
      for (size_t i = 0; i < N; ++i) {
        out_bitmaps[i].SetBitTo(bit_i, bits[i]);
      }
      ++bit_i;
    });
  }
448
+
449
  /// assert bitmaps have identical length and return that length
  static int64_t BitLength(const Bitmap* bitmaps, size_t N);

  /// compile-time-sized overload: asserts (debug builds only) that all N
  /// bitmaps share one length and returns it
  template <size_t N>
  static int64_t BitLength(const std::array<Bitmap, N>& bitmaps) {
    for (size_t i = 1; i < N; ++i) {
      assert(bitmaps[i].length() == bitmaps[0].length());
    }
    return bitmaps[0].length();
  }
459
+
460
+ const uint8_t* data_ = NULLPTR;
461
+ uint8_t* mutable_data_ = NULLPTR;
462
+ int64_t offset_ = 0, length_ = 0;
463
+ };
464
+
465
+ } // namespace internal
466
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <vector>
23
+
24
+ #include "arrow/result.h"
25
+ #include "arrow/type_fwd.h"
26
+ #include "arrow/util/visibility.h"
27
+
28
+ namespace arrow {
29
+ namespace internal {
30
+
31
+ /// \brief Generate a Bitmap with all positions set to `value` except for the
33
+ /// one found at `straggler_pos`.
33
+ ARROW_EXPORT
34
+ Result<std::shared_ptr<Buffer>> BitmapAllButOne(MemoryPool* pool, int64_t length,
35
+ int64_t straggler_pos, bool value = true);
36
+
37
+ /// \brief Convert vector of bytes to bitmap buffer
38
+ ARROW_EXPORT
39
+ Result<std::shared_ptr<Buffer>> BytesToBits(const std::vector<uint8_t>&,
40
+ MemoryPool* pool = default_memory_pool());
41
+
42
+ } // namespace internal
43
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+
23
+ #include "arrow/buffer.h"
24
+ #include "arrow/memory_pool.h"
25
+ #include "arrow/result.h"
26
+ #include "arrow/util/bit_util.h"
27
+ #include "arrow/util/visibility.h"
28
+
29
+ namespace arrow {
30
+ namespace internal {
31
+
32
+ // A std::generate() like function to write sequential bits into a bitmap area.
33
+ // Bits preceding the bitmap area are preserved, bits following the bitmap
34
+ // area may be clobbered.
35
+
36
+ template <class Generator>
37
+ void GenerateBits(uint8_t* bitmap, int64_t start_offset, int64_t length, Generator&& g) {
38
+ if (length == 0) {
39
+ return;
40
+ }
41
+ uint8_t* cur = bitmap + start_offset / 8;
42
+ uint8_t bit_mask = bit_util::kBitmask[start_offset % 8];
43
+ uint8_t current_byte = *cur & bit_util::kPrecedingBitmask[start_offset % 8];
44
+
45
+ for (int64_t index = 0; index < length; ++index) {
46
+ const bool bit = g();
47
+ current_byte = bit ? (current_byte | bit_mask) : current_byte;
48
+ bit_mask = static_cast<uint8_t>(bit_mask << 1);
49
+ if (bit_mask == 0) {
50
+ bit_mask = 1;
51
+ *cur++ = current_byte;
52
+ current_byte = 0;
53
+ }
54
+ }
55
+ if (bit_mask != 1) {
56
+ *cur++ = current_byte;
57
+ }
58
+ }
59
+
60
+ // Like GenerateBits(), but unrolls its main loop for higher performance.
61
+
62
+ template <class Generator>
63
+ void GenerateBitsUnrolled(uint8_t* bitmap, int64_t start_offset, int64_t length,
64
+ Generator&& g) {
65
+ static_assert(std::is_same<decltype(std::declval<Generator>()()), bool>::value,
66
+ "Functor passed to GenerateBitsUnrolled must return bool");
67
+
68
+ if (length == 0) {
69
+ return;
70
+ }
71
+ uint8_t current_byte;
72
+ uint8_t* cur = bitmap + start_offset / 8;
73
+ const uint64_t start_bit_offset = start_offset % 8;
74
+ uint8_t bit_mask = bit_util::kBitmask[start_bit_offset];
75
+ int64_t remaining = length;
76
+
77
+ if (bit_mask != 0x01) {
78
+ current_byte = *cur & bit_util::kPrecedingBitmask[start_bit_offset];
79
+ while (bit_mask != 0 && remaining > 0) {
80
+ current_byte |= g() * bit_mask;
81
+ bit_mask = static_cast<uint8_t>(bit_mask << 1);
82
+ --remaining;
83
+ }
84
+ *cur++ = current_byte;
85
+ }
86
+
87
+ int64_t remaining_bytes = remaining / 8;
88
+ uint8_t out_results[8];
89
+ while (remaining_bytes-- > 0) {
90
+ for (int i = 0; i < 8; ++i) {
91
+ out_results[i] = g();
92
+ }
93
+ *cur++ = static_cast<uint8_t>(out_results[0] | out_results[1] << 1 |
94
+ out_results[2] << 2 | out_results[3] << 3 |
95
+ out_results[4] << 4 | out_results[5] << 5 |
96
+ out_results[6] << 6 | out_results[7] << 7);
97
+ }
98
+
99
+ int64_t remaining_bits = remaining % 8;
100
+ if (remaining_bits) {
101
+ current_byte = 0;
102
+ bit_mask = 0x01;
103
+ while (remaining_bits-- > 0) {
104
+ current_byte |= g() * bit_mask;
105
+ bit_mask = static_cast<uint8_t>(bit_mask << 1);
106
+ }
107
+ *cur++ = current_byte;
108
+ }
109
+ }
110
+
111
+ } // namespace internal
112
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+
22
+ #include "arrow/util/bit_util.h"
23
+ #include "arrow/util/bitmap_reader.h"
24
+
25
+ namespace arrow {
26
+ namespace internal {
27
+
28
+ // A function that visits each bit in a bitmap and calls a visitor function with a
29
+ // boolean representation of that bit. This is intended to be analogous to
30
+ // GenerateBits.
31
+ template <class Visitor>
32
+ void VisitBits(const uint8_t* bitmap, int64_t start_offset, int64_t length,
33
+ Visitor&& visit) {
34
+ BitmapReader reader(bitmap, start_offset, length);
35
+ for (int64_t index = 0; index < length; ++index) {
36
+ visit(reader.IsSet());
37
+ reader.Next();
38
+ }
39
+ }
40
+
41
// Like VisitBits(), but unrolls its main loop for better performance.
// The three phases are: bits before the first full byte, the unrolled
// full-byte loop, and bits after the last full byte.
template <class Visitor>
void VisitBitsUnrolled(const uint8_t* bitmap, int64_t start_offset, int64_t length,
                       Visitor&& visit) {
  if (length == 0) {
    return;
  }

  // Start by visiting any bits preceding the first full byte.
  int64_t num_bits_before_full_bytes =
      bit_util::RoundUpToMultipleOf8(start_offset) - start_offset;
  // Truncate num_bits_before_full_bytes if it is greater than length.
  if (num_bits_before_full_bytes > length) {
    num_bits_before_full_bytes = length;
  }
  // Use the non loop-unrolled VisitBits since we don't want to add branches
  VisitBits<Visitor>(bitmap, start_offset, num_bits_before_full_bytes, visit);

  // Shift the start pointer to the first full byte and compute the
  // number of full bytes to be read.
  const uint8_t* first_full_byte = bitmap + bit_util::CeilDiv(start_offset, 8);
  const int64_t num_full_bytes = (length - num_bits_before_full_bytes) / 8;

  // Iterate over each full byte of the input bitmap and call the visitor in
  // a loop-unrolled manner.
  for (int64_t byte_index = 0; byte_index < num_full_bytes; ++byte_index) {
    // Get the current bit-packed byte value from the bitmap.
    const uint8_t byte = *(first_full_byte + byte_index);

    // Execute the visitor function on each bit of the current byte.
    visit(bit_util::GetBitFromByte(byte, 0));
    visit(bit_util::GetBitFromByte(byte, 1));
    visit(bit_util::GetBitFromByte(byte, 2));
    visit(bit_util::GetBitFromByte(byte, 3));
    visit(bit_util::GetBitFromByte(byte, 4));
    visit(bit_util::GetBitFromByte(byte, 5));
    visit(bit_util::GetBitFromByte(byte, 6));
    visit(bit_util::GetBitFromByte(byte, 7));
  }

  // Write any leftover bits in the last byte.
  const int64_t num_bits_after_full_bytes = (length - num_bits_before_full_bytes) % 8;
  VisitBits<Visitor>(first_full_byte + num_full_bytes, 0, num_bits_after_full_bytes,
                     visit);
}
86
+
87
+ } // namespace internal
88
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+ #include <array>
22
+ #include <bitset>
23
+ #include <cassert>
24
+ #include <cstdint>
25
+ #include <cstring>
26
+ #include <memory>
27
+ #include <string>
28
+ #include <string_view>
29
+ #include <type_traits>
30
+ #include <utility>
31
+ #include <vector>
32
+
33
+ #include "arrow/buffer.h"
34
+ #include "arrow/memory_pool.h"
35
+ #include "arrow/result.h"
36
+ #include "arrow/type_fwd.h"
37
+ #include "arrow/util/bit_util.h"
38
+ #include "arrow/util/compare.h"
39
+ #include "arrow/util/functional.h"
40
+ #include "arrow/util/macros.h"
41
+ #include "arrow/util/string_builder.h"
42
+ #include "arrow/util/type_traits.h"
43
+ #include "arrow/util/visibility.h"
44
+
45
+ namespace arrow {
46
+ namespace internal {
47
+
48
/// \brief Store a stack of bitsets efficiently. The top bitset may be
/// accessed and its bits may be modified, but it may not be resized.
///
/// All bitsets live contiguously in a single std::vector<bool>; offsets_
/// records where each pushed bitset begins.
class BitsetStack {
 public:
  using reference = typename std::vector<bool>::reference;

  /// \brief push a bitset onto the stack
  /// \param size number of bits in the next bitset
  /// \param value initial value for bits in the pushed bitset
  void Push(int size, bool value) {
    offsets_.push_back(bit_count());
    bits_.resize(bit_count() + size, value);
  }

  /// \brief number of bits in the bitset at the top of the stack
  /// (0 when the stack is empty)
  int TopSize() const {
    if (offsets_.size() == 0) return 0;
    return bit_count() - offsets_.back();
  }

  /// \brief pop a bitset off the stack
  /// \pre the stack is not empty
  void Pop() {
    // guard against silent UB: offsets_.back() on an empty vector
    assert(!offsets_.empty());
    bits_.resize(offsets_.back());
    offsets_.pop_back();
  }

  /// \brief get the value of a bit in the top bitset
  /// \param i index of the bit to access
  /// \pre the stack is not empty and 0 <= i < TopSize()
  bool operator[](int i) const {
    assert(!offsets_.empty());
    assert(i >= 0 && i < TopSize());
    return bits_[offsets_.back() + i];
  }

  /// \brief get a mutable reference to a bit in the top bitset
  /// \param i index of the bit to access
  /// \pre the stack is not empty and 0 <= i < TopSize()
  reference operator[](int i) {
    assert(!offsets_.empty());
    assert(i >= 0 && i < TopSize());
    return bits_[offsets_.back() + i];
  }

 private:
  int bit_count() const { return static_cast<int>(bits_.size()); }
  std::vector<bool> bits_;
  std::vector<int> offsets_;
};
87
+
88
+ } // namespace internal
89
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/util/endian.h"
21
+ #include "arrow/util/visibility.h"
22
+
23
+ #include <stdint.h>
24
+
25
+ namespace arrow {
26
+ namespace internal {
27
+
28
+ ARROW_EXPORT
29
+ int unpack32(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
30
+ ARROW_EXPORT
31
+ int unpack64(const uint8_t* in, uint64_t* out, int batch_size, int num_bits);
32
+
33
+ } // namespace internal
34
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <stdint.h>
21
+
22
+ namespace arrow {
23
+ namespace internal {
24
+
25
+ int unpack32_avx512(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
26
+
27
+ } // namespace internal
28
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <stdint.h>
21
+
22
+ namespace arrow {
23
+ namespace internal {
24
+
25
+ int unpack32_neon(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
26
+
27
+ } // namespace internal
28
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+
22
+ #include "arrow/type_fwd.h"
23
+
24
+ namespace arrow {
25
+
26
+ namespace util {
27
+
28
+ /// \brief The sum of bytes in each buffer referenced by the array
29
+ ///
30
+ /// Note: An array may only reference a portion of a buffer.
31
+ /// This method will overestimate in this case and return the
32
+ /// byte size of the entire buffer.
33
+ /// Note: If a buffer is referenced multiple times then it will
34
+ /// only be counted once.
35
+ ARROW_EXPORT int64_t TotalBufferSize(const ArrayData& array_data);
36
+ /// \brief The sum of bytes in each buffer referenced by the array
37
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
38
+ ARROW_EXPORT int64_t TotalBufferSize(const Array& array);
39
+ /// \brief The sum of bytes in each buffer referenced by the array
40
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
41
+ ARROW_EXPORT int64_t TotalBufferSize(const ChunkedArray& chunked_array);
42
+ /// \brief The sum of bytes in each buffer referenced by the batch
43
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
44
+ ARROW_EXPORT int64_t TotalBufferSize(const RecordBatch& record_batch);
45
+ /// \brief The sum of bytes in each buffer referenced by the table
46
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
47
+ ARROW_EXPORT int64_t TotalBufferSize(const Table& table);
48
+
49
+ /// \brief Calculate the buffer ranges referenced by the array
50
+ ///
51
+ /// These ranges will take into account array offsets
52
+ ///
53
+ /// The ranges may contain duplicates
54
+ ///
55
+ /// Dictionary arrays will ignore the offset of their containing array
56
+ ///
57
+ /// The return value will be a struct array corresponding to the schema:
58
+ /// schema({field("start", uint64()), field("offset", uint64()), field("length",
59
+ /// uint64()))
60
+ ARROW_EXPORT Result<std::shared_ptr<Array>> ReferencedRanges(const ArrayData& array_data);
61
+
62
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
63
+ ///
64
+ /// Unlike TotalBufferSize this method will account for array
65
+ /// offsets.
66
+ ///
67
+ /// If buffers are shared between arrays then the shared
68
+ /// portion will be counted multiple times.
69
+ ///
70
+ /// Dictionary arrays will always be counted in their entirety
71
+ /// even if the array only references a portion of the dictionary.
72
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const ArrayData& array_data);
73
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
74
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
75
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const Array& array_data);
76
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
77
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
78
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const ChunkedArray& array_data);
79
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
80
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
81
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const RecordBatch& array_data);
82
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
83
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
84
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const Table& array_data);
85
+
86
+ } // namespace util
87
+
88
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <limits>
22
+ #include <memory>
23
+ #include <optional>
24
+ #include <string>
25
+
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/util/type_fwd.h"
29
+ #include "arrow/util/visibility.h"
30
+
31
+ namespace arrow {
32
+ namespace util {
33
+
34
+ constexpr int kUseDefaultCompressionLevel = std::numeric_limits<int>::min();
35
+
36
+ /// \brief Streaming compressor interface
37
+ ///
38
+ class ARROW_EXPORT Compressor {
39
+ public:
40
+ virtual ~Compressor() = default;
41
+
42
+ struct CompressResult {
43
+ int64_t bytes_read;
44
+ int64_t bytes_written;
45
+ };
46
+ struct FlushResult {
47
+ int64_t bytes_written;
48
+ bool should_retry;
49
+ };
50
+ struct EndResult {
51
+ int64_t bytes_written;
52
+ bool should_retry;
53
+ };
54
+
55
+ /// \brief Compress some input.
56
+ ///
57
+ /// If bytes_read is 0 on return, then a larger output buffer should be supplied.
58
+ virtual Result<CompressResult> Compress(int64_t input_len, const uint8_t* input,
59
+ int64_t output_len, uint8_t* output) = 0;
60
+
61
+ /// \brief Flush part of the compressed output.
62
+ ///
63
+ /// If should_retry is true on return, Flush() should be called again
64
+ /// with a larger buffer.
65
+ virtual Result<FlushResult> Flush(int64_t output_len, uint8_t* output) = 0;
66
+
67
+ /// \brief End compressing, doing whatever is necessary to end the stream.
68
+ ///
69
+ /// If should_retry is true on return, End() should be called again
70
+ /// with a larger buffer. Otherwise, the Compressor should not be used anymore.
71
+ ///
72
+ /// End() implies Flush().
73
+ virtual Result<EndResult> End(int64_t output_len, uint8_t* output) = 0;
74
+
75
+ // XXX add methods for buffer size heuristics?
76
+ };
77
+
78
+ /// \brief Streaming decompressor interface
79
+ ///
80
+ class ARROW_EXPORT Decompressor {
81
+ public:
82
+ virtual ~Decompressor() = default;
83
+
84
+ struct DecompressResult {
85
+ // XXX is need_more_output necessary? (Brotli?)
86
+ int64_t bytes_read;
87
+ int64_t bytes_written;
88
+ bool need_more_output;
89
+ };
90
+
91
+ /// \brief Decompress some input.
92
+ ///
93
+ /// If need_more_output is true on return, a larger output buffer needs
94
+ /// to be supplied.
95
+ virtual Result<DecompressResult> Decompress(int64_t input_len, const uint8_t* input,
96
+ int64_t output_len, uint8_t* output) = 0;
97
+
98
+ /// \brief Return whether the compressed stream is finished.
99
+ ///
100
+ /// This is a heuristic. If true is returned, then it is guaranteed
101
+ /// that the stream is finished. If false is returned, however, it may
102
+ /// simply be that the underlying library isn't able to provide the information.
103
+ virtual bool IsFinished() = 0;
104
+
105
+ /// \brief Reinitialize decompressor, making it ready for a new compressed stream.
106
+ virtual Status Reset() = 0;
107
+
108
+ // XXX add methods for buffer size heuristics?
109
+ };
110
+
111
+ /// \brief Compression codec options
112
+ class ARROW_EXPORT CodecOptions {
113
+ public:
114
+ explicit CodecOptions(int compression_level = kUseDefaultCompressionLevel)
115
+ : compression_level(compression_level) {}
116
+
117
+ virtual ~CodecOptions() = default;
118
+
119
+ int compression_level;
120
+ };
121
+
122
+ // ----------------------------------------------------------------------
123
+ // GZip codec options implementation
124
+
125
+ enum class GZipFormat {
126
+ ZLIB,
127
+ DEFLATE,
128
+ GZIP,
129
+ };
130
+
131
+ class ARROW_EXPORT GZipCodecOptions : public CodecOptions {
132
+ public:
133
+ GZipFormat gzip_format = GZipFormat::GZIP;
134
+ std::optional<int> window_bits;
135
+ };
136
+
137
+ // ----------------------------------------------------------------------
138
+ // brotli codec options implementation
139
+
140
+ class ARROW_EXPORT BrotliCodecOptions : public CodecOptions {
141
+ public:
142
+ std::optional<int> window_bits;
143
+ };
144
+
145
+ /// \brief Compression codec
146
+ class ARROW_EXPORT Codec {
147
+ public:
148
+ virtual ~Codec() = default;
149
+
150
+ /// \brief Return special value to indicate that a codec implementation
151
+ /// should use its default compression level
152
+ static int UseDefaultCompressionLevel();
153
+
154
+ /// \brief Return a string name for compression type
155
+ static const std::string& GetCodecAsString(Compression::type t);
156
+
157
+ /// \brief Return compression type for name (all lower case)
158
+ static Result<Compression::type> GetCompressionType(const std::string& name);
159
+
160
+ /// \brief Create a codec for the given compression algorithm with CodecOptions
161
+ static Result<std::unique_ptr<Codec>> Create(
162
+ Compression::type codec, const CodecOptions& codec_options = CodecOptions{});
163
+
164
+ /// \brief Create a codec for the given compression algorithm
165
+ static Result<std::unique_ptr<Codec>> Create(Compression::type codec,
166
+ int compression_level);
167
+
168
+ /// \brief Return true if support for indicated codec has been enabled
169
+ static bool IsAvailable(Compression::type codec);
170
+
171
+ /// \brief Return true if indicated codec supports setting a compression level
172
+ static bool SupportsCompressionLevel(Compression::type codec);
173
+
174
+ /// \brief Return the smallest supported compression level for the codec
175
+ /// Note: This function creates a temporary Codec instance
176
+ static Result<int> MinimumCompressionLevel(Compression::type codec);
177
+
178
+ /// \brief Return the largest supported compression level for the codec
179
+ /// Note: This function creates a temporary Codec instance
180
+ static Result<int> MaximumCompressionLevel(Compression::type codec);
181
+
182
+ /// \brief Return the default compression level
183
+ /// Note: This function creates a temporary Codec instance
184
+ static Result<int> DefaultCompressionLevel(Compression::type codec);
185
+
186
+ /// \brief Return the smallest supported compression level
187
+ virtual int minimum_compression_level() const = 0;
188
+
189
+ /// \brief Return the largest supported compression level
190
+ virtual int maximum_compression_level() const = 0;
191
+
192
+ /// \brief Return the default compression level
193
+ virtual int default_compression_level() const = 0;
194
+
195
+ /// \brief One-shot decompression function
196
+ ///
197
+ /// output_buffer_len must be correct and therefore be obtained in advance.
198
+ /// The actual decompressed length is returned.
199
+ ///
200
+ /// \note One-shot decompression is not always compatible with streaming
201
+ /// compression. Depending on the codec (e.g. LZ4), different formats may
202
+ /// be used.
203
+ virtual Result<int64_t> Decompress(int64_t input_len, const uint8_t* input,
204
+ int64_t output_buffer_len,
205
+ uint8_t* output_buffer) = 0;
206
+
207
+ /// \brief One-shot compression function
208
+ ///
209
+ /// output_buffer_len must first have been computed using MaxCompressedLen().
210
+ /// The actual compressed length is returned.
211
+ ///
212
+ /// \note One-shot compression is not always compatible with streaming
213
+ /// decompression. Depending on the codec (e.g. LZ4), different formats may
214
+ /// be used.
215
+ virtual Result<int64_t> Compress(int64_t input_len, const uint8_t* input,
216
+ int64_t output_buffer_len, uint8_t* output_buffer) = 0;
217
+
218
+ virtual int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) = 0;
219
+
220
+ /// \brief Create a streaming compressor instance
221
+ virtual Result<std::shared_ptr<Compressor>> MakeCompressor() = 0;
222
+
223
+ /// \brief Create a streaming compressor instance
224
+ virtual Result<std::shared_ptr<Decompressor>> MakeDecompressor() = 0;
225
+
226
+ /// \brief This Codec's compression type
227
+ virtual Compression::type compression_type() const = 0;
228
+
229
+ /// \brief The name of this Codec's compression type
230
+ const std::string& name() const { return GetCodecAsString(compression_type()); }
231
+
232
+ /// \brief This Codec's compression level, if applicable
233
+ virtual int compression_level() const { return UseDefaultCompressionLevel(); }
234
+
235
+ private:
236
+ /// \brief Initializes the codec's resources.
237
+ virtual Status Init();
238
+ };
239
+
240
+ } // namespace util
241
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h ADDED
@@ -0,0 +1,411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include <memory>
19
+ #include <string>
20
+ #include <utility>
21
+ #include <vector>
22
+
23
+ #include "arrow/array.h"
24
+ #include "arrow/chunked_array.h"
25
+ #include "arrow/status.h"
26
+ #include "arrow/type.h"
27
+ #include "arrow/type_traits.h"
28
+ #include "arrow/util/checked_cast.h"
29
+ #include "arrow/visit_type_inline.h"
30
+
31
+ namespace arrow {
32
+ namespace internal {
33
+
34
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
35
+ static Result<std::unique_ptr<BaseConverter>> MakeConverter(
36
+ std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options,
37
+ MemoryPool* pool);
38
+
39
+ template <typename Input, typename Options>
40
+ class Converter {
41
+ public:
42
+ using Self = Converter<Input, Options>;
43
+ using InputType = Input;
44
+ using OptionsType = Options;
45
+
46
+ virtual ~Converter() = default;
47
+
48
+ Status Construct(std::shared_ptr<DataType> type, OptionsType options,
49
+ MemoryPool* pool) {
50
+ type_ = std::move(type);
51
+ options_ = std::move(options);
52
+ return Init(pool);
53
+ }
54
+
55
+ virtual Status Append(InputType value) { return Status::NotImplemented("Append"); }
56
+
57
+ virtual Status Extend(InputType values, int64_t size, int64_t offset = 0) {
58
+ return Status::NotImplemented("Extend");
59
+ }
60
+
61
+ virtual Status ExtendMasked(InputType values, InputType mask, int64_t size,
62
+ int64_t offset = 0) {
63
+ return Status::NotImplemented("ExtendMasked");
64
+ }
65
+
66
+ const std::shared_ptr<ArrayBuilder>& builder() const { return builder_; }
67
+
68
+ const std::shared_ptr<DataType>& type() const { return type_; }
69
+
70
+ OptionsType options() const { return options_; }
71
+
72
+ bool may_overflow() const { return may_overflow_; }
73
+
74
+ bool rewind_on_overflow() const { return rewind_on_overflow_; }
75
+
76
+ virtual Status Reserve(int64_t additional_capacity) {
77
+ return builder_->Reserve(additional_capacity);
78
+ }
79
+
80
+ Status AppendNull() { return builder_->AppendNull(); }
81
+
82
+ virtual Result<std::shared_ptr<Array>> ToArray() { return builder_->Finish(); }
83
+
84
+ virtual Result<std::shared_ptr<Array>> ToArray(int64_t length) {
85
+ ARROW_ASSIGN_OR_RAISE(auto arr, this->ToArray());
86
+ return arr->Slice(0, length);
87
+ }
88
+
89
+ virtual Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() {
90
+ ARROW_ASSIGN_OR_RAISE(auto array, ToArray());
91
+ std::vector<std::shared_ptr<Array>> chunks = {std::move(array)};
92
+ return std::make_shared<ChunkedArray>(chunks);
93
+ }
94
+
95
+ protected:
96
+ virtual Status Init(MemoryPool* pool) { return Status::OK(); }
97
+
98
+ std::shared_ptr<DataType> type_;
99
+ std::shared_ptr<ArrayBuilder> builder_;
100
+ OptionsType options_;
101
+ bool may_overflow_ = false;
102
+ bool rewind_on_overflow_ = false;
103
+ };
104
+
105
+ template <typename ArrowType, typename BaseConverter>
106
+ class PrimitiveConverter : public BaseConverter {
107
+ public:
108
+ using BuilderType = typename TypeTraits<ArrowType>::BuilderType;
109
+
110
+ protected:
111
+ Status Init(MemoryPool* pool) override {
112
+ this->builder_ = std::make_shared<BuilderType>(this->type_, pool);
113
+ // Narrow variable-sized binary types may overflow
114
+ this->may_overflow_ = is_binary_like(this->type_->id());
115
+ primitive_type_ = checked_cast<const ArrowType*>(this->type_.get());
116
+ primitive_builder_ = checked_cast<BuilderType*>(this->builder_.get());
117
+ return Status::OK();
118
+ }
119
+
120
+ const ArrowType* primitive_type_;
121
+ BuilderType* primitive_builder_;
122
+ };
123
+
124
+ template <typename ArrowType, typename BaseConverter,
125
+ template <typename...> class ConverterTrait>
126
+ class ListConverter : public BaseConverter {
127
+ public:
128
+ using BuilderType = typename TypeTraits<ArrowType>::BuilderType;
129
+ using ConverterType = typename ConverterTrait<ArrowType>::type;
130
+
131
+ protected:
132
+ Status Init(MemoryPool* pool) override {
133
+ list_type_ = checked_cast<const ArrowType*>(this->type_.get());
134
+ ARROW_ASSIGN_OR_RAISE(value_converter_,
135
+ (MakeConverter<BaseConverter, ConverterTrait>(
136
+ list_type_->value_type(), this->options_, pool)));
137
+ this->builder_ =
138
+ std::make_shared<BuilderType>(pool, value_converter_->builder(), this->type_);
139
+ list_builder_ = checked_cast<BuilderType*>(this->builder_.get());
140
+ // Narrow list types may overflow
141
+ this->may_overflow_ = this->rewind_on_overflow_ =
142
+ sizeof(typename ArrowType::offset_type) < sizeof(int64_t);
143
+ return Status::OK();
144
+ }
145
+
146
+ const ArrowType* list_type_;
147
+ BuilderType* list_builder_;
148
+ std::unique_ptr<BaseConverter> value_converter_;
149
+ };
150
+
151
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
152
+ class StructConverter : public BaseConverter {
153
+ public:
154
+ using ConverterType = typename ConverterTrait<StructType>::type;
155
+
156
+ Status Reserve(int64_t additional_capacity) override {
157
+ ARROW_RETURN_NOT_OK(this->builder_->Reserve(additional_capacity));
158
+ for (const auto& child : children_) {
159
+ ARROW_RETURN_NOT_OK(child->Reserve(additional_capacity));
160
+ }
161
+ return Status::OK();
162
+ }
163
+
164
+ protected:
165
+ Status Init(MemoryPool* pool) override {
166
+ std::unique_ptr<BaseConverter> child_converter;
167
+ std::vector<std::shared_ptr<ArrayBuilder>> child_builders;
168
+
169
+ struct_type_ = checked_cast<const StructType*>(this->type_.get());
170
+ for (const auto& field : struct_type_->fields()) {
171
+ ARROW_ASSIGN_OR_RAISE(child_converter,
172
+ (MakeConverter<BaseConverter, ConverterTrait>(
173
+ field->type(), this->options_, pool)));
174
+ this->may_overflow_ |= child_converter->may_overflow();
175
+ this->rewind_on_overflow_ = this->may_overflow_;
176
+ child_builders.push_back(child_converter->builder());
177
+ children_.push_back(std::move(child_converter));
178
+ }
179
+
180
+ this->builder_ =
181
+ std::make_shared<StructBuilder>(this->type_, pool, std::move(child_builders));
182
+ struct_builder_ = checked_cast<StructBuilder*>(this->builder_.get());
183
+
184
+ return Status::OK();
185
+ }
186
+
187
+ const StructType* struct_type_;
188
+ StructBuilder* struct_builder_;
189
+ std::vector<std::unique_ptr<BaseConverter>> children_;
190
+ };
191
+
192
+ template <typename ValueType, typename BaseConverter>
193
+ class DictionaryConverter : public BaseConverter {
194
+ public:
195
+ using BuilderType = DictionaryBuilder<ValueType>;
196
+
197
+ protected:
198
+ Status Init(MemoryPool* pool) override {
199
+ std::unique_ptr<ArrayBuilder> builder;
200
+ ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, this->type_, NULLPTR, &builder));
201
+ this->builder_ = std::move(builder);
202
+ this->may_overflow_ = false;
203
+ dict_type_ = checked_cast<const DictionaryType*>(this->type_.get());
204
+ value_type_ = checked_cast<const ValueType*>(dict_type_->value_type().get());
205
+ value_builder_ = checked_cast<BuilderType*>(this->builder_.get());
206
+ return Status::OK();
207
+ }
208
+
209
+ const DictionaryType* dict_type_;
210
+ const ValueType* value_type_;
211
+ BuilderType* value_builder_;
212
+ };
213
+
214
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
215
+ struct MakeConverterImpl {
216
+ template <typename T, typename ConverterType = typename ConverterTrait<T>::type>
217
+ Status Visit(const T&) {
218
+ out.reset(new ConverterType());
219
+ return out->Construct(std::move(type), std::move(options), pool);
220
+ }
221
+
222
+ Status Visit(const DictionaryType& t) {
223
+ switch (t.value_type()->id()) {
224
+ #define DICTIONARY_CASE(TYPE) \
225
+ case TYPE::type_id: \
226
+ out = std::make_unique< \
227
+ typename ConverterTrait<DictionaryType>::template dictionary_type<TYPE>>(); \
228
+ break;
229
+ DICTIONARY_CASE(BooleanType);
230
+ DICTIONARY_CASE(Int8Type);
231
+ DICTIONARY_CASE(Int16Type);
232
+ DICTIONARY_CASE(Int32Type);
233
+ DICTIONARY_CASE(Int64Type);
234
+ DICTIONARY_CASE(UInt8Type);
235
+ DICTIONARY_CASE(UInt16Type);
236
+ DICTIONARY_CASE(UInt32Type);
237
+ DICTIONARY_CASE(UInt64Type);
238
+ DICTIONARY_CASE(FloatType);
239
+ DICTIONARY_CASE(DoubleType);
240
+ DICTIONARY_CASE(BinaryType);
241
+ DICTIONARY_CASE(StringType);
242
+ DICTIONARY_CASE(FixedSizeBinaryType);
243
+ #undef DICTIONARY_CASE
244
+ default:
245
+ return Status::NotImplemented("DictionaryArray converter for type ", t.ToString(),
246
+ " not implemented");
247
+ }
248
+ return out->Construct(std::move(type), std::move(options), pool);
249
+ }
250
+
251
+ Status Visit(const DataType& t) { return Status::NotImplemented(t.name()); }
252
+
253
+ std::shared_ptr<DataType> type;
254
+ typename BaseConverter::OptionsType options;
255
+ MemoryPool* pool;
256
+ std::unique_ptr<BaseConverter> out;
257
+ };
258
+
259
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
260
+ static Result<std::unique_ptr<BaseConverter>> MakeConverter(
261
+ std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options,
262
+ MemoryPool* pool) {
263
+ MakeConverterImpl<BaseConverter, ConverterTrait> visitor{
264
+ std::move(type), std::move(options), pool, NULLPTR};
265
+ ARROW_RETURN_NOT_OK(VisitTypeInline(*visitor.type, &visitor));
266
+ return std::move(visitor.out);
267
+ }
268
+
269
+ template <typename Converter>
270
+ class Chunker {
271
+ public:
272
+ using InputType = typename Converter::InputType;
273
+
274
+ explicit Chunker(std::unique_ptr<Converter> converter)
275
+ : converter_(std::move(converter)) {}
276
+
277
+ Status Reserve(int64_t additional_capacity) {
278
+ ARROW_RETURN_NOT_OK(converter_->Reserve(additional_capacity));
279
+ reserved_ += additional_capacity;
280
+ return Status::OK();
281
+ }
282
+
283
+ Status AppendNull() {
284
+ auto status = converter_->AppendNull();
285
+ if (ARROW_PREDICT_FALSE(status.IsCapacityError())) {
286
+ if (converter_->builder()->length() == 0) {
287
+ // Builder length == 0 means the individual element is too large to append.
288
+ // In this case, no need to try again.
289
+ return status;
290
+ }
291
+ ARROW_RETURN_NOT_OK(FinishChunk());
292
+ return converter_->AppendNull();
293
+ }
294
+ ++length_;
295
+ return status;
296
+ }
297
+
298
+ Status Append(InputType value) {
299
+ auto status = converter_->Append(value);
300
+ if (ARROW_PREDICT_FALSE(status.IsCapacityError())) {
301
+ if (converter_->builder()->length() == 0) {
302
+ return status;
303
+ }
304
+ ARROW_RETURN_NOT_OK(FinishChunk());
305
+ return Append(value);
306
+ }
307
+ ++length_;
308
+ return status;
309
+ }
310
+
311
+ Status Extend(InputType values, int64_t size, int64_t offset = 0) {
312
+ while (offset < size) {
313
+ auto length_before = converter_->builder()->length();
314
+ auto status = converter_->Extend(values, size, offset);
315
+ auto length_after = converter_->builder()->length();
316
+ auto num_converted = length_after - length_before;
317
+
318
+ offset += num_converted;
319
+ length_ += num_converted;
320
+
321
+ if (status.IsCapacityError()) {
322
+ if (converter_->builder()->length() == 0) {
323
+ // Builder length == 0 means the individual element is too large to append.
324
+ // In this case, no need to try again.
325
+ return status;
326
+ } else if (converter_->rewind_on_overflow()) {
327
+ // The list-like and binary-like conversion paths may raise a capacity error,
328
+ // we need to handle them differently. While the binary-like converters check
329
+ // the capacity before append/extend the list-like converters just check after
330
+ // append/extend. Thus depending on the implementation semantics we may need
331
+ // to rewind (slice) the output chunk by one.
332
+ length_ -= 1;
333
+ offset -= 1;
334
+ }
335
+ ARROW_RETURN_NOT_OK(FinishChunk());
336
+ } else if (!status.ok()) {
337
+ return status;
338
+ }
339
+ }
340
+ return Status::OK();
341
+ }
342
+
343
+ Status ExtendMasked(InputType values, InputType mask, int64_t size,
344
+ int64_t offset = 0) {
345
+ while (offset < size) {
346
+ auto length_before = converter_->builder()->length();
347
+ auto status = converter_->ExtendMasked(values, mask, size, offset);
348
+ auto length_after = converter_->builder()->length();
349
+ auto num_converted = length_after - length_before;
350
+
351
+ offset += num_converted;
352
+ length_ += num_converted;
353
+
354
+ if (status.IsCapacityError()) {
355
+ if (converter_->builder()->length() == 0) {
356
+ // Builder length == 0 means the individual element is too large to append.
357
+ // In this case, no need to try again.
358
+ return status;
359
+ } else if (converter_->rewind_on_overflow()) {
360
+ // The list-like and binary-like conversion paths may raise a capacity error,
361
+ // we need to handle them differently. While the binary-like converters check
362
+ // the capacity before append/extend the list-like converters just check after
363
+ // append/extend. Thus depending on the implementation semantics we may need
364
+ // to rewind (slice) the output chunk by one.
365
+ length_ -= 1;
366
+ offset -= 1;
367
+ }
368
+ ARROW_RETURN_NOT_OK(FinishChunk());
369
+ } else if (!status.ok()) {
370
+ return status;
371
+ }
372
+ }
373
+ return Status::OK();
374
+ }
375
+
376
+ Status FinishChunk() {
377
+ ARROW_ASSIGN_OR_RAISE(auto chunk, converter_->ToArray(length_));
378
+ chunks_.push_back(chunk);
379
+ // Reserve space for the remaining items.
380
+ // Besides being an optimization, it is also required if the converter's
381
+ // implementation relies on unsafe builder methods in converter->Append().
382
+ auto remaining = reserved_ - length_;
383
+ Reset();
384
+ return Reserve(remaining);
385
+ }
386
+
387
+ Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() {
388
+ ARROW_RETURN_NOT_OK(FinishChunk());
389
+ return std::make_shared<ChunkedArray>(chunks_);
390
+ }
391
+
392
+ protected:
393
+ void Reset() {
394
+ converter_->builder()->Reset();
395
+ length_ = 0;
396
+ reserved_ = 0;
397
+ }
398
+
399
+ int64_t length_ = 0;
400
+ int64_t reserved_ = 0;
401
+ std::unique_ptr<Converter> converter_;
402
+ std::vector<std::shared_ptr<Array>> chunks_;
403
+ };
404
+
405
+ template <typename T>
406
+ static Result<std::unique_ptr<Chunker<T>>> MakeChunker(std::unique_ptr<T> converter) {
407
+ return std::make_unique<Chunker<T>>(std::move(converter));
408
+ }
409
+
410
+ } // namespace internal
411
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #ifndef ARROW_COUNTING_SEMAPHORE_H
19
+ #define ARROW_COUNTING_SEMAPHORE_H
20
+
21
+ #include <memory>
22
+
23
+ #include "arrow/status.h"
24
+
25
+ namespace arrow {
26
+ namespace util {
27
+
28
+ /// \brief Simple mutex-based counting semaphore with timeout
29
+ class ARROW_EXPORT CountingSemaphore {
30
+ public:
31
+ /// \brief Create an instance with initial_avail starting permits
32
+ ///
33
+ /// \param[in] initial_avail The semaphore will start with this many permits available
34
+ /// \param[in] timeout_seconds A timeout to be applied to all operations. Operations
35
+ /// will return Status::Invalid if this timeout elapses
36
+ explicit CountingSemaphore(uint32_t initial_avail = 0, double timeout_seconds = 10);
37
+ ~CountingSemaphore();
38
+ /// \brief Block until num_permits permits are available
39
+ Status Acquire(uint32_t num_permits);
40
+ /// \brief Make num_permits permits available
41
+ Status Release(uint32_t num_permits);
42
+ /// \brief Wait until num_waiters are waiting on permits
43
+ ///
44
+ /// This method is non-standard but useful in unit tests to ensure sequencing
45
+ Status WaitForWaiters(uint32_t num_waiters);
46
+ /// \brief Immediately time out any waiters
47
+ ///
48
+ /// This method will return Status::OK only if there were no waiters to time out.
49
+ /// Once closed any operation on this instance will return an invalid status.
50
+ Status Close();
51
+
52
+ private:
53
+ class Impl;
54
+ std::unique_ptr<Impl> impl_;
55
+ };
56
+
57
+ } // namespace util
58
+ } // namespace arrow
59
+
60
+ #endif // ARROW_COUNTING_SEMAPHORE_H
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // From Apache Impala (incubating) as of 2016-01-29. Pared down to a minimal
19
+ // set of functions needed for Apache Arrow / Apache parquet-cpp
20
+
21
+ #pragma once
22
+
23
+ #include <cstdint>
24
+ #include <memory>
25
+ #include <string>
26
+
27
+ #include "arrow/util/macros.h"
28
+ #include "arrow/util/visibility.h"
29
+
30
+ namespace arrow {
31
+ namespace internal {
32
+
33
+ /// CpuInfo is an interface to query for cpu information at runtime. The caller can
34
+ /// ask for the sizes of the caches and what hardware features are supported.
35
+ /// On Linux, this information is pulled from a couple of sys files (/proc/cpuinfo and
36
+ /// /sys/devices)
37
+ class ARROW_EXPORT CpuInfo {
38
+ public:
39
+ ~CpuInfo();
40
+
41
+ /// x86 features
42
+ static constexpr int64_t SSSE3 = (1LL << 0);
43
+ static constexpr int64_t SSE4_1 = (1LL << 1);
44
+ static constexpr int64_t SSE4_2 = (1LL << 2);
45
+ static constexpr int64_t POPCNT = (1LL << 3);
46
+ static constexpr int64_t AVX = (1LL << 4);
47
+ static constexpr int64_t AVX2 = (1LL << 5);
48
+ static constexpr int64_t AVX512F = (1LL << 6);
49
+ static constexpr int64_t AVX512CD = (1LL << 7);
50
+ static constexpr int64_t AVX512VL = (1LL << 8);
51
+ static constexpr int64_t AVX512DQ = (1LL << 9);
52
+ static constexpr int64_t AVX512BW = (1LL << 10);
53
+ static constexpr int64_t AVX512 = AVX512F | AVX512CD | AVX512VL | AVX512DQ | AVX512BW;
54
+ static constexpr int64_t BMI1 = (1LL << 11);
55
+ static constexpr int64_t BMI2 = (1LL << 12);
56
+
57
+ /// Arm features
58
+ static constexpr int64_t ASIMD = (1LL << 32);
59
+
60
+ /// Cache enums for L1 (data), L2 and L3
61
+ enum class CacheLevel { L1 = 0, L2, L3, Last = L3 };
62
+
63
+ /// CPU vendors
64
+ enum class Vendor { Unknown, Intel, AMD };
65
+
66
+ static const CpuInfo* GetInstance();
67
+
68
+ /// Returns all the flags for this cpu
69
+ int64_t hardware_flags() const;
70
+
71
+ /// Returns the number of cores (including hyper-threaded) on this machine.
72
+ int num_cores() const;
73
+
74
+ /// Returns the vendor of the cpu.
75
+ Vendor vendor() const;
76
+
77
+ /// Returns the model name of the cpu (e.g. Intel i7-2600)
78
+ const std::string& model_name() const;
79
+
80
+ /// Returns the size of the cache in KB at this cache level
81
+ int64_t CacheSize(CacheLevel level) const;
82
+
83
+ /// \brief Returns whether or not the given feature is enabled.
84
+ ///
85
+ /// IsSupported() is true iff IsDetected() is also true and the feature
86
+ /// wasn't disabled by the user (for example by setting the ARROW_USER_SIMD_LEVEL
87
+ /// environment variable).
88
+ bool IsSupported(int64_t flags) const;
89
+
90
+ /// Returns whether or not the given feature is available on the CPU.
91
+ bool IsDetected(int64_t flags) const;
92
+
93
+ /// Determine if the CPU meets the minimum CPU requirements and if not, issue an error
94
+ /// and terminate.
95
+ void VerifyCpuRequirements() const;
96
+
97
+ /// Toggle a hardware feature on and off. It is not valid to turn on a feature
98
+ /// that the underlying hardware cannot support. This is useful for testing.
99
+ void EnableFeature(int64_t flag, bool enable);
100
+
101
+ bool HasEfficientBmi2() const {
102
+ // BMI2 (pext, pdep) is only efficient on Intel X86 processors.
103
+ return vendor() == Vendor::Intel && IsSupported(BMI2);
104
+ }
105
+
106
+ private:
107
+ CpuInfo();
108
+
109
+ struct Impl;
110
+ std::unique_ptr<Impl> impl_;
111
+ };
112
+
113
+ } // namespace internal
114
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h ADDED
@@ -0,0 +1,535 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <iosfwd>
22
+ #include <limits>
23
+ #include <string>
24
+ #include <string_view>
25
+ #include <utility>
26
+
27
+ #include "arrow/result.h"
28
+ #include "arrow/status.h"
29
+ #include "arrow/type_fwd.h"
30
+ #include "arrow/util/basic_decimal.h"
31
+
32
+ namespace arrow {
33
+
34
+ class Decimal64;
35
+
36
+ /// Represents a signed 32-bit decimal value in two's complement.
37
+ /// Calulations wrap around and overflow is ignored.
38
+ /// The max decimal precision that can be safely represented is
39
+ /// 9 significant digits.
40
+ ///
41
+ /// The implementation is split into two parts :
42
+ ///
43
+ /// 1. BasicDecimal32
44
+ /// - can be safely compiled to IR without references to libstdc++
45
+ /// 2. Decimal32
46
+ /// - has additional functionality on top of BasicDecimal32 to deal with
47
+ /// strings and streams
48
+ class ARROW_EXPORT Decimal32 : public BasicDecimal32 {
49
+ public:
50
+ /// \cond FALSE
51
+ // (need to avoid a duplicate definition in sphinx)
52
+ using BasicDecimal32::BasicDecimal32;
53
+ /// \endcond
54
+
55
+ /// \brief constructor creates a Decimal32 from a BasicDecimal32
56
+ constexpr Decimal32(const BasicDecimal32& value) noexcept // NOLINT runtime/explicit
57
+ : BasicDecimal32(value) {}
58
+
59
+ /// \brief Parse the number from a base 10 string representation
60
+ explicit Decimal32(const std::string& value);
61
+
62
+ /// \brief Empty constructor creates a Decimal32 with a value of 0
63
+ /// this is required for some older compilers
64
+ constexpr Decimal32() noexcept : BasicDecimal32() {}
65
+
66
+ /// \brief Divide this number by right and return the result.
67
+ ///
68
+ /// This operation is not destructive.
69
+ /// The answer rounds to zero. Signs work like:
70
+ /// 21 / 5 -> 4, 1
71
+ /// -21 / 5 -> -4, -1
72
+ /// 21 / -5 -> -4, 1
73
+ /// -21 / -5 -> 4, -1
74
+ /// \param[in] divisor the number to divide by
75
+ /// \return the pair of the quotient and the remainder
76
+ Result<std::pair<Decimal32, Decimal32>> Divide(const Decimal32& divisor) const {
77
+ std::pair<Decimal32, Decimal32> result;
78
+ auto dstatus = BasicDecimal32::Divide(divisor, &result.first, &result.second);
79
+ ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus));
80
+ return result;
81
+ }
82
+
83
+ /// \brief Convert the Decimal32 value to a base 10 decimal string with the given scale
84
+ std::string ToString(int32_t scale) const;
85
+
86
+ /// \brief Convert the value to an integer string
87
+ std::string ToIntegerString() const;
88
+
89
+ /// \brief Cast this value to an int64_t
90
+ explicit operator int64_t() const;
91
+
92
+ explicit operator Decimal64() const;
93
+
94
+ /// \brief Convert a decimal string to a Decimal value, optionally including
95
+ /// precision and scale if they're passed in and not null.
96
+ static Status FromString(std::string_view s, Decimal32* out, int32_t* precision,
97
+ int32_t* scale = NULLPTR);
98
+ static Status FromString(const std::string& s, Decimal32* out, int32_t* precision,
99
+ int32_t* scale = NULLPTR);
100
+ static Status FromString(const char* s, Decimal32* out, int32_t* precision,
101
+ int32_t* scale = NULLPTR);
102
+ static Result<Decimal32> FromString(std::string_view s);
103
+ static Result<Decimal32> FromString(const std::string& s);
104
+ static Result<Decimal32> FromString(const char* s);
105
+
106
+ static Result<Decimal32> FromReal(double real, int32_t precision, int32_t scale);
107
+ static Result<Decimal32> FromReal(float real, int32_t precision, int32_t scale);
108
+
109
+ /// \brief Convert from a big-endian byte representation. The length must be
110
+ /// between 1 and 4
111
+ /// \return error statis if the length is an invalid value
112
+ static Result<Decimal32> FromBigEndian(const uint8_t* data, int32_t length);
113
+
114
+ /// \brief Convert Decimal32 from one scale to another
115
+ Result<Decimal32> Rescale(int32_t original_scale, int32_t new_scale) const {
116
+ Decimal32 out;
117
+ auto dstatus = BasicDecimal32::Rescale(original_scale, new_scale, &out);
118
+ ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus));
119
+ return out;
120
+ }
121
+
122
+ /// \brief Convert to a signed integer
123
+ template <typename T, typename = internal::EnableIfIsOneOf<T, int32_t, int64_t>>
124
+ Result<T> ToInteger() const {
125
+ return static_cast<T>(value_);
126
+ }
127
+
128
+ /// \brief Convert to a signed integer
129
+ template <typename T, typename = internal::EnableIfIsOneOf<T, int32_t, int64_t>>
130
+ Status ToInteger(T* out) const {
131
+ return ToInteger<T>().Value(out);
132
+ }
133
+
134
+ /// \brief Convert to a floating-point number (scaled)
135
+ float ToFloat(int32_t scale) const;
136
+ /// \brief Convert to a floating-point number (scaled)
137
+ double ToDouble(int32_t scale) const;
138
+
139
+ /// \brief Convert to a floating-point number (scaled)
140
+ template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
141
+ T ToReal(int32_t scale) const {
142
+ static_assert(std::is_same_v<T, float> || std::is_same_v<T, double>,
143
+ "Unexpected floating-point type");
144
+ if constexpr (std::is_same_v<T, float>) {
145
+ return ToFloat(scale);
146
+ } else {
147
+ return ToDouble(scale);
148
+ }
149
+ }
150
+
151
+ ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os,
152
+ const Decimal32& decimal);
153
+
154
+ private:
155
+ /// Converts internal error code to Status
156
+ Status ToArrowStatus(DecimalStatus dstatus) const;
157
+ };
158
+
159
+ class ARROW_EXPORT Decimal64 : public BasicDecimal64 {
160
+ public:
161
+ /// \cond FALSE
162
+ // (need to avoid a duplicate definition in sphinx)
163
+ using BasicDecimal64::BasicDecimal64;
164
+ /// \endcond
165
+
166
+ /// \brief constructor creates a Decimal64 from a BasicDecimal64
167
+ constexpr Decimal64(const BasicDecimal64& value) noexcept // NOLINT runtime/explicit
168
+ : BasicDecimal64(value) {}
169
+
170
+ explicit Decimal64(const BasicDecimal32& value) noexcept
171
+ : BasicDecimal64(static_cast<int64_t>(value.value())) {}
172
+
173
+ /// \brief Parse the number from a base 10 string representation
174
+ explicit Decimal64(const std::string& value);
175
+
176
+ /// \brief Empty constructor creates a Decimal64 with a value of 0
177
+ /// this is required for some older compilers
178
+ constexpr Decimal64() noexcept : BasicDecimal64() {}
179
+
180
+ /// \brief Divide this number by right and return the result.
181
+ ///
182
+ /// This operation is not destructive.
183
+ /// The answer rounds to zero. Signs work like:
184
+ /// 21 / 5 -> 4, 1
185
+ /// -21 / 5 -> -4, -1
186
+ /// 21 / -5 -> -4, 1
187
+ /// -21 / -5 -> 4, -1
188
+ /// \param[in] divisor the number to divide by
189
+ /// \return the pair of the quotient and the remainder
190
+ Result<std::pair<Decimal64, Decimal64>> Divide(const Decimal64& divisor) const {
191
+ std::pair<Decimal64, Decimal64> result;
192
+ auto dstatus = BasicDecimal64::Divide(divisor, &result.first, &result.second);
193
+ ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus));
194
+ return result;
195
+ }
196
+
197
+ /// \brief Convert the Decimal64 value to a base 10 decimal string with the given scale
198
+ std::string ToString(int32_t scale) const;
199
+
200
+ /// \brief Convert the value to an integer string
201
+ std::string ToIntegerString() const;
202
+
203
+ /// \brief Cast this value to an int64_t
204
+ explicit operator int64_t() const;
205
+
206
+ /// \brief Convert a decimal string to a Decimal value, optionally including
207
+ /// precision and scale if they're passed in and not null.
208
+ static Status FromString(std::string_view s, Decimal64* out, int32_t* precision,
209
+ int32_t* scale = NULLPTR);
210
+ static Status FromString(const std::string& s, Decimal64* out, int32_t* precision,
211
+ int32_t* scale = NULLPTR);
212
+ static Status FromString(const char* s, Decimal64* out, int32_t* precision,
213
+ int32_t* scale = NULLPTR);
214
+ static Result<Decimal64> FromString(std::string_view s);
215
+ static Result<Decimal64> FromString(const std::string& s);
216
+ static Result<Decimal64> FromString(const char* s);
217
+
218
+ static Result<Decimal64> FromReal(double real, int32_t precision, int32_t scale);
219
+ static Result<Decimal64> FromReal(float real, int32_t precision, int32_t scale);
220
+
221
+ /// \brief Convert from a big-endian byte representation. The length must be
222
+ /// between 1 and 4
223
+ /// \return error statis if the length is an invalid value
224
+ static Result<Decimal64> FromBigEndian(const uint8_t* data, int32_t length);
225
+
226
+ /// \brief Convert Decimal64 from one scale to another
227
+ Result<Decimal64> Rescale(int32_t original_scale, int32_t new_scale) const {
228
+ Decimal64 out;
229
+ auto dstatus = BasicDecimal64::Rescale(original_scale, new_scale, &out);
230
+ ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus));
231
+ return out;
232
+ }
233
+
234
+ /// \brief Convert to a signed integer
235
+ template <typename T, typename = internal::EnableIfIsOneOf<T, int32_t, int64_t>>
236
+ Result<T> ToInteger() const {
237
+ return static_cast<T>(value_);
238
+ }
239
+
240
+ /// \brief Convert to a signed integer
241
+ template <typename T, typename = internal::EnableIfIsOneOf<T, int32_t, int64_t>>
242
+ Status ToInteger(T* out) const {
243
+ return ToInteger<T>().Value(out);
244
+ }
245
+
246
+ /// \brief Convert to a floating-point number (scaled)
247
+ float ToFloat(int32_t scale) const;
248
+ /// \brief Convert to a floating-point number (scaled)
249
+ double ToDouble(int32_t scale) const;
250
+
251
+ /// \brief Convert to a floating-point number (scaled)
252
+ template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
253
+ T ToReal(int32_t scale) const {
254
+ static_assert(std::is_same_v<T, float> || std::is_same_v<T, double>,
255
+ "Unexpected floating-point type");
256
+ if constexpr (std::is_same_v<T, float>) {
257
+ return ToFloat(scale);
258
+ } else {
259
+ return ToDouble(scale);
260
+ }
261
+ }
262
+
263
+ ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os,
264
+ const Decimal64& decimal);
265
+
266
+ private:
267
+ /// Converts internal error code to Status
268
+ Status ToArrowStatus(DecimalStatus dstatus) const;
269
+ };
270
+
271
+ /// Represents a signed 128-bit integer in two's complement.
272
+ /// Calculations wrap around and overflow is ignored.
273
+ /// The max decimal precision that can be safely represented is
274
+ /// 38 significant digits.
275
+ ///
276
+ /// For a discussion of the algorithms, look at Knuth's volume 2,
277
+ /// Semi-numerical Algorithms section 4.3.1.
278
+ ///
279
+ /// Adapted from the Apache ORC C++ implementation
280
+ ///
281
+ /// The implementation is split into two parts :
282
+ ///
283
+ /// 1. BasicDecimal128
284
+ /// - can be safely compiled to IR without references to libstdc++.
285
+ /// 2. Decimal128
286
+ /// - has additional functionality on top of BasicDecimal128 to deal with
287
+ /// strings and streams.
288
+ class ARROW_EXPORT Decimal128 : public BasicDecimal128 {
289
+ public:
290
+ /// \cond FALSE
291
+ // (need to avoid a duplicate definition in Sphinx)
292
+ using BasicDecimal128::BasicDecimal128;
293
+ /// \endcond
294
+
295
+ /// \brief constructor creates a Decimal128 from a BasicDecimal128.
296
+ constexpr Decimal128(const BasicDecimal128& value) noexcept // NOLINT runtime/explicit
297
+ : BasicDecimal128(value) {}
298
+
299
+ /// \brief Parse the number from a base 10 string representation.
300
+ explicit Decimal128(const std::string& value);
301
+
302
+ /// \brief Empty constructor creates a Decimal128 with a value of 0.
303
+ // This is required on some older compilers.
304
+ constexpr Decimal128() noexcept : BasicDecimal128() {}
305
+
306
+ /// Divide this number by right and return the result.
307
+ ///
308
+ /// This operation is not destructive.
309
+ /// The answer rounds to zero. Signs work like:
310
+ /// 21 / 5 -> 4, 1
311
+ /// -21 / 5 -> -4, -1
312
+ /// 21 / -5 -> -4, 1
313
+ /// -21 / -5 -> 4, -1
314
+ /// \param[in] divisor the number to divide by
315
+ /// \return the pair of the quotient and the remainder
316
+ Result<std::pair<Decimal128, Decimal128>> Divide(const Decimal128& divisor) const {
317
+ std::pair<Decimal128, Decimal128> result;
318
+ auto dstatus = BasicDecimal128::Divide(divisor, &result.first, &result.second);
319
+ ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus));
320
+ return result;
321
+ }
322
+
323
+ /// \brief Convert the Decimal128 value to a base 10 decimal string with the given
324
+ /// scale.
325
+ std::string ToString(int32_t scale) const;
326
+
327
+ /// \brief Convert the value to an integer string
328
+ std::string ToIntegerString() const;
329
+
330
+ /// \brief Cast this value to an int64_t.
331
+ explicit operator int64_t() const;
332
+
333
+ /// \brief Convert a decimal string to a Decimal128 value, optionally including
334
+ /// precision and scale if they're passed in and not null.
335
+ static Status FromString(std::string_view s, Decimal128* out, int32_t* precision,
336
+ int32_t* scale = NULLPTR);
337
+ static Status FromString(const std::string& s, Decimal128* out, int32_t* precision,
338
+ int32_t* scale = NULLPTR);
339
+ static Status FromString(const char* s, Decimal128* out, int32_t* precision,
340
+ int32_t* scale = NULLPTR);
341
+ static Result<Decimal128> FromString(std::string_view s);
342
+ static Result<Decimal128> FromString(const std::string& s);
343
+ static Result<Decimal128> FromString(const char* s);
344
+
345
+ static Result<Decimal128> FromReal(double real, int32_t precision, int32_t scale);
346
+ static Result<Decimal128> FromReal(float real, int32_t precision, int32_t scale);
347
+
348
+ /// \brief Convert from a big-endian byte representation. The length must be
349
+ /// between 1 and 16.
350
+ /// \return error status if the length is an invalid value
351
+ static Result<Decimal128> FromBigEndian(const uint8_t* data, int32_t length);
352
+
353
+ /// \brief Convert Decimal128 from one scale to another
354
+ Result<Decimal128> Rescale(int32_t original_scale, int32_t new_scale) const {
355
+ Decimal128 out;
356
+ auto dstatus = BasicDecimal128::Rescale(original_scale, new_scale, &out);
357
+ ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus));
358
+ return out;
359
+ }
360
+
361
+ /// \brief Convert to a signed integer
362
+ template <typename T, typename = internal::EnableIfIsOneOf<T, int32_t, int64_t>>
363
+ Result<T> ToInteger() const {
364
+ constexpr auto min_value = std::numeric_limits<T>::min();
365
+ constexpr auto max_value = std::numeric_limits<T>::max();
366
+ const auto& self = *this;
367
+ if (self < min_value || self > max_value) {
368
+ return Status::Invalid("Invalid cast from Decimal128 to ", sizeof(T),
369
+ " byte integer");
370
+ }
371
+ return static_cast<T>(low_bits());
372
+ }
373
+
374
+ /// \brief Convert to a signed integer
375
+ template <typename T, typename = internal::EnableIfIsOneOf<T, int32_t, int64_t>>
376
+ Status ToInteger(T* out) const {
377
+ return ToInteger<T>().Value(out);
378
+ }
379
+
380
+ /// \brief Convert to a floating-point number (scaled)
381
+ float ToFloat(int32_t scale) const;
382
+ /// \brief Convert to a floating-point number (scaled)
383
+ double ToDouble(int32_t scale) const;
384
+
385
+ /// \brief Convert to a floating-point number (scaled)
386
+ template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
387
+ T ToReal(int32_t scale) const {
388
+ static_assert(std::is_same_v<T, float> || std::is_same_v<T, double>,
389
+ "Unexpected floating-point type");
390
+ if constexpr (std::is_same_v<T, float>) {
391
+ return ToFloat(scale);
392
+ } else {
393
+ return ToDouble(scale);
394
+ }
395
+ }
396
+
397
+ ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os,
398
+ const Decimal128& decimal);
399
+
400
+ private:
401
+ /// Converts internal error code to Status
402
+ Status ToArrowStatus(DecimalStatus dstatus) const;
403
+ };
404
+
405
+ /// Represents a signed 256-bit integer in two's complement.
406
+ /// The max decimal precision that can be safely represented is
407
+ /// 76 significant digits.
408
+ ///
409
+ /// The implementation is split into two parts :
410
+ ///
411
+ /// 1. BasicDecimal256
412
+ /// - can be safely compiled to IR without references to libstdc++.
413
+ /// 2. Decimal256
414
+ /// - (TODO) has additional functionality on top of BasicDecimal256 to deal with
415
+ /// strings and streams.
416
+ class ARROW_EXPORT Decimal256 : public BasicDecimal256 {
417
+ public:
418
+ /// \cond FALSE
419
+ // (need to avoid a duplicate definition in Sphinx)
420
+ using BasicDecimal256::BasicDecimal256;
421
+ /// \endcond
422
+
423
+ /// \brief constructor creates a Decimal256 from a BasicDecimal256.
424
+ constexpr Decimal256(const BasicDecimal256& value) noexcept // NOLINT(runtime/explicit)
425
+ : BasicDecimal256(value) {}
426
+
427
+ /// \brief Parse the number from a base 10 string representation.
428
+ explicit Decimal256(const std::string& value);
429
+
430
+ /// \brief Empty constructor creates a Decimal256 with a value of 0.
431
+ // This is required on some older compilers.
432
+ constexpr Decimal256() noexcept : BasicDecimal256() {}
433
+
434
+ /// \brief Convert the Decimal256 value to a base 10 decimal string with the given
435
+ /// scale.
436
+ std::string ToString(int32_t scale) const;
437
+
438
+ /// \brief Convert the value to an integer string
439
+ std::string ToIntegerString() const;
440
+
441
+ /// \brief Convert a decimal string to a Decimal256 value, optionally including
442
+ /// precision and scale if they're passed in and not null.
443
+ static Status FromString(std::string_view s, Decimal256* out, int32_t* precision,
444
+ int32_t* scale = NULLPTR);
445
+ static Status FromString(const std::string& s, Decimal256* out, int32_t* precision,
446
+ int32_t* scale = NULLPTR);
447
+ static Status FromString(const char* s, Decimal256* out, int32_t* precision,
448
+ int32_t* scale = NULLPTR);
449
+ static Result<Decimal256> FromString(std::string_view s);
450
+ static Result<Decimal256> FromString(const std::string& s);
451
+ static Result<Decimal256> FromString(const char* s);
452
+
453
+ /// \brief Convert Decimal256 from one scale to another
454
+ Result<Decimal256> Rescale(int32_t original_scale, int32_t new_scale) const {
455
+ Decimal256 out;
456
+ auto dstatus = BasicDecimal256::Rescale(original_scale, new_scale, &out);
457
+ ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus));
458
+ return out;
459
+ }
460
+
461
+ /// Divide this number by right and return the result.
462
+ ///
463
+ /// This operation is not destructive.
464
+ /// The answer rounds to zero. Signs work like:
465
+ /// 21 / 5 -> 4, 1
466
+ /// -21 / 5 -> -4, -1
467
+ /// 21 / -5 -> -4, 1
468
+ /// -21 / -5 -> 4, -1
469
+ /// \param[in] divisor the number to divide by
470
+ /// \return the pair of the quotient and the remainder
471
+ Result<std::pair<Decimal256, Decimal256>> Divide(const Decimal256& divisor) const {
472
+ std::pair<Decimal256, Decimal256> result;
473
+ auto dstatus = BasicDecimal256::Divide(divisor, &result.first, &result.second);
474
+ ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus));
475
+ return result;
476
+ }
477
+
478
+ /// \brief Convert from a big-endian byte representation. The length must be
479
+ /// between 1 and 32.
480
+ /// \return error status if the length is an invalid value
481
+ static Result<Decimal256> FromBigEndian(const uint8_t* data, int32_t length);
482
+
483
+ static Result<Decimal256> FromReal(double real, int32_t precision, int32_t scale);
484
+ static Result<Decimal256> FromReal(float real, int32_t precision, int32_t scale);
485
+
486
+ /// \brief Convert to a floating-point number (scaled).
487
+ /// May return infinity in case of overflow.
488
+ float ToFloat(int32_t scale) const;
489
+ /// \brief Convert to a floating-point number (scaled)
490
+ double ToDouble(int32_t scale) const;
491
+
492
+ /// \brief Convert to a floating-point number (scaled)
493
+ template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
494
+ T ToReal(int32_t scale) const {
495
+ static_assert(std::is_same_v<T, float> || std::is_same_v<T, double>,
496
+ "Unexpected floating-point type");
497
+ if constexpr (std::is_same_v<T, float>) {
498
+ return ToFloat(scale);
499
+ } else {
500
+ return ToDouble(scale);
501
+ }
502
+ }
503
+
504
+ ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os,
505
+ const Decimal256& decimal);
506
+
507
+ private:
508
+ /// Converts internal error code to Status
509
+ Status ToArrowStatus(DecimalStatus dstatus) const;
510
+ };
511
+
512
+ /// For an integer type, return the max number of decimal digits
513
+ /// (=minimal decimal precision) it can represent.
514
+ inline Result<int32_t> MaxDecimalDigitsForInteger(Type::type type_id) {
515
+ switch (type_id) {
516
+ case Type::INT8:
517
+ case Type::UINT8:
518
+ return 3;
519
+ case Type::INT16:
520
+ case Type::UINT16:
521
+ return 5;
522
+ case Type::INT32:
523
+ case Type::UINT32:
524
+ return 10;
525
+ case Type::INT64:
526
+ return 19;
527
+ case Type::UINT64:
528
+ return 20;
529
+ default:
530
+ break;
531
+ }
532
+ return Status::Invalid("Not an integer type: ", type_id);
533
+ }
534
+
535
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <utility>
21
+ #include <vector>
22
+
23
+ #include "arrow/status.h"
24
+ #include "arrow/util/cpu_info.h"
25
+
26
+ namespace arrow {
27
+ namespace internal {
28
+
29
+ enum class DispatchLevel : int {
30
+ // These dispatch levels, corresponding to instruction set features,
31
+ // are sorted in increasing order of preference.
32
+ NONE = 0,
33
+ SSE4_2,
34
+ AVX2,
35
+ AVX512,
36
+ NEON,
37
+ MAX
38
+ };
39
+
40
+ /*
41
+ A facility for dynamic dispatch according to available DispatchLevel.
42
+
43
+ Typical use:
44
+
45
+ static void my_function_default(...);
46
+ static void my_function_avx2(...);
47
+
48
+ struct MyDynamicFunction {
49
+ using FunctionType = decltype(&my_function_default);
50
+
51
+ static std::vector<std::pair<DispatchLevel, FunctionType>> implementations() {
52
+ return {
53
+ { DispatchLevel::NONE, my_function_default }
54
+ #if defined(ARROW_HAVE_RUNTIME_AVX2)
55
+ , { DispatchLevel::AVX2, my_function_avx2 }
56
+ #endif
57
+ };
58
+ }
59
+ };
60
+
61
+ void my_function(...) {
62
+ static DynamicDispatch<MyDynamicFunction> dispatch;
63
+ return dispatch.func(...);
64
+ }
65
+ */
66
+ template <typename DynamicFunction>
67
+ class DynamicDispatch {
68
+ protected:
69
+ using FunctionType = typename DynamicFunction::FunctionType;
70
+ using Implementation = std::pair<DispatchLevel, FunctionType>;
71
+
72
+ public:
73
+ DynamicDispatch() { Resolve(DynamicFunction::implementations()); }
74
+
75
+ FunctionType func = {};
76
+
77
+ protected:
78
+ // Use the Implementation with the highest DispatchLevel
79
+ void Resolve(const std::vector<Implementation>& implementations) {
80
+ Implementation cur{DispatchLevel::NONE, {}};
81
+
82
+ for (const auto& impl : implementations) {
83
+ if (impl.first >= cur.first && IsSupported(impl.first)) {
84
+ // Higher (or same) level than current
85
+ cur = impl;
86
+ }
87
+ }
88
+
89
+ if (!cur.second) {
90
+ Status::Invalid("No appropriate implementation found").Abort();
91
+ }
92
+ func = cur.second;
93
+ }
94
+
95
+ private:
96
+ bool IsSupported(DispatchLevel level) const {
97
+ static const auto cpu_info = arrow::internal::CpuInfo::GetInstance();
98
+
99
+ switch (level) {
100
+ case DispatchLevel::NONE:
101
+ return true;
102
+ case DispatchLevel::SSE4_2:
103
+ return cpu_info->IsSupported(CpuInfo::SSE4_2);
104
+ case DispatchLevel::AVX2:
105
+ return cpu_info->IsSupported(CpuInfo::AVX2);
106
+ case DispatchLevel::AVX512:
107
+ return cpu_info->IsSupported(CpuInfo::AVX512);
108
+ default:
109
+ return false;
110
+ }
111
+ }
112
+ };
113
+
114
+ } // namespace internal
115
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/float16.h ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <array>
21
+ #include <cstdint>
22
+ #include <cstring>
23
+ #include <iosfwd>
24
+ #include <limits>
25
+ #include <type_traits>
26
+
27
+ #include "arrow/util/endian.h"
28
+ #include "arrow/util/macros.h"
29
+ #include "arrow/util/ubsan.h"
30
+ #include "arrow/util/visibility.h"
31
+
32
+ namespace arrow {
33
+ namespace util {
34
+
35
+ /// \brief Class representing an IEEE half-precision float, encoded as a `uint16_t`
36
+ ///
37
+ /// The exact format is as follows (from LSB to MSB):
38
+ /// - bits 0-10: mantissa
39
+ /// - bits 10-15: exponent
40
+ /// - bit 15: sign
41
+ ///
42
+ class ARROW_EXPORT Float16 {
43
+ public:
44
+ Float16() = default;
45
+ explicit Float16(float f) : Float16(FromFloat(f)) {}
46
+ explicit Float16(double d) : Float16(FromDouble(d)) {}
47
+ template <typename T,
48
+ typename std::enable_if_t<std::is_convertible_v<T, double>>* = NULLPTR>
49
+ explicit Float16(T v) : Float16(static_cast<double>(v)) {}
50
+
51
+ /// \brief Create a `Float16` from its exact binary representation
52
+ constexpr static Float16 FromBits(uint16_t bits) { return Float16{bits, bool{}}; }
53
+ /// \brief Create a `Float16` from a 32-bit float (may lose precision)
54
+ static Float16 FromFloat(float f);
55
+ /// \brief Create a `Float16` from a 64-bit float (may lose precision)
56
+ static Float16 FromDouble(double d);
57
+
58
+ /// \brief Read a `Float16` from memory in native-endian byte order
59
+ static Float16 FromBytes(const uint8_t* src) {
60
+ return FromBits(SafeLoadAs<uint16_t>(src));
61
+ }
62
+
63
+ /// \brief Read a `Float16` from memory in little-endian byte order
64
+ static Float16 FromLittleEndian(const uint8_t* src) {
65
+ return FromBits(::arrow::bit_util::FromLittleEndian(SafeLoadAs<uint16_t>(src)));
66
+ }
67
+
68
+ /// \brief Read a `Float16` from memory in big-endian byte order
69
+ static Float16 FromBigEndian(const uint8_t* src) {
70
+ return FromBits(::arrow::bit_util::FromBigEndian(SafeLoadAs<uint16_t>(src)));
71
+ }
72
+
73
+ /// \brief Return the value's binary representation as a `uint16_t`
74
+ constexpr uint16_t bits() const { return bits_; }
75
+
76
+ /// \brief Return true if the value is negative (sign bit is set)
77
+ constexpr bool signbit() const { return (bits_ & 0x8000) != 0; }
78
+
79
+ /// \brief Return true if the value is NaN
80
+ constexpr bool is_nan() const { return (bits_ & 0x7fff) > 0x7c00; }
81
+ /// \brief Return true if the value is positive/negative infinity
82
+ constexpr bool is_infinity() const { return (bits_ & 0x7fff) == 0x7c00; }
83
+ /// \brief Return true if the value is finite and not NaN
84
+ constexpr bool is_finite() const { return (bits_ & 0x7c00) != 0x7c00; }
85
+ /// \brief Return true if the value is positive/negative zero
86
+ constexpr bool is_zero() const { return (bits_ & 0x7fff) == 0; }
87
+
88
+ /// \brief Convert to a 32-bit float
89
+ float ToFloat() const;
90
+ /// \brief Convert to a 64-bit float
91
+ double ToDouble() const;
92
+
93
+ explicit operator float() const { return ToFloat(); }
94
+ explicit operator double() const { return ToDouble(); }
95
+
96
+ /// \brief Copy the value's bytes in native-endian byte order
97
+ void ToBytes(uint8_t* dest) const { std::memcpy(dest, &bits_, sizeof(bits_)); }
98
+ /// \brief Return the value's bytes in native-endian byte order
99
+ constexpr std::array<uint8_t, 2> ToBytes() const {
100
+ #if ARROW_LITTLE_ENDIAN
101
+ return ToLittleEndian();
102
+ #else
103
+ return ToBigEndian();
104
+ #endif
105
+ }
106
+
107
+ /// \brief Copy the value's bytes in little-endian byte order
108
+ void ToLittleEndian(uint8_t* dest) const {
109
+ const auto bytes = ToLittleEndian();
110
+ std::memcpy(dest, bytes.data(), bytes.size());
111
+ }
112
+ /// \brief Return the value's bytes in little-endian byte order
113
+ constexpr std::array<uint8_t, 2> ToLittleEndian() const {
114
+ #if ARROW_LITTLE_ENDIAN
115
+ return {uint8_t(bits_ & 0xff), uint8_t(bits_ >> 8)};
116
+ #else
117
+ return {uint8_t(bits_ >> 8), uint8_t(bits_ & 0xff)};
118
+ #endif
119
+ }
120
+
121
+ /// \brief Copy the value's bytes in big-endian byte order
122
+ void ToBigEndian(uint8_t* dest) const {
123
+ const auto bytes = ToBigEndian();
124
+ std::memcpy(dest, bytes.data(), bytes.size());
125
+ }
126
+ /// \brief Return the value's bytes in big-endian byte order
127
+ constexpr std::array<uint8_t, 2> ToBigEndian() const {
128
+ #if ARROW_LITTLE_ENDIAN
129
+ return {uint8_t(bits_ >> 8), uint8_t(bits_ & 0xff)};
130
+ #else
131
+ return {uint8_t(bits_ & 0xff), uint8_t(bits_ >> 8)};
132
+ #endif
133
+ }
134
+
135
+ constexpr Float16 operator-() const { return FromBits(bits_ ^ 0x8000); }
136
+ constexpr Float16 operator+() const { return FromBits(bits_); }
137
+
138
+ friend constexpr bool operator==(Float16 lhs, Float16 rhs) {
139
+ if (lhs.is_nan() || rhs.is_nan()) return false;
140
+ return Float16::CompareEq(lhs, rhs);
141
+ }
142
+ friend constexpr bool operator!=(Float16 lhs, Float16 rhs) { return !(lhs == rhs); }
143
+
144
+ friend constexpr bool operator<(Float16 lhs, Float16 rhs) {
145
+ if (lhs.is_nan() || rhs.is_nan()) return false;
146
+ return Float16::CompareLt(lhs, rhs);
147
+ }
148
+ friend constexpr bool operator>(Float16 lhs, Float16 rhs) { return rhs < lhs; }
149
+
150
+ friend constexpr bool operator<=(Float16 lhs, Float16 rhs) {
151
+ if (lhs.is_nan() || rhs.is_nan()) return false;
152
+ return !Float16::CompareLt(rhs, lhs);
153
+ }
154
+ friend constexpr bool operator>=(Float16 lhs, Float16 rhs) { return rhs <= lhs; }
155
+
156
+ ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os, Float16 arg);
157
+
158
+ protected:
159
+ uint16_t bits_;
160
+
161
+ private:
162
+ constexpr Float16(uint16_t bits, bool) : bits_(bits) {}
163
+
164
+ // Comparison helpers that assume neither operand is NaN
165
+ static constexpr bool CompareEq(Float16 lhs, Float16 rhs) {
166
+ return (lhs.bits() == rhs.bits()) || (lhs.is_zero() && rhs.is_zero());
167
+ }
168
+ static constexpr bool CompareLt(Float16 lhs, Float16 rhs) {
169
+ if (lhs.signbit()) {
170
+ if (rhs.signbit()) {
171
+ // Both are negative
172
+ return lhs.bits() > rhs.bits();
173
+ } else {
174
+ // Handle +/-0
175
+ return !lhs.is_zero() || rhs.bits() != 0;
176
+ }
177
+ } else if (rhs.signbit()) {
178
+ return false;
179
+ } else {
180
+ // Both are positive
181
+ return lhs.bits() < rhs.bits();
182
+ }
183
+ }
184
+ };
185
+
186
+ static_assert(std::is_trivial_v<Float16>);
187
+
188
+ } // namespace util
189
+ } // namespace arrow
190
+
191
+ // TODO: Not complete
192
+ template <>
193
+ class std::numeric_limits<arrow::util::Float16> {
194
+ using T = arrow::util::Float16;
195
+
196
+ public:
197
+ static constexpr bool is_specialized = true;
198
+ static constexpr bool is_signed = true;
199
+ static constexpr bool has_infinity = true;
200
+ static constexpr bool has_quiet_NaN = true;
201
+
202
+ static constexpr T min() { return T::FromBits(0b0000010000000000); }
203
+ static constexpr T max() { return T::FromBits(0b0111101111111111); }
204
+ static constexpr T lowest() { return -max(); }
205
+
206
+ static constexpr T infinity() { return T::FromBits(0b0111110000000000); }
207
+
208
+ static constexpr T quiet_NaN() { return T::FromBits(0b0111111111111111); }
209
+ };
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/future.h ADDED
@@ -0,0 +1,882 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cmath>
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <optional>
25
+ #include <type_traits>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "arrow/result.h"
30
+ #include "arrow/status.h"
31
+ #include "arrow/type_fwd.h"
32
+ #include "arrow/type_traits.h"
33
+ #include "arrow/util/config.h"
34
+ #include "arrow/util/functional.h"
35
+ #include "arrow/util/macros.h"
36
+ #include "arrow/util/tracing.h"
37
+ #include "arrow/util/type_fwd.h"
38
+ #include "arrow/util/visibility.h"
39
+
40
+ namespace arrow {
41
+
42
+ template <typename>
43
+ struct EnsureFuture;
44
+
45
+ namespace detail {
46
+
47
+ template <typename>
48
+ struct is_future : std::false_type {};
49
+
50
+ template <typename T>
51
+ struct is_future<Future<T>> : std::true_type {};
52
+
53
+ template <typename Signature, typename Enable = void>
54
+ struct result_of;
55
+
56
+ template <typename Fn, typename... A>
57
+ struct result_of<Fn(A...),
58
+ internal::void_t<decltype(std::declval<Fn>()(std::declval<A>()...))>> {
59
+ using type = decltype(std::declval<Fn>()(std::declval<A>()...));
60
+ };
61
+
62
+ template <typename Signature>
63
+ using result_of_t = typename result_of<Signature>::type;
64
+
65
+ // Helper to find the synchronous counterpart for a Future
66
+ template <typename T>
67
+ struct SyncType {
68
+ using type = Result<T>;
69
+ };
70
+
71
+ template <>
72
+ struct SyncType<internal::Empty> {
73
+ using type = Status;
74
+ };
75
+
76
+ template <typename Fn>
77
+ using first_arg_is_status =
78
+ std::is_same<typename std::decay<internal::call_traits::argument_type<0, Fn>>::type,
79
+ Status>;
80
+
81
+ template <typename Fn, typename Then, typename Else,
82
+ typename Count = internal::call_traits::argument_count<Fn>>
83
+ using if_has_no_args = typename std::conditional<Count::value == 0, Then, Else>::type;
84
+
85
+ /// Creates a callback that can be added to a future to mark a `dest` future finished
86
+ template <typename Source, typename Dest, bool SourceEmpty = Source::is_empty,
87
+ bool DestEmpty = Dest::is_empty>
88
+ struct MarkNextFinished {};
89
+
90
+ /// If the source and dest are both empty we can pass on the status
91
+ template <typename Source, typename Dest>
92
+ struct MarkNextFinished<Source, Dest, true, true> {
93
+ void operator()(const Status& status) && { next.MarkFinished(status); }
94
+ Dest next;
95
+ };
96
+
97
+ /// If the source is not empty but the dest is then we can take the
98
+ /// status out of the result
99
+ template <typename Source, typename Dest>
100
+ struct MarkNextFinished<Source, Dest, false, true> {
101
+ void operator()(const Result<typename Source::ValueType>& res) && {
102
+ next.MarkFinished(internal::Empty::ToResult(res.status()));
103
+ }
104
+ Dest next;
105
+ };
106
+
107
+ /// If neither are empty we pass on the result
108
+ template <typename Source, typename Dest>
109
+ struct MarkNextFinished<Source, Dest, false, false> {
110
+ void operator()(const Result<typename Source::ValueType>& res) && {
111
+ next.MarkFinished(res);
112
+ }
113
+ Dest next;
114
+ };
115
+
116
+ /// Helper that contains information about how to apply a continuation
117
+ struct ContinueFuture {
118
+ template <typename Return>
119
+ struct ForReturnImpl;
120
+
121
+ template <typename Return>
122
+ using ForReturn = typename ForReturnImpl<Return>::type;
123
+
124
+ template <typename Signature>
125
+ using ForSignature = ForReturn<result_of_t<Signature>>;
126
+
127
+ // If the callback returns void then we return Future<> that always finishes OK.
128
+ template <typename ContinueFunc, typename... Args,
129
+ typename ContinueResult = result_of_t<ContinueFunc && (Args && ...)>,
130
+ typename NextFuture = ForReturn<ContinueResult>>
131
+ typename std::enable_if<std::is_void<ContinueResult>::value>::type operator()(
132
+ NextFuture next, ContinueFunc&& f, Args&&... a) const {
133
+ std::forward<ContinueFunc>(f)(std::forward<Args>(a)...);
134
+ next.MarkFinished();
135
+ }
136
+
137
+ /// If the callback returns a non-future then we return Future<T>
138
+ /// and mark the future finished with the callback result. It will get promoted
139
+ /// to Result<T> as part of MarkFinished if it isn't already.
140
+ ///
141
+ /// If the callback returns Status and we return Future<> then also send the callback
142
+ /// result as-is to the destination future.
143
+ template <typename ContinueFunc, typename... Args,
144
+ typename ContinueResult = result_of_t<ContinueFunc && (Args && ...)>,
145
+ typename NextFuture = ForReturn<ContinueResult>>
146
+ typename std::enable_if<
147
+ !std::is_void<ContinueResult>::value && !is_future<ContinueResult>::value &&
148
+ (!NextFuture::is_empty || std::is_same<ContinueResult, Status>::value)>::type
149
+ operator()(NextFuture next, ContinueFunc&& f, Args&&... a) const {
150
+ next.MarkFinished(std::forward<ContinueFunc>(f)(std::forward<Args>(a)...));
151
+ }
152
+
153
+ /// If the callback returns a Result and the next future is Future<> then we mark
154
+ /// the future finished with the callback result.
155
+ ///
156
+ /// It may seem odd that the next future is Future<> when the callback returns a
157
+ /// result but this can occur if the OnFailure callback returns a result while the
158
+ /// OnSuccess callback is void/Status (e.g. you would get this calling the one-arg
159
+ /// version of Then with an OnSuccess callback that returns void)
160
+ template <typename ContinueFunc, typename... Args,
161
+ typename ContinueResult = result_of_t<ContinueFunc && (Args && ...)>,
162
+ typename NextFuture = ForReturn<ContinueResult>>
163
+ typename std::enable_if<!std::is_void<ContinueResult>::value &&
164
+ !is_future<ContinueResult>::value && NextFuture::is_empty &&
165
+ !std::is_same<ContinueResult, Status>::value>::type
166
+ operator()(NextFuture next, ContinueFunc&& f, Args&&... a) const {
167
+ next.MarkFinished(std::forward<ContinueFunc>(f)(std::forward<Args>(a)...).status());
168
+ }
169
+
170
+ /// If the callback returns a Future<T> then we return Future<T>. We create a new
171
+ /// future and add a callback to the future given to us by the user that forwards the
172
+ /// result to the future we just created
173
+ template <typename ContinueFunc, typename... Args,
174
+ typename ContinueResult = result_of_t<ContinueFunc && (Args && ...)>,
175
+ typename NextFuture = ForReturn<ContinueResult>>
176
+ typename std::enable_if<is_future<ContinueResult>::value>::type operator()(
177
+ NextFuture next, ContinueFunc&& f, Args&&... a) const {
178
+ ContinueResult signal_to_complete_next =
179
+ std::forward<ContinueFunc>(f)(std::forward<Args>(a)...);
180
+ MarkNextFinished<ContinueResult, NextFuture> callback{std::move(next)};
181
+ signal_to_complete_next.AddCallback(std::move(callback));
182
+ }
183
+
184
+ /// Helpers to conditionally ignore arguments to ContinueFunc
185
+ template <typename ContinueFunc, typename NextFuture, typename... Args>
186
+ void IgnoringArgsIf(std::true_type, NextFuture&& next, ContinueFunc&& f,
187
+ Args&&...) const {
188
+ operator()(std::forward<NextFuture>(next), std::forward<ContinueFunc>(f));
189
+ }
190
+ template <typename ContinueFunc, typename NextFuture, typename... Args>
191
+ void IgnoringArgsIf(std::false_type, NextFuture&& next, ContinueFunc&& f,
192
+ Args&&... a) const {
193
+ operator()(std::forward<NextFuture>(next), std::forward<ContinueFunc>(f),
194
+ std::forward<Args>(a)...);
195
+ }
196
+ };
197
+
198
+ /// Helper struct which tells us what kind of Future gets returned from `Then` based on
199
+ /// the return type of the OnSuccess callback
200
+ template <>
201
+ struct ContinueFuture::ForReturnImpl<void> {
202
+ using type = Future<>;
203
+ };
204
+
205
+ template <>
206
+ struct ContinueFuture::ForReturnImpl<Status> {
207
+ using type = Future<>;
208
+ };
209
+
210
+ template <typename R>
211
+ struct ContinueFuture::ForReturnImpl {
212
+ using type = Future<R>;
213
+ };
214
+
215
+ template <typename T>
216
+ struct ContinueFuture::ForReturnImpl<Result<T>> {
217
+ using type = Future<T>;
218
+ };
219
+
220
+ template <typename T>
221
+ struct ContinueFuture::ForReturnImpl<Future<T>> {
222
+ using type = Future<T>;
223
+ };
224
+
225
+ } // namespace detail
226
+
227
+ /// A Future's execution or completion status
228
+ enum class FutureState : int8_t { PENDING, SUCCESS, FAILURE };
229
+
230
+ inline bool IsFutureFinished(FutureState state) { return state != FutureState::PENDING; }
231
+
232
+ /// \brief Describe whether the callback should be scheduled or run synchronously
233
+ enum class ShouldSchedule {
234
+ /// Always run the callback synchronously (the default)
235
+ Never = 0,
236
+ /// Schedule a new task only if the future is not finished when the
237
+ /// callback is added
238
+ IfUnfinished = 1,
239
+ /// Always schedule the callback as a new task
240
+ Always = 2,
241
+ /// Schedule a new task only if it would run on an executor other than
242
+ /// the specified executor.
243
+ IfDifferentExecutor = 3,
244
+ };
245
+
246
+ /// \brief Options that control how a continuation is run
247
+ struct CallbackOptions {
248
+ /// Describe whether the callback should be run synchronously or scheduled
249
+ ShouldSchedule should_schedule = ShouldSchedule::Never;
250
+ /// If the callback is scheduled then this is the executor it should be scheduled
251
+ /// on. If this is NULL then should_schedule must be Never
252
+ internal::Executor* executor = NULLPTR;
253
+
254
+ static CallbackOptions Defaults() { return {}; }
255
+ };
256
+
257
+ // Untyped private implementation
258
+ class ARROW_EXPORT FutureImpl : public std::enable_shared_from_this<FutureImpl> {
259
+ public:
260
+ FutureImpl();
261
+ virtual ~FutureImpl() = default;
262
+
263
+ FutureState state() { return state_.load(); }
264
+
265
+ static std::unique_ptr<FutureImpl> Make();
266
+ static std::unique_ptr<FutureImpl> MakeFinished(FutureState state);
267
+
268
+ #ifdef ARROW_WITH_OPENTELEMETRY
269
+ void SetSpan(util::tracing::Span* span) { span_ = span; }
270
+ #endif
271
+
272
+ // Future API
273
+ void MarkFinished();
274
+ void MarkFailed();
275
+ void Wait();
276
+ bool Wait(double seconds);
277
+ template <typename ValueType>
278
+ Result<ValueType>* CastResult() const {
279
+ return static_cast<Result<ValueType>*>(result_.get());
280
+ }
281
+
282
+ using Callback = internal::FnOnce<void(const FutureImpl& impl)>;
283
+ void AddCallback(Callback callback, CallbackOptions opts);
284
+ bool TryAddCallback(const std::function<Callback()>& callback_factory,
285
+ CallbackOptions opts);
286
+
287
+ std::atomic<FutureState> state_{FutureState::PENDING};
288
+
289
+ // Type erased storage for arbitrary results
290
+ // XXX small objects could be stored inline instead of boxed in a pointer
291
+ using Storage = std::unique_ptr<void, void (*)(void*)>;
292
+ Storage result_{NULLPTR, NULLPTR};
293
+
294
+ struct CallbackRecord {
295
+ Callback callback;
296
+ CallbackOptions options;
297
+ };
298
+ std::vector<CallbackRecord> callbacks_;
299
+ #ifdef ARROW_WITH_OPENTELEMETRY
300
+ util::tracing::Span* span_ = NULLPTR;
301
+ #endif
302
+ };
303
+
304
+ // ---------------------------------------------------------------------
305
+ // Public API
306
+
307
+ /// \brief EXPERIMENTAL A std::future-like class with more functionality.
308
+ ///
309
+ /// A Future represents the results of a past or future computation.
310
+ /// The Future API has two sides: a producer side and a consumer side.
311
+ ///
312
+ /// The producer API allows creating a Future and setting its result or
313
+ /// status, possibly after running a computation function.
314
+ ///
315
+ /// The consumer API allows querying a Future's current state, wait for it
316
+ /// to complete, and composing futures with callbacks.
317
+ template <typename T>
318
+ class [[nodiscard]] Future {
319
+ public:
320
+ using ValueType = T;
321
+ using SyncType = typename detail::SyncType<T>::type;
322
+ static constexpr bool is_empty = std::is_same<T, internal::Empty>::value;
323
+ // The default constructor creates an invalid Future. Use Future::Make()
324
+ // for a valid Future. This constructor is mostly for the convenience
325
+ // of being able to presize a vector of Futures.
326
+ Future() = default;
327
+
328
+ #ifdef ARROW_WITH_OPENTELEMETRY
329
+ void SetSpan(util::tracing::Span* span) { impl_->SetSpan(span); }
330
+ #endif
331
+
332
+ // Consumer API
333
+
334
+ bool is_valid() const { return impl_ != NULLPTR; }
335
+
336
+ /// \brief Return the Future's current state
337
+ ///
338
+ /// A return value of PENDING is only indicative, as the Future can complete
339
+ /// concurrently. A return value of FAILURE or SUCCESS is definitive, though.
340
+ FutureState state() const {
341
+ CheckValid();
342
+ return impl_->state();
343
+ }
344
+
345
+ /// \brief Whether the Future is finished
346
+ ///
347
+ /// A false return value is only indicative, as the Future can complete
348
+ /// concurrently. A true return value is definitive, though.
349
+ bool is_finished() const {
350
+ CheckValid();
351
+ return IsFutureFinished(impl_->state());
352
+ }
353
+
354
+ /// \brief Wait for the Future to complete and return its Result
355
+ const Result<ValueType>& result() const& {
356
+ Wait();
357
+ return *GetResult();
358
+ }
359
+
360
+ /// \brief Returns an rvalue to the result. This method is potentially unsafe
361
+ ///
362
+ /// The future is not the unique owner of the result, copies of a future will
363
+ /// also point to the same result. You must make sure that no other copies
364
+ /// of the future exist. Attempts to add callbacks after you move the result
365
+ /// will result in undefined behavior.
366
+ Result<ValueType>&& MoveResult() {
367
+ Wait();
368
+ return std::move(*GetResult());
369
+ }
370
+
371
+ /// \brief Wait for the Future to complete and return its Status
372
+ const Status& status() const { return result().status(); }
373
+
374
+ /// \brief Future<T> is convertible to Future<>, which views only the
375
+ /// Status of the original. Marking the returned Future Finished is not supported.
376
+ explicit operator Future<>() const {
377
+ Future<> status_future;
378
+ status_future.impl_ = impl_;
379
+ return status_future;
380
+ }
381
+
382
+ /// \brief Wait for the Future to complete
383
+ void Wait() const {
384
+ CheckValid();
385
+ impl_->Wait();
386
+ }
387
+
388
+ /// \brief Wait for the Future to complete, or for the timeout to expire
389
+ ///
390
+ /// `true` is returned if the Future completed, `false` if the timeout expired.
391
+ /// Note a `false` value is only indicative, as the Future can complete
392
+ /// concurrently.
393
+ bool Wait(double seconds) const {
394
+ CheckValid();
395
+ return impl_->Wait(seconds);
396
+ }
397
+
398
+ // Producer API
399
+
400
+ /// \brief Producer API: mark Future finished
401
+ ///
402
+ /// The Future's result is set to `res`.
403
+ void MarkFinished(Result<ValueType> res) { DoMarkFinished(std::move(res)); }
404
+
405
+ /// \brief Mark a Future<> completed with the provided Status.
406
+ template <typename E = ValueType, typename = typename std::enable_if<
407
+ std::is_same<E, internal::Empty>::value>::type>
408
+ void MarkFinished(Status s = Status::OK()) {
409
+ return DoMarkFinished(E::ToResult(std::move(s)));
410
+ }
411
+
412
+ /// \brief Producer API: instantiate a valid Future
413
+ ///
414
+ /// The Future's state is initialized with PENDING. If you are creating a future with
415
+ /// this method you must ensure that future is eventually completed (with success or
416
+ /// failure). Creating a future, returning it, and never completing the future can lead
417
+ /// to memory leaks (for example, see Loop).
418
+ static Future Make() {
419
+ Future fut;
420
+ fut.impl_ = FutureImpl::Make();
421
+ return fut;
422
+ }
423
+
424
+ /// \brief Producer API: instantiate a finished Future
425
+ static Future<ValueType> MakeFinished(Result<ValueType> res) {
426
+ Future<ValueType> fut;
427
+ fut.InitializeFromResult(std::move(res));
428
+ return fut;
429
+ }
430
+
431
+ /// \brief Make a finished Future<> with the provided Status.
432
+ template <typename E = ValueType, typename = typename std::enable_if<
433
+ std::is_same<E, internal::Empty>::value>::type>
434
+ static Future<> MakeFinished(Status s = Status::OK()) {
435
+ return MakeFinished(E::ToResult(std::move(s)));
436
+ }
437
+
438
+ struct WrapResultOnComplete {
439
+ template <typename OnComplete>
440
+ struct Callback {
441
+ void operator()(const FutureImpl& impl) && {
442
+ std::move(on_complete)(*impl.CastResult<ValueType>());
443
+ }
444
+ OnComplete on_complete;
445
+ };
446
+ };
447
+
448
+ struct WrapStatusyOnComplete {
449
+ template <typename OnComplete>
450
+ struct Callback {
451
+ static_assert(std::is_same<internal::Empty, ValueType>::value,
452
+ "Only callbacks for Future<> should accept Status and not Result");
453
+
454
+ void operator()(const FutureImpl& impl) && {
455
+ std::move(on_complete)(impl.CastResult<ValueType>()->status());
456
+ }
457
+ OnComplete on_complete;
458
+ };
459
+ };
460
+
461
+ template <typename OnComplete>
462
+ using WrapOnComplete = typename std::conditional<
463
+ detail::first_arg_is_status<OnComplete>::value, WrapStatusyOnComplete,
464
+ WrapResultOnComplete>::type::template Callback<OnComplete>;
465
+
466
+ /// \brief Consumer API: Register a callback to run when this future completes
467
+ ///
468
+ /// The callback should receive the result of the future (const Result<T>&)
469
+ /// For a void or statusy future this should be (const Status&)
470
+ ///
471
+ /// There is no guarantee to the order in which callbacks will run. In
472
+ /// particular, callbacks added while the future is being marked complete
473
+ /// may be executed immediately, ahead of, or even the same time as, other
474
+ /// callbacks that have been previously added.
475
+ ///
476
+ /// WARNING: callbacks may hold arbitrary references, including cyclic references.
477
+ /// Since callbacks will only be destroyed after they are invoked, this can lead to
478
+ /// memory leaks if a Future is never marked finished (abandoned):
479
+ ///
480
+ /// {
481
+ /// auto fut = Future<>::Make();
482
+ /// fut.AddCallback([fut]() {});
483
+ /// }
484
+ ///
485
+ /// In this example `fut` falls out of scope but is not destroyed because it holds a
486
+ /// cyclic reference to itself through the callback.
487
+ template <typename OnComplete, typename Callback = WrapOnComplete<OnComplete>>
488
+ void AddCallback(OnComplete on_complete,
489
+ CallbackOptions opts = CallbackOptions::Defaults()) const {
490
+ // We know impl_ will not be dangling when invoking callbacks because at least one
491
+ // thread will be waiting for MarkFinished to return. Thus it's safe to keep a
492
+ // weak reference to impl_ here
493
+ impl_->AddCallback(Callback{std::move(on_complete)}, opts);
494
+ }
495
+
496
+ /// \brief Overload of AddCallback that will return false instead of running
497
+ /// synchronously
498
+ ///
499
+ /// This overload will guarantee the callback is never run synchronously. If the future
500
+ /// is already finished then it will simply return false. This can be useful to avoid
501
+ /// stack overflow in a situation where you have recursive Futures. For an example
502
+ /// see the Loop function
503
+ ///
504
+ /// Takes in a callback factory function to allow moving callbacks (the factory function
505
+ /// will only be called if the callback can successfully be added)
506
+ ///
507
+ /// Returns true if a callback was actually added and false if the callback failed
508
+ /// to add because the future was marked complete.
509
+ template <typename CallbackFactory,
510
+ typename OnComplete = detail::result_of_t<CallbackFactory()>,
511
+ typename Callback = WrapOnComplete<OnComplete>>
512
+ bool TryAddCallback(CallbackFactory callback_factory,
513
+ CallbackOptions opts = CallbackOptions::Defaults()) const {
514
+ return impl_->TryAddCallback([&]() { return Callback{callback_factory()}; }, opts);
515
+ }
516
+
517
+ template <typename OnSuccess, typename OnFailure>
518
+ struct ThenOnComplete {
519
+ static constexpr bool has_no_args =
520
+ internal::call_traits::argument_count<OnSuccess>::value == 0;
521
+
522
+ using ContinuedFuture = detail::ContinueFuture::ForSignature<
523
+ detail::if_has_no_args<OnSuccess, OnSuccess && (), OnSuccess && (const T&)>>;
524
+
525
+ static_assert(
526
+ std::is_same<detail::ContinueFuture::ForSignature<OnFailure && (const Status&)>,
527
+ ContinuedFuture>::value,
528
+ "OnSuccess and OnFailure must continue with the same future type");
529
+
530
+ struct DummyOnSuccess {
531
+ void operator()(const T&);
532
+ };
533
+ using OnSuccessArg = typename std::decay<internal::call_traits::argument_type<
534
+ 0, detail::if_has_no_args<OnSuccess, DummyOnSuccess, OnSuccess>>>::type;
535
+
536
+ static_assert(
537
+ !std::is_same<OnSuccessArg, typename EnsureResult<OnSuccessArg>::type>::value,
538
+ "OnSuccess' argument should not be a Result");
539
+
540
+ void operator()(const Result<T>& result) && {
541
+ detail::ContinueFuture continue_future;
542
+ if (ARROW_PREDICT_TRUE(result.ok())) {
543
+ // move on_failure to a(n immediately destroyed) temporary to free its resources
544
+ ARROW_UNUSED(OnFailure(std::move(on_failure)));
545
+ continue_future.IgnoringArgsIf(
546
+ detail::if_has_no_args<OnSuccess, std::true_type, std::false_type>{},
547
+ std::move(next), std::move(on_success), result.ValueOrDie());
548
+ } else {
549
+ ARROW_UNUSED(OnSuccess(std::move(on_success)));
550
+ continue_future(std::move(next), std::move(on_failure), result.status());
551
+ }
552
+ }
553
+
554
+ OnSuccess on_success;
555
+ OnFailure on_failure;
556
+ ContinuedFuture next;
557
+ };
558
+
559
+ template <typename OnSuccess>
560
+ struct PassthruOnFailure {
561
+ using ContinuedFuture = detail::ContinueFuture::ForSignature<
562
+ detail::if_has_no_args<OnSuccess, OnSuccess && (), OnSuccess && (const T&)>>;
563
+
564
+ Result<typename ContinuedFuture::ValueType> operator()(const Status& s) { return s; }
565
+ };
566
+
567
+ /// \brief Consumer API: Register a continuation to run when this future completes
568
+ ///
569
+ /// The continuation will run in the same thread that called MarkFinished (whatever
570
+ /// callback is registered with this function will run before MarkFinished returns).
571
+ /// Avoid long-running callbacks in favor of submitting a task to an Executor and
572
+ /// returning the future.
573
+ ///
574
+ /// Two callbacks are supported:
575
+ /// - OnSuccess, called with the result (const ValueType&) on successful completion.
576
+ /// for an empty future this will be called with nothing ()
577
+ /// - OnFailure, called with the error (const Status&) on failed completion.
578
+ /// This callback is optional and defaults to a passthru of any errors.
579
+ ///
580
+ /// Then() returns a Future whose ValueType is derived from the return type of the
581
+ /// callbacks. If a callback returns:
582
+ /// - void, a Future<> will be returned which will completes successfully as soon
583
+ /// as the callback runs.
584
+ /// - Status, a Future<> will be returned which will complete with the returned Status
585
+ /// as soon as the callback runs.
586
+ /// - V or Result<V>, a Future<V> will be returned which will complete with the result
587
+ /// of invoking the callback as soon as the callback runs.
588
+ /// - Future<V>, a Future<V> will be returned which will be marked complete when the
589
+ /// future returned by the callback completes (and will complete with the same
590
+ /// result).
591
+ ///
592
+ /// The continued Future type must be the same for both callbacks.
593
+ ///
594
+ /// Note that OnFailure can swallow errors, allowing continued Futures to successfully
595
+ /// complete even if this Future fails.
596
+ ///
597
+ /// If this future is already completed then the callback will be run immediately
598
+ /// and the returned future may already be marked complete.
599
+ ///
600
+ /// See AddCallback for general considerations when writing callbacks.
601
+ template <typename OnSuccess, typename OnFailure = PassthruOnFailure<OnSuccess>,
602
+ typename OnComplete = ThenOnComplete<OnSuccess, OnFailure>,
603
+ typename ContinuedFuture = typename OnComplete::ContinuedFuture>
604
+ ContinuedFuture Then(OnSuccess on_success, OnFailure on_failure = {},
605
+ CallbackOptions options = CallbackOptions::Defaults()) const {
606
+ auto next = ContinuedFuture::Make();
607
+ AddCallback(OnComplete{std::forward<OnSuccess>(on_success),
608
+ std::forward<OnFailure>(on_failure), next},
609
+ options);
610
+ return next;
611
+ }
612
+
613
+ /// \brief Implicit constructor to create a finished future from a value
614
+ Future(ValueType val) : Future() { // NOLINT runtime/explicit
615
+ impl_ = FutureImpl::MakeFinished(FutureState::SUCCESS);
616
+ SetResult(std::move(val));
617
+ }
618
+
619
+ /// \brief Implicit constructor to create a future from a Result, enabling use
620
+ /// of macros like ARROW_ASSIGN_OR_RAISE.
621
+ Future(Result<ValueType> res) : Future() { // NOLINT runtime/explicit
622
+ if (ARROW_PREDICT_TRUE(res.ok())) {
623
+ impl_ = FutureImpl::MakeFinished(FutureState::SUCCESS);
624
+ } else {
625
+ impl_ = FutureImpl::MakeFinished(FutureState::FAILURE);
626
+ }
627
+ SetResult(std::move(res));
628
+ }
629
+
630
+ /// \brief Implicit constructor to create a future from a Status, enabling use
631
+ /// of macros like ARROW_RETURN_NOT_OK.
632
+ Future(Status s) // NOLINT runtime/explicit
633
+ : Future(Result<ValueType>(std::move(s))) {}
634
+
635
+ protected:
636
+ void InitializeFromResult(Result<ValueType> res) {
637
+ if (ARROW_PREDICT_TRUE(res.ok())) {
638
+ impl_ = FutureImpl::MakeFinished(FutureState::SUCCESS);
639
+ } else {
640
+ impl_ = FutureImpl::MakeFinished(FutureState::FAILURE);
641
+ }
642
+ SetResult(std::move(res));
643
+ }
644
+
645
+ void Initialize() { impl_ = FutureImpl::Make(); }
646
+
647
+ Result<ValueType>* GetResult() const { return impl_->CastResult<ValueType>(); }
648
+
649
+ void SetResult(Result<ValueType> res) {
650
+ impl_->result_ = {new Result<ValueType>(std::move(res)),
651
+ [](void* p) { delete static_cast<Result<ValueType>*>(p); }};
652
+ }
653
+
654
+ void DoMarkFinished(Result<ValueType> res) {
655
+ SetResult(std::move(res));
656
+
657
+ if (ARROW_PREDICT_TRUE(GetResult()->ok())) {
658
+ impl_->MarkFinished();
659
+ } else {
660
+ impl_->MarkFailed();
661
+ }
662
+ }
663
+
664
+ void CheckValid() const {
665
+ #ifndef NDEBUG
666
+ if (!is_valid()) {
667
+ Status::Invalid("Invalid Future (default-initialized?)").Abort();
668
+ }
669
+ #endif
670
+ }
671
+
672
+ explicit Future(std::shared_ptr<FutureImpl> impl) : impl_(std::move(impl)) {}
673
+
674
+ std::shared_ptr<FutureImpl> impl_;
675
+
676
+ friend struct detail::ContinueFuture;
677
+
678
+ template <typename U>
679
+ friend class Future;
680
+ friend class WeakFuture<T>;
681
+
682
+ FRIEND_TEST(FutureRefTest, ChainRemoved);
683
+ FRIEND_TEST(FutureRefTest, TailRemoved);
684
+ FRIEND_TEST(FutureRefTest, HeadRemoved);
685
+ };
686
+
687
+ template <typename T>
688
+ typename Future<T>::SyncType FutureToSync(const Future<T>& fut) {
689
+ return fut.result();
690
+ }
691
+
692
+ template <>
693
+ inline typename Future<internal::Empty>::SyncType FutureToSync<internal::Empty>(
694
+ const Future<internal::Empty>& fut) {
695
+ return fut.status();
696
+ }
697
+
698
+ template <>
699
+ inline Future<>::Future(Status s) : Future(internal::Empty::ToResult(std::move(s))) {}
700
+
701
+ template <typename T>
702
+ class WeakFuture {
703
+ public:
704
+ explicit WeakFuture(const Future<T>& future) : impl_(future.impl_) {}
705
+
706
+ Future<T> get() { return Future<T>{impl_.lock()}; }
707
+
708
+ private:
709
+ std::weak_ptr<FutureImpl> impl_;
710
+ };
711
+
712
+ /// \defgroup future-utilities Functions for working with Futures
713
+ /// @{
714
+
715
+ /// If a Result<Future> holds an error instead of a Future, construct a finished Future
716
+ /// holding that error.
717
+ template <typename T>
718
+ static Future<T> DeferNotOk(Result<Future<T>> maybe_future) {
719
+ if (ARROW_PREDICT_FALSE(!maybe_future.ok())) {
720
+ return Future<T>::MakeFinished(std::move(maybe_future).status());
721
+ }
722
+ return std::move(maybe_future).MoveValueUnsafe();
723
+ }
724
+
725
+ /// \brief Create a Future which completes when all of `futures` complete.
726
+ ///
727
+ /// The future's result is a vector of the results of `futures`.
728
+ /// Note that this future will never be marked "failed"; failed results
729
+ /// will be stored in the result vector alongside successful results.
730
+ template <typename T>
731
+ Future<std::vector<Result<T>>> All(std::vector<Future<T>> futures) {
732
+ struct State {
733
+ explicit State(std::vector<Future<T>> f)
734
+ : futures(std::move(f)), n_remaining(futures.size()) {}
735
+
736
+ std::vector<Future<T>> futures;
737
+ std::atomic<size_t> n_remaining;
738
+ };
739
+
740
+ if (futures.size() == 0) {
741
+ return {std::vector<Result<T>>{}};
742
+ }
743
+
744
+ auto state = std::make_shared<State>(std::move(futures));
745
+
746
+ auto out = Future<std::vector<Result<T>>>::Make();
747
+ for (const Future<T>& future : state->futures) {
748
+ future.AddCallback([state, out](const Result<T>&) mutable {
749
+ if (state->n_remaining.fetch_sub(1) != 1) return;
750
+
751
+ std::vector<Result<T>> results(state->futures.size());
752
+ for (size_t i = 0; i < results.size(); ++i) {
753
+ results[i] = state->futures[i].result();
754
+ }
755
+ out.MarkFinished(std::move(results));
756
+ });
757
+ }
758
+ return out;
759
+ }
760
+
761
+ /// \brief Create a Future which completes when all of `futures` complete.
762
+ ///
763
+ /// The future will be marked complete if all `futures` complete
764
+ /// successfully. Otherwise, it will be marked failed with the status of
765
+ /// the first failing future.
766
+ ARROW_EXPORT
767
+ Future<> AllComplete(const std::vector<Future<>>& futures);
768
+
769
+ /// \brief Create a Future which completes when all of `futures` complete.
770
+ ///
771
+ /// The future will finish with an ok status if all `futures` finish with
772
+ /// an ok status. Otherwise, it will be marked failed with the status of
773
+ /// one of the failing futures.
774
+ ///
775
+ /// Unlike AllComplete this Future will not complete immediately when a
776
+ /// failure occurs. It will wait until all futures have finished.
777
+ ARROW_EXPORT
778
+ Future<> AllFinished(const std::vector<Future<>>& futures);
779
+
780
+ /// @}
781
+
782
+ struct Continue {
783
+ template <typename T>
784
+ operator std::optional<T>() && { // NOLINT explicit
785
+ return {};
786
+ }
787
+ };
788
+
789
+ template <typename T = internal::Empty>
790
+ std::optional<T> Break(T break_value = {}) {
791
+ return std::optional<T>{std::move(break_value)};
792
+ }
793
+
794
+ template <typename T = internal::Empty>
795
+ using ControlFlow = std::optional<T>;
796
+
797
+ /// \brief Loop through an asynchronous sequence
798
+ ///
799
+ /// \param[in] iterate A generator of Future<ControlFlow<BreakValue>>. On completion
800
+ /// of each yielded future the resulting ControlFlow will be examined. A Break will
801
+ /// terminate the loop, while a Continue will re-invoke `iterate`.
802
+ ///
803
+ /// \return A future which will complete when a Future returned by iterate completes with
804
+ /// a Break
805
+ template <typename Iterate,
806
+ typename Control = typename detail::result_of_t<Iterate()>::ValueType,
807
+ typename BreakValueType = typename Control::value_type>
808
+ Future<BreakValueType> Loop(Iterate iterate) {
809
+ struct Callback {
810
+ bool CheckForTermination(const Result<Control>& control_res) {
811
+ if (!control_res.ok()) {
812
+ break_fut.MarkFinished(control_res.status());
813
+ return true;
814
+ }
815
+ if (control_res->has_value()) {
816
+ break_fut.MarkFinished(**control_res);
817
+ return true;
818
+ }
819
+ return false;
820
+ }
821
+
822
+ void operator()(const Result<Control>& maybe_control) && {
823
+ if (CheckForTermination(maybe_control)) return;
824
+
825
+ auto control_fut = iterate();
826
+ while (true) {
827
+ if (control_fut.TryAddCallback([this]() { return *this; })) {
828
+ // Adding a callback succeeded; control_fut was not finished
829
+ // and we must wait to CheckForTermination.
830
+ return;
831
+ }
832
+ // Adding a callback failed; control_fut was finished and we
833
+ // can CheckForTermination immediately. This also avoids recursion and potential
834
+ // stack overflow.
835
+ if (CheckForTermination(control_fut.result())) return;
836
+
837
+ control_fut = iterate();
838
+ }
839
+ }
840
+
841
+ Iterate iterate;
842
+
843
+ // If the future returned by control_fut is never completed then we will be hanging on
844
+ // to break_fut forever even if the listener has given up listening on it. Instead we
845
+ // rely on the fact that a producer (the caller of Future<>::Make) is always
846
+ // responsible for completing the futures they create.
847
+ // TODO: Could avoid this kind of situation with "future abandonment" similar to mesos
848
+ Future<BreakValueType> break_fut;
849
+ };
850
+
851
+ auto break_fut = Future<BreakValueType>::Make();
852
+ auto control_fut = iterate();
853
+ control_fut.AddCallback(Callback{std::move(iterate), break_fut});
854
+
855
+ return break_fut;
856
+ }
857
+
858
+ inline Future<> ToFuture(Status status) {
859
+ return Future<>::MakeFinished(std::move(status));
860
+ }
861
+
862
+ template <typename T>
863
+ Future<T> ToFuture(T value) {
864
+ return Future<T>::MakeFinished(std::move(value));
865
+ }
866
+
867
+ template <typename T>
868
+ Future<T> ToFuture(Result<T> maybe_value) {
869
+ return Future<T>::MakeFinished(std::move(maybe_value));
870
+ }
871
+
872
+ template <typename T>
873
+ Future<T> ToFuture(Future<T> fut) {
874
+ return fut;
875
+ }
876
+
877
+ template <typename T>
878
+ struct EnsureFuture {
879
+ using type = decltype(ToFuture(std::declval<T>()));
880
+ };
881
+
882
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/hash_util.h ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ namespace arrow {
21
+ namespace internal {
22
+
23
+ // ----------------------------------------------------------------------
24
+ // BEGIN Hash utilities from Boost
25
+
26
+ namespace detail {
27
+
28
+ #if defined(_MSC_VER)
29
+ # define ARROW_HASH_ROTL32(x, r) _rotl(x, r)
30
+ #else
31
+ # define ARROW_HASH_ROTL32(x, r) (x << r) | (x >> (32 - r))
32
+ #endif
33
+
34
+ template <typename SizeT>
35
+ inline void hash_combine_impl(SizeT& seed, SizeT value) {
36
+ seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
37
+ }
38
+
39
+ inline void hash_combine_impl(uint32_t& h1, uint32_t k1) {
40
+ const uint32_t c1 = 0xcc9e2d51;
41
+ const uint32_t c2 = 0x1b873593;
42
+
43
+ k1 *= c1;
44
+ k1 = ARROW_HASH_ROTL32(k1, 15);
45
+ k1 *= c2;
46
+
47
+ h1 ^= k1;
48
+ h1 = ARROW_HASH_ROTL32(h1, 13);
49
+ h1 = h1 * 5 + 0xe6546b64;
50
+ }
51
+
52
+ #undef ARROW_HASH_ROTL32
53
+
54
+ } // namespace detail
55
+
56
+ template <class T>
57
+ inline void hash_combine(std::size_t& seed, T const& v) {
58
+ std::hash<T> hasher;
59
+ return ::arrow::internal::detail::hash_combine_impl(seed, hasher(v));
60
+ }
61
+
62
+ // END Hash utilities from Boost
63
+ // ----------------------------------------------------------------------
64
+
65
+ } // namespace internal
66
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h ADDED
@@ -0,0 +1,944 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Private header, not to be exported
19
+
20
+ #pragma once
21
+
22
+ #include <algorithm>
23
+ #include <cassert>
24
+ #include <cmath>
25
+ #include <cstdint>
26
+ #include <cstring>
27
+ #include <limits>
28
+ #include <memory>
29
+ #include <string>
30
+ #include <type_traits>
31
+ #include <utility>
32
+ #include <vector>
33
+
34
+ #include "arrow/array/builder_binary.h"
35
+ #include "arrow/buffer_builder.h"
36
+ #include "arrow/result.h"
37
+ #include "arrow/status.h"
38
+ #include "arrow/type_fwd.h"
39
+ #include "arrow/type_traits.h"
40
+ #include "arrow/util/bit_util.h"
41
+ #include "arrow/util/bitmap_builders.h"
42
+ #include "arrow/util/endian.h"
43
+ #include "arrow/util/logging.h"
44
+ #include "arrow/util/macros.h"
45
+ #include "arrow/util/ubsan.h"
46
+
47
+ #define XXH_INLINE_ALL
48
+
49
+ #include "arrow/vendored/xxhash.h" // IWYU pragma: keep
50
+
51
+ namespace arrow {
52
+ namespace internal {
53
+
54
+ // XXX would it help to have a 32-bit hash value on large datasets?
55
+ typedef uint64_t hash_t;
56
+
57
+ // Notes about the choice of a hash function.
58
+ // - XXH3 is extremely fast on most data sizes, from small to huge;
59
+ // faster even than HW CRC-based hashing schemes
60
+ // - our custom hash function for tiny values (< 16 bytes) is still
61
+ // significantly faster (~30%), at least on this machine and compiler
62
+
63
+ template <uint64_t AlgNum>
64
+ inline hash_t ComputeStringHash(const void* data, int64_t length);
65
+
66
+ /// \brief A hash function for bitmaps that can handle offsets and lengths in
67
+ /// terms of number of bits. The hash only depends on the bits actually hashed.
68
+ ///
69
+ /// It's the caller's responsibility to ensure that bits_offset + num_bits are
70
+ /// readable from the bitmap.
71
+ ///
72
+ /// \pre bits_offset >= 0
73
+ /// \pre num_bits >= 0
74
+ /// \pre (bits_offset + num_bits + 7) / 8 <= readable length in bytes from bitmap
75
+ ///
76
+ /// \param bitmap The pointer to the bitmap.
77
+ /// \param seed The seed for the hash function (useful when chaining hash functions).
78
+ /// \param bits_offset The offset in bits relative to the start of the bitmap.
79
+ /// \param num_bits The number of bits after the offset to be hashed.
80
+ ARROW_EXPORT hash_t ComputeBitmapHash(const uint8_t* bitmap, hash_t seed,
81
+ int64_t bits_offset, int64_t num_bits);
82
+
83
+ template <typename Scalar, uint64_t AlgNum>
84
+ struct ScalarHelperBase {
85
+ static bool CompareScalars(Scalar u, Scalar v) { return u == v; }
86
+
87
+ static hash_t ComputeHash(const Scalar& value) {
88
+ // Generic hash computation for scalars. Simply apply the string hash
89
+ // to the bit representation of the value.
90
+
91
+ // XXX in the case of FP values, we'd like equal values to have the same hash,
92
+ // even if they have different bit representations...
93
+ return ComputeStringHash<AlgNum>(&value, sizeof(value));
94
+ }
95
+ };
96
+
97
+ template <typename Scalar, uint64_t AlgNum = 0, typename Enable = void>
98
+ struct ScalarHelper : public ScalarHelperBase<Scalar, AlgNum> {};
99
+
100
+ template <typename Scalar, uint64_t AlgNum>
101
+ struct ScalarHelper<Scalar, AlgNum, enable_if_t<std::is_integral<Scalar>::value>>
102
+ : public ScalarHelperBase<Scalar, AlgNum> {
103
+ // ScalarHelper specialization for integers
104
+
105
+ static hash_t ComputeHash(const Scalar& value) {
106
+ // Faster hash computation for integers.
107
+
108
+ // Two of xxhash's prime multipliers (which are chosen for their
109
+ // bit dispersion properties)
110
+ static constexpr uint64_t multipliers[] = {11400714785074694791ULL,
111
+ 14029467366897019727ULL};
112
+
113
+ // Multiplying by the prime number mixes the low bits into the high bits,
114
+ // then byte-swapping (which is a single CPU instruction) allows the
115
+ // combined high and low bits to participate in the initial hash table index.
116
+ auto h = static_cast<hash_t>(value);
117
+ return bit_util::ByteSwap(multipliers[AlgNum] * h);
118
+ }
119
+ };
120
+
121
+ template <typename Scalar, uint64_t AlgNum>
122
+ struct ScalarHelper<Scalar, AlgNum,
123
+ enable_if_t<std::is_same<std::string_view, Scalar>::value>>
124
+ : public ScalarHelperBase<Scalar, AlgNum> {
125
+ // ScalarHelper specialization for std::string_view
126
+
127
+ static hash_t ComputeHash(std::string_view value) {
128
+ return ComputeStringHash<AlgNum>(value.data(), static_cast<int64_t>(value.size()));
129
+ }
130
+ };
131
+
132
+ template <typename Scalar, uint64_t AlgNum>
133
+ struct ScalarHelper<Scalar, AlgNum, enable_if_t<std::is_floating_point<Scalar>::value>>
134
+ : public ScalarHelperBase<Scalar, AlgNum> {
135
+ // ScalarHelper specialization for reals
136
+
137
+ static bool CompareScalars(Scalar u, Scalar v) {
138
+ if (std::isnan(u)) {
139
+ // XXX should we do a bit-precise comparison?
140
+ return std::isnan(v);
141
+ }
142
+ return u == v;
143
+ }
144
+ };
145
+
146
+ template <uint64_t AlgNum = 0>
147
+ hash_t ComputeStringHash(const void* data, int64_t length) {
148
+ if (ARROW_PREDICT_TRUE(length <= 16)) {
149
+ // Specialize for small hash strings, as they are quite common as
150
+ // hash table keys. Even XXH3 isn't quite as fast.
151
+ auto p = reinterpret_cast<const uint8_t*>(data);
152
+ auto n = static_cast<uint32_t>(length);
153
+ if (n <= 8) {
154
+ if (n <= 3) {
155
+ if (n == 0) {
156
+ return 1U;
157
+ }
158
+ uint32_t x = (n << 24) ^ (p[0] << 16) ^ (p[n / 2] << 8) ^ p[n - 1];
159
+ return ScalarHelper<uint32_t, AlgNum>::ComputeHash(x);
160
+ }
161
+ // 4 <= length <= 8
162
+ // We can read the string as two overlapping 32-bit ints, apply
163
+ // different hash functions to each of them in parallel, then XOR
164
+ // the results
165
+ uint32_t x, y;
166
+ hash_t hx, hy;
167
+ x = util::SafeLoadAs<uint32_t>(p + n - 4);
168
+ y = util::SafeLoadAs<uint32_t>(p);
169
+ hx = ScalarHelper<uint32_t, AlgNum>::ComputeHash(x);
170
+ hy = ScalarHelper<uint32_t, AlgNum ^ 1>::ComputeHash(y);
171
+ return n ^ hx ^ hy;
172
+ }
173
+ // 8 <= length <= 16
174
+ // Apply the same principle as above
175
+ uint64_t x, y;
176
+ hash_t hx, hy;
177
+ x = util::SafeLoadAs<uint64_t>(p + n - 8);
178
+ y = util::SafeLoadAs<uint64_t>(p);
179
+ hx = ScalarHelper<uint64_t, AlgNum>::ComputeHash(x);
180
+ hy = ScalarHelper<uint64_t, AlgNum ^ 1>::ComputeHash(y);
181
+ return n ^ hx ^ hy;
182
+ }
183
+
184
+ #if XXH3_SECRET_SIZE_MIN != 136
185
+ # error XXH3_SECRET_SIZE_MIN changed, please fix kXxh3Secrets
186
+ #endif
187
+
188
+ // XXH3_64bits_withSeed generates a secret based on the seed, which is too slow.
189
+ // Instead, we use hard-coded random secrets. To maximize cache efficiency,
190
+ // they reuse the same memory area.
191
+ static constexpr unsigned char kXxh3Secrets[XXH3_SECRET_SIZE_MIN + 1] = {
192
+ 0xe7, 0x8b, 0x13, 0xf9, 0xfc, 0xb5, 0x8e, 0xef, 0x81, 0x48, 0x2c, 0xbf, 0xf9, 0x9f,
193
+ 0xc1, 0x1e, 0x43, 0x6d, 0xbf, 0xa6, 0x6d, 0xb5, 0x72, 0xbc, 0x97, 0xd8, 0x61, 0x24,
194
+ 0x0f, 0x12, 0xe3, 0x05, 0x21, 0xf7, 0x5c, 0x66, 0x67, 0xa5, 0x65, 0x03, 0x96, 0x26,
195
+ 0x69, 0xd8, 0x29, 0x20, 0xf8, 0xc7, 0xb0, 0x3d, 0xdd, 0x7d, 0x18, 0xa0, 0x60, 0x75,
196
+ 0x92, 0xa4, 0xce, 0xba, 0xc0, 0x77, 0xf4, 0xac, 0xb7, 0x03, 0x53, 0xf0, 0x98, 0xce,
197
+ 0xe6, 0x2b, 0x20, 0xc7, 0x82, 0x91, 0xab, 0xbf, 0x68, 0x5c, 0x62, 0x4d, 0x33, 0xa3,
198
+ 0xe1, 0xb3, 0xff, 0x97, 0x54, 0x4c, 0x44, 0x34, 0xb5, 0xb9, 0x32, 0x4c, 0x75, 0x42,
199
+ 0x89, 0x53, 0x94, 0xd4, 0x9f, 0x2b, 0x76, 0x4d, 0x4e, 0xe6, 0xfa, 0x15, 0x3e, 0xc1,
200
+ 0xdb, 0x71, 0x4b, 0x2c, 0x94, 0xf5, 0xfc, 0x8c, 0x89, 0x4b, 0xfb, 0xc1, 0x82, 0xa5,
201
+ 0x6a, 0x53, 0xf9, 0x4a, 0xba, 0xce, 0x1f, 0xc0, 0x97, 0x1a, 0x87};
202
+
203
+ static_assert(AlgNum < 2, "AlgNum too large");
204
+ static constexpr auto secret = kXxh3Secrets + AlgNum;
205
+ return XXH3_64bits_withSecret(data, static_cast<size_t>(length), secret,
206
+ XXH3_SECRET_SIZE_MIN);
207
+ }
208
+
209
+ // XXX add a HashEq<ArrowType> struct with both hash and compare functions?
210
+
211
+ // ----------------------------------------------------------------------
212
+ // An open-addressing insert-only hash table (no deletes)
213
+
214
+ template <typename Payload>
215
+ class HashTable {
216
+ public:
217
+ static constexpr hash_t kSentinel = 0ULL;
218
+ static constexpr int64_t kLoadFactor = 2UL;
219
+
220
+ struct Entry {
221
+ hash_t h;
222
+ Payload payload;
223
+
224
+ // An entry is valid if the hash is different from the sentinel value
225
+ operator bool() const { return h != kSentinel; }
226
+ };
227
+
228
+ HashTable(MemoryPool* pool, uint64_t capacity) : entries_builder_(pool) {
229
+ DCHECK_NE(pool, nullptr);
230
+ // Minimum of 32 elements
231
+ capacity = std::max<uint64_t>(capacity, 32UL);
232
+ capacity_ = bit_util::NextPower2(capacity);
233
+ capacity_mask_ = capacity_ - 1;
234
+ size_ = 0;
235
+
236
+ DCHECK_OK(UpsizeBuffer(capacity_));
237
+ }
238
+
239
+ // Lookup with non-linear probing
240
+ // cmp_func should have signature bool(const Payload*).
241
+ // Return a (Entry*, found) pair.
242
+ template <typename CmpFunc>
243
+ std::pair<Entry*, bool> Lookup(hash_t h, CmpFunc&& cmp_func) {
244
+ auto p = Lookup<DoCompare, CmpFunc>(h, entries_, capacity_mask_,
245
+ std::forward<CmpFunc>(cmp_func));
246
+ return {&entries_[p.first], p.second};
247
+ }
248
+
249
+ template <typename CmpFunc>
250
+ std::pair<const Entry*, bool> Lookup(hash_t h, CmpFunc&& cmp_func) const {
251
+ auto p = Lookup<DoCompare, CmpFunc>(h, entries_, capacity_mask_,
252
+ std::forward<CmpFunc>(cmp_func));
253
+ return {&entries_[p.first], p.second};
254
+ }
255
+
256
+ Status Insert(Entry* entry, hash_t h, const Payload& payload) {
257
+ // Ensure entry is empty before inserting
258
+ assert(!*entry);
259
+ entry->h = FixHash(h);
260
+ entry->payload = payload;
261
+ ++size_;
262
+
263
+ if (ARROW_PREDICT_FALSE(NeedUpsizing())) {
264
+ // Resize less frequently since it is expensive
265
+ return Upsize(capacity_ * kLoadFactor * 2);
266
+ }
267
+ return Status::OK();
268
+ }
269
+
270
+ uint64_t size() const { return size_; }
271
+
272
+ // Visit all non-empty entries in the table
273
+ // The visit_func should have signature void(const Entry*)
274
+ template <typename VisitFunc>
275
+ void VisitEntries(VisitFunc&& visit_func) const {
276
+ for (uint64_t i = 0; i < capacity_; i++) {
277
+ const auto& entry = entries_[i];
278
+ if (entry) {
279
+ visit_func(&entry);
280
+ }
281
+ }
282
+ }
283
+
284
+ protected:
285
+ // NoCompare is for when the value is known not to exist in the table
286
+ enum CompareKind { DoCompare, NoCompare };
287
+
288
+ // The workhorse lookup function
289
+ template <CompareKind CKind, typename CmpFunc>
290
+ std::pair<uint64_t, bool> Lookup(hash_t h, const Entry* entries, uint64_t size_mask,
291
+ CmpFunc&& cmp_func) const {
292
+ static constexpr uint8_t perturb_shift = 5;
293
+
294
+ uint64_t index, perturb;
295
+ const Entry* entry;
296
+
297
+ h = FixHash(h);
298
+ index = h & size_mask;
299
+ perturb = (h >> perturb_shift) + 1U;
300
+
301
+ while (true) {
302
+ entry = &entries[index];
303
+ if (CompareEntry<CKind, CmpFunc>(h, entry, std::forward<CmpFunc>(cmp_func))) {
304
+ // Found
305
+ return {index, true};
306
+ }
307
+ if (entry->h == kSentinel) {
308
+ // Empty slot
309
+ return {index, false};
310
+ }
311
+
312
+ // Perturbation logic inspired from CPython's set / dict object.
313
+ // The goal is that all 64 bits of the unmasked hash value eventually
314
+ // participate in the probing sequence, to minimize clustering.
315
+ index = (index + perturb) & size_mask;
316
+ perturb = (perturb >> perturb_shift) + 1U;
317
+ }
318
+ }
319
+
320
+ template <CompareKind CKind, typename CmpFunc>
321
+ bool CompareEntry(hash_t h, const Entry* entry, CmpFunc&& cmp_func) const {
322
+ if (CKind == NoCompare) {
323
+ return false;
324
+ } else {
325
+ return entry->h == h && cmp_func(&entry->payload);
326
+ }
327
+ }
328
+
329
+ bool NeedUpsizing() const {
330
+ // Keep the load factor <= 1/2
331
+ return size_ * kLoadFactor >= capacity_;
332
+ }
333
+
334
+ Status UpsizeBuffer(uint64_t capacity) {
335
+ RETURN_NOT_OK(entries_builder_.Resize(capacity));
336
+ entries_ = entries_builder_.mutable_data();
337
+ memset(static_cast<void*>(entries_), 0, capacity * sizeof(Entry));
338
+
339
+ return Status::OK();
340
+ }
341
+
342
+ Status Upsize(uint64_t new_capacity) {
343
+ assert(new_capacity > capacity_);
344
+ uint64_t new_mask = new_capacity - 1;
345
+ assert((new_capacity & new_mask) == 0); // it's a power of two
346
+
347
+ // Stash old entries and seal builder, effectively resetting the Buffer
348
+ const Entry* old_entries = entries_;
349
+ ARROW_ASSIGN_OR_RAISE(auto previous, entries_builder_.FinishWithLength(capacity_));
350
+ // Allocate new buffer
351
+ RETURN_NOT_OK(UpsizeBuffer(new_capacity));
352
+
353
+ for (uint64_t i = 0; i < capacity_; i++) {
354
+ const auto& entry = old_entries[i];
355
+ if (entry) {
356
+ // Dummy compare function will not be called
357
+ auto p = Lookup<NoCompare>(entry.h, entries_, new_mask,
358
+ [](const Payload*) { return false; });
359
+ // Lookup<NoCompare> (and CompareEntry<NoCompare>) ensure that an
360
+ // empty slots is always returned
361
+ assert(!p.second);
362
+ entries_[p.first] = entry;
363
+ }
364
+ }
365
+ capacity_ = new_capacity;
366
+ capacity_mask_ = new_mask;
367
+
368
+ return Status::OK();
369
+ }
370
+
371
+ hash_t FixHash(hash_t h) const { return (h == kSentinel) ? 42U : h; }
372
+
373
+ // The number of slots available in the hash table array.
374
+ uint64_t capacity_;
375
+ uint64_t capacity_mask_;
376
+ // The number of used slots in the hash table array.
377
+ uint64_t size_;
378
+
379
+ Entry* entries_;
380
+ TypedBufferBuilder<Entry> entries_builder_;
381
+ };
382
+
383
+ // XXX typedef memo_index_t int32_t ?
384
+
385
+ constexpr int32_t kKeyNotFound = -1;
386
+
387
+ // ----------------------------------------------------------------------
388
+ // A base class for memoization table.
389
+
390
+ class MemoTable {
391
+ public:
392
+ virtual ~MemoTable() = default;
393
+
394
+ virtual int32_t size() const = 0;
395
+ };
396
+
397
+ // ----------------------------------------------------------------------
398
+ // A memoization table for memory-cheap scalar values.
399
+
400
+ // The memoization table remembers and allows to look up the insertion
401
+ // index for each key.
402
+
403
+ template <typename Scalar, template <class> class HashTableTemplateType = HashTable>
404
+ class ScalarMemoTable : public MemoTable {
405
+ public:
406
+ explicit ScalarMemoTable(MemoryPool* pool, int64_t entries = 0)
407
+ : hash_table_(pool, static_cast<uint64_t>(entries)) {}
408
+
409
+ int32_t Get(const Scalar& value) const {
410
+ auto cmp_func = [value](const Payload* payload) -> bool {
411
+ return ScalarHelper<Scalar, 0>::CompareScalars(payload->value, value);
412
+ };
413
+ hash_t h = ComputeHash(value);
414
+ auto p = hash_table_.Lookup(h, cmp_func);
415
+ if (p.second) {
416
+ return p.first->payload.memo_index;
417
+ } else {
418
+ return kKeyNotFound;
419
+ }
420
+ }
421
+
422
+ template <typename Func1, typename Func2>
423
+ Status GetOrInsert(const Scalar& value, Func1&& on_found, Func2&& on_not_found,
424
+ int32_t* out_memo_index) {
425
+ auto cmp_func = [value](const Payload* payload) -> bool {
426
+ return ScalarHelper<Scalar, 0>::CompareScalars(value, payload->value);
427
+ };
428
+ hash_t h = ComputeHash(value);
429
+ auto p = hash_table_.Lookup(h, cmp_func);
430
+ int32_t memo_index;
431
+ if (p.second) {
432
+ memo_index = p.first->payload.memo_index;
433
+ on_found(memo_index);
434
+ } else {
435
+ memo_index = size();
436
+ RETURN_NOT_OK(hash_table_.Insert(p.first, h, {value, memo_index}));
437
+ on_not_found(memo_index);
438
+ }
439
+ *out_memo_index = memo_index;
440
+ return Status::OK();
441
+ }
442
+
443
+ Status GetOrInsert(const Scalar& value, int32_t* out_memo_index) {
444
+ return GetOrInsert(
445
+ value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index);
446
+ }
447
+
448
+ int32_t GetNull() const { return null_index_; }
449
+
450
+ template <typename Func1, typename Func2>
451
+ int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) {
452
+ int32_t memo_index = GetNull();
453
+ if (memo_index != kKeyNotFound) {
454
+ on_found(memo_index);
455
+ } else {
456
+ null_index_ = memo_index = size();
457
+ on_not_found(memo_index);
458
+ }
459
+ return memo_index;
460
+ }
461
+
462
+ int32_t GetOrInsertNull() {
463
+ return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {});
464
+ }
465
+
466
+ // The number of entries in the memo table +1 if null was added.
467
+ // (which is also 1 + the largest memo index)
468
+ int32_t size() const override {
469
+ return static_cast<int32_t>(hash_table_.size()) + (GetNull() != kKeyNotFound);
470
+ }
471
+
472
+ // Copy values starting from index `start` into `out_data`
473
+ void CopyValues(int32_t start, Scalar* out_data) const {
474
+ hash_table_.VisitEntries([=](const HashTableEntry* entry) {
475
+ int32_t index = entry->payload.memo_index - start;
476
+ if (index >= 0) {
477
+ out_data[index] = entry->payload.value;
478
+ }
479
+ });
480
+ // Zero-initialize the null entry
481
+ if (null_index_ != kKeyNotFound) {
482
+ int32_t index = null_index_ - start;
483
+ if (index >= 0) {
484
+ out_data[index] = Scalar{};
485
+ }
486
+ }
487
+ }
488
+
489
+ void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); }
490
+
491
+ protected:
492
+ struct Payload {
493
+ Scalar value;
494
+ int32_t memo_index;
495
+ };
496
+
497
+ using HashTableType = HashTableTemplateType<Payload>;
498
+ using HashTableEntry = typename HashTableType::Entry;
499
+ HashTableType hash_table_;
500
+ int32_t null_index_ = kKeyNotFound;
501
+
502
+ hash_t ComputeHash(const Scalar& value) const {
503
+ return ScalarHelper<Scalar, 0>::ComputeHash(value);
504
+ }
505
+
506
+ public:
507
+ // defined here so that `HashTableType` is visible
508
+ // Merge entries from `other_table` into `this->hash_table_`.
509
+ Status MergeTable(const ScalarMemoTable& other_table) {
510
+ const HashTableType& other_hashtable = other_table.hash_table_;
511
+
512
+ other_hashtable.VisitEntries([this](const HashTableEntry* other_entry) {
513
+ int32_t unused;
514
+ DCHECK_OK(this->GetOrInsert(other_entry->payload.value, &unused));
515
+ });
516
+ // TODO: ARROW-17074 - implement proper error handling
517
+ return Status::OK();
518
+ }
519
+ };
520
+
521
+ // ----------------------------------------------------------------------
522
+ // A memoization table for small scalar values, using direct indexing
523
+
524
+ template <typename Scalar, typename Enable = void>
525
+ struct SmallScalarTraits {};
526
+
527
+ template <>
528
+ struct SmallScalarTraits<bool> {
529
+ static constexpr int32_t cardinality = 2;
530
+
531
+ static uint32_t AsIndex(bool value) { return value ? 1 : 0; }
532
+ };
533
+
534
+ template <typename Scalar>
535
+ struct SmallScalarTraits<Scalar, enable_if_t<std::is_integral<Scalar>::value>> {
536
+ using Unsigned = typename std::make_unsigned<Scalar>::type;
537
+
538
+ static constexpr int32_t cardinality = 1U + std::numeric_limits<Unsigned>::max();
539
+
540
+ static uint32_t AsIndex(Scalar value) { return static_cast<Unsigned>(value); }
541
+ };
542
+
543
+ template <typename Scalar, template <class> class HashTableTemplateType = HashTable>
544
+ class SmallScalarMemoTable : public MemoTable {
545
+ public:
546
+ explicit SmallScalarMemoTable(MemoryPool* pool, int64_t entries = 0) {
547
+ std::fill(value_to_index_, value_to_index_ + cardinality + 1, kKeyNotFound);
548
+ index_to_value_.reserve(cardinality);
549
+ }
550
+
551
+ int32_t Get(const Scalar value) const {
552
+ auto value_index = AsIndex(value);
553
+ return value_to_index_[value_index];
554
+ }
555
+
556
+ template <typename Func1, typename Func2>
557
+ Status GetOrInsert(const Scalar value, Func1&& on_found, Func2&& on_not_found,
558
+ int32_t* out_memo_index) {
559
+ auto value_index = AsIndex(value);
560
+ auto memo_index = value_to_index_[value_index];
561
+ if (memo_index == kKeyNotFound) {
562
+ memo_index = static_cast<int32_t>(index_to_value_.size());
563
+ index_to_value_.push_back(value);
564
+ value_to_index_[value_index] = memo_index;
565
+ DCHECK_LT(memo_index, cardinality + 1);
566
+ on_not_found(memo_index);
567
+ } else {
568
+ on_found(memo_index);
569
+ }
570
+ *out_memo_index = memo_index;
571
+ return Status::OK();
572
+ }
573
+
574
+ Status GetOrInsert(const Scalar value, int32_t* out_memo_index) {
575
+ return GetOrInsert(
576
+ value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index);
577
+ }
578
+
579
+ int32_t GetNull() const { return value_to_index_[cardinality]; }
580
+
581
+ template <typename Func1, typename Func2>
582
+ int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) {
583
+ auto memo_index = GetNull();
584
+ if (memo_index == kKeyNotFound) {
585
+ memo_index = value_to_index_[cardinality] = size();
586
+ index_to_value_.push_back(0);
587
+ on_not_found(memo_index);
588
+ } else {
589
+ on_found(memo_index);
590
+ }
591
+ return memo_index;
592
+ }
593
+
594
+ int32_t GetOrInsertNull() {
595
+ return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {});
596
+ }
597
+
598
+ // The number of entries in the memo table
599
+ // (which is also 1 + the largest memo index)
600
+ int32_t size() const override { return static_cast<int32_t>(index_to_value_.size()); }
601
+
602
+ // Merge entries from `other_table` into `this`.
603
+ Status MergeTable(const SmallScalarMemoTable& other_table) {
604
+ for (const Scalar& other_val : other_table.index_to_value_) {
605
+ int32_t unused;
606
+ RETURN_NOT_OK(this->GetOrInsert(other_val, &unused));
607
+ }
608
+ return Status::OK();
609
+ }
610
+
611
+ // Copy values starting from index `start` into `out_data`
612
+ void CopyValues(int32_t start, Scalar* out_data) const {
613
+ DCHECK_GE(start, 0);
614
+ DCHECK_LE(static_cast<size_t>(start), index_to_value_.size());
615
+ int64_t offset = start * static_cast<int32_t>(sizeof(Scalar));
616
+ memcpy(out_data, index_to_value_.data() + offset, (size() - start) * sizeof(Scalar));
617
+ }
618
+
619
+ void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); }
620
+
621
+ const std::vector<Scalar>& values() const { return index_to_value_; }
622
+
623
+ protected:
624
+ static constexpr auto cardinality = SmallScalarTraits<Scalar>::cardinality;
625
+ static_assert(cardinality <= 256, "cardinality too large for direct-addressed table");
626
+
627
+ uint32_t AsIndex(Scalar value) const {
628
+ return SmallScalarTraits<Scalar>::AsIndex(value);
629
+ }
630
+
631
+ // The last index is reserved for the null element.
632
+ int32_t value_to_index_[cardinality + 1];
633
+ std::vector<Scalar> index_to_value_;
634
+ };
635
+
636
+ // ----------------------------------------------------------------------
637
+ // A memoization table for variable-sized binary data.
638
+
639
+ template <typename BinaryBuilderT>
640
+ class BinaryMemoTable : public MemoTable {
641
+ public:
642
+ using builder_offset_type = typename BinaryBuilderT::offset_type;
643
+ explicit BinaryMemoTable(MemoryPool* pool, int64_t entries = 0,
644
+ int64_t values_size = -1)
645
+ : hash_table_(pool, static_cast<uint64_t>(entries)), binary_builder_(pool) {
646
+ const int64_t data_size = (values_size < 0) ? entries * 4 : values_size;
647
+ DCHECK_OK(binary_builder_.Resize(entries));
648
+ DCHECK_OK(binary_builder_.ReserveData(data_size));
649
+ }
650
+
651
+ int32_t Get(const void* data, builder_offset_type length) const {
652
+ hash_t h = ComputeStringHash<0>(data, length);
653
+ auto p = Lookup(h, data, length);
654
+ if (p.second) {
655
+ return p.first->payload.memo_index;
656
+ } else {
657
+ return kKeyNotFound;
658
+ }
659
+ }
660
+
661
+ int32_t Get(std::string_view value) const {
662
+ return Get(value.data(), static_cast<builder_offset_type>(value.length()));
663
+ }
664
+
665
+ template <typename Func1, typename Func2>
666
+ Status GetOrInsert(const void* data, builder_offset_type length, Func1&& on_found,
667
+ Func2&& on_not_found, int32_t* out_memo_index) {
668
+ hash_t h = ComputeStringHash<0>(data, length);
669
+ auto p = Lookup(h, data, length);
670
+ int32_t memo_index;
671
+ if (p.second) {
672
+ memo_index = p.first->payload.memo_index;
673
+ on_found(memo_index);
674
+ } else {
675
+ memo_index = size();
676
+ // Insert string value
677
+ RETURN_NOT_OK(binary_builder_.Append(static_cast<const char*>(data), length));
678
+ // Insert hash entry
679
+ RETURN_NOT_OK(
680
+ hash_table_.Insert(const_cast<HashTableEntry*>(p.first), h, {memo_index}));
681
+
682
+ on_not_found(memo_index);
683
+ }
684
+ *out_memo_index = memo_index;
685
+ return Status::OK();
686
+ }
687
+
688
+ template <typename Func1, typename Func2>
689
+ Status GetOrInsert(std::string_view value, Func1&& on_found, Func2&& on_not_found,
690
+ int32_t* out_memo_index) {
691
+ return GetOrInsert(value.data(), static_cast<builder_offset_type>(value.length()),
692
+ std::forward<Func1>(on_found), std::forward<Func2>(on_not_found),
693
+ out_memo_index);
694
+ }
695
+
696
+ Status GetOrInsert(const void* data, builder_offset_type length,
697
+ int32_t* out_memo_index) {
698
+ return GetOrInsert(
699
+ data, length, [](int32_t i) {}, [](int32_t i) {}, out_memo_index);
700
+ }
701
+
702
+ Status GetOrInsert(std::string_view value, int32_t* out_memo_index) {
703
+ return GetOrInsert(value.data(), static_cast<builder_offset_type>(value.length()),
704
+ out_memo_index);
705
+ }
706
+
707
+ int32_t GetNull() const { return null_index_; }
708
+
709
+ template <typename Func1, typename Func2>
710
+ int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) {
711
+ int32_t memo_index = GetNull();
712
+ if (memo_index == kKeyNotFound) {
713
+ memo_index = null_index_ = size();
714
+ DCHECK_OK(binary_builder_.AppendNull());
715
+ on_not_found(memo_index);
716
+ } else {
717
+ on_found(memo_index);
718
+ }
719
+ return memo_index;
720
+ }
721
+
722
+ int32_t GetOrInsertNull() {
723
+ return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {});
724
+ }
725
+
726
+ // The number of entries in the memo table
727
+ // (which is also 1 + the largest memo index)
728
+ int32_t size() const override {
729
+ return static_cast<int32_t>(hash_table_.size() + (GetNull() != kKeyNotFound));
730
+ }
731
+
732
+ int64_t values_size() const { return binary_builder_.value_data_length(); }
733
+
734
+ // Copy (n + 1) offsets starting from index `start` into `out_data`
735
+ template <class Offset>
736
+ void CopyOffsets(int32_t start, Offset* out_data) const {
737
+ DCHECK_LE(start, size());
738
+
739
+ const builder_offset_type* offsets = binary_builder_.offsets_data();
740
+ const builder_offset_type delta =
741
+ start < binary_builder_.length() ? offsets[start] : 0;
742
+ for (int32_t i = start; i < size(); ++i) {
743
+ const builder_offset_type adjusted_offset = offsets[i] - delta;
744
+ Offset cast_offset = static_cast<Offset>(adjusted_offset);
745
+ assert(static_cast<builder_offset_type>(cast_offset) ==
746
+ adjusted_offset); // avoid truncation
747
+ *out_data++ = cast_offset;
748
+ }
749
+
750
+ // Copy last value since BinaryBuilder only materializes it on in Finish()
751
+ *out_data = static_cast<Offset>(binary_builder_.value_data_length() - delta);
752
+ }
753
+
754
+ template <class Offset>
755
+ void CopyOffsets(Offset* out_data) const {
756
+ CopyOffsets(0, out_data);
757
+ }
758
+
759
+ // Copy values starting from index `start` into `out_data`
760
+ void CopyValues(int32_t start, uint8_t* out_data) const {
761
+ CopyValues(start, -1, out_data);
762
+ }
763
+
764
+ // Same as above, but check output size in debug mode
765
+ void CopyValues(int32_t start, int64_t out_size, uint8_t* out_data) const {
766
+ DCHECK_LE(start, size());
767
+
768
+ // The absolute byte offset of `start` value in the binary buffer.
769
+ const builder_offset_type offset = binary_builder_.offset(start);
770
+ const auto length = binary_builder_.value_data_length() - static_cast<size_t>(offset);
771
+
772
+ if (out_size != -1) {
773
+ assert(static_cast<int64_t>(length) <= out_size);
774
+ }
775
+
776
+ auto view = binary_builder_.GetView(start);
777
+ memcpy(out_data, view.data(), length);
778
+ }
779
+
780
+ void CopyValues(uint8_t* out_data) const { CopyValues(0, -1, out_data); }
781
+
782
+ void CopyValues(int64_t out_size, uint8_t* out_data) const {
783
+ CopyValues(0, out_size, out_data);
784
+ }
785
+
786
+ void CopyFixedWidthValues(int32_t start, int32_t width_size, int64_t out_size,
787
+ uint8_t* out_data) const {
788
+ // This method exists to cope with the fact that the BinaryMemoTable does
789
+ // not know the fixed width when inserting the null value. The data
790
+ // buffer hold a zero length string for the null value (if found).
791
+ //
792
+ // Thus, the method will properly inject an empty value of the proper width
793
+ // in the output buffer.
794
+ //
795
+ if (start >= size()) {
796
+ return;
797
+ }
798
+
799
+ int32_t null_index = GetNull();
800
+ if (null_index < start) {
801
+ // Nothing to skip, proceed as usual.
802
+ CopyValues(start, out_size, out_data);
803
+ return;
804
+ }
805
+
806
+ builder_offset_type left_offset = binary_builder_.offset(start);
807
+
808
+ // Ensure that the data length is exactly missing width_size bytes to fit
809
+ // in the expected output (n_values * width_size).
810
+ #ifndef NDEBUG
811
+ int64_t data_length = values_size() - static_cast<size_t>(left_offset);
812
+ assert(data_length + width_size == out_size);
813
+ ARROW_UNUSED(data_length);
814
+ #endif
815
+
816
+ auto in_data = binary_builder_.value_data() + left_offset;
817
+ // The null use 0-length in the data, slice the data in 2 and skip by
818
+ // width_size in out_data. [part_1][width_size][part_2]
819
+ auto null_data_offset = binary_builder_.offset(null_index);
820
+ auto left_size = null_data_offset - left_offset;
821
+ if (left_size > 0) {
822
+ memcpy(out_data, in_data + left_offset, left_size);
823
+ }
824
+ // Zero-initialize the null entry
825
+ memset(out_data + left_size, 0, width_size);
826
+
827
+ auto right_size = values_size() - static_cast<size_t>(null_data_offset);
828
+ if (right_size > 0) {
829
+ // skip the null fixed size value.
830
+ auto out_offset = left_size + width_size;
831
+ assert(out_data + out_offset + right_size == out_data + out_size);
832
+ memcpy(out_data + out_offset, in_data + null_data_offset, right_size);
833
+ }
834
+ }
835
+
836
+ // Visit the stored values in insertion order.
837
+ // The visitor function should have the signature `void(std::string_view)`
838
+ // or `void(const std::string_view&)`.
839
+ template <typename VisitFunc>
840
+ void VisitValues(int32_t start, VisitFunc&& visit) const {
841
+ for (int32_t i = start; i < size(); ++i) {
842
+ visit(binary_builder_.GetView(i));
843
+ }
844
+ }
845
+
846
+ protected:
847
+ struct Payload {
848
+ int32_t memo_index;
849
+ };
850
+
851
+ using HashTableType = HashTable<Payload>;
852
+ using HashTableEntry = typename HashTable<Payload>::Entry;
853
+ HashTableType hash_table_;
854
+ BinaryBuilderT binary_builder_;
855
+
856
+ int32_t null_index_ = kKeyNotFound;
857
+
858
+ std::pair<const HashTableEntry*, bool> Lookup(hash_t h, const void* data,
859
+ builder_offset_type length) const {
860
+ auto cmp_func = [&](const Payload* payload) {
861
+ std::string_view lhs = binary_builder_.GetView(payload->memo_index);
862
+ std::string_view rhs(static_cast<const char*>(data), length);
863
+ return lhs == rhs;
864
+ };
865
+ return hash_table_.Lookup(h, cmp_func);
866
+ }
867
+
868
+ public:
869
+ Status MergeTable(const BinaryMemoTable& other_table) {
870
+ other_table.VisitValues(0, [this](std::string_view other_value) {
871
+ int32_t unused;
872
+ DCHECK_OK(this->GetOrInsert(other_value, &unused));
873
+ });
874
+ return Status::OK();
875
+ }
876
+ };
877
+
878
+ template <typename T, typename Enable = void>
879
+ struct HashTraits {};
880
+
881
+ template <>
882
+ struct HashTraits<BooleanType> {
883
+ using MemoTableType = SmallScalarMemoTable<bool>;
884
+ };
885
+
886
+ template <typename T>
887
+ struct HashTraits<T, enable_if_8bit_int<T>> {
888
+ using c_type = typename T::c_type;
889
+ using MemoTableType = SmallScalarMemoTable<typename T::c_type>;
890
+ };
891
+
892
+ template <typename T>
893
+ struct HashTraits<T, enable_if_t<has_c_type<T>::value && !is_8bit_int<T>::value>> {
894
+ using c_type = typename T::c_type;
895
+ using MemoTableType = ScalarMemoTable<c_type, HashTable>;
896
+ };
897
+
898
+ template <typename T>
899
+ struct HashTraits<T, enable_if_t<has_string_view<T>::value &&
900
+ !std::is_base_of<LargeBinaryType, T>::value>> {
901
+ using MemoTableType = BinaryMemoTable<BinaryBuilder>;
902
+ };
903
+
904
+ template <typename T>
905
+ struct HashTraits<T, enable_if_decimal<T>> {
906
+ using MemoTableType = BinaryMemoTable<BinaryBuilder>;
907
+ };
908
+
909
+ template <typename T>
910
+ struct HashTraits<T, enable_if_t<std::is_base_of<LargeBinaryType, T>::value>> {
911
+ using MemoTableType = BinaryMemoTable<LargeBinaryBuilder>;
912
+ };
913
+
914
+ template <typename MemoTableType>
915
+ static inline Status ComputeNullBitmap(MemoryPool* pool, const MemoTableType& memo_table,
916
+ int64_t start_offset, int64_t* null_count,
917
+ std::shared_ptr<Buffer>* null_bitmap) {
918
+ int64_t dict_length = static_cast<int64_t>(memo_table.size()) - start_offset;
919
+ int64_t null_index = memo_table.GetNull();
920
+
921
+ *null_count = 0;
922
+ *null_bitmap = nullptr;
923
+
924
+ if (null_index != kKeyNotFound && null_index >= start_offset) {
925
+ null_index -= start_offset;
926
+ *null_count = 1;
927
+ ARROW_ASSIGN_OR_RAISE(*null_bitmap,
928
+ internal::BitmapAllButOne(pool, dict_length, null_index));
929
+ }
930
+
931
+ return Status::OK();
932
+ }
933
+
934
+ struct StringViewHash {
935
+ // std::hash compatible hasher for use with std::unordered_*
936
+ // (the std::hash specialization provided by nonstd constructs std::string
937
+ // temporaries then invokes std::hash<std::string> against those)
938
+ hash_t operator()(std::string_view value) const {
939
+ return ComputeStringHash<0>(value.data(), static_cast<int64_t>(value.size()));
940
+ }
941
+ };
942
+
943
+ } // namespace internal
944
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <type_traits>
22
+
23
+ #include "arrow/status.h"
24
+
25
+ #include "arrow/util/visibility.h"
26
+
27
+ namespace arrow {
28
+
29
+ class DataType;
30
+ struct ArraySpan;
31
+ struct Scalar;
32
+
33
+ namespace internal {
34
+
35
+ ARROW_EXPORT
36
+ uint8_t DetectUIntWidth(const uint64_t* values, int64_t length, uint8_t min_width = 1);
37
+
38
+ ARROW_EXPORT
39
+ uint8_t DetectUIntWidth(const uint64_t* values, const uint8_t* valid_bytes,
40
+ int64_t length, uint8_t min_width = 1);
41
+
42
+ ARROW_EXPORT
43
+ uint8_t DetectIntWidth(const int64_t* values, int64_t length, uint8_t min_width = 1);
44
+
45
+ ARROW_EXPORT
46
+ uint8_t DetectIntWidth(const int64_t* values, const uint8_t* valid_bytes, int64_t length,
47
+ uint8_t min_width = 1);
48
+
49
+ ARROW_EXPORT
50
+ void DowncastInts(const int64_t* source, int8_t* dest, int64_t length);
51
+
52
+ ARROW_EXPORT
53
+ void DowncastInts(const int64_t* source, int16_t* dest, int64_t length);
54
+
55
+ ARROW_EXPORT
56
+ void DowncastInts(const int64_t* source, int32_t* dest, int64_t length);
57
+
58
+ ARROW_EXPORT
59
+ void DowncastInts(const int64_t* source, int64_t* dest, int64_t length);
60
+
61
+ ARROW_EXPORT
62
+ void DowncastUInts(const uint64_t* source, uint8_t* dest, int64_t length);
63
+
64
+ ARROW_EXPORT
65
+ void DowncastUInts(const uint64_t* source, uint16_t* dest, int64_t length);
66
+
67
+ ARROW_EXPORT
68
+ void DowncastUInts(const uint64_t* source, uint32_t* dest, int64_t length);
69
+
70
+ ARROW_EXPORT
71
+ void DowncastUInts(const uint64_t* source, uint64_t* dest, int64_t length);
72
+
73
+ ARROW_EXPORT
74
+ void UpcastInts(const int32_t* source, int64_t* dest, int64_t length);
75
+
76
+ template <typename InputInt, typename OutputInt>
77
+ inline typename std::enable_if<(sizeof(InputInt) >= sizeof(OutputInt))>::type CastInts(
78
+ const InputInt* source, OutputInt* dest, int64_t length) {
79
+ DowncastInts(source, dest, length);
80
+ }
81
+
82
+ template <typename InputInt, typename OutputInt>
83
+ inline typename std::enable_if<(sizeof(InputInt) < sizeof(OutputInt))>::type CastInts(
84
+ const InputInt* source, OutputInt* dest, int64_t length) {
85
+ UpcastInts(source, dest, length);
86
+ }
87
+
88
+ template <typename InputInt, typename OutputInt>
89
+ ARROW_EXPORT void TransposeInts(const InputInt* source, OutputInt* dest, int64_t length,
90
+ const int32_t* transpose_map);
91
+
92
+ ARROW_EXPORT
93
+ Status TransposeInts(const DataType& src_type, const DataType& dest_type,
94
+ const uint8_t* src, uint8_t* dest, int64_t src_offset,
95
+ int64_t dest_offset, int64_t length, const int32_t* transpose_map);
96
+
97
+ /// \brief Do vectorized boundschecking of integer-type array indices. The
98
+ /// indices must be nonnegative and strictly less than the passed upper
99
+ /// limit (which is usually the length of an array that is being indexed-into).
100
+ ARROW_EXPORT
101
+ Status CheckIndexBounds(const ArraySpan& values, uint64_t upper_limit);
102
+
103
+ /// \brief Boundscheck integer values to determine if they are all between the
104
+ /// passed upper and lower limits (inclusive). Upper and lower bounds must be
105
+ /// the same type as the data and are not currently casted.
106
+ ARROW_EXPORT
107
+ Status CheckIntegersInRange(const ArraySpan& values, const Scalar& bound_lower,
108
+ const Scalar& bound_upper);
109
+
110
+ /// \brief Use CheckIntegersInRange to determine whether the passed integers
111
+ /// can fit safely in the passed integer type. This helps quickly determine if
112
+ /// integer narrowing (e.g. int64->int32) is safe to do.
113
+ ARROW_EXPORT
114
+ Status IntegersCanFit(const ArraySpan& values, const DataType& target_type);
115
+
116
+ /// \brief Convenience for boundschecking a single Scalar value
117
+ ARROW_EXPORT
118
+ Status IntegersCanFit(const Scalar& value, const DataType& target_type);
119
+
120
+ /// Upcast an integer to the largest possible width (currently 64 bits)
121
+
122
+ template <typename Integer>
123
+ typename std::enable_if<
124
+ std::is_integral<Integer>::value && std::is_signed<Integer>::value, int64_t>::type
125
+ UpcastInt(Integer v) {
126
+ return v;
127
+ }
128
+
129
+ template <typename Integer>
130
+ typename std::enable_if<
131
+ std::is_integral<Integer>::value && std::is_unsigned<Integer>::value, uint64_t>::type
132
+ UpcastInt(Integer v) {
133
+ return v;
134
+ }
135
+
136
+ } // namespace internal
137
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h ADDED
@@ -0,0 +1,452 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #ifndef _WIN32
21
+ # define ARROW_HAVE_SIGACTION 1
22
+ #endif
23
+
24
+ #include <atomic>
25
+ #include <memory>
26
+ #include <optional>
27
+ #include <string>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ #if ARROW_HAVE_SIGACTION
32
+ # include <csignal> // Needed for struct sigaction
33
+ #endif
34
+
35
+ #include "arrow/result.h"
36
+ #include "arrow/status.h"
37
+ #include "arrow/type_fwd.h"
38
+ #include "arrow/util/macros.h"
39
+ #include "arrow/util/windows_fixup.h"
40
+
41
+ namespace arrow::internal {
42
+
43
+ // NOTE: 8-bit path strings on Windows are encoded using UTF-8.
44
+ // Using MBCS would fail encoding some paths.
45
+
46
+ #if defined(_WIN32)
47
+ using NativePathString = std::wstring;
48
+ #else
49
+ using NativePathString = std::string;
50
+ #endif
51
+
52
+ class ARROW_EXPORT PlatformFilename {
53
+ public:
54
+ struct Impl;
55
+
56
+ ~PlatformFilename();
57
+ PlatformFilename();
58
+ PlatformFilename(const PlatformFilename&);
59
+ PlatformFilename(PlatformFilename&&);
60
+ PlatformFilename& operator=(const PlatformFilename&);
61
+ PlatformFilename& operator=(PlatformFilename&&);
62
+ explicit PlatformFilename(NativePathString path);
63
+ explicit PlatformFilename(const NativePathString::value_type* path);
64
+
65
+ const NativePathString& ToNative() const;
66
+ std::string ToString() const;
67
+
68
+ PlatformFilename Parent() const;
69
+ Result<PlatformFilename> Real() const;
70
+
71
+ // These functions can fail for character encoding reasons.
72
+ static Result<PlatformFilename> FromString(std::string_view file_name);
73
+ Result<PlatformFilename> Join(std::string_view child_name) const;
74
+
75
+ PlatformFilename Join(const PlatformFilename& child_name) const;
76
+
77
+ bool operator==(const PlatformFilename& other) const;
78
+ bool operator!=(const PlatformFilename& other) const;
79
+
80
+ // Made public to avoid the proliferation of friend declarations.
81
+ const Impl* impl() const { return impl_.get(); }
82
+
83
+ private:
84
+ std::unique_ptr<Impl> impl_;
85
+
86
+ explicit PlatformFilename(Impl impl);
87
+ };
88
+
89
+ /// Create a directory if it doesn't exist.
90
+ ///
91
+ /// Return whether the directory was created.
92
+ ARROW_EXPORT
93
+ Result<bool> CreateDir(const PlatformFilename& dir_path);
94
+
95
+ /// Create a directory and its parents if it doesn't exist.
96
+ ///
97
+ /// Return whether the directory was created.
98
+ ARROW_EXPORT
99
+ Result<bool> CreateDirTree(const PlatformFilename& dir_path);
100
+
101
+ /// Delete a directory's contents (but not the directory itself) if it exists.
102
+ ///
103
+ /// Return whether the directory existed.
104
+ ARROW_EXPORT
105
+ Result<bool> DeleteDirContents(const PlatformFilename& dir_path,
106
+ bool allow_not_found = true);
107
+
108
+ /// Delete a directory tree if it exists.
109
+ ///
110
+ /// Return whether the directory existed.
111
+ ARROW_EXPORT
112
+ Result<bool> DeleteDirTree(const PlatformFilename& dir_path, bool allow_not_found = true);
113
+
114
+ // Non-recursively list the contents of the given directory.
115
+ // The returned names are the children's base names, not including dir_path.
116
+ ARROW_EXPORT
117
+ Result<std::vector<PlatformFilename>> ListDir(const PlatformFilename& dir_path);
118
+
119
+ /// Delete a file if it exists.
120
+ ///
121
+ /// Return whether the file existed.
122
+ ARROW_EXPORT
123
+ Result<bool> DeleteFile(const PlatformFilename& file_path, bool allow_not_found = true);
124
+
125
+ /// Return whether a file exists.
126
+ ARROW_EXPORT
127
+ Result<bool> FileExists(const PlatformFilename& path);
128
+
129
+ // TODO expose this more publicly to make it available from io/file.h?
130
+ /// A RAII wrapper for a file descriptor.
131
+ ///
132
+ /// The underlying file descriptor is automatically closed on destruction.
133
+ /// Moving is supported with well-defined semantics.
134
+ /// Furthermore, closing is idempotent.
135
+ class ARROW_EXPORT FileDescriptor {
136
+ public:
137
+ FileDescriptor() = default;
138
+ explicit FileDescriptor(int fd) : fd_(fd) {}
139
+ FileDescriptor(FileDescriptor&&);
140
+ FileDescriptor& operator=(FileDescriptor&&);
141
+
142
+ ~FileDescriptor();
143
+
144
+ Status Close();
145
+
146
+ /// May return -1 if closed or default-initialized
147
+ int fd() const { return fd_.load(); }
148
+
149
+ /// Detach and return the underlying file descriptor
150
+ int Detach();
151
+
152
+ bool closed() const { return fd_.load() == -1; }
153
+
154
+ protected:
155
+ static void CloseFromDestructor(int fd);
156
+
157
+ std::atomic<int> fd_{-1};
158
+ };
159
+
160
+ /// Open a file for reading and return a file descriptor.
161
+ ARROW_EXPORT
162
+ Result<FileDescriptor> FileOpenReadable(const PlatformFilename& file_name);
163
+
164
+ /// Open a file for writing and return a file descriptor.
165
+ ARROW_EXPORT
166
+ Result<FileDescriptor> FileOpenWritable(const PlatformFilename& file_name,
167
+ bool write_only = true, bool truncate = true,
168
+ bool append = false);
169
+
170
+ /// Read from current file position. Return number of bytes read.
171
+ ARROW_EXPORT
172
+ Result<int64_t> FileRead(int fd, uint8_t* buffer, int64_t nbytes);
173
+ /// Read from given file position. Return number of bytes read.
174
+ ARROW_EXPORT
175
+ Result<int64_t> FileReadAt(int fd, uint8_t* buffer, int64_t position, int64_t nbytes);
176
+
177
+ ARROW_EXPORT
178
+ Status FileWrite(int fd, const uint8_t* buffer, const int64_t nbytes);
179
+ ARROW_EXPORT
180
+ Status FileTruncate(int fd, const int64_t size);
181
+
182
+ ARROW_EXPORT
183
+ Status FileSeek(int fd, int64_t pos);
184
+ ARROW_EXPORT
185
+ Status FileSeek(int fd, int64_t pos, int whence);
186
+ ARROW_EXPORT
187
+ Result<int64_t> FileTell(int fd);
188
+ ARROW_EXPORT
189
+ Result<int64_t> FileGetSize(int fd);
190
+
191
+ ARROW_EXPORT
192
+ Status FileClose(int fd);
193
+
194
+ struct Pipe {
195
+ FileDescriptor rfd;
196
+ FileDescriptor wfd;
197
+
198
+ Status Close() { return rfd.Close() & wfd.Close(); }
199
+ };
200
+
201
+ ARROW_EXPORT
202
+ Result<Pipe> CreatePipe();
203
+
204
+ ARROW_EXPORT
205
+ Status SetPipeFileDescriptorNonBlocking(int fd);
206
+
207
+ class ARROW_EXPORT SelfPipe {
208
+ public:
209
+ static Result<std::shared_ptr<SelfPipe>> Make(bool signal_safe);
210
+ virtual ~SelfPipe();
211
+
212
+ /// \brief Wait for a wakeup.
213
+ ///
214
+ /// Status::Invalid is returned if the pipe has been shutdown.
215
+ /// Otherwise the next sent payload is returned.
216
+ virtual Result<uint64_t> Wait() = 0;
217
+
218
+ /// \brief Wake up the pipe by sending a payload.
219
+ ///
220
+ /// This method is async-signal-safe if `signal_safe` was set to true.
221
+ virtual void Send(uint64_t payload) = 0;
222
+
223
+ /// \brief Wake up the pipe and shut it down.
224
+ virtual Status Shutdown() = 0;
225
+ };
226
+
227
+ ARROW_EXPORT
228
+ int64_t GetPageSize();
229
+
230
+ struct MemoryRegion {
231
+ void* addr;
232
+ size_t size;
233
+ };
234
+
235
+ ARROW_EXPORT
236
+ Status MemoryMapRemap(void* addr, size_t old_size, size_t new_size, int fildes,
237
+ void** new_addr);
238
+ ARROW_EXPORT
239
+ Status MemoryAdviseWillNeed(const std::vector<MemoryRegion>& regions);
240
+
241
+ ARROW_EXPORT
242
+ Result<std::string> GetEnvVar(const char* name);
243
+ ARROW_EXPORT
244
+ Result<std::string> GetEnvVar(const std::string& name);
245
+ ARROW_EXPORT
246
+ Result<NativePathString> GetEnvVarNative(const char* name);
247
+ ARROW_EXPORT
248
+ Result<NativePathString> GetEnvVarNative(const std::string& name);
249
+
250
+ ARROW_EXPORT
251
+ Status SetEnvVar(const char* name, const char* value);
252
+ ARROW_EXPORT
253
+ Status SetEnvVar(const std::string& name, const std::string& value);
254
+ ARROW_EXPORT
255
+ Status DelEnvVar(const char* name);
256
+ ARROW_EXPORT
257
+ Status DelEnvVar(const std::string& name);
258
+
259
+ ARROW_EXPORT
260
+ std::string ErrnoMessage(int errnum);
261
+ #if _WIN32
262
+ ARROW_EXPORT
263
+ std::string WinErrorMessage(int errnum);
264
+ #endif
265
+
266
+ ARROW_EXPORT
267
+ std::shared_ptr<StatusDetail> StatusDetailFromErrno(int errnum);
268
+ ARROW_EXPORT
269
+ std::optional<int> ErrnoFromStatusDetail(const StatusDetail& detail);
270
+ #if _WIN32
271
+ ARROW_EXPORT
272
+ std::shared_ptr<StatusDetail> StatusDetailFromWinError(int errnum);
273
+ #endif
274
+ ARROW_EXPORT
275
+ std::shared_ptr<StatusDetail> StatusDetailFromSignal(int signum);
276
+
277
+ template <typename... Args>
278
+ Status StatusFromErrno(int errnum, StatusCode code, Args&&... args) {
279
+ return Status::FromDetailAndArgs(code, StatusDetailFromErrno(errnum),
280
+ std::forward<Args>(args)...);
281
+ }
282
+
283
+ template <typename... Args>
284
+ Status IOErrorFromErrno(int errnum, Args&&... args) {
285
+ return StatusFromErrno(errnum, StatusCode::IOError, std::forward<Args>(args)...);
286
+ }
287
+
288
+ #if _WIN32
289
+ template <typename... Args>
290
+ Status StatusFromWinError(int errnum, StatusCode code, Args&&... args) {
291
+ return Status::FromDetailAndArgs(code, StatusDetailFromWinError(errnum),
292
+ std::forward<Args>(args)...);
293
+ }
294
+
295
+ template <typename... Args>
296
+ Status IOErrorFromWinError(int errnum, Args&&... args) {
297
+ return StatusFromWinError(errnum, StatusCode::IOError, std::forward<Args>(args)...);
298
+ }
299
+ #endif
300
+
301
+ template <typename... Args>
302
+ Status StatusFromSignal(int signum, StatusCode code, Args&&... args) {
303
+ return Status::FromDetailAndArgs(code, StatusDetailFromSignal(signum),
304
+ std::forward<Args>(args)...);
305
+ }
306
+
307
+ template <typename... Args>
308
+ Status CancelledFromSignal(int signum, Args&&... args) {
309
+ return StatusFromSignal(signum, StatusCode::Cancelled, std::forward<Args>(args)...);
310
+ }
311
+
312
+ ARROW_EXPORT
313
+ int ErrnoFromStatus(const Status&);
314
+
315
+ // Always returns 0 on non-Windows platforms (for Python).
316
+ ARROW_EXPORT
317
+ int WinErrorFromStatus(const Status&);
318
+
319
+ ARROW_EXPORT
320
+ int SignalFromStatus(const Status&);
321
+
322
+ class ARROW_EXPORT TemporaryDir {
323
+ public:
324
+ ~TemporaryDir();
325
+
326
+ /// '/'-terminated path to the temporary dir
327
+ const PlatformFilename& path() { return path_; }
328
+
329
+ /// Create a temporary subdirectory in the system temporary dir,
330
+ /// named starting with `prefix`.
331
+ static Result<std::unique_ptr<TemporaryDir>> Make(const std::string& prefix);
332
+
333
+ private:
334
+ PlatformFilename path_;
335
+
336
+ explicit TemporaryDir(PlatformFilename&&);
337
+ };
338
+
339
+ class ARROW_EXPORT SignalHandler {
340
+ public:
341
+ using Callback = void (*)(int);
342
+
343
+ SignalHandler();
344
+ explicit SignalHandler(Callback cb);
345
+ #if ARROW_HAVE_SIGACTION
346
+ explicit SignalHandler(const struct sigaction& sa);
347
+ #endif
348
+
349
+ Callback callback() const;
350
+ #if ARROW_HAVE_SIGACTION
351
+ const struct sigaction& action() const;
352
+ #endif
353
+
354
+ protected:
355
+ #if ARROW_HAVE_SIGACTION
356
+ // Storing the full sigaction allows to restore the entire signal handling
357
+ // configuration.
358
+ struct sigaction sa_;
359
+ #else
360
+ Callback cb_;
361
+ #endif
362
+ };
363
+
364
+ /// \brief Return the current handler for the given signal number.
365
+ ARROW_EXPORT
366
+ Result<SignalHandler> GetSignalHandler(int signum);
367
+
368
+ /// \brief Set a new handler for the given signal number.
369
+ ///
370
+ /// The old signal handler is returned.
371
+ ARROW_EXPORT
372
+ Result<SignalHandler> SetSignalHandler(int signum, const SignalHandler& handler);
373
+
374
+ /// \brief Reinstate the signal handler
375
+ ///
376
+ /// For use in signal handlers. This is needed on platforms without sigaction()
377
+ /// such as Windows, as the default signal handler is restored there as
378
+ /// soon as a signal is raised.
379
+ ARROW_EXPORT
380
+ void ReinstateSignalHandler(int signum, SignalHandler::Callback handler);
381
+
382
+ /// \brief Send a signal to the current process
383
+ ///
384
+ /// The thread which will receive the signal is unspecified.
385
+ ARROW_EXPORT
386
+ Status SendSignal(int signum);
387
+
388
+ /// \brief Send a signal to the given thread
389
+ ///
390
+ /// This function isn't supported on Windows.
391
+ ARROW_EXPORT
392
+ Status SendSignalToThread(int signum, uint64_t thread_id);
393
+
394
+ /// \brief Get an unpredictable random seed
395
+ ///
396
+ /// This function may be slightly costly, so should only be used to initialize
397
+ /// a PRNG, not to generate a large amount of random numbers.
398
+ /// It is better to use this function rather than std::random_device, unless
399
+ /// absolutely necessary (e.g. to generate a cryptographic secret).
400
+ ARROW_EXPORT
401
+ int64_t GetRandomSeed();
402
+
403
+ /// \brief Get the current thread id
404
+ ///
405
+ /// In addition to having the same properties as std::thread, the returned value
406
+ /// is a regular integer value, which is more convenient than an opaque type.
407
+ ARROW_EXPORT
408
+ uint64_t GetThreadId();
409
+
410
+ /// \brief Get the current memory used by the current process in bytes
411
+ ///
412
+ /// This function supports Windows, Linux, and Mac and will return 0 otherwise
413
+ ARROW_EXPORT
414
+ int64_t GetCurrentRSS();
415
+
416
+ /// \brief Get the total memory available to the system in bytes
417
+ ///
418
+ /// This function supports Windows, Linux, and Mac and will return 0 otherwise
419
+ ARROW_EXPORT
420
+ int64_t GetTotalMemoryBytes();
421
+
422
+ /// \brief Load a dynamic library
423
+ ///
424
+ /// This wraps dlopen() except on Windows, where LoadLibrary() is called.
425
+ /// These two platforms handle absolute paths consistently; relative paths
426
+ /// or the library's bare name may be handled but inconsistently.
427
+ ///
428
+ /// \return An opaque handle for the dynamic library, which can be used for
429
+ /// subsequent symbol lookup. Nullptr will never be returned; instead
430
+ /// an error will be raised.
431
+ ARROW_EXPORT Result<void*> LoadDynamicLibrary(const PlatformFilename& path);
432
+
433
+ /// \brief Load a dynamic library
434
+ ///
435
+ /// An overload taking null terminated string.
436
+ ARROW_EXPORT Result<void*> LoadDynamicLibrary(const char* path);
437
+
438
+ /// \brief Retrieve a symbol by name from a library handle.
439
+ ///
440
+ /// This wraps dlsym() except on Windows, where GetProcAddress() is called.
441
+ ///
442
+ /// \return The address associated with the named symbol. Nullptr will never be
443
+ /// returned; instead an error will be raised.
444
+ ARROW_EXPORT Result<void*> GetSymbol(void* handle, const char* name);
445
+
446
+ template <typename T>
447
+ Result<T*> GetSymbolAs(void* handle, const char* name) {
448
+ ARROW_ASSIGN_OR_RAISE(void* sym, GetSymbol(handle, name));
449
+ return reinterpret_cast<T*>(sym);
450
+ }
451
+
452
+ } // namespace arrow::internal
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h ADDED
@@ -0,0 +1,575 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cassert>
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <optional>
24
+ #include <tuple>
25
+ #include <type_traits>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "arrow/result.h"
30
+ #include "arrow/status.h"
31
+ #include "arrow/util/compare.h"
32
+ #include "arrow/util/functional.h"
33
+ #include "arrow/util/macros.h"
34
+ #include "arrow/util/visibility.h"
35
+
36
+ namespace arrow {
37
+
38
+ template <typename T>
39
+ class Iterator;
40
+
41
/// \brief Describes the end-of-iteration sentinel for a value type T.
///
/// The primary template assumes T is constructible from NULLPTR (most
/// iterators yield pointer-like types); specialize for other end semantics.
template <typename T>
struct IterationTraits {
  /// \brief a reserved value which indicates the end of iteration. By
  /// default this is NULLPTR since most iterators yield pointer types.
  /// Specialize IterationTraits if different end semantics are required.
  ///
  /// Note: This should not be used to determine if a given value is a
  /// terminal value. Use IsIterationEnd (which uses IsEnd) instead. This
  /// is only for returning terminal values.
  static T End() { return T(NULLPTR); }

  /// \brief Checks to see if the value is a terminal value.
  /// A method is used here since T is not necessarily comparable in many
  /// cases even though it has a distinct final value
  static bool IsEnd(const T& val) { return val == End(); }
};
57
+
58
+ template <typename T>
59
+ T IterationEnd() {
60
+ return IterationTraits<T>::End();
61
+ }
62
+
63
+ template <typename T>
64
+ bool IsIterationEnd(const T& val) {
65
+ return IterationTraits<T>::IsEnd(val);
66
+ }
67
+
68
/// \brief IterationTraits specialization for std::optional<T>.
template <typename T>
struct IterationTraits<std::optional<T>> {
  /// \brief by default when iterating through a sequence of optional,
  /// nullopt indicates the end of iteration.
  /// Specialize IterationTraits if different end semantics are required.
  static std::optional<T> End() { return std::nullopt; }

  /// \brief by default when iterating through a sequence of optional,
  /// nullopt (!has_value()) indicates the end of iteration.
  /// Specialize IterationTraits if different end semantics are required.
  static bool IsEnd(const std::optional<T>& val) { return !val.has_value(); }

  // TODO(bkietz) The range-for loop over Iterator<optional<T>> yields
  // Result<optional<T>> which is unnecessary (since only the unyielded end optional
  // is nullopt. Add IterationTraits::GetRangeElement() to handle this case
};
84
+
85
/// \brief A generic Iterator that can return errors
template <typename T>
class Iterator : public util::EqualityComparable<Iterator<T>> {
 public:
  /// \brief Iterator may be constructed from any type which has a member function
  /// with signature Result<T> Next();
  /// End of iterator is signalled by returning IteratorTraits<T>::End();
  ///
  /// The argument is moved or copied to the heap and kept in a unique_ptr<void>. Only
  /// its destructor and its Next method (which are stored in function pointers) are
  /// referenced after construction.
  ///
  /// This approach is used to dodge MSVC linkage hell (ARROW-6244, ARROW-6558) when using
  /// an abstract template base class: instead of being inlined as usual for a template
  /// function the base's virtual destructor will be exported, leading to multiple
  /// definition errors when linking to any other TU where the base is instantiated.
  template <typename Wrapped>
  explicit Iterator(Wrapped has_next)
      : ptr_(new Wrapped(std::move(has_next)), Delete<Wrapped>), next_(Next<Wrapped>) {}

  /// \brief Construct a null (already exhausted) iterator.
  Iterator() : ptr_(NULLPTR, [](void*) {}) {}

  /// \brief Return the next element of the sequence, IterationTraits<T>::End() when the
  /// iteration is completed.
  Result<T> Next() {
    if (ptr_) {
      auto next_result = next_(ptr_.get());
      if (next_result.ok() && IsIterationEnd(next_result.ValueUnsafe())) {
        // Eagerly release the wrapped iterator once the end is reached; from
        // here on this Iterator is null and keeps returning End().
        ptr_.reset(NULLPTR);
      }
      return next_result;
    } else {
      return IterationTraits<T>::End();
    }
  }

  /// Pass each element of the sequence to a visitor. Will return any error status
  /// returned by the visitor, terminating iteration.
  template <typename Visitor>
  Status Visit(Visitor&& visitor) {
    for (;;) {
      ARROW_ASSIGN_OR_RAISE(auto value, Next());

      if (IsIterationEnd(value)) break;

      ARROW_RETURN_NOT_OK(visitor(std::move(value)));
    }

    return Status::OK();
  }

  /// Iterators will only compare equal if they are both null.
  /// Equality comparability is required to make an Iterator of Iterators
  /// (to check for the end condition).
  bool Equals(const Iterator& other) const { return ptr_ == other.ptr_; }

  /// \brief True while this iterator still wraps an implementation (non-null);
  /// Next() nulls it out once the end sentinel has been observed.
  explicit operator bool() const { return ptr_ != NULLPTR; }

  /// \brief Adapter enabling range-for over an Iterator; dereferencing yields
  /// Result<T>, so errors surface inside the loop body.
  class RangeIterator {
   public:
    RangeIterator() : value_(IterationTraits<T>::End()) {}

    explicit RangeIterator(Iterator i)
        : value_(IterationTraits<T>::End()),
          iterator_(std::make_shared<Iterator>(std::move(i))) {
      Next();
    }

    bool operator!=(const RangeIterator& other) const { return value_ != other.value_; }

    RangeIterator& operator++() {
      Next();
      return *this;
    }

    /// \brief Move out the current element (or its error status); the slot is
    /// reset to the end sentinel until operator++ fetches the next element.
    Result<T> operator*() {
      ARROW_RETURN_NOT_OK(value_.status());

      auto value = std::move(value_);
      value_ = IterationTraits<T>::End();
      return value;
    }

   private:
    void Next() {
      if (!value_.ok()) {
        // After an error has been delivered, collapse to the end sentinel so
        // the range-for loop terminates.
        value_ = IterationTraits<T>::End();
        return;
      }
      value_ = iterator_->Next();
    }

    Result<T> value_;
    std::shared_ptr<Iterator> iterator_;
  };

  // Note: begin() consumes this Iterator (moves it into the RangeIterator).
  RangeIterator begin() { return RangeIterator(std::move(*this)); }

  RangeIterator end() { return RangeIterator(); }

  /// \brief Move every element of this iterator into a vector.
  Result<std::vector<T>> ToVector() {
    std::vector<T> out;
    for (auto maybe_element : *this) {
      ARROW_ASSIGN_OR_RAISE(auto element, maybe_element);
      out.push_back(std::move(element));
    }
    return out;
  }

 private:
  /// Implementation of deleter for ptr_: Casts from void* to the wrapped type and
  /// deletes that.
  template <typename HasNext>
  static void Delete(void* ptr) {
    delete static_cast<HasNext*>(ptr);
  }

  /// Implementation of Next: Casts from void* to the wrapped type and invokes that
  /// type's Next member function.
  template <typename HasNext>
  static Result<T> Next(void* ptr) {
    return static_cast<HasNext*>(ptr)->Next();
  }

  /// ptr_ is a unique_ptr to void with a custom deleter: a function pointer which first
  /// casts from void* to a pointer to the wrapped type then deletes that.
  std::unique_ptr<void, void (*)(void*)> ptr_;

  /// next_ is a function pointer which first casts from void* to a pointer to the wrapped
  /// type then invokes its Next member function.
  Result<T> (*next_)(void*) = NULLPTR;
};
218
+
219
/// \brief Control-flow result of one Transformer invocation: an optional
/// yielded value plus "finished" / "ready for the next source element" flags.
template <typename T>
struct TransformFlow {
  using YieldValueType = T;

  /// Construct a flow that yields `value`.
  TransformFlow(YieldValueType value, bool ready_for_next)
      : finished_(false),
        ready_for_next_(ready_for_next),
        yield_value_(std::move(value)) {}
  /// Construct a valueless flow (used by TransformFinish / TransformSkip).
  TransformFlow(bool finished, bool ready_for_next)
      : finished_(finished), ready_for_next_(ready_for_next), yield_value_() {}

  bool HasValue() const { return yield_value_.has_value(); }
  bool Finished() const { return finished_; }
  bool ReadyForNext() const { return ready_for_next_; }
  /// Copy out the yielded value; only meaningful when HasValue() is true
  /// (dereferences the optional unconditionally).
  T Value() const { return *yield_value_; }

  bool finished_ = false;        // no further values will be yielded
  bool ready_for_next_ = false;  // the current source element is consumed
  std::optional<YieldValueType> yield_value_;
};
239
+
240
/// \brief Tag type implicitly convertible to any TransformFlow<T>:
/// terminates the transformation (finished = true, source element consumed).
struct TransformFinish {
  template <typename T>
  operator TransformFlow<T>() && {  // NOLINT explicit
    return TransformFlow<T>(true, true);
  }
};
246
+
247
/// \brief Tag type implicitly convertible to any TransformFlow<T>:
/// yields no value and requests the next source element (finished = false).
struct TransformSkip {
  template <typename T>
  operator TransformFlow<T>() && {  // NOLINT explicit
    return TransformFlow<T>(false, true);
  }
};
253
+
254
+ template <typename T>
255
+ TransformFlow<T> TransformYield(T value = {}, bool ready_for_next = true) {
256
+ return TransformFlow<T>(std::move(value), ready_for_next);
257
+ }
258
+
259
/// \brief Callback type consumed by MakeTransformedIterator: maps a source
/// element of type T to a TransformFlow<V> (yield / skip / finish) or an error.
template <typename T, typename V>
using Transformer = std::function<Result<TransformFlow<V>>(T)>;
261
+
262
/// \brief Iterator adapter driving a Transformer over a source iterator.
///
/// Pulls from the source only when the transformer reports it is ready for a
/// new element, and stops permanently once the transformer finishes or errors.
template <typename T, typename V>
class TransformIterator {
 public:
  explicit TransformIterator(Iterator<T> it, Transformer<T, V> transformer)
      : it_(std::move(it)),
        transformer_(std::move(transformer)),
        last_value_(),
        finished_() {}

  /// \brief Return the next transformed value, or IterationTraits<V>::End()
  /// once the transformer has finished.
  Result<V> Next() {
    while (!finished_) {
      ARROW_ASSIGN_OR_RAISE(std::optional<V> next, Pump());
      if (next.has_value()) {
        return std::move(*next);
      }
      // Transformer asked for a fresh element from the source.
      ARROW_ASSIGN_OR_RAISE(last_value_, it_.Next());
    }
    return IterationTraits<V>::End();
  }

 private:
  // Calls the transform function on the current value. Can return in several ways
  // * If the next value is requested (e.g. skip) it will return an empty optional
  // * If an invalid status is encountered that will be returned
  // * If finished it will return IterationTraits<V>::End()
  // * If a value is returned by the transformer that will be returned
  Result<std::optional<V>> Pump() {
    if (!finished_ && last_value_.has_value()) {
      auto next_res = transformer_(*last_value_);
      if (!next_res.ok()) {
        // Errors are sticky: further Next() calls return End().
        finished_ = true;
        return next_res.status();
      }
      auto next = *next_res;
      if (next.ReadyForNext()) {
        // The source's end sentinel is shown to the transformer exactly once;
        // consuming it ends the whole iteration.
        if (IsIterationEnd(*last_value_)) {
          finished_ = true;
        }
        last_value_.reset();
      }
      if (next.Finished()) {
        finished_ = true;
      }
      if (next.HasValue()) {
        return next.Value();
      }
    }
    if (finished_) {
      return IterationTraits<V>::End();
    }
    return std::nullopt;
  }

  Iterator<T> it_;                 // source iterator
  Transformer<T, V> transformer_;  // user-supplied transform callback
  std::optional<T> last_value_;    // source element not yet fully consumed
  bool finished_ = false;          // sticky: set on finish or error
};
320
+
321
+ /// \brief Transforms an iterator according to a transformer, returning a new Iterator.
322
+ ///
323
+ /// The transformer will be called on each element of the source iterator and for each
324
+ /// call it can yield a value, skip, or finish the iteration. When yielding a value the
325
+ /// transformer can choose to consume the source item (the default, ready_for_next = true)
326
+ /// or to keep it and it will be called again on the same value.
327
+ ///
328
+ /// This is essentially a more generic form of the map operation that can return 0, 1, or
329
+ /// many values for each of the source items.
330
+ ///
331
+ /// The transformer will be exposed to the end of the source sequence
332
+ /// (IterationTraits::End) in case it needs to return some penultimate item(s).
333
+ ///
334
+ /// Any invalid status returned by the transformer will be returned immediately.
335
+ template <typename T, typename V>
336
+ Iterator<V> MakeTransformedIterator(Iterator<T> it, Transformer<T, V> op) {
337
+ return Iterator<V>(TransformIterator<T, V>(std::move(it), std::move(op)));
338
+ }
339
+
340
/// \brief IterationTraits specialization for iterators of iterators.
template <typename T>
struct IterationTraits<Iterator<T>> {
  // The end condition for an Iterator of Iterators is a default constructed (null)
  // Iterator.
  static Iterator<T> End() { return Iterator<T>(); }
  static bool IsEnd(const Iterator<T>& val) { return !val; }
};
347
+
348
+ template <typename Fn, typename T>
349
+ class FunctionIterator {
350
+ public:
351
+ explicit FunctionIterator(Fn fn) : fn_(std::move(fn)) {}
352
+
353
+ Result<T> Next() { return fn_(); }
354
+
355
+ private:
356
+ Fn fn_;
357
+ };
358
+
359
+ /// \brief Construct an Iterator which invokes a callable on Next()
360
+ template <typename Fn,
361
+ typename Ret = typename internal::call_traits::return_type<Fn>::ValueType>
362
+ Iterator<Ret> MakeFunctionIterator(Fn fn) {
363
+ return Iterator<Ret>(FunctionIterator<Fn, Ret>(std::move(fn)));
364
+ }
365
+
366
+ template <typename T>
367
+ Iterator<T> MakeEmptyIterator() {
368
+ return MakeFunctionIterator([]() -> Result<T> { return IterationTraits<T>::End(); });
369
+ }
370
+
371
+ template <typename T>
372
+ Iterator<T> MakeErrorIterator(Status s) {
373
+ return MakeFunctionIterator([s]() -> Result<T> {
374
+ ARROW_RETURN_NOT_OK(s);
375
+ return IterationTraits<T>::End();
376
+ });
377
+ }
378
+
379
+ /// \brief Simple iterator which yields the elements of a std::vector
380
+ template <typename T>
381
+ class VectorIterator {
382
+ public:
383
+ explicit VectorIterator(std::vector<T> v) : elements_(std::move(v)) {}
384
+
385
+ Result<T> Next() {
386
+ if (i_ == elements_.size()) {
387
+ return IterationTraits<T>::End();
388
+ }
389
+ return std::move(elements_[i_++]);
390
+ }
391
+
392
+ private:
393
+ std::vector<T> elements_;
394
+ size_t i_ = 0;
395
+ };
396
+
397
+ template <typename T>
398
+ Iterator<T> MakeVectorIterator(std::vector<T> v) {
399
+ return Iterator<T>(VectorIterator<T>(std::move(v)));
400
+ }
401
+
402
+ /// \brief Simple iterator which yields *pointers* to the elements of a std::vector<T>.
403
+ /// This is provided to support T where IterationTraits<T>::End is not specialized
404
+ template <typename T>
405
+ class VectorPointingIterator {
406
+ public:
407
+ explicit VectorPointingIterator(std::vector<T> v) : elements_(std::move(v)) {}
408
+
409
+ Result<T*> Next() {
410
+ if (i_ == elements_.size()) {
411
+ return NULLPTR;
412
+ }
413
+ return &elements_[i_++];
414
+ }
415
+
416
+ private:
417
+ std::vector<T> elements_;
418
+ size_t i_ = 0;
419
+ };
420
+
421
+ template <typename T>
422
+ Iterator<T*> MakeVectorPointingIterator(std::vector<T> v) {
423
+ return Iterator<T*>(VectorPointingIterator<T>(std::move(v)));
424
+ }
425
+
426
+ /// \brief MapIterator takes ownership of an iterator and a function to apply
427
+ /// on every element. The mapped function is not allowed to fail.
428
+ template <typename Fn, typename I, typename O>
429
+ class MapIterator {
430
+ public:
431
+ explicit MapIterator(Fn map, Iterator<I> it)
432
+ : map_(std::move(map)), it_(std::move(it)) {}
433
+
434
+ Result<O> Next() {
435
+ ARROW_ASSIGN_OR_RAISE(I i, it_.Next());
436
+
437
+ if (IsIterationEnd(i)) {
438
+ return IterationTraits<O>::End();
439
+ }
440
+
441
+ return map_(std::move(i));
442
+ }
443
+
444
+ private:
445
+ Fn map_;
446
+ Iterator<I> it_;
447
+ };
448
+
449
+ /// \brief MapIterator takes ownership of an iterator and a function to apply
450
+ /// on every element. The mapped function is not allowed to fail.
451
+ template <typename Fn, typename From = internal::call_traits::argument_type<0, Fn>,
452
+ typename To = internal::call_traits::return_type<Fn>>
453
+ Iterator<To> MakeMapIterator(Fn map, Iterator<From> it) {
454
+ return Iterator<To>(MapIterator<Fn, From, To>(std::move(map), std::move(it)));
455
+ }
456
+
457
+ /// \brief Like MapIterator, but where the function can fail.
458
+ template <typename Fn, typename From = internal::call_traits::argument_type<0, Fn>,
459
+ typename To = typename internal::call_traits::return_type<Fn>::ValueType>
460
+ Iterator<To> MakeMaybeMapIterator(Fn map, Iterator<From> it) {
461
+ return Iterator<To>(MapIterator<Fn, From, To>(std::move(map), std::move(it)));
462
+ }
463
+
464
/// \brief Helpers and implementation for filtering iterators.
///
/// A filter callback returns Result<std::pair<To, Action>>: ACCEPT emits the
/// paired value, REJECT drops it and the next source element is pulled.
struct FilterIterator {
  enum Action { ACCEPT, REJECT };

  /// \brief Signal that the current element produced no output.
  template <typename To>
  static Result<std::pair<To, Action>> Reject() {
    return std::make_pair(IterationTraits<To>::End(), REJECT);
  }

  /// \brief Emit `out` as the next element.
  template <typename To>
  static Result<std::pair<To, Action>> Accept(To out) {
    return std::make_pair(std::move(out), ACCEPT);
  }

  /// \brief Accept `maybe_out`'s value if it is ok, else forward its error.
  template <typename To>
  static Result<std::pair<To, Action>> MaybeAccept(Result<To> maybe_out) {
    return std::move(maybe_out).Map(Accept<To>);
  }

  /// \brief Terminate iteration with an error status.
  template <typename To>
  static Result<std::pair<To, Action>> Error(Status s) {
    return s;
  }

  /// \brief Iterator implementation wrapping a source iterator and a filter
  /// callback; constructed via MakeFilterIterator.
  template <typename Fn, typename From, typename To>
  class Impl {
   public:
    explicit Impl(Fn filter, Iterator<From> it) : filter_(filter), it_(std::move(it)) {}

    Result<To> Next() {
      To out = IterationTraits<To>::End();
      Action action;

      // Keep pulling until the filter ACCEPTs an element or the source ends.
      for (;;) {
        ARROW_ASSIGN_OR_RAISE(From i, it_.Next());

        if (IsIterationEnd(i)) {
          return IterationTraits<To>::End();
        }

        ARROW_ASSIGN_OR_RAISE(std::tie(out, action), filter_(std::move(i)));

        if (action == ACCEPT) return out;
      }
    }

   private:
    Fn filter_;
    Iterator<From> it_;
  };
};
514
+
515
/// \brief Like MapIterator, but where the function can fail or reject elements.
///
/// Fn must return Result<std::pair<To, FilterIterator::Action>>; the enable_if
/// clause below removes this overload from consideration for callables with
/// any other return shape.
template <
    typename Fn, typename From = typename internal::call_traits::argument_type<0, Fn>,
    typename Ret = typename internal::call_traits::return_type<Fn>::ValueType,
    typename To = typename std::tuple_element<0, Ret>::type,
    typename Enable = typename std::enable_if<std::is_same<
        typename std::tuple_element<1, Ret>::type, FilterIterator::Action>::value>::type>
Iterator<To> MakeFilterIterator(Fn filter, Iterator<From> it) {
  return Iterator<To>(
      FilterIterator::Impl<Fn, From, To>(std::move(filter), std::move(it)));
}
526
+
527
/// \brief FlattenIterator takes an iterator generating iterators and yields a
/// unified iterator that flattens/concatenates in a single stream.
template <typename T>
class FlattenIterator {
 public:
  explicit FlattenIterator(Iterator<Iterator<T>> it) : parent_(std::move(it)) {}

  // NOTE(review): Next() recurses once per exhausted/empty child, so depth
  // grows with consecutive empty children — presumably bounded in practice;
  // confirm upstream if sources can contain long runs of empty iterators.
  Result<T> Next() {
    if (IsIterationEnd(child_)) {
      // Pop from parent's iterator.
      ARROW_ASSIGN_OR_RAISE(child_, parent_.Next());

      // Check if final iteration reached.
      if (IsIterationEnd(child_)) {
        return IterationTraits<T>::End();
      }

      return Next();
    }

    // Pop from child_ and check for depletion.
    ARROW_ASSIGN_OR_RAISE(T out, child_.Next());
    if (IsIterationEnd(out)) {
      // Reset state such that we pop from parent on the recursive call
      child_ = IterationTraits<Iterator<T>>::End();

      return Next();
    }

    return out;
  }

 private:
  Iterator<Iterator<T>> parent_;  // yields the child iterators
  Iterator<T> child_ = IterationTraits<Iterator<T>>::End();
};
563
+
564
+ template <typename T>
565
+ Iterator<T> MakeFlattenIterator(Iterator<Iterator<T>> it) {
566
+ return Iterator<T>(FlattenIterator<T>(std::move(it)));
567
+ }
568
+
569
+ template <typename Reader>
570
+ Iterator<typename Reader::ValueType> MakeIteratorFromReader(
571
+ const std::shared_ptr<Reader>& reader) {
572
+ return MakeFunctionIterator([reader] { return reader->Next(); });
573
+ }
574
+
575
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/list_util.h ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <utility>
22
+
23
+ #include "arrow/array/data.h"
24
+ #include "arrow/result.h"
25
+
26
namespace arrow {
namespace list_util {

// NOTE: everything below lives in an `internal` namespace and is therefore
// not part of the stable public API.
namespace internal {

/// \brief Calculate the smallest continuous range of values used by the
/// var-length list-like input (list, map and list-view types).
///
/// \param input The input array such that is_var_length_list_like(input.type)
/// is true
/// \return A pair of (offset, length) describing the range
ARROW_EXPORT Result<std::pair<int64_t, int64_t>> RangeOfValuesUsed(
    const ArraySpan& input);

/// \brief Calculate the sum of the sizes of all valid lists or list-views
///
/// This is usually the same as the length of the RangeOfValuesUsed() range, but
/// it can be:
/// - Smaller: when the child array contains many values that are not
///   referenced by the lists or list-views in the parent array
/// - Greater: when the list-views share child array ranges
///
/// \param input The input array such that is_var_length_list_like(input.type)
/// is true
/// \return The sum of all list or list-view sizes
ARROW_EXPORT Result<int64_t> SumOfLogicalListSizes(const ArraySpan& input);

}  // namespace internal

}  // namespace list_util
}  // namespace arrow