ZTWHHH commited on
Commit
4b5844f
·
verified ·
1 Parent(s): f67ec27

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h +146 -0
  2. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h +36 -0
  3. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/csv.h +42 -0
  4. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h +106 -0
  5. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/gdb.h +29 -0
  6. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/helpers.h +162 -0
  7. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h +64 -0
  8. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h +26 -0
  9. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib_api.h +201 -0
  10. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h +103 -0
  11. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h +72 -0
  12. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h +132 -0
  13. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pch.h +24 -0
  14. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/platform.h +41 -0
  15. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h +89 -0
  16. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h +19 -0
  17. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_test.h +42 -0
  18. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h +80 -0
  19. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/udf.h +81 -0
  20. valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h +39 -0
  21. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h +33 -0
  22. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h +221 -0
  23. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h +2058 -0
  24. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h +71 -0
  25. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h +35 -0
  26. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h +95 -0
  27. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h +466 -0
  28. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h +43 -0
  29. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h +112 -0
  30. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h +88 -0
  31. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h +286 -0
  32. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h +89 -0
  33. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h +34 -0
  34. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h +28 -0
  35. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h +28 -0
  36. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h +61 -0
  37. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h +241 -0
  38. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h +411 -0
  39. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h +60 -0
  40. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h +114 -0
  41. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h +298 -0
  42. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h +181 -0
  43. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/dict_util.h +28 -0
  44. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h +115 -0
  45. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h +656 -0
  46. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/future.h +882 -0
  47. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h +944 -0
  48. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h +137 -0
  49. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h +118 -0
  50. valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h +452 -0
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between pandas's NumPy-based data representation
19
+ // and Arrow data structures
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/python/platform.h"
24
+
25
+ #include <memory>
26
+ #include <string>
27
+ #include <unordered_set>
28
+
29
+ #include "arrow/memory_pool.h"
30
+ #include "arrow/python/visibility.h"
31
+
32
+ namespace arrow {
33
+
34
+ class Array;
35
+ class ChunkedArray;
36
+ class Column;
37
+ class DataType;
38
+ class MemoryPool;
39
+ class Status;
40
+ class Table;
41
+
42
+ namespace py {
43
+
44
+ enum class MapConversionType {
45
+ DEFAULT, // convert arrow maps to assoc lists (list of kev-value tuples) in Pandas
46
+ LOSSY, // report warnings when lossiness is encountered due to duplicate keys
47
+ STRICT_, // raise a Python exception when lossiness is encountered due to duplicate
48
+ // keys
49
+ };
50
+
51
+ struct PandasOptions {
52
+ /// arrow::MemoryPool to use for memory allocations
53
+ MemoryPool* pool = default_memory_pool();
54
+
55
+ /// If true, we will convert all string columns to categoricals
56
+ bool strings_to_categorical = false;
57
+ bool zero_copy_only = false;
58
+ bool integer_object_nulls = false;
59
+ bool date_as_object = false;
60
+ bool timestamp_as_object = false;
61
+ bool use_threads = false;
62
+
63
+ /// Coerce all date and timestamp to datetime64[ns]
64
+ bool coerce_temporal_nanoseconds = false;
65
+
66
+ /// Used to maintain backwards compatibility for
67
+ /// timezone bugs (see ARROW-9528). Should be removed
68
+ /// after Arrow 2.0 release.
69
+ bool ignore_timezone = false;
70
+
71
+ /// \brief If true, do not create duplicate PyObject versions of equal
72
+ /// objects. This only applies to immutable objects like strings or datetime
73
+ /// objects
74
+ bool deduplicate_objects = false;
75
+
76
+ /// \brief For certain data types, a cast is needed in order to store the
77
+ /// data in a pandas DataFrame or Series (e.g. timestamps are always stored
78
+ /// as nanoseconds in pandas). This option controls whether it is a safe
79
+ /// cast or not.
80
+ bool safe_cast = true;
81
+
82
+ /// \brief If true, create one block per column rather than consolidated
83
+ /// blocks (1 per data type). Do zero-copy wrapping when there are no
84
+ /// nulls. pandas currently will consolidate the blocks on its own, causing
85
+ /// increased memory use, so keep this in mind if you are working on a
86
+ /// memory-constrained situation.
87
+ bool split_blocks = false;
88
+
89
+ /// \brief If true, allow non-writable zero-copy views to be created for
90
+ /// single column blocks. This option is also used to provide zero copy for
91
+ /// Series data
92
+ bool allow_zero_copy_blocks = false;
93
+
94
+ /// \brief If true, attempt to deallocate buffers in passed Arrow object if
95
+ /// it is the only remaining shared_ptr copy of it. See ARROW-3789 for
96
+ /// original context for this feature. Only currently implemented for Table
97
+ /// conversions
98
+ bool self_destruct = false;
99
+
100
+ /// \brief The default behavior (DEFAULT), is to convert Arrow Map arrays to
101
+ /// Python association lists (list-of-tuples) in the same order as the Arrow
102
+ /// Map, as in [(key1, value1), (key2, value2), ...]
103
+ /// If LOSSY or STRICT, convert Arrow Map arrays to native Python dicts.
104
+ /// This can change the ordering of (key, value) pairs, and will deduplicate
105
+ /// multiple keys, resulting in a possible loss of data.
106
+ /// If 'lossy', this key deduplication results in a warning printed
107
+ /// when detected. If 'strict', this instead results in an exception
108
+ /// being raised when detected.
109
+ MapConversionType maps_as_pydicts = MapConversionType::DEFAULT;
110
+
111
+ // Used internally for nested arrays.
112
+ bool decode_dictionaries = false;
113
+
114
+ // Columns that should be casted to categorical
115
+ std::unordered_set<std::string> categorical_columns;
116
+
117
+ // Columns that should be passed through to be converted to
118
+ // ExtensionArray/Block
119
+ std::unordered_set<std::string> extension_columns;
120
+
121
+ // Used internally to decipher between to_numpy() and to_pandas() when
122
+ // the expected output differs
123
+ bool to_numpy = false;
124
+ };
125
+
126
+ ARROW_PYTHON_EXPORT
127
+ Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr<Array> arr,
128
+ PyObject* py_ref, PyObject** out);
129
+
130
+ ARROW_PYTHON_EXPORT
131
+ Status ConvertChunkedArrayToPandas(const PandasOptions& options,
132
+ std::shared_ptr<ChunkedArray> col, PyObject* py_ref,
133
+ PyObject** out);
134
+
135
+ // Convert a whole table as efficiently as possible to a pandas.DataFrame.
136
+ //
137
+ // The returned Python object is a list of tuples consisting of the exact 2D
138
+ // BlockManager structure of the pandas.DataFrame used as of pandas 0.19.x.
139
+ //
140
+ // tuple item: (indices: ndarray[int32], block: ndarray[TYPE, ndim=2])
141
+ ARROW_PYTHON_EXPORT
142
+ Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr<Table> table,
143
+ PyObject** out);
144
+
145
+ } // namespace py
146
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/platform.h"
21
+
22
+ #include "arrow/python/visibility.h"
23
+
24
+ namespace arrow {
25
+ namespace py {
26
+ namespace benchmark {
27
+
28
+ // Micro-benchmark routines for use from ASV
29
+
30
+ // Run PandasObjectIsNull() once over every object in *list*
31
+ ARROW_PYTHON_EXPORT
32
+ void Benchmark_PandasObjectIsNull(PyObject* list);
33
+
34
+ } // namespace benchmark
35
+ } // namespace py
36
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/csv.h ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "arrow/csv/options.h"
26
+ #include "arrow/python/common.h"
27
+ #include "arrow/util/macros.h"
28
+
29
+ namespace arrow {
30
+ namespace py {
31
+ namespace csv {
32
+
33
+ using PyInvalidRowCallback = std::function<::arrow::csv::InvalidRowResult(
34
+ PyObject*, const ::arrow::csv::InvalidRow&)>;
35
+
36
+ ARROW_PYTHON_EXPORT
37
+ ::arrow::csv::InvalidRowHandler MakeInvalidRowHandler(PyInvalidRowCallback,
38
+ PyObject* handler);
39
+
40
+ } // namespace csv
41
+ } // namespace py
42
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <vector>
23
+
24
+ #include "arrow/python/serialize.h"
25
+ #include "arrow/python/visibility.h"
26
+ #include "arrow/status.h"
27
+
28
+ namespace arrow {
29
+
30
+ class RecordBatch;
31
+ class Tensor;
32
+
33
+ namespace io {
34
+
35
+ class RandomAccessFile;
36
+
37
+ } // namespace io
38
+
39
+ namespace py {
40
+
41
+ struct ARROW_PYTHON_EXPORT SparseTensorCounts {
42
+ int coo;
43
+ int csr;
44
+ int csc;
45
+ int csf;
46
+ int ndim_csf;
47
+
48
+ int num_total_tensors() const { return coo + csr + csc + csf; }
49
+ int num_total_buffers() const {
50
+ return coo * 3 + csr * 4 + csc * 4 + 2 * ndim_csf + csf;
51
+ }
52
+ };
53
+
54
+ /// \brief Read serialized Python sequence from file interface using Arrow IPC
55
+ /// \param[in] src a RandomAccessFile
56
+ /// \param[out] out the reconstructed data
57
+ /// \return Status
58
+ ARROW_PYTHON_EXPORT
59
+ Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out);
60
+
61
+ /// \brief Reconstruct SerializedPyObject from representation produced by
62
+ /// SerializedPyObject::GetComponents.
63
+ ///
64
+ /// \param[in] num_tensors number of tensors in the object
65
+ /// \param[in] num_sparse_tensors number of sparse tensors in the object
66
+ /// \param[in] num_ndarrays number of numpy Ndarrays in the object
67
+ /// \param[in] num_buffers number of buffers in the object
68
+ /// \param[in] data a list containing pyarrow.Buffer instances. It must be 1 +
69
+ /// num_tensors * 2 + num_coo_tensors * 3 + num_csr_tensors * 4 + num_csc_tensors * 4 +
70
+ /// num_csf_tensors * (2 * ndim_csf + 3) + num_buffers in length
71
+ /// \param[out] out the reconstructed object
72
+ /// \return Status
73
+ ARROW_PYTHON_EXPORT
74
+ Status GetSerializedFromComponents(int num_tensors,
75
+ const SparseTensorCounts& num_sparse_tensors,
76
+ int num_ndarrays, int num_buffers, PyObject* data,
77
+ SerializedPyObject* out);
78
+
79
+ /// \brief Reconstruct Python object from Arrow-serialized representation
80
+ /// \param[in] context Serialization context which contains custom serialization
81
+ /// and deserialization callbacks. Can be any Python object with a
82
+ /// _serialize_callback method for serialization and a _deserialize_callback
83
+ /// method for deserialization. If context is None, no custom serialization
84
+ /// will be attempted.
85
+ /// \param[in] object Object to deserialize
86
+ /// \param[in] base a Python object holding the underlying data that any NumPy
87
+ /// arrays will reference, to avoid premature deallocation
88
+ /// \param[out] out The returned object
89
+ /// \return Status
90
+ /// This acquires the GIL
91
+ ARROW_PYTHON_EXPORT
92
+ Status DeserializeObject(PyObject* context, const SerializedPyObject& object,
93
+ PyObject* base, PyObject** out);
94
+
95
+ /// \brief Reconstruct Ndarray from Arrow-serialized representation
96
+ /// \param[in] object Object to deserialize
97
+ /// \param[out] out The deserialized tensor
98
+ /// \return Status
99
+ ARROW_PYTHON_EXPORT
100
+ Status DeserializeNdarray(const SerializedPyObject& object, std::shared_ptr<Tensor>* out);
101
+
102
+ ARROW_PYTHON_EXPORT
103
+ Status NdarrayFromBuffer(std::shared_ptr<Buffer> src, std::shared_ptr<Tensor>* out);
104
+
105
+ } // namespace py
106
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/gdb.h ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/visibility.h"
21
+
22
+ namespace arrow {
23
+ namespace gdb {
24
+
25
+ ARROW_PYTHON_EXPORT
26
+ void TestSession();
27
+
28
+ } // namespace gdb
29
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/helpers.h ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/platform.h"
21
+
22
+ #include <limits>
23
+ #include <memory>
24
+ #include <string>
25
+ #include <utility>
26
+
27
+ #include "arrow/python/numpy_interop.h"
28
+
29
+ #include <numpy/halffloat.h>
30
+
31
+ #include "arrow/python/visibility.h"
32
+ #include "arrow/type.h"
33
+ #include "arrow/util/macros.h"
34
+
35
+ namespace arrow {
36
+
37
+ namespace py {
38
+
39
+ class OwnedRef;
40
+
41
+ // \brief Get an arrow DataType instance from Arrow's Type::type enum
42
+ // \param[in] type One of the values of Arrow's Type::type enum
43
+ // \return A shared pointer to DataType
44
+ ARROW_PYTHON_EXPORT std::shared_ptr<DataType> GetPrimitiveType(Type::type type);
45
+
46
+ // \brief Construct a np.float16 object from a npy_half value.
47
+ ARROW_PYTHON_EXPORT PyObject* PyHalf_FromHalf(npy_half value);
48
+
49
+ // \brief Convert a Python object to a npy_half value.
50
+ ARROW_PYTHON_EXPORT Status PyFloat_AsHalf(PyObject* obj, npy_half* out);
51
+
52
+ namespace internal {
53
+
54
+ // \brief Check that a Python module has been already imported
55
+ // \param[in] module_name The name of the module
56
+ Result<bool> IsModuleImported(const std::string& module_name);
57
+
58
+ // \brief Import a Python module
59
+ // \param[in] module_name The name of the module
60
+ // \param[out] ref The OwnedRef containing the module PyObject*
61
+ ARROW_PYTHON_EXPORT
62
+ Status ImportModule(const std::string& module_name, OwnedRef* ref);
63
+
64
+ // \brief Import an object from a Python module
65
+ // \param[in] module A Python module
66
+ // \param[in] name The name of the object to import
67
+ // \param[out] ref The OwnedRef containing the \c name attribute of the Python module \c
68
+ // module
69
+ ARROW_PYTHON_EXPORT
70
+ Status ImportFromModule(PyObject* module, const std::string& name, OwnedRef* ref);
71
+
72
+ // \brief Check whether obj is an integer, independent of Python versions.
73
+ inline bool IsPyInteger(PyObject* obj) { return PyLong_Check(obj); }
74
+
75
+ // \brief Import symbols from pandas that we need for various type-checking,
76
+ // like pandas.NaT or pandas.NA
77
+ void InitPandasStaticData();
78
+
79
+ // \brief Use pandas missing value semantics to check if a value is null
80
+ ARROW_PYTHON_EXPORT
81
+ bool PandasObjectIsNull(PyObject* obj);
82
+
83
+ // \brief Check that obj is a pandas.Timedelta instance
84
+ ARROW_PYTHON_EXPORT
85
+ bool IsPandasTimedelta(PyObject* obj);
86
+
87
+ // \brief Check that obj is a pandas.Timestamp instance
88
+ bool IsPandasTimestamp(PyObject* obj);
89
+
90
+ // \brief Returned a borrowed reference to the pandas.tseries.offsets.DateOffset
91
+ PyObject* BorrowPandasDataOffsetType();
92
+
93
+ // \brief Check whether obj is a floating-point NaN
94
+ ARROW_PYTHON_EXPORT
95
+ bool PyFloat_IsNaN(PyObject* obj);
96
+
97
+ inline bool IsPyBinary(PyObject* obj) {
98
+ return PyBytes_Check(obj) || PyByteArray_Check(obj) || PyMemoryView_Check(obj);
99
+ }
100
+
101
+ // \brief Convert a Python integer into a C integer
102
+ // \param[in] obj A Python integer
103
+ // \param[out] out A pointer to a C integer to hold the result of the conversion
104
+ // \return The status of the operation
105
+ template <typename Int>
106
+ Status CIntFromPython(PyObject* obj, Int* out, const std::string& overflow_message = "");
107
+
108
+ // \brief Convert a Python unicode string to a std::string
109
+ ARROW_PYTHON_EXPORT
110
+ Status PyUnicode_AsStdString(PyObject* obj, std::string* out);
111
+
112
+ // \brief Convert a Python bytes object to a std::string
113
+ ARROW_PYTHON_EXPORT
114
+ std::string PyBytes_AsStdString(PyObject* obj);
115
+
116
+ // \brief Call str() on the given object and return the result as a std::string
117
+ ARROW_PYTHON_EXPORT
118
+ Status PyObject_StdStringStr(PyObject* obj, std::string* out);
119
+
120
+ // \brief Return the repr() of the given object (always succeeds)
121
+ ARROW_PYTHON_EXPORT
122
+ std::string PyObject_StdStringRepr(PyObject* obj);
123
+
124
+ // \brief Cast the given size to int32_t, with error checking
125
+ inline Status CastSize(Py_ssize_t size, int32_t* out,
126
+ const char* error_msg = "Maximum size exceeded (2GB)") {
127
+ // size is assumed to be positive
128
+ if (size > std::numeric_limits<int32_t>::max()) {
129
+ return Status::Invalid(error_msg);
130
+ }
131
+ *out = static_cast<int32_t>(size);
132
+ return Status::OK();
133
+ }
134
+
135
+ inline Status CastSize(Py_ssize_t size, int64_t* out, const char* error_msg = NULLPTR) {
136
+ // size is assumed to be positive
137
+ *out = static_cast<int64_t>(size);
138
+ return Status::OK();
139
+ }
140
+
141
+ // \brief Print the Python object's __str__ form along with the passed error
142
+ // message
143
+ ARROW_PYTHON_EXPORT
144
+ Status InvalidValue(PyObject* obj, const std::string& why);
145
+
146
+ ARROW_PYTHON_EXPORT
147
+ Status InvalidType(PyObject* obj, const std::string& why);
148
+
149
+ ARROW_PYTHON_EXPORT
150
+ Status IntegerScalarToDoubleSafe(PyObject* obj, double* result);
151
+ ARROW_PYTHON_EXPORT
152
+ Status IntegerScalarToFloat32Safe(PyObject* obj, float* result);
153
+
154
+ // \brief Print Python object __repr__
155
+ void DebugPrint(PyObject* obj);
156
+
157
+ ARROW_PYTHON_EXPORT
158
+ bool IsThreadingEnabled();
159
+
160
+ } // namespace internal
161
+ } // namespace py
162
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between CPython built-in data structures and Arrow
19
+ // data structures
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/python/platform.h"
24
+
25
+ #include <memory>
26
+
27
+ #include "arrow/python/visibility.h"
28
+ #include "arrow/type.h"
29
+ #include "arrow/util/macros.h"
30
+
31
+ #include "common.h"
32
+
33
+ namespace arrow {
34
+
35
+ class Array;
36
+ class Status;
37
+
38
+ namespace py {
39
+
40
+ // These functions take a sequence input, not arbitrary iterables
41
+
42
+ /// \brief Infer Arrow type from a Python sequence
43
+ /// \param[in] obj the sequence of values
44
+ /// \param[in] mask an optional mask where True values are null. May
45
+ /// be nullptr
46
+ /// \param[in] pandas_null_sentinels use pandas's null value markers
47
+ ARROW_PYTHON_EXPORT
48
+ Result<std::shared_ptr<arrow::DataType>> InferArrowType(PyObject* obj, PyObject* mask,
49
+ bool pandas_null_sentinels);
50
+
51
+ /// Checks whether the passed Python object is a boolean scalar
52
+ ARROW_PYTHON_EXPORT
53
+ bool IsPyBool(PyObject* obj);
54
+
55
+ /// Checks whether the passed Python object is an integer scalar
56
+ ARROW_PYTHON_EXPORT
57
+ bool IsPyInt(PyObject* obj);
58
+
59
+ /// Checks whether the passed Python object is a float scalar
60
+ ARROW_PYTHON_EXPORT
61
+ bool IsPyFloat(PyObject* obj);
62
+
63
+ } // namespace py
64
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/platform.h"
21
+ #include "arrow/python/visibility.h"
22
+
23
+ extern "C" {
24
+ ARROW_PYTHON_EXPORT
25
+ int arrow_init_numpy();
26
+ }
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib_api.h ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Generated by Cython 3.0.10 */
2
+
3
+ #ifndef __PYX_HAVE_API__pyarrow__lib
4
+ #define __PYX_HAVE_API__pyarrow__lib
5
+ #ifdef __MINGW64__
6
+ #define MS_WIN64
7
+ #endif
8
+ #include "Python.h"
9
+ #include "lib.h"
10
+
11
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_box_memory_pool)( arrow::MemoryPool *) = 0;
12
+ #define box_memory_pool __pyx_api_f_7pyarrow_3lib_box_memory_pool
13
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer)(std::shared_ptr< arrow::Buffer> const &) = 0;
14
+ #define pyarrow_wrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer
15
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer)(std::shared_ptr< arrow::ResizableBuffer> const &) = 0;
16
+ #define pyarrow_wrap_resizable_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer
17
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type)(std::shared_ptr< arrow::DataType> const &) = 0;
18
+ #define pyarrow_wrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type
19
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field)(std::shared_ptr< arrow::Field> const &) = 0;
20
+ #define pyarrow_wrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field
21
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema)(std::shared_ptr< arrow::Schema> const &) = 0;
22
+ #define pyarrow_wrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema
23
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar)(std::shared_ptr< arrow::Scalar> const &) = 0;
24
+ #define pyarrow_wrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar
25
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array)(std::shared_ptr< arrow::Array> const &) = 0;
26
+ #define pyarrow_wrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array
27
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array)(std::shared_ptr< arrow::ChunkedArray> const &) = 0;
28
+ #define pyarrow_wrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array
29
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor)(std::shared_ptr< arrow::SparseCOOTensor> const &) = 0;
30
+ #define pyarrow_wrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor
31
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix)(std::shared_ptr< arrow::SparseCSCMatrix> const &) = 0;
32
+ #define pyarrow_wrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix
33
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor)(std::shared_ptr< arrow::SparseCSFTensor> const &) = 0;
34
+ #define pyarrow_wrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor
35
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix)(std::shared_ptr< arrow::SparseCSRMatrix> const &) = 0;
36
+ #define pyarrow_wrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix
37
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor)(std::shared_ptr< arrow::Tensor> const &) = 0;
38
+ #define pyarrow_wrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor
39
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch)(std::shared_ptr< arrow::RecordBatch> const &) = 0;
40
+ #define pyarrow_wrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch
41
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table)(std::shared_ptr< arrow::Table> const &) = 0;
42
+ #define pyarrow_wrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table
43
+ static std::shared_ptr< arrow::Buffer> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer)(PyObject *) = 0;
44
+ #define pyarrow_unwrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer
45
+ static std::shared_ptr< arrow::DataType> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type)(PyObject *) = 0;
46
+ #define pyarrow_unwrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type
47
+ static std::shared_ptr< arrow::Field> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field)(PyObject *) = 0;
48
+ #define pyarrow_unwrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field
49
+ static std::shared_ptr< arrow::Schema> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema)(PyObject *) = 0;
50
+ #define pyarrow_unwrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema
51
+ static std::shared_ptr< arrow::Scalar> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar)(PyObject *) = 0;
52
+ #define pyarrow_unwrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar
53
+ static std::shared_ptr< arrow::Array> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array)(PyObject *) = 0;
54
+ #define pyarrow_unwrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array
55
+ static std::shared_ptr< arrow::ChunkedArray> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array)(PyObject *) = 0;
56
+ #define pyarrow_unwrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array
57
+ static std::shared_ptr< arrow::SparseCOOTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor)(PyObject *) = 0;
58
+ #define pyarrow_unwrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor
59
+ static std::shared_ptr< arrow::SparseCSCMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix)(PyObject *) = 0;
60
+ #define pyarrow_unwrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix
61
+ static std::shared_ptr< arrow::SparseCSFTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor)(PyObject *) = 0;
62
+ #define pyarrow_unwrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor
63
+ static std::shared_ptr< arrow::SparseCSRMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix)(PyObject *) = 0;
64
+ #define pyarrow_unwrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix
65
+ static std::shared_ptr< arrow::Tensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor)(PyObject *) = 0;
66
+ #define pyarrow_unwrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor
67
+ static std::shared_ptr< arrow::RecordBatch> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch)(PyObject *) = 0;
68
+ #define pyarrow_unwrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch
69
+ static std::shared_ptr< arrow::Table> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table)(PyObject *) = 0;
70
+ #define pyarrow_unwrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table
71
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status)(arrow::Status const &) = 0;
72
+ #define pyarrow_internal_check_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status
73
+ static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status)(arrow::Status const &) = 0;
74
+ #define pyarrow_internal_convert_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status
75
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer)(PyObject *) = 0;
76
+ #define pyarrow_is_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer
77
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type)(PyObject *) = 0;
78
+ #define pyarrow_is_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type
79
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata)(PyObject *) = 0;
80
+ #define pyarrow_is_metadata __pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata
81
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_field)(PyObject *) = 0;
82
+ #define pyarrow_is_field __pyx_api_f_7pyarrow_3lib_pyarrow_is_field
83
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema)(PyObject *) = 0;
84
+ #define pyarrow_is_schema __pyx_api_f_7pyarrow_3lib_pyarrow_is_schema
85
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_array)(PyObject *) = 0;
86
+ #define pyarrow_is_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_array
87
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array)(PyObject *) = 0;
88
+ #define pyarrow_is_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array
89
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar)(PyObject *) = 0;
90
+ #define pyarrow_is_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar
91
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor)(PyObject *) = 0;
92
+ #define pyarrow_is_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor
93
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor)(PyObject *) = 0;
94
+ #define pyarrow_is_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor
95
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix)(PyObject *) = 0;
96
+ #define pyarrow_is_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix
97
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix)(PyObject *) = 0;
98
+ #define pyarrow_is_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix
99
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor)(PyObject *) = 0;
100
+ #define pyarrow_is_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor
101
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_table)(PyObject *) = 0;
102
+ #define pyarrow_is_table __pyx_api_f_7pyarrow_3lib_pyarrow_is_table
103
+ static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch)(PyObject *) = 0;
104
+ #define pyarrow_is_batch __pyx_api_f_7pyarrow_3lib_pyarrow_is_batch
105
+ #ifndef __PYX_HAVE_RT_ImportFunction_3_0_10
106
+ #define __PYX_HAVE_RT_ImportFunction_3_0_10
107
+ static int __Pyx_ImportFunction_3_0_10(PyObject *module, const char *funcname, void (**f)(void), const char *sig) {
108
+ PyObject *d = 0;
109
+ PyObject *cobj = 0;
110
+ union {
111
+ void (*fp)(void);
112
+ void *p;
113
+ } tmp;
114
+ d = PyObject_GetAttrString(module, (char *)"__pyx_capi__");
115
+ if (!d)
116
+ goto bad;
117
+ cobj = PyDict_GetItemString(d, funcname);
118
+ if (!cobj) {
119
+ PyErr_Format(PyExc_ImportError,
120
+ "%.200s does not export expected C function %.200s",
121
+ PyModule_GetName(module), funcname);
122
+ goto bad;
123
+ }
124
+ if (!PyCapsule_IsValid(cobj, sig)) {
125
+ PyErr_Format(PyExc_TypeError,
126
+ "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
127
+ PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj));
128
+ goto bad;
129
+ }
130
+ tmp.p = PyCapsule_GetPointer(cobj, sig);
131
+ *f = tmp.fp;
132
+ if (!(*f))
133
+ goto bad;
134
+ Py_DECREF(d);
135
+ return 0;
136
+ bad:
137
+ Py_XDECREF(d);
138
+ return -1;
139
+ }
140
+ #endif
141
+
142
+
143
+ static int import_pyarrow__lib(void) {
144
+ PyObject *module = 0;
145
+ module = PyImport_ImportModule("pyarrow.lib");
146
+ if (!module) goto bad;
147
+ if (__Pyx_ImportFunction_3_0_10(module, "box_memory_pool", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_box_memory_pool, "PyObject *( arrow::MemoryPool *)") < 0) goto bad;
148
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer, "PyObject *(std::shared_ptr< arrow::Buffer> const &)") < 0) goto bad;
149
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_resizable_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer, "PyObject *(std::shared_ptr< arrow::ResizableBuffer> const &)") < 0) goto bad;
150
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type, "PyObject *(std::shared_ptr< arrow::DataType> const &)") < 0) goto bad;
151
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field, "PyObject *(std::shared_ptr< arrow::Field> const &)") < 0) goto bad;
152
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema, "PyObject *(std::shared_ptr< arrow::Schema> const &)") < 0) goto bad;
153
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar, "PyObject *(std::shared_ptr< arrow::Scalar> const &)") < 0) goto bad;
154
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array, "PyObject *(std::shared_ptr< arrow::Array> const &)") < 0) goto bad;
155
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array, "PyObject *(std::shared_ptr< arrow::ChunkedArray> const &)") < 0) goto bad;
156
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor, "PyObject *(std::shared_ptr< arrow::SparseCOOTensor> const &)") < 0) goto bad;
157
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSCMatrix> const &)") < 0) goto bad;
158
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor, "PyObject *(std::shared_ptr< arrow::SparseCSFTensor> const &)") < 0) goto bad;
159
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSRMatrix> const &)") < 0) goto bad;
160
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor, "PyObject *(std::shared_ptr< arrow::Tensor> const &)") < 0) goto bad;
161
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch, "PyObject *(std::shared_ptr< arrow::RecordBatch> const &)") < 0) goto bad;
162
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table, "PyObject *(std::shared_ptr< arrow::Table> const &)") < 0) goto bad;
163
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer, "std::shared_ptr< arrow::Buffer> (PyObject *)") < 0) goto bad;
164
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type, "std::shared_ptr< arrow::DataType> (PyObject *)") < 0) goto bad;
165
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field, "std::shared_ptr< arrow::Field> (PyObject *)") < 0) goto bad;
166
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema, "std::shared_ptr< arrow::Schema> (PyObject *)") < 0) goto bad;
167
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar, "std::shared_ptr< arrow::Scalar> (PyObject *)") < 0) goto bad;
168
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array, "std::shared_ptr< arrow::Array> (PyObject *)") < 0) goto bad;
169
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array, "std::shared_ptr< arrow::ChunkedArray> (PyObject *)") < 0) goto bad;
170
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor, "std::shared_ptr< arrow::SparseCOOTensor> (PyObject *)") < 0) goto bad;
171
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix, "std::shared_ptr< arrow::SparseCSCMatrix> (PyObject *)") < 0) goto bad;
172
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor, "std::shared_ptr< arrow::SparseCSFTensor> (PyObject *)") < 0) goto bad;
173
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix, "std::shared_ptr< arrow::SparseCSRMatrix> (PyObject *)") < 0) goto bad;
174
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor, "std::shared_ptr< arrow::Tensor> (PyObject *)") < 0) goto bad;
175
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch, "std::shared_ptr< arrow::RecordBatch> (PyObject *)") < 0) goto bad;
176
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table, "std::shared_ptr< arrow::Table> (PyObject *)") < 0) goto bad;
177
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_internal_check_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status, "int (arrow::Status const &)") < 0) goto bad;
178
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_internal_convert_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status, "PyObject *(arrow::Status const &)") < 0) goto bad;
179
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer, "int (PyObject *)") < 0) goto bad;
180
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type, "int (PyObject *)") < 0) goto bad;
181
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_metadata", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata, "int (PyObject *)") < 0) goto bad;
182
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_field, "int (PyObject *)") < 0) goto bad;
183
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema, "int (PyObject *)") < 0) goto bad;
184
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_array, "int (PyObject *)") < 0) goto bad;
185
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array, "int (PyObject *)") < 0) goto bad;
186
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar, "int (PyObject *)") < 0) goto bad;
187
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor, "int (PyObject *)") < 0) goto bad;
188
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor, "int (PyObject *)") < 0) goto bad;
189
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix, "int (PyObject *)") < 0) goto bad;
190
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix, "int (PyObject *)") < 0) goto bad;
191
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor, "int (PyObject *)") < 0) goto bad;
192
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_table, "int (PyObject *)") < 0) goto bad;
193
+ if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch, "int (PyObject *)") < 0) goto bad;
194
+ Py_DECREF(module); module = 0;
195
+ return 0;
196
+ bad:
197
+ Py_XDECREF(module);
198
+ return -1;
199
+ }
200
+
201
+ #endif /* !__PYX_HAVE_API__pyarrow__lib */
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/platform.h" // IWYU pragma: export
21
+
22
+ #include <numpy/numpyconfig.h> // IWYU pragma: export
23
+
24
+ // Don't use the deprecated Numpy functions
25
+ #ifdef NPY_1_7_API_VERSION
26
+ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
27
+ #else
28
+ #define NPY_ARRAY_NOTSWAPPED NPY_NOTSWAPPED
29
+ #define NPY_ARRAY_ALIGNED NPY_ALIGNED
30
+ #define NPY_ARRAY_WRITEABLE NPY_WRITEABLE
31
+ #define NPY_ARRAY_UPDATEIFCOPY NPY_UPDATEIFCOPY
32
+ #endif
33
+
34
+ // This is required to be able to access the NumPy C API properly in C++ files
35
+ // other than init.cc.
36
+ #define PY_ARRAY_UNIQUE_SYMBOL arrow_ARRAY_API
37
+ #ifndef NUMPY_IMPORT_ARRAY
38
+ #define NO_IMPORT_ARRAY
39
+ #endif
40
+
41
+ #include <numpy/arrayobject.h> // IWYU pragma: export
42
+ #include <numpy/arrayscalars.h> // IWYU pragma: export
43
+ #include <numpy/ufuncobject.h> // IWYU pragma: export
44
+
45
+ // A bit subtle. Numpy has 5 canonical integer types:
46
+ // (or, rather, type pairs: signed and unsigned)
47
+ // NPY_BYTE, NPY_SHORT, NPY_INT, NPY_LONG, NPY_LONGLONG
48
+ // It also has 4 fixed-width integer aliases.
49
+ // When mapping Arrow integer types to these 4 fixed-width aliases,
50
+ // we always miss one of the canonical types (even though it may
51
+ // have the same width as one of the aliases).
52
+ // Which one depends on the platform...
53
+ // On a LP64 system, NPY_INT64 maps to NPY_LONG and
54
+ // NPY_LONGLONG needs to be handled separately.
55
+ // On a LLP64 system, NPY_INT32 maps to NPY_LONG and
56
+ // NPY_INT needs to be handled separately.
57
+
58
+ #if NPY_BITSOF_LONG == 32 && NPY_BITSOF_LONGLONG == 64
59
+ #define NPY_INT64_IS_LONG_LONG 1
60
+ #else
61
+ #define NPY_INT64_IS_LONG_LONG 0
62
+ #endif
63
+
64
+ #if NPY_BITSOF_INT == 32 && NPY_BITSOF_LONG == 64
65
+ #define NPY_INT32_IS_INT 1
66
+ #else
67
+ #define NPY_INT32_IS_INT 0
68
+ #endif
69
+
70
+ // Backported NumPy 2 API (can be removed if numpy 2 is required)
71
+ #if NPY_ABI_VERSION < 0x02000000
72
+ #define PyDataType_ELSIZE(descr) ((descr)->elsize)
73
+ #define PyDataType_C_METADATA(descr) ((descr)->c_metadata)
74
+ #define PyDataType_FIELDS(descr) ((descr)->fields)
75
+ #endif
76
+
77
+ namespace arrow {
78
+ namespace py {
79
+
80
+ inline int import_numpy() {
81
+ #ifdef NUMPY_IMPORT_ARRAY
82
+ import_array1(-1);
83
+ import_umath1(-1);
84
+ #endif
85
+
86
+ return 0;
87
+ }
88
+
89
+ // See above about the missing Numpy integer type numbers
90
+ inline int fix_numpy_type_num(int type_num) {
91
+ #if !NPY_INT32_IS_INT && NPY_BITSOF_INT == 32
92
+ if (type_num == NPY_INT) return NPY_INT32;
93
+ if (type_num == NPY_UINT) return NPY_UINT32;
94
+ #endif
95
+ #if !NPY_INT64_IS_LONG_LONG && NPY_BITSOF_LONGLONG == 64
96
+ if (type_num == NPY_LONGLONG) return NPY_INT64;
97
+ if (type_num == NPY_ULONGLONG) return NPY_UINT64;
98
+ #endif
99
+ return type_num;
100
+ }
101
+
102
+ } // namespace py
103
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Converting from pandas memory representation to Arrow data structures
19
+
20
+ #pragma once
21
+
22
+ #include "arrow/python/platform.h"
23
+
24
+ #include <memory>
25
+
26
+ #include "arrow/compute/api.h"
27
+ #include "arrow/python/visibility.h"
28
+
29
+ namespace arrow {
30
+
31
+ class Array;
32
+ class ChunkedArray;
33
+ class DataType;
34
+ class MemoryPool;
35
+ class Status;
36
+
37
+ namespace py {
38
+
39
+ /// Convert NumPy arrays to Arrow. If target data type is not known, pass a
40
+ /// type with null
41
+ ///
42
+ /// \param[in] pool Memory pool for any memory allocations
43
+ /// \param[in] ao an ndarray with the array data
44
+ /// \param[in] mo an ndarray with a null mask (True is null), optional
45
+ /// \param[in] from_pandas If true, use pandas's null sentinels to determine
46
+ /// whether values are null
47
+ /// \param[in] type a specific type to cast to, may be null
48
+ /// \param[in] cast_options casting options
49
+ /// \param[out] out a ChunkedArray, to accommodate chunked output
50
+ ARROW_PYTHON_EXPORT
51
+ Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas,
52
+ const std::shared_ptr<DataType>& type,
53
+ const compute::CastOptions& cast_options,
54
+ std::shared_ptr<ChunkedArray>* out);
55
+
56
+ /// Safely convert NumPy arrays to Arrow. If target data type is not known,
57
+ /// pass a type with null.
58
+ ///
59
+ /// \param[in] pool Memory pool for any memory allocations
60
+ /// \param[in] ao an ndarray with the array data
61
+ /// \param[in] mo an ndarray with a null mask (True is null), optional
62
+ /// \param[in] from_pandas If true, use pandas's null sentinels to determine
63
+ /// whether values are null
64
+ /// \param[in] type a specific type to cast to, may be null
65
+ /// \param[out] out a ChunkedArray, to accommodate chunked output
66
+ ARROW_PYTHON_EXPORT
67
+ Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas,
68
+ const std::shared_ptr<DataType>& type,
69
+ std::shared_ptr<ChunkedArray>* out);
70
+
71
+ } // namespace py
72
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+
22
+ #include "arrow/python/common.h"
23
+ #include "arrow/python/visibility.h"
24
+ #include "arrow/util/macros.h"
25
+ #include "parquet/encryption/crypto_factory.h"
26
+ #include "parquet/encryption/kms_client.h"
27
+ #include "parquet/encryption/kms_client_factory.h"
28
+
29
+ #if defined(_WIN32) || defined(__CYGWIN__) // Windows
30
+ #if defined(_MSC_VER)
31
+ #pragma warning(disable : 4251)
32
+ #else
33
+ #pragma GCC diagnostic ignored "-Wattributes"
34
+ #endif
35
+
36
+ #ifdef ARROW_PYTHON_STATIC
37
+ #define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT
38
+ #elif defined(ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORTING)
39
+ #define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __declspec(dllexport)
40
+ #else
41
+ #define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __declspec(dllimport)
42
+ #endif
43
+
44
+ #else // Not Windows
45
+ #ifndef ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT
46
+ #define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __attribute__((visibility("default")))
47
+ #endif
48
+ #endif // Non-Windows
49
+
50
+ namespace arrow {
51
+ namespace py {
52
+ namespace parquet {
53
+ namespace encryption {
54
+
55
+ /// \brief A table of function pointers for calling from C++ into
56
+ /// Python.
57
+ class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientVtable {
58
+ public:
59
+ std::function<void(PyObject*, const std::string& key_bytes,
60
+ const std::string& master_key_identifier, std::string* out)>
61
+ wrap_key;
62
+ std::function<void(PyObject*, const std::string& wrapped_key,
63
+ const std::string& master_key_identifier, std::string* out)>
64
+ unwrap_key;
65
+ };
66
+
67
+ /// \brief A helper for KmsClient implementation in Python.
68
+ class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClient
69
+ : public ::parquet::encryption::KmsClient {
70
+ public:
71
+ PyKmsClient(PyObject* handler, PyKmsClientVtable vtable);
72
+ ~PyKmsClient() override;
73
+
74
+ std::string WrapKey(const std::string& key_bytes,
75
+ const std::string& master_key_identifier) override;
76
+
77
+ std::string UnwrapKey(const std::string& wrapped_key,
78
+ const std::string& master_key_identifier) override;
79
+
80
+ private:
81
+ OwnedRefNoGIL handler_;
82
+ PyKmsClientVtable vtable_;
83
+ };
84
+
85
+ /// \brief A table of function pointers for calling from C++ into
86
+ /// Python.
87
+ class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientFactoryVtable {
88
+ public:
89
+ std::function<void(
90
+ PyObject*, const ::parquet::encryption::KmsConnectionConfig& kms_connection_config,
91
+ std::shared_ptr<::parquet::encryption::KmsClient>* out)>
92
+ create_kms_client;
93
+ };
94
+
95
+ /// \brief A helper for KmsClientFactory implementation in Python.
96
+ class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientFactory
97
+ : public ::parquet::encryption::KmsClientFactory {
98
+ public:
99
+ PyKmsClientFactory(PyObject* handler, PyKmsClientFactoryVtable vtable);
100
+ ~PyKmsClientFactory() override;
101
+
102
+ std::shared_ptr<::parquet::encryption::KmsClient> CreateKmsClient(
103
+ const ::parquet::encryption::KmsConnectionConfig& kms_connection_config) override;
104
+
105
+ private:
106
+ OwnedRefNoGIL handler_;
107
+ PyKmsClientFactoryVtable vtable_;
108
+ };
109
+
110
+ /// \brief A CryptoFactory that returns Results instead of throwing exceptions.
111
+ class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyCryptoFactory
112
+ : public ::parquet::encryption::CryptoFactory {
113
+ public:
114
+ arrow::Result<std::shared_ptr<::parquet::FileEncryptionProperties>>
115
+ SafeGetFileEncryptionProperties(
116
+ const ::parquet::encryption::KmsConnectionConfig& kms_connection_config,
117
+ const ::parquet::encryption::EncryptionConfiguration& encryption_config);
118
+
119
+ /// The returned FileDecryptionProperties object will use the cache inside this
120
+ /// CryptoFactory object, so please keep this
121
+ /// CryptoFactory object alive along with the returned
122
+ /// FileDecryptionProperties object.
123
+ arrow::Result<std::shared_ptr<::parquet::FileDecryptionProperties>>
124
+ SafeGetFileDecryptionProperties(
125
+ const ::parquet::encryption::KmsConnectionConfig& kms_connection_config,
126
+ const ::parquet::encryption::DecryptionConfiguration& decryption_config);
127
+ };
128
+
129
+ } // namespace encryption
130
+ } // namespace parquet
131
+ } // namespace py
132
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Often-used headers, for precompiling.
19
+ // If updating this header, please make sure you check compilation speed
20
+ // before checking in. Adding headers which are not used extremely often
21
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
22
+
23
+ #include "arrow/pch.h"
24
+ #include "arrow/python/platform.h"
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/platform.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between pandas's NumPy-based data representation
19
+ // and Arrow data structures
20
+
21
+ #pragma once
22
+
23
+ // If PY_SSIZE_T_CLEAN is defined, argument parsing functions treat #-specifier
24
+ // to mean Py_ssize_t (defining this to suppress deprecation warning)
25
+ #define PY_SSIZE_T_CLEAN
26
+
27
+ #include <Python.h> // IWYU pragma: export
28
+ #include <datetime.h>
29
+
30
+ // Work around C2528 error
31
+ #ifdef _MSC_VER
32
+ #if _MSC_VER >= 1900
33
+ #undef timezone
34
+ #endif
35
+
36
+ // https://bugs.python.org/issue36020
37
+ // TODO(wjones127): Can remove once we drop support for CPython 3.9
38
+ #ifdef snprintf
39
+ #undef snprintf
40
+ #endif
41
+ #endif
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/platform.h"
21
+
22
+ #include <memory>
23
+
24
+ #include "arrow/python/visibility.h"
25
+
26
+ #include "arrow/sparse_tensor.h"
27
+
28
// Work around ARROW-2317 (C linkage warning from Cython)
extern "C++" {

namespace arrow {

class Array;
class Buffer;
class DataType;
class Field;
class RecordBatch;
class Schema;
class Status;
class Table;
class Tensor;

namespace py {

// Import the pyarrow Cython module's C API.
// NOTE(review): presumably must be called before using any of the
// wrap/unwrap helpers declared below -- confirm against lib_api.h.
// Returns 0 on success, -1 on error.
ARROW_PYTHON_EXPORT int import_pyarrow();

// For each (FUNC_SUFFIX, TYPE_NAME) pair, declares three helpers:
//   is_<suffix>(obj)     -> does obj wrap a TYPE_NAME?
//   unwrap_<suffix>(obj) -> extract the shared_ptr<TYPE_NAME> from obj
//   wrap_<suffix>(ptr)   -> new Python object wrapping ptr
#define DECLARE_WRAP_FUNCTIONS(FUNC_SUFFIX, TYPE_NAME)                         \
  ARROW_PYTHON_EXPORT bool is_##FUNC_SUFFIX(PyObject*);                        \
  ARROW_PYTHON_EXPORT Result<std::shared_ptr<TYPE_NAME>> unwrap_##FUNC_SUFFIX( \
      PyObject*);                                                              \
  ARROW_PYTHON_EXPORT PyObject* wrap_##FUNC_SUFFIX(const std::shared_ptr<TYPE_NAME>&);

DECLARE_WRAP_FUNCTIONS(buffer, Buffer)

DECLARE_WRAP_FUNCTIONS(data_type, DataType)
DECLARE_WRAP_FUNCTIONS(field, Field)
DECLARE_WRAP_FUNCTIONS(schema, Schema)

DECLARE_WRAP_FUNCTIONS(scalar, Scalar)

DECLARE_WRAP_FUNCTIONS(array, Array)
DECLARE_WRAP_FUNCTIONS(chunked_array, ChunkedArray)

DECLARE_WRAP_FUNCTIONS(sparse_coo_tensor, SparseCOOTensor)
DECLARE_WRAP_FUNCTIONS(sparse_csc_matrix, SparseCSCMatrix)
DECLARE_WRAP_FUNCTIONS(sparse_csf_tensor, SparseCSFTensor)
DECLARE_WRAP_FUNCTIONS(sparse_csr_matrix, SparseCSRMatrix)
DECLARE_WRAP_FUNCTIONS(tensor, Tensor)

DECLARE_WRAP_FUNCTIONS(batch, RecordBatch)
DECLARE_WRAP_FUNCTIONS(table, Table)

#undef DECLARE_WRAP_FUNCTIONS

namespace internal {

// If status is ok, return 0.
// If status is not ok, set Python error indicator and return -1.
ARROW_PYTHON_EXPORT int check_status(const Status& status);

// Convert status to a Python exception object. Status must not be ok.
ARROW_PYTHON_EXPORT PyObject* convert_status(const Status& status);

}  // namespace internal
}  // namespace py
}  // namespace arrow

}  // extern "C++"
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // For backward compatibility.
19
+ #include "arrow/python/lib.h"
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_test.h ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <string>
22
+ #include <vector>
23
+
24
+ #include "arrow/status.h"
25
+
26
+ #include "arrow/python/visibility.h"
27
+
28
namespace arrow {
namespace py {
namespace testing {

// A named C++-side test case, exposed so the Python test suite can drive it.
struct TestCase {
  // Test identifier.
  std::string name;
  // Test body; a non-OK Status presumably signals failure -- confirm
  // against the Python-side runner.
  std::function<Status()> func;
};

// Enumerate all available C++ test cases.
ARROW_PYTHON_EXPORT
std::vector<TestCase> GetCppTestCases();

}  // namespace testing
}  // namespace py
}  // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between CPython built-in data structures and Arrow
19
+ // data structures
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/python/platform.h"
24
+
25
+ #include <cstdint>
26
+ #include <memory>
27
+
28
+ #include "arrow/python/visibility.h"
29
+ #include "arrow/type.h"
30
+ #include "arrow/util/macros.h"
31
+
32
+ #include "arrow/python/common.h"
33
+
34
+ namespace arrow {
35
+
36
+ class Array;
37
+ class Status;
38
+
39
+ namespace py {
40
+
41
// Options controlling conversion of Python sequences to Arrow data
// (see ConvertPySequence below).
struct PyConversionOptions {
  PyConversionOptions() = default;

  // NOTE(review): the `pool` argument is accepted but not stored -- the
  // memory pool is passed separately to ConvertPySequence. Confirm this is
  // intentional (kept for signature compatibility).
  PyConversionOptions(const std::shared_ptr<DataType>& type, int64_t size,
                      MemoryPool* pool, bool from_pandas)
      : type(type), size(size), from_pandas(from_pandas) {}

  // Set to null if to be inferred
  std::shared_ptr<DataType> type;

  // Default is -1, which indicates the size should be the same as the input sequence
  int64_t size = -1;

  // NOTE(review): presumably enables pandas null semantics (e.g. NaN
  // treated as null) -- confirm against the converter implementation.
  bool from_pandas = false;

  /// Used to maintain backwards compatibility for
  /// timezone bugs (see ARROW-9528). Should be removed
  /// after Arrow 2.0 release.
  bool ignore_timezone = false;

  // NOTE(review): presumably rejects values not matching `type` instead of
  // coercing -- confirm against the converter implementation.
  bool strict = false;
};
63
+
64
/// \brief Convert sequence (list, generator, NumPy array with dtype object) of
/// Python objects.
/// \param[in] obj the sequence to convert
/// \param[in] mask a NumPy array of true/false values to indicate whether
/// values in the sequence are null (true) or not null (false). This parameter
/// may be null
/// \param[in] options various conversion options
/// \param[in] pool MemoryPool to use for allocations
/// \return the converted data as a ChunkedArray, wrapped in a Result
ARROW_PYTHON_EXPORT
Result<std::shared_ptr<ChunkedArray>> ConvertPySequence(
    PyObject* obj, PyObject* mask, PyConversionOptions options,
    MemoryPool* pool = default_memory_pool());
77
+
78
+ } // namespace py
79
+
80
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/udf.h ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/compute/exec.h"
21
+ #include "arrow/compute/function.h"
22
+ #include "arrow/compute/registry.h"
23
+ #include "arrow/python/platform.h"
24
+ #include "arrow/record_batch.h"
25
+ #include "arrow/util/iterator.h"
26
+
27
+ #include "arrow/python/common.h"
28
+ #include "arrow/python/pyarrow.h"
29
+ #include "arrow/python/visibility.h"
30
+
31
+ namespace arrow {
32
+
33
+ namespace py {
34
+
35
// TODO(ARROW-16041): UDF Options are not exposed to the Python
// users. This feature will be included when extending to provide advanced
// options for the users.
struct ARROW_PYTHON_EXPORT UdfOptions {
  // Name under which the function is registered.
  std::string func_name;
  // Accepted argument count/shape.
  compute::Arity arity;
  compute::FunctionDoc func_doc;
  std::vector<std::shared_ptr<DataType>> input_types;
  std::shared_ptr<DataType> output_type;
};

/// \brief A context passed as the first argument of UDF functions.
struct ARROW_PYTHON_EXPORT UdfContext {
  // Memory pool available to the UDF for allocations.
  MemoryPool* pool;
  // Length of the batch being processed.
  int64_t batch_length;
};

// Callback that invokes `user_function` with the given context and packed
// inputs, returning the resulting Python object.
using UdfWrapperCallback = std::function<PyObject*(
    PyObject* user_function, const UdfContext& context, PyObject* inputs)>;

/// \brief register a Scalar user-defined-function from Python
Status ARROW_PYTHON_EXPORT RegisterScalarFunction(
    PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options,
    compute::FunctionRegistry* registry = NULLPTR);

/// \brief register a Table user-defined-function from Python
Status ARROW_PYTHON_EXPORT RegisterTabularFunction(
    PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options,
    compute::FunctionRegistry* registry = NULLPTR);

/// \brief register an Aggregate user-defined-function from Python
Status ARROW_PYTHON_EXPORT RegisterAggregateFunction(
    PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options,
    compute::FunctionRegistry* registry = NULLPTR);

/// \brief register a Vector user-defined-function from Python
Status ARROW_PYTHON_EXPORT RegisterVectorFunction(
    PyObject* user_function, UdfWrapperCallback wrapper, const UdfOptions& options,
    compute::FunctionRegistry* registry = NULLPTR);

// Invoke a previously registered tabular UDF by name, returning its output
// as a RecordBatchReader.
Result<std::shared_ptr<RecordBatchReader>> ARROW_PYTHON_EXPORT
CallTabularFunction(const std::string& func_name, const std::vector<Datum>& args,
                    compute::FunctionRegistry* registry = NULLPTR);
78
+
79
+ } // namespace py
80
+
81
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #if defined(_WIN32) || defined(__CYGWIN__) // Windows
21
+ #if defined(_MSC_VER)
22
+ #pragma warning(disable : 4251)
23
+ #else
24
+ #pragma GCC diagnostic ignored "-Wattributes"
25
+ #endif
26
+
27
+ #ifdef ARROW_PYTHON_STATIC
28
+ #define ARROW_PYTHON_EXPORT
29
+ #elif defined(ARROW_PYTHON_EXPORTING)
30
+ #define ARROW_PYTHON_EXPORT __declspec(dllexport)
31
+ #else
32
+ #define ARROW_PYTHON_EXPORT __declspec(dllimport)
33
+ #endif
34
+
35
+ #else // Not Windows
36
+ #ifndef ARROW_PYTHON_EXPORT
37
+ #define ARROW_PYTHON_EXPORT __attribute__((visibility("default")))
38
+ #endif
39
+ #endif // Non-Windows
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/result.h"
21
+
22
+ namespace arrow {
23
+
24
+ template <typename InputIterator, typename OutputIterator, typename UnaryOperation>
25
+ Status MaybeTransform(InputIterator first, InputIterator last, OutputIterator out,
26
+ UnaryOperation unary_op) {
27
+ for (; first != last; ++first, (void)++out) {
28
+ ARROW_ASSIGN_OR_RAISE(*out, unary_op(*first));
29
+ }
30
+ return Status::OK();
31
+ }
32
+
33
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+
22
+ #include "arrow/memory_pool.h"
23
+ #include "arrow/type_fwd.h"
24
+ #include "arrow/util/bit_util.h"
25
+
26
+ namespace arrow {
27
+ namespace internal {
28
+
29
// Result of BitmapWordAlign(): splits a bitmap range into an unaligned
// leading area, a word-aligned middle area, and an unaligned trailing area.
struct BitmapWordAlignParams {
  // Number of bits before the first aligned word.
  int64_t leading_bits;
  // Number of bits after the last aligned word.
  int64_t trailing_bits;
  // Bit offset (relative to the original data pointer) of the trailing area.
  int64_t trailing_bit_offset;
  // First byte of the word-aligned area.
  const uint8_t* aligned_start;
  // Size of the aligned area in bits (= aligned_words * word size in bits).
  int64_t aligned_bits;
  // Number of whole aligned words.
  int64_t aligned_words;
};
37
+
38
// Compute parameters for accessing a bitmap using aligned word instructions.
// The returned parameters describe:
// - a leading area of size `leading_bits` before the aligned words
// - a word-aligned area of size `aligned_bits`
// - a trailing area of size `trailing_bits` after the aligned words
template <uint64_t ALIGN_IN_BYTES>
inline BitmapWordAlignParams BitmapWordAlign(const uint8_t* data, int64_t bit_offset,
                                             int64_t length) {
  static_assert(bit_util::IsPowerOf2(ALIGN_IN_BYTES),
                "ALIGN_IN_BYTES should be a positive power of two");
  constexpr uint64_t ALIGN_IN_BITS = ALIGN_IN_BYTES * 8;

  BitmapWordAlignParams p;

  // Compute a "bit address" that we can align up to ALIGN_IN_BITS.
  // We don't care about losing the upper bits since we are only interested in the
  // difference between both addresses.
  const uint64_t bit_addr =
      reinterpret_cast<size_t>(data) * 8 + static_cast<uint64_t>(bit_offset);
  const uint64_t aligned_bit_addr = bit_util::RoundUpToPowerOf2(bit_addr, ALIGN_IN_BITS);

  // The leading area cannot exceed the whole requested range.
  p.leading_bits = std::min<int64_t>(length, aligned_bit_addr - bit_addr);
  // Whole words that fit in what remains after the leading area.
  p.aligned_words = (length - p.leading_bits) / ALIGN_IN_BITS;
  p.aligned_bits = p.aligned_words * ALIGN_IN_BITS;
  // Everything left over after the aligned area is the trailing area.
  p.trailing_bits = length - p.leading_bits - p.aligned_bits;
  p.trailing_bit_offset = bit_offset + p.leading_bits + p.aligned_bits;

  // Byte pointer to the first aligned word.
  p.aligned_start = data + (bit_offset + p.leading_bits) / 8;
  return p;
}
68
+ } // namespace internal
69
+
70
+ namespace util {
71
+
72
+ // Functions to check if the provided Arrow object is aligned by the specified alignment
73
+
74
+ /// \brief Special alignment value to use data type-specific alignment
75
+ ///
76
+ /// If this is passed as the `alignment` in one of the CheckAlignment or EnsureAlignment
77
+ /// functions, then the function will ensure each buffer is suitably aligned
78
+ /// for the data type of the array. For example, given an int32 buffer the values
79
+ /// buffer's address must be a multiple of 4. Given a large_string buffer the offsets
80
+ /// buffer's address must be a multiple of 8.
81
+ constexpr int64_t kValueAlignment = -3;
82
+
83
+ /// \brief Calculate if the buffer's address is a multiple of `alignment`
84
+ ///
85
+ /// If `alignment` is less than or equal to 0 then this method will always return true
86
+ /// \param buffer the buffer to check
87
+ /// \param alignment the alignment (in bytes) to check for
88
+ ARROW_EXPORT bool CheckAlignment(const Buffer& buffer, int64_t alignment);
89
+ /// \brief Calculate if all buffers in the array data are aligned
90
+ ///
91
+ /// This will also check the buffers in the dictionary and any children
92
+ /// \param array the array data to check
93
+ /// \param alignment the alignment (in bytes) to check for
94
+ ARROW_EXPORT bool CheckAlignment(const ArrayData& array, int64_t alignment);
95
+ /// \brief Calculate if all buffers in the array are aligned
96
+ ///
97
+ /// This will also check the buffers in the dictionary and any children
98
+ /// \param array the array to check
99
+ /// \param alignment the alignment (in bytes) to check for
100
+ ARROW_EXPORT bool CheckAlignment(const Array& array, int64_t alignment);
101
+
102
+ // Following functions require an additional boolean vector which stores the
103
+ // alignment check bits of the constituent objects.
104
+ // For example, needs_alignment vector for a ChunkedArray will contain the
105
+ // check bits of the constituent Arrays.
106
+ // The boolean vector check was introduced to minimize the repetitive checks
107
+ // of the constituent objects during the EnsureAlignment function where certain
108
+ // objects can be ignored for further checking if we already know that they are
109
+ // completely aligned.
110
+
111
+ /// \brief Calculate which (if any) chunks in a chunked array are unaligned
112
+ /// \param array the array to check
113
+ /// \param alignment the alignment (in bytes) to check for
114
+ /// \param needs_alignment an output vector that will store the results of the check
115
+ /// it must be set to a valid vector. Extra elements will be added to the end
116
+ /// of the vector for each chunk that is checked. `true` will be stored if
117
+ /// the chunk is unaligned.
118
+ /// \param offset the index of the chunk to start checking
119
+ /// \return true if all chunks (starting at `offset`) are aligned, false otherwise
120
+ ARROW_EXPORT bool CheckAlignment(const ChunkedArray& array, int64_t alignment,
121
+ std::vector<bool>* needs_alignment, int offset = 0);
122
+
123
+ /// \brief calculate which (if any) columns in a record batch are unaligned
124
+ /// \param batch the batch to check
125
+ /// \param alignment the alignment (in bytes) to check for
126
+ /// \param needs_alignment an output vector that will store the results of the
127
+ /// check. It must be set to a valid vector. Extra elements will be added
128
+ /// to the end of the vector for each column that is checked. `true` will be
129
+ /// stored if the column is unaligned.
130
+ ARROW_EXPORT bool CheckAlignment(const RecordBatch& batch, int64_t alignment,
131
+ std::vector<bool>* needs_alignment);
132
+
133
+ /// \brief calculate which (if any) columns in a table are unaligned
134
+ /// \param table the table to check
135
+ /// \param alignment the alignment (in bytes) to check for
136
+ /// \param needs_alignment an output vector that will store the results of the
137
+ /// check. It must be set to a valid vector. Extra elements will be added
138
+ /// to the end of the vector for each column that is checked. `true` will be
139
+ /// stored if the column is unaligned.
140
+ ARROW_EXPORT bool CheckAlignment(const Table& table, int64_t alignment,
141
+ std::vector<bool>* needs_alignment);
142
+
143
+ /// \brief return a buffer that has the given alignment and the same data as the input
144
+ /// buffer
145
+ ///
146
+ /// If the input buffer is already aligned then this method will return the input buffer
147
+ /// If the input buffer is not already aligned then this method will allocate a new
148
+ /// buffer. The alignment of the new buffer will have at least
149
+ /// max(kDefaultBufferAlignment, alignment) bytes of alignment.
150
+ ///
151
+ /// \param buffer the buffer to check
152
+ /// \param alignment the alignment (in bytes) to check for
153
+ /// \param memory_pool a memory pool that will be used to allocate a new buffer if the
154
+ /// input buffer is not sufficiently aligned
155
+ ARROW_EXPORT Result<std::shared_ptr<Buffer>> EnsureAlignment(
156
+ std::shared_ptr<Buffer> buffer, int64_t alignment, MemoryPool* memory_pool);
157
+
158
+ /// \brief return an array data where all buffers are aligned by the given alignment
159
+ ///
160
+ /// If any input buffer is already aligned then this method will reuse that same input
161
+ /// buffer.
162
+ ///
163
+ /// \param array_data the array data to check
164
+ /// \param alignment the alignment (in bytes) to check for
165
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
166
+ /// input buffer is not sufficiently aligned
167
+ ARROW_EXPORT Result<std::shared_ptr<ArrayData>> EnsureAlignment(
168
+ std::shared_ptr<ArrayData> array_data, int64_t alignment, MemoryPool* memory_pool);
169
+
170
+ /// \brief return an array where all buffers are aligned by the given alignment
171
+ ///
172
+ /// If any input buffer is already aligned then this method will reuse that same input
173
+ /// buffer.
174
+ ///
175
+ /// \param array the array to check
176
+ /// \param alignment the alignment (in bytes) to check for
177
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
178
+ /// input buffer is not sufficiently aligned
179
+ ARROW_EXPORT Result<std::shared_ptr<Array>> EnsureAlignment(std::shared_ptr<Array> array,
180
+ int64_t alignment,
181
+ MemoryPool* memory_pool);
182
+
183
+ /// \brief return a chunked array where all buffers are aligned by the given alignment
184
+ ///
185
+ /// If any input buffer is already aligned then this method will reuse that same input
186
+ /// buffer.
187
+ ///
188
+ /// \param array the chunked array to check
189
+ /// \param alignment the alignment (in bytes) to check for
190
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
191
+ /// input buffer is not sufficiently aligned
192
+ ARROW_EXPORT Result<std::shared_ptr<ChunkedArray>> EnsureAlignment(
193
+ std::shared_ptr<ChunkedArray> array, int64_t alignment, MemoryPool* memory_pool);
194
+
195
+ /// \brief return a record batch where all buffers are aligned by the given alignment
196
+ ///
197
+ /// If any input buffer is already aligned then this method will reuse that same input
198
+ /// buffer.
199
+ ///
200
+ /// \param batch the batch to check
201
+ /// \param alignment the alignment (in bytes) to check for
202
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
203
+ /// input buffer is not sufficiently aligned
204
+ ARROW_EXPORT Result<std::shared_ptr<RecordBatch>> EnsureAlignment(
205
+ std::shared_ptr<RecordBatch> batch, int64_t alignment, MemoryPool* memory_pool);
206
+
207
+ /// \brief return a table where all buffers are aligned by the given alignment
208
+ ///
209
+ /// If any input buffer is already aligned then this method will reuse that same input
210
+ /// buffer.
211
+ ///
212
+ /// \param table the table to check
213
+ /// \param alignment the alignment (in bytes) to check for
214
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
215
+ /// input buffer is not sufficiently aligned
216
+ ARROW_EXPORT Result<std::shared_ptr<Table>> EnsureAlignment(std::shared_ptr<Table> table,
217
+ int64_t alignment,
218
+ MemoryPool* memory_pool);
219
+
220
+ } // namespace util
221
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h ADDED
@@ -0,0 +1,2058 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cassert>
22
+ #include <cstring>
23
+ #include <deque>
24
+ #include <limits>
25
+ #include <optional>
26
+ #include <queue>
27
+
28
+ #include "arrow/util/async_generator_fwd.h"
29
+ #include "arrow/util/async_util.h"
30
+ #include "arrow/util/functional.h"
31
+ #include "arrow/util/future.h"
32
+ #include "arrow/util/io_util.h"
33
+ #include "arrow/util/iterator.h"
34
+ #include "arrow/util/mutex.h"
35
+ #include "arrow/util/queue.h"
36
+ #include "arrow/util/thread_pool.h"
37
+
38
+ namespace arrow {
39
+
40
+ // The methods in this file create, modify, and utilize AsyncGenerator which is an
41
+ // iterator of futures. This allows an asynchronous source (like file input) to be run
42
+ // through a pipeline in the same way that iterators can be used to create pipelined
43
+ // workflows.
44
+ //
45
+ // In order to support pipeline parallelism we introduce the concept of asynchronous
46
+ // reentrancy. This is different than synchronous reentrancy. With synchronous code a
47
+ // function is reentrant if the function can be called again while a previous call to that
48
+ // function is still running. Unless otherwise specified none of these generators are
49
+ // synchronously reentrant. Care should be taken to avoid calling them in such a way (and
50
+ // the utilities Visit/Collect/Await take care to do this).
51
+ //
52
+ // Asynchronous reentrancy on the other hand means the function is called again before the
53
+ // future returned by the function is marked finished (but after the call to get the
54
+ // future returns). Some of these generators are async-reentrant while others (e.g.
55
+ // those that depend on ordered processing like decompression) are not. Read the MakeXYZ
56
+ // function comments to determine which generators support async reentrancy.
57
+ //
58
+ // Note: Generators that are not asynchronously reentrant can still support readahead
59
+ // (\see MakeSerialReadaheadGenerator).
60
+ //
61
+ // Readahead operators, and some other operators, may introduce queueing. Any operators
62
+ // that introduce buffering should detail the amount of buffering they introduce in their
63
+ // MakeXYZ function comments.
64
+ //
65
+ // A generator should always be fully consumed before it is destroyed.
66
+ // A generator should not mark a future complete with an error status or a terminal value
67
+ // until all outstanding futures have completed. Generators that spawn multiple
68
+ // concurrent futures may need to hold onto an error while other concurrent futures wrap
69
+ // up.
70
+ template <typename T>
71
+ struct IterationTraits<AsyncGenerator<T>> {
72
+ /// \brief by default when iterating through a sequence of AsyncGenerator<T>,
73
+ /// an empty function indicates the end of iteration.
74
+ static AsyncGenerator<T> End() { return AsyncGenerator<T>(); }
75
+
76
+ static bool IsEnd(const AsyncGenerator<T>& val) { return !val; }
77
+ };
78
+
79
+ template <typename T>
80
+ Future<T> AsyncGeneratorEnd() {
81
+ return Future<T>::MakeFinished(IterationTraits<T>::End());
82
+ }
83
+
84
+ /// returning a future that completes when all have been visited
85
+ template <typename T, typename Visitor>
86
+ Future<> VisitAsyncGenerator(AsyncGenerator<T> generator, Visitor visitor) {
87
+ struct LoopBody {
88
+ struct Callback {
89
+ Result<ControlFlow<>> operator()(const T& next) {
90
+ if (IsIterationEnd(next)) {
91
+ return Break();
92
+ } else {
93
+ auto visited = visitor(next);
94
+ if (visited.ok()) {
95
+ return Continue();
96
+ } else {
97
+ return visited;
98
+ }
99
+ }
100
+ }
101
+
102
+ Visitor visitor;
103
+ };
104
+
105
+ Future<ControlFlow<>> operator()() {
106
+ Callback callback{visitor};
107
+ auto next = generator();
108
+ return next.Then(std::move(callback));
109
+ }
110
+
111
+ AsyncGenerator<T> generator;
112
+ Visitor visitor;
113
+ };
114
+
115
+ return Loop(LoopBody{std::move(generator), std::move(visitor)});
116
+ }
117
+
118
+ /// \brief Wait for an async generator to complete, discarding results.
119
+ template <typename T>
120
+ Future<> DiscardAllFromAsyncGenerator(AsyncGenerator<T> generator) {
121
+ std::function<Status(T)> visitor = [](const T&) { return Status::OK(); };
122
+ return VisitAsyncGenerator(generator, visitor);
123
+ }
124
+
125
+ /// \brief Collect the results of an async generator into a vector
126
+ template <typename T>
127
+ Future<std::vector<T>> CollectAsyncGenerator(AsyncGenerator<T> generator) {
128
+ auto vec = std::make_shared<std::vector<T>>();
129
+ auto loop_body = [generator = std::move(generator),
130
+ vec = std::move(vec)]() -> Future<ControlFlow<std::vector<T>>> {
131
+ auto next = generator();
132
+ return next.Then([vec](const T& result) -> Result<ControlFlow<std::vector<T>>> {
133
+ if (IsIterationEnd(result)) {
134
+ return Break(*vec);
135
+ } else {
136
+ vec->push_back(result);
137
+ return Continue();
138
+ }
139
+ });
140
+ };
141
+ return Loop(std::move(loop_body));
142
+ }
143
+
144
+ /// \see MakeMappedGenerator
145
+ template <typename T, typename V>
146
+ class MappingGenerator {
147
+ public:
148
+ MappingGenerator(AsyncGenerator<T> source, std::function<Future<V>(const T&)> map)
149
+ : state_(std::make_shared<State>(std::move(source), std::move(map))) {}
150
+
151
+ Future<V> operator()() {
152
+ auto future = Future<V>::Make();
153
+ bool should_trigger;
154
+ {
155
+ auto guard = state_->mutex.Lock();
156
+ if (state_->finished) {
157
+ return AsyncGeneratorEnd<V>();
158
+ }
159
+ should_trigger = state_->waiting_jobs.empty();
160
+ state_->waiting_jobs.push_back(future);
161
+ }
162
+ if (should_trigger) {
163
+ state_->source().AddCallback(Callback{state_});
164
+ }
165
+ return future;
166
+ }
167
+
168
+ private:
169
+ struct State {
170
+ State(AsyncGenerator<T> source, std::function<Future<V>(const T&)> map)
171
+ : source(std::move(source)),
172
+ map(std::move(map)),
173
+ waiting_jobs(),
174
+ mutex(),
175
+ finished(false) {}
176
+
177
+ void Purge() {
178
+ // This might be called by an original callback (if the source iterator fails or
179
+ // ends) or by a mapped callback (if the map function fails or ends prematurely).
180
+ // Either way it should only be called once and after finished is set so there is no
181
+ // need to guard access to `waiting_jobs`.
182
+ while (!waiting_jobs.empty()) {
183
+ waiting_jobs.front().MarkFinished(IterationTraits<V>::End());
184
+ waiting_jobs.pop_front();
185
+ }
186
+ }
187
+
188
+ AsyncGenerator<T> source;
189
+ std::function<Future<V>(const T&)> map;
190
+ std::deque<Future<V>> waiting_jobs;
191
+ util::Mutex mutex;
192
+ bool finished;
193
+ };
194
+
195
+ struct Callback;
196
+
197
+ struct MappedCallback {
198
+ void operator()(const Result<V>& maybe_next) {
199
+ bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next);
200
+ bool should_purge = false;
201
+ if (end) {
202
+ {
203
+ auto guard = state->mutex.Lock();
204
+ should_purge = !state->finished;
205
+ state->finished = true;
206
+ }
207
+ }
208
+ sink.MarkFinished(maybe_next);
209
+ if (should_purge) {
210
+ state->Purge();
211
+ }
212
+ }
213
+ std::shared_ptr<State> state;
214
+ Future<V> sink;
215
+ };
216
+
217
+ struct Callback {
218
+ void operator()(const Result<T>& maybe_next) {
219
+ Future<V> sink;
220
+ bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next);
221
+ bool should_purge = false;
222
+ bool should_trigger;
223
+ {
224
+ auto guard = state->mutex.Lock();
225
+ // A MappedCallback may have purged or be purging the queue;
226
+ // we shouldn't do anything here.
227
+ if (state->finished) return;
228
+ if (end) {
229
+ should_purge = !state->finished;
230
+ state->finished = true;
231
+ }
232
+ sink = state->waiting_jobs.front();
233
+ state->waiting_jobs.pop_front();
234
+ should_trigger = !end && !state->waiting_jobs.empty();
235
+ }
236
+ if (should_purge) {
237
+ state->Purge();
238
+ }
239
+ if (should_trigger) {
240
+ state->source().AddCallback(Callback{state});
241
+ }
242
+ if (maybe_next.ok()) {
243
+ const T& val = maybe_next.ValueUnsafe();
244
+ if (IsIterationEnd(val)) {
245
+ sink.MarkFinished(IterationTraits<V>::End());
246
+ } else {
247
+ Future<V> mapped_fut = state->map(val);
248
+ mapped_fut.AddCallback(MappedCallback{std::move(state), std::move(sink)});
249
+ }
250
+ } else {
251
+ sink.MarkFinished(maybe_next.status());
252
+ }
253
+ }
254
+
255
+ std::shared_ptr<State> state;
256
+ };
257
+
258
+ std::shared_ptr<State> state_;
259
+ };
260
+
261
+ /// \brief Create a generator that will apply the map function to each element of
262
+ /// source. The map function is not called on the end token.
263
+ ///
264
+ /// Note: This function makes a copy of `map` for each item
265
+ /// Note: Errors returned from the `map` function will be propagated
266
+ ///
267
+ /// If the source generator is async-reentrant then this generator will be also
268
+ template <typename T, typename MapFn,
269
+ typename Mapped = detail::result_of_t<MapFn(const T&)>,
270
+ typename V = typename EnsureFuture<Mapped>::type::ValueType>
271
+ AsyncGenerator<V> MakeMappedGenerator(AsyncGenerator<T> source_generator, MapFn map) {
272
+ auto map_callback = [map = std::move(map)](const T& val) mutable -> Future<V> {
273
+ return ToFuture(map(val));
274
+ };
275
+ return MappingGenerator<T, V>(std::move(source_generator), std::move(map_callback));
276
+ }
277
+
278
+ /// \brief Create a generator that will apply the map function to
279
+ /// each element of source. The map function is not called on the end
280
+ /// token. The result of the map function should be another
281
+ /// generator; all these generators will then be flattened to produce
282
+ /// a single stream of items.
283
+ ///
284
+ /// Note: This function makes a copy of `map` for each item
285
+ /// Note: Errors returned from the `map` function will be propagated
286
+ ///
287
+ /// If the source generator is async-reentrant then this generator will be also
288
+ template <typename T, typename MapFn,
289
+ typename Mapped = detail::result_of_t<MapFn(const T&)>,
290
+ typename V = typename EnsureFuture<Mapped>::type::ValueType>
291
+ AsyncGenerator<T> MakeFlatMappedGenerator(AsyncGenerator<T> source_generator, MapFn map) {
292
+ return MakeConcatenatedGenerator(
293
+ MakeMappedGenerator(std::move(source_generator), std::move(map)));
294
+ }
295
+
296
+ /// \see MakeSequencingGenerator
297
+ template <typename T, typename ComesAfter, typename IsNext>
298
+ class SequencingGenerator {
299
+ public:
300
+ SequencingGenerator(AsyncGenerator<T> source, ComesAfter compare, IsNext is_next,
301
+ T initial_value)
302
+ : state_(std::make_shared<State>(std::move(source), std::move(compare),
303
+ std::move(is_next), std::move(initial_value))) {}
304
+
305
+ Future<T> operator()() {
306
+ {
307
+ auto guard = state_->mutex.Lock();
308
+ // We can send a result immediately if the top of the queue is either an
309
+ // error or the next item
310
+ if (!state_->queue.empty() &&
311
+ (!state_->queue.top().ok() ||
312
+ state_->is_next(state_->previous_value, *state_->queue.top()))) {
313
+ auto result = std::move(state_->queue.top());
314
+ if (result.ok()) {
315
+ state_->previous_value = *result;
316
+ }
317
+ state_->queue.pop();
318
+ return Future<T>::MakeFinished(result);
319
+ }
320
+ if (state_->finished) {
321
+ return AsyncGeneratorEnd<T>();
322
+ }
323
+ // The next item is not in the queue so we will need to wait
324
+ auto new_waiting_fut = Future<T>::Make();
325
+ state_->waiting_future = new_waiting_fut;
326
+ guard.Unlock();
327
+ state_->source().AddCallback(Callback{state_});
328
+ return new_waiting_fut;
329
+ }
330
+ }
331
+
332
+ private:
333
+ struct WrappedComesAfter {
334
+ bool operator()(const Result<T>& left, const Result<T>& right) {
335
+ if (!left.ok() || !right.ok()) {
336
+ // Should never happen
337
+ return false;
338
+ }
339
+ return compare(*left, *right);
340
+ }
341
+ ComesAfter compare;
342
+ };
343
+
344
+ struct State {
345
+ State(AsyncGenerator<T> source, ComesAfter compare, IsNext is_next, T initial_value)
346
+ : source(std::move(source)),
347
+ is_next(std::move(is_next)),
348
+ previous_value(std::move(initial_value)),
349
+ waiting_future(),
350
+ queue(WrappedComesAfter{compare}),
351
+ finished(false),
352
+ mutex() {}
353
+
354
+ AsyncGenerator<T> source;
355
+ IsNext is_next;
356
+ T previous_value;
357
+ Future<T> waiting_future;
358
+ std::priority_queue<Result<T>, std::vector<Result<T>>, WrappedComesAfter> queue;
359
+ bool finished;
360
+ util::Mutex mutex;
361
+ };
362
+
363
+ class Callback {
364
+ public:
365
+ explicit Callback(std::shared_ptr<State> state) : state_(std::move(state)) {}
366
+
367
+ void operator()(const Result<T> result) {
368
+ Future<T> to_deliver;
369
+ bool finished;
370
+ {
371
+ auto guard = state_->mutex.Lock();
372
+ bool ready_to_deliver = false;
373
+ if (!result.ok()) {
374
+ // Clear any cached results
375
+ while (!state_->queue.empty()) {
376
+ state_->queue.pop();
377
+ }
378
+ ready_to_deliver = true;
379
+ state_->finished = true;
380
+ } else if (IsIterationEnd<T>(result.ValueUnsafe())) {
381
+ ready_to_deliver = state_->queue.empty();
382
+ state_->finished = true;
383
+ } else {
384
+ ready_to_deliver = state_->is_next(state_->previous_value, *result);
385
+ }
386
+
387
+ if (ready_to_deliver && state_->waiting_future.is_valid()) {
388
+ to_deliver = state_->waiting_future;
389
+ if (result.ok()) {
390
+ state_->previous_value = *result;
391
+ }
392
+ } else {
393
+ state_->queue.push(result);
394
+ }
395
+ // Capture state_->finished so we can access it outside the mutex
396
+ finished = state_->finished;
397
+ }
398
+ // Must deliver result outside of the mutex
399
+ if (to_deliver.is_valid()) {
400
+ to_deliver.MarkFinished(result);
401
+ } else {
402
+ // Otherwise, if we didn't get the next item (or a terminal item), we
403
+ // need to keep looking
404
+ if (!finished) {
405
+ state_->source().AddCallback(Callback{state_});
406
+ }
407
+ }
408
+ }
409
+
410
+ private:
411
+ const std::shared_ptr<State> state_;
412
+ };
413
+
414
+ const std::shared_ptr<State> state_;
415
+ };
416
+
417
+ /// \brief Buffer an AsyncGenerator to return values in sequence order ComesAfter
418
+ /// and IsNext determine the sequence order.
419
+ ///
420
+ /// ComesAfter should be a BinaryPredicate that only returns true if a comes after b
421
+ ///
422
+ /// IsNext should be a BinaryPredicate that returns true, given `a` and `b`, only if
423
+ /// `b` follows immediately after `a`. It should return true given `initial_value` and
424
+ /// `b` if `b` is the first item in the sequence.
425
+ ///
426
+ /// This operator will queue unboundedly while waiting for the next item. It is intended
427
+ /// for jittery sources that might scatter an ordered sequence. It is NOT intended to
428
+ /// sort. Using it to try and sort could result in excessive RAM usage. This generator
429
+ /// will queue up to N blocks where N is the max "out of order"ness of the source.
430
+ ///
431
+ /// For example, if the source is 1,6,2,5,4,3 it will queue 3 blocks because 3 is 3
432
+ /// blocks beyond where it belongs.
433
+ ///
434
+ /// This generator is not async-reentrant but it consists only of a simple log(n)
435
+ /// insertion into a priority queue.
436
+ template <typename T, typename ComesAfter, typename IsNext>
437
+ AsyncGenerator<T> MakeSequencingGenerator(AsyncGenerator<T> source_generator,
438
+ ComesAfter compare, IsNext is_next,
439
+ T initial_value) {
440
+ return SequencingGenerator<T, ComesAfter, IsNext>(
441
+ std::move(source_generator), std::move(compare), std::move(is_next),
442
+ std::move(initial_value));
443
+ }
444
+
445
+ /// \see MakeTransformedGenerator
446
+ template <typename T, typename V>
447
+ class TransformingGenerator {
448
+ // The transforming generator state will be referenced as an async generator but will
449
+ // also be referenced via callback to various futures. If the async generator owner
450
+ // moves it around we need the state to be consistent for future callbacks.
451
+ struct TransformingGeneratorState
452
+ : std::enable_shared_from_this<TransformingGeneratorState> {
453
+ TransformingGeneratorState(AsyncGenerator<T> generator, Transformer<T, V> transformer)
454
+ : generator_(std::move(generator)),
455
+ transformer_(std::move(transformer)),
456
+ last_value_(),
457
+ finished_() {}
458
+
459
+ Future<V> operator()() {
460
+ while (true) {
461
+ auto maybe_next_result = Pump();
462
+ if (!maybe_next_result.ok()) {
463
+ return Future<V>::MakeFinished(maybe_next_result.status());
464
+ }
465
+ auto maybe_next = std::move(maybe_next_result).ValueUnsafe();
466
+ if (maybe_next.has_value()) {
467
+ return Future<V>::MakeFinished(*std::move(maybe_next));
468
+ }
469
+
470
+ auto next_fut = generator_();
471
+ // If finished already, process results immediately inside the loop to avoid
472
+ // stack overflow
473
+ if (next_fut.is_finished()) {
474
+ auto next_result = next_fut.result();
475
+ if (next_result.ok()) {
476
+ last_value_ = *next_result;
477
+ } else {
478
+ return Future<V>::MakeFinished(next_result.status());
479
+ }
480
+ // Otherwise, if not finished immediately, add callback to process results
481
+ } else {
482
+ auto self = this->shared_from_this();
483
+ return next_fut.Then([self](const T& next_result) {
484
+ self->last_value_ = next_result;
485
+ return (*self)();
486
+ });
487
+ }
488
+ }
489
+ }
490
+
491
+ // See comment on TransformingIterator::Pump
492
+ Result<std::optional<V>> Pump() {
493
+ if (!finished_ && last_value_.has_value()) {
494
+ ARROW_ASSIGN_OR_RAISE(TransformFlow<V> next, transformer_(*last_value_));
495
+ if (next.ReadyForNext()) {
496
+ if (IsIterationEnd(*last_value_)) {
497
+ finished_ = true;
498
+ }
499
+ last_value_.reset();
500
+ }
501
+ if (next.Finished()) {
502
+ finished_ = true;
503
+ }
504
+ if (next.HasValue()) {
505
+ return next.Value();
506
+ }
507
+ }
508
+ if (finished_) {
509
+ return IterationTraits<V>::End();
510
+ }
511
+ return std::nullopt;
512
+ }
513
+
514
+ AsyncGenerator<T> generator_;
515
+ Transformer<T, V> transformer_;
516
+ std::optional<T> last_value_;
517
+ bool finished_;
518
+ };
519
+
520
+ public:
521
+ explicit TransformingGenerator(AsyncGenerator<T> generator,
522
+ Transformer<T, V> transformer)
523
+ : state_(std::make_shared<TransformingGeneratorState>(std::move(generator),
524
+ std::move(transformer))) {}
525
+
526
+ Future<V> operator()() { return (*state_)(); }
527
+
528
+ protected:
529
+ std::shared_ptr<TransformingGeneratorState> state_;
530
+ };
531
+
532
+ /// \brief Transform an async generator using a transformer function returning a new
533
+ /// AsyncGenerator
534
+ ///
535
+ /// The transform function here behaves exactly the same as the transform function in
536
+ /// MakeTransformedIterator and you can safely use the same transform function to
537
+ /// transform both synchronous and asynchronous streams.
538
+ ///
539
+ /// This generator is not async-reentrant
540
+ ///
541
+ /// This generator may queue up to 1 instance of T but will not delay
542
+ template <typename T, typename V>
543
+ AsyncGenerator<V> MakeTransformedGenerator(AsyncGenerator<T> generator,
544
+ Transformer<T, V> transformer) {
545
+ return TransformingGenerator<T, V>(generator, transformer);
546
+ }
547
+
548
+ /// \see MakeSerialReadaheadGenerator
549
+ template <typename T>
550
+ class SerialReadaheadGenerator {
551
+ public:
552
+ SerialReadaheadGenerator(AsyncGenerator<T> source_generator, int max_readahead)
553
+ : state_(std::make_shared<State>(std::move(source_generator), max_readahead)) {}
554
+
555
+ Future<T> operator()() {
556
+ if (state_->first_) {
557
+ // Lazy generator, need to wait for the first ask to prime the pump
558
+ state_->first_ = false;
559
+ auto next = state_->source_();
560
+ return next.Then(Callback{state_}, ErrCallback{state_});
561
+ }
562
+
563
+ // This generator is not async-reentrant. We won't be called until the last
564
+ // future finished so we know there is something in the queue
565
+ auto finished = state_->finished_.load();
566
+ if (finished && state_->readahead_queue_.IsEmpty()) {
567
+ return AsyncGeneratorEnd<T>();
568
+ }
569
+
570
+ std::shared_ptr<Future<T>> next;
571
+ if (!state_->readahead_queue_.Read(next)) {
572
+ return Status::UnknownError("Could not read from readahead_queue");
573
+ }
574
+
575
+ auto last_available = state_->spaces_available_.fetch_add(1);
576
+ if (last_available == 0 && !finished) {
577
+ // Reader idled out, we need to restart it
578
+ ARROW_RETURN_NOT_OK(state_->Pump(state_));
579
+ }
580
+ return *next;
581
+ }
582
+
583
+ private:
584
+ struct State {
585
+ State(AsyncGenerator<T> source, int max_readahead)
586
+ : first_(true),
587
+ source_(std::move(source)),
588
+ finished_(false),
589
+ // There is one extra "space" for the in-flight request
590
+ spaces_available_(max_readahead + 1),
591
+ // The SPSC queue has size-1 "usable" slots so we need to overallocate 1
592
+ readahead_queue_(max_readahead + 1) {}
593
+
594
+ Status Pump(const std::shared_ptr<State>& self) {
595
+ // Can't do readahead_queue.write(source().Then(...)) because then the
596
+ // callback might run immediately and add itself to the queue before this gets added
597
+ // to the queue messing up the order.
598
+ auto next_slot = std::make_shared<Future<T>>();
599
+ auto written = readahead_queue_.Write(next_slot);
600
+ if (!written) {
601
+ return Status::UnknownError("Could not write to readahead_queue");
602
+ }
603
+ // If this Pump is being called from a callback it is possible for the source to
604
+ // poll and read from the queue between the Write and this spot where we fill the
605
+ // value in. However, it is not possible for the future to read this value we are
606
+ // writing. That is because this callback (the callback for future X) must be
607
+ // finished before future X is marked complete and this source is not pulled
608
+ // reentrantly so it will not poll for future X+1 until this callback has completed.
609
+ *next_slot = source_().Then(Callback{self}, ErrCallback{self});
610
+ return Status::OK();
611
+ }
612
+
613
+ // Only accessed by the consumer end
614
+ bool first_;
615
+ // Accessed by both threads
616
+ AsyncGenerator<T> source_;
617
+ std::atomic<bool> finished_;
618
+ // The queue has a size but it is not atomic. We keep track of how many spaces are
619
+ // left in the queue here so we know if we've just written the last value and we need
620
+ // to stop reading ahead or if we've just read from a full queue and we need to
621
+ // restart reading ahead
622
+ std::atomic<uint32_t> spaces_available_;
623
+ // Needs to be a queue of shared_ptr and not Future because we set the value of the
624
+ // future after we add it to the queue
625
+ util::SpscQueue<std::shared_ptr<Future<T>>> readahead_queue_;
626
+ };
627
+
628
+ struct Callback {
629
+ Result<T> operator()(const T& next) {
630
+ if (IsIterationEnd(next)) {
631
+ state_->finished_.store(true);
632
+ return next;
633
+ }
634
+ auto last_available = state_->spaces_available_.fetch_sub(1);
635
+ if (last_available > 1) {
636
+ ARROW_RETURN_NOT_OK(state_->Pump(state_));
637
+ }
638
+ return next;
639
+ }
640
+
641
+ std::shared_ptr<State> state_;
642
+ };
643
+
644
+ struct ErrCallback {
645
+ Result<T> operator()(const Status& st) {
646
+ state_->finished_.store(true);
647
+ return st;
648
+ }
649
+
650
+ std::shared_ptr<State> state_;
651
+ };
652
+
653
+ std::shared_ptr<State> state_;
654
+ };
655
+
656
+ /// \see MakeFromFuture
657
+ template <typename T>
658
+ class FutureFirstGenerator {
659
+ public:
660
+ explicit FutureFirstGenerator(Future<AsyncGenerator<T>> future)
661
+ : state_(std::make_shared<State>(std::move(future))) {}
662
+
663
+ Future<T> operator()() {
664
+ if (state_->source_) {
665
+ return state_->source_();
666
+ } else {
667
+ auto state = state_;
668
+ return state_->future_.Then([state](const AsyncGenerator<T>& source) {
669
+ state->source_ = source;
670
+ return state->source_();
671
+ });
672
+ }
673
+ }
674
+
675
+ private:
676
+ struct State {
677
+ explicit State(Future<AsyncGenerator<T>> future) : future_(future), source_() {}
678
+
679
+ Future<AsyncGenerator<T>> future_;
680
+ AsyncGenerator<T> source_;
681
+ };
682
+
683
+ std::shared_ptr<State> state_;
684
+ };
685
+
686
+ /// \brief Transform a Future<AsyncGenerator<T>> into an AsyncGenerator<T>
687
+ /// that waits for the future to complete as part of the first item.
688
+ ///
689
+ /// This generator is not async-reentrant (even if the generator yielded by future is)
690
+ ///
691
+ /// This generator does not queue
692
+ template <typename T>
693
+ AsyncGenerator<T> MakeFromFuture(Future<AsyncGenerator<T>> future) {
694
+ return FutureFirstGenerator<T>(std::move(future));
695
+ }
696
+
697
+ /// \brief Create a generator that will pull from the source into a queue. Unlike
698
+ /// MakeReadaheadGenerator this will not pull reentrantly from the source.
699
+ ///
700
+ /// The source generator does not need to be async-reentrant
701
+ ///
702
+ /// This generator is not async-reentrant (even if the source is)
703
+ ///
704
+ /// This generator may queue up to max_readahead additional instances of T
705
+ template <typename T>
706
+ AsyncGenerator<T> MakeSerialReadaheadGenerator(AsyncGenerator<T> source_generator,
707
+ int max_readahead) {
708
+ return SerialReadaheadGenerator<T>(std::move(source_generator), max_readahead);
709
+ }
710
+
711
+ /// \brief Create a generator that immediately pulls from the source
712
+ ///
713
+ /// Typical generators do not pull from their source until they themselves
714
+ /// are pulled. This generator does not follow that convention and will call
715
+ /// generator() once before it returns. The returned generator will otherwise
716
+ /// mirror the source.
717
+ ///
718
+ /// This generator forwards async-reentrant pressure to the source
719
+ /// This generator buffers one item (the first result) until it is delivered.
720
+ template <typename T>
721
+ AsyncGenerator<T> MakeAutoStartingGenerator(AsyncGenerator<T> generator) {
722
+ struct AutostartGenerator {
723
+ Future<T> operator()() {
724
+ if (first_future->is_valid()) {
725
+ Future<T> result = *first_future;
726
+ *first_future = Future<T>();
727
+ return result;
728
+ }
729
+ return source();
730
+ }
731
+
732
+ std::shared_ptr<Future<T>> first_future;
733
+ AsyncGenerator<T> source;
734
+ };
735
+
736
+ std::shared_ptr<Future<T>> first_future = std::make_shared<Future<T>>(generator());
737
+ return AutostartGenerator{std::move(first_future), std::move(generator)};
738
+ }
739
+
740
/// \see MakeReadaheadGenerator
template <typename T>
class ReadaheadGenerator {
 public:
  /// \param source_generator the generator to pull from; pulled reentrantly, so it
  ///        must be async-reentrant
  /// \param max_readahead number of requests to keep in flight at all times
  ReadaheadGenerator(AsyncGenerator<T> source_generator, int max_readahead)
      : state_(std::make_shared<State>(std::move(source_generator), max_readahead)) {}

  /// Attach bookkeeping to a pulled future: record the end token when it arrives,
  /// decrement the in-flight counter, and (on error) hold the error back until
  /// every in-flight task has settled.
  Future<T> AddMarkFinishedContinuation(Future<T> fut) {
    // Capture the shared state (not `this`) so the continuation stays valid even if
    // this generator instance is copied or destroyed.
    auto state = state_;
    return fut.Then(
        [state](const T& result) -> Future<T> {
          state->MarkFinishedIfDone(result);
          if (state->finished.load()) {
            // The last task to finish (fetch_sub returns the previous value, so
            // 1 means we were the final one) completes final_future.
            if (state->num_running.fetch_sub(1) == 1) {
              state->final_future.MarkFinished();
            }
          } else {
            state->num_running.fetch_sub(1);
          }
          return result;
        },
        [state](const Status& err) -> Future<T> {
          // If there is an error we need to make sure all running
          // tasks finish before we return the error.
          state->finished.store(true);
          if (state->num_running.fetch_sub(1) == 1) {
            state->final_future.MarkFinished();
          }
          return state->final_future.Then([err]() -> Result<T> { return err; });
        });
  }

  /// Pop the oldest in-flight future and issue one replacement request so the
  /// pipeline stays max_readahead deep.
  Future<T> operator()() {
    if (state_->readahead_queue.empty()) {
      // This is the first request, let's pump the underlying queue
      state_->num_running.store(state_->max_readahead);
      for (int i = 0; i < state_->max_readahead; i++) {
        auto next = state_->source_generator();
        auto next_after_check = AddMarkFinishedContinuation(std::move(next));
        state_->readahead_queue.push(std::move(next_after_check));
      }
    }
    // Pop one and add one
    auto result = state_->readahead_queue.front();
    state_->readahead_queue.pop();
    if (state_->finished.load()) {
      // Once the end token (or an error) has been seen we stop pulling the source
      // and pad the queue with terminal items instead.
      state_->readahead_queue.push(AsyncGeneratorEnd<T>());
    } else {
      state_->num_running.fetch_add(1);
      auto back_of_queue = state_->source_generator();
      auto back_of_queue_after_check =
          AddMarkFinishedContinuation(std::move(back_of_queue));
      state_->readahead_queue.push(std::move(back_of_queue_after_check));
    }
    return result;
  }

 private:
  struct State {
    State(AsyncGenerator<T> source_generator, int max_readahead)
        : source_generator(std::move(source_generator)), max_readahead(max_readahead) {}

    // Set `finished` as soon as the source delivers its terminal item.
    void MarkFinishedIfDone(const T& next_result) {
      if (IsIterationEnd(next_result)) {
        finished.store(true);
      }
    }

    AsyncGenerator<T> source_generator;
    int max_readahead;
    // Completed once every in-flight request has settled; errors are chained
    // behind it so they are only surfaced when all work is done.
    Future<> final_future = Future<>::Make();
    // Number of source pulls currently in flight.
    std::atomic<int> num_running{0};
    // True once the end token or an error has been observed.
    std::atomic<bool> finished{false};
    // In-order queue of outstanding (possibly already completed) futures.
    std::queue<Future<T>> readahead_queue;
  };

  std::shared_ptr<State> state_;
};
818
+
819
/// \brief A generator where the producer pushes items on a queue.
///
/// No back-pressure is applied, so this generator is mostly useful when
/// producing the values is neither CPU- nor memory-expensive (e.g. fetching
/// filesystem metadata).
///
/// This generator is not async-reentrant.
template <typename T>
class PushGenerator {
  // State shared between the generator (consumer side) and Producer.
  struct State {
    State() {}

    // Guards all members below.
    util::Mutex mutex;
    // Values pushed by the producer but not yet consumed.
    std::deque<Result<T>> result_q;
    // The consumer's pending future, present only when the consumer pulled
    // while the queue was empty.
    std::optional<Future<T>> consumer_fut;
    // True once the producer called Close().
    bool finished = false;
  };

 public:
  /// Producer API for PushGenerator
  class Producer {
   public:
    explicit Producer(const std::shared_ptr<State>& state) : weak_state_(state) {}

    /// \brief Push a value on the queue
    ///
    /// True is returned if the value was pushed, false if the generator is
    /// already closed or destroyed.  If the latter, it is recommended to stop
    /// producing any further values.
    bool Push(Result<T> result) {
      auto state = weak_state_.lock();
      if (!state) {
        // Generator was destroyed
        return false;
      }
      auto lock = state->mutex.Lock();
      if (state->finished) {
        // Closed early
        return false;
      }
      if (state->consumer_fut.has_value()) {
        // A consumer is already waiting: hand the value straight over.
        auto fut = std::move(state->consumer_fut.value());
        state->consumer_fut.reset();
        lock.Unlock();  // unlock before potentially invoking a callback
        fut.MarkFinished(std::move(result));
      } else {
        state->result_q.push_back(std::move(result));
      }
      return true;
    }

    /// \brief Tell the consumer we have finished producing
    ///
    /// It is allowed to call this and later call Push() again ("early close").
    /// In this case, calls to Push() after the queue is closed are silently
    /// ignored.  This can help implementing non-trivial cancellation cases.
    ///
    /// True is returned on success, false if the generator is already closed
    /// or destroyed.
    bool Close() {
      auto state = weak_state_.lock();
      if (!state) {
        // Generator was destroyed
        return false;
      }
      auto lock = state->mutex.Lock();
      if (state->finished) {
        // Already closed
        return false;
      }
      state->finished = true;
      if (state->consumer_fut.has_value()) {
        // A consumer is blocked waiting: deliver the terminal item now.
        auto fut = std::move(state->consumer_fut.value());
        state->consumer_fut.reset();
        lock.Unlock();  // unlock before potentially invoking a callback
        fut.MarkFinished(IterationTraits<T>::End());
      }
      return true;
    }

    /// Return whether the generator was closed or destroyed.
    bool is_closed() const {
      auto state = weak_state_.lock();
      if (!state) {
        // Generator was destroyed
        return true;
      }
      auto lock = state->mutex.Lock();
      return state->finished;
    }

   private:
    // Weak so that a destroyed generator is detected rather than kept alive.
    const std::weak_ptr<State> weak_state_;
  };

  PushGenerator() : state_(std::make_shared<State>()) {}

  /// Read an item from the queue
  Future<T> operator()() const {
    auto lock = state_->mutex.Lock();
    assert(!state_->consumer_fut.has_value());  // Non-reentrant
    if (!state_->result_q.empty()) {
      // A value is already queued; deliver it immediately.
      auto fut = Future<T>::MakeFinished(std::move(state_->result_q.front()));
      state_->result_q.pop_front();
      return fut;
    }
    if (state_->finished) {
      return AsyncGeneratorEnd<T>();
    }
    // Queue empty and not closed: park a future for the producer to complete.
    auto fut = Future<T>::Make();
    state_->consumer_fut = fut;
    return fut;
  }

  /// \brief Return producer-side interface
  ///
  /// The returned object must be used by the producer to push values on the queue.
  /// Only a single Producer object should be instantiated.
  Producer producer() { return Producer{state_}; }

 private:
  const std::shared_ptr<State> state_;
};
942
+
943
+ /// \brief Create a generator that pulls reentrantly from a source
944
+ /// This generator will pull reentrantly from a source, ensuring that max_readahead
945
+ /// requests are active at any given time.
946
+ ///
947
+ /// The source generator must be async-reentrant
948
+ ///
949
+ /// This generator itself is async-reentrant.
950
+ ///
951
+ /// This generator may queue up to max_readahead instances of T
952
+ template <typename T>
953
+ AsyncGenerator<T> MakeReadaheadGenerator(AsyncGenerator<T> source_generator,
954
+ int max_readahead) {
955
+ return ReadaheadGenerator<T>(std::move(source_generator), max_readahead);
956
+ }
957
+
958
+ /// \brief Creates a generator that will yield finished futures from a vector
959
+ ///
960
+ /// This generator is async-reentrant
961
+ template <typename T>
962
+ AsyncGenerator<T> MakeVectorGenerator(std::vector<T> vec) {
963
+ struct State {
964
+ explicit State(std::vector<T> vec_) : vec(std::move(vec_)), vec_idx(0) {}
965
+
966
+ std::vector<T> vec;
967
+ std::atomic<std::size_t> vec_idx;
968
+ };
969
+
970
+ auto state = std::make_shared<State>(std::move(vec));
971
+ return [state]() {
972
+ auto idx = state->vec_idx.fetch_add(1);
973
+ if (idx >= state->vec.size()) {
974
+ // Eagerly return memory
975
+ state->vec.clear();
976
+ return AsyncGeneratorEnd<T>();
977
+ }
978
+ return Future<T>::MakeFinished(state->vec[idx]);
979
+ };
980
+ }
981
+
982
/// \see MakeMergedGenerator
template <typename T>
class MergedGenerator {
  // Note, the implementation of this class is quite complex at the moment (PRs to
  // simplify are always welcome)
  //
  // Terminology is borrowed from rxjs.  This is a pull based implementation of the
  // mergeAll operator.  The "outer subscription" refers to the async
  // generator that the caller provided when creating this.  The outer subscription
  // yields generators.
  //
  // Each of these generators is then subscribed to (up to max_subscriptions) and these
  // are referred to as "inner subscriptions".
  //
  // As soon as we start we try and establish `max_subscriptions` inner subscriptions. For
  // each inner subscription we will cache up to 1 value.  This means we may have more
  // values than we have been asked for.  In our example, if a caller asks for one record
  // batch we will start scanning `max_subscriptions` different files.  For each file we
  // will only queue up to 1 batch (so a separate readahead is needed on the file if batch
  // readahead is desired).
  //
  // If the caller is slow we may accumulate ready-to-deliver items.  These are stored
  // in `delivered_jobs`.
  //
  // If the caller is very quick we may accumulate requests.  These are stored in
  // `waiting_jobs`.
  //
  // It may be helpful to consider an example, in the scanner the outer subscription
  // is some kind of asynchronous directory listing.  The inner subscription is
  // then a scan on a file yielded by the directory listing.
  //
  // An "outstanding" request is when we have polled either the inner or outer
  // subscription but that future hasn't completed yet.
  //
  // There are three possible "events" that can happen.
  // * A caller could request the next future
  // * An outer callback occurs when the next subscription is ready (e.g. the directory
  //   listing has produced a new file)
  // * An inner callback occurs when one of the inner subscriptions emits a value (e.g.
  //   a file scan emits a record batch)
  //
  // Any time an event happens the logic is broken into two phases.  First, we grab the
  // lock and modify the shared state.  While doing this we figure out what callbacks we
  // will need to execute.  Then, we give up the lock and execute these callbacks.  It is
  // important to execute these callbacks without the lock to avoid deadlock.
 public:
  /// \param source the outer subscription, yielding inner generators
  /// \param max_subscriptions maximum number of inner subscriptions pulled at once
  explicit MergedGenerator(AsyncGenerator<AsyncGenerator<T>> source,
                           int max_subscriptions)
      : state_(std::make_shared<State>(std::move(source), max_subscriptions)) {}

  Future<T> operator()() {
    // A caller has requested a future
    Future<T> waiting_future;
    std::shared_ptr<DeliveredJob> delivered_job;
    bool mark_generator_complete = false;
    {
      auto guard = state_->mutex.Lock();
      if (!state_->delivered_jobs.empty()) {
        // If we have a job sitting around we can deliver it
        delivered_job = std::move(state_->delivered_jobs.front());
        state_->delivered_jobs.pop_front();
        if (state_->IsCompleteUnlocked(guard)) {
          // It's possible this waiting job was the only thing left to handle and
          // we have now completed the generator.
          mark_generator_complete = true;
        } else {
          // Since we had a job sitting around we also had an inner subscription
          // that had paused.  We are going to restart this inner subscription and
          // so there will be a new outstanding request.
          state_->outstanding_requests++;
        }
      } else if (state_->broken ||
                 (!state_->first && state_->num_running_subscriptions == 0)) {
        // If we are broken or exhausted then prepare a terminal item but
        // we won't complete it until we've finished.
        Result<T> end_res = IterationEnd<T>();
        if (!state_->final_error.ok()) {
          // Deliver the stored error exactly once, then revert to end tokens.
          end_res = state_->final_error;
          state_->final_error = Status::OK();
        }
        return state_->all_finished.Then([end_res]() -> Result<T> { return end_res; });
      } else {
        // Otherwise we just queue the request and it will be completed when one of the
        // ongoing inner subscriptions delivers a result
        waiting_future = Future<T>::Make();
        state_->waiting_jobs.push_back(std::make_shared<Future<T>>(waiting_future));
      }
      if (state_->first) {
        // On the first request we are going to try and immediately fill our queue
        // of subscriptions.  We assume we are going to be able to start them all.
        state_->outstanding_requests +=
            static_cast<int>(state_->active_subscriptions.size());
        state_->num_running_subscriptions +=
            static_cast<int>(state_->active_subscriptions.size());
      }
    }
    // If we grabbed a finished item from the delivered_jobs queue then we may need
    // to mark the generator finished or issue a request for a new item to fill in
    // the spot we just vacated.  Notice that we issue that request to the same
    // subscription that delivered it (deliverer).
    if (delivered_job) {
      if (mark_generator_complete) {
        state_->all_finished.MarkFinished();
      } else {
        delivered_job->deliverer().AddCallback(
            InnerCallback(state_, delivered_job->index));
      }
      return std::move(delivered_job->value);
    }
    // On the first call we try and fill up our subscriptions.  It's possible the outer
    // generator only has a few items and we can't fill up to what we were hoping.  In
    // that case we have to bail early.
    if (state_->first) {
      state_->first = false;
      mark_generator_complete = false;
      for (int i = 0; i < static_cast<int>(state_->active_subscriptions.size()); i++) {
        state_->PullSource().AddCallback(
            OuterCallback{state_, static_cast<std::size_t>(i)});
        // If we have to bail early then we need to update the shared state again so
        // we need to reacquire the lock.
        auto guard = state_->mutex.Lock();
        if (state_->source_exhausted) {
          int excess_requests =
              static_cast<int>(state_->active_subscriptions.size()) - i - 1;
          state_->outstanding_requests -= excess_requests;
          state_->num_running_subscriptions -= excess_requests;
          if (excess_requests > 0) {
            // It's possible that we are completing the generator by reducing the number
            // of outstanding requests (e.g. this happens when the outer subscription and
            // all inner subscriptions are synchronous)
            mark_generator_complete = state_->IsCompleteUnlocked(guard);
          }
          break;
        }
      }
      if (mark_generator_complete) {
        state_->MarkFinishedAndPurge();
      }
    }
    return waiting_future;
  }

 private:
  struct DeliveredJob {
    explicit DeliveredJob(AsyncGenerator<T> deliverer_, Result<T> value_,
                          std::size_t index_)
        : deliverer(deliverer_), value(std::move(value_)), index(index_) {}

    // The generator that delivered this result, we will request another item
    // from this generator once the result is delivered
    AsyncGenerator<T> deliverer;
    // The result we received from the generator
    Result<T> value;
    // The index of the generator (in active_subscriptions) that delivered this
    // result.  This is used if we need to replace a finished generator.
    std::size_t index;
  };

  struct State {
    State(AsyncGenerator<AsyncGenerator<T>> source, int max_subscriptions)
        : source(std::move(source)),
          active_subscriptions(max_subscriptions),
          delivered_jobs(),
          waiting_jobs(),
          mutex(),
          first(true),
          broken(false),
          source_exhausted(false),
          outstanding_requests(0),
          num_running_subscriptions(0),
          final_error(Status::OK()) {}

    Future<AsyncGenerator<T>> PullSource() {
      // Need to guard access to source() so we don't pull sync-reentrantly which
      // is never valid.
      auto lock = mutex.Lock();
      return source();
    }

    void SignalErrorUnlocked(const util::Mutex::Guard& guard) {
      broken = true;
      // Empty any results that have arrived but not asked for.
      while (!delivered_jobs.empty()) {
        delivered_jobs.pop_front();
      }
    }

    // This function is called outside the mutex but it will only ever be
    // called once
    void MarkFinishedAndPurge() {
      all_finished.MarkFinished();
      while (!waiting_jobs.empty()) {
        waiting_jobs.front()->MarkFinished(IterationEnd<T>());
        waiting_jobs.pop_front();
      }
    }

    // This is called outside the mutex but it is only ever called
    // once and Future<>::AddCallback is thread-safe
    void MarkFinalError(const Status& err, Future<T> maybe_sink) {
      if (maybe_sink.is_valid()) {
        // Someone is waiting for this error so lets mark it complete when
        // all the work is done
        all_finished.AddCallback([maybe_sink, err](const Status& status) mutable {
          maybe_sink.MarkFinished(err);
        });
      } else {
        // No one is waiting for this error right now so it will be delivered
        // next.
        final_error = err;
      }
    }

    bool IsCompleteUnlocked(const util::Mutex::Guard& guard) {
      return outstanding_requests == 0 &&
             (broken || (source_exhausted && num_running_subscriptions == 0 &&
                         delivered_jobs.empty()));
    }

    // Decrement outstanding_requests and report whether that completed the
    // generator.  Must be called with the mutex held.
    bool MarkTaskFinishedUnlocked(const util::Mutex::Guard& guard) {
      --outstanding_requests;
      return IsCompleteUnlocked(guard);
    }

    // The outer generator.  Each item we pull from this will be its own generator
    // and become an inner subscription
    AsyncGenerator<AsyncGenerator<T>> source;
    // active_subscriptions and delivered_jobs will be bounded by max_subscriptions
    std::vector<AsyncGenerator<T>> active_subscriptions;
    // Results delivered by the inner subscriptions that weren't yet asked for by the
    // caller
    std::deque<std::shared_ptr<DeliveredJob>> delivered_jobs;
    // waiting_jobs is unbounded, reentrant pulls (e.g. AddReadahead) will provide the
    // backpressure
    std::deque<std::shared_ptr<Future<T>>> waiting_jobs;
    // A future that will be marked complete when the terminal item has arrived and all
    // outstanding futures have completed.  It is used to hold off emission of an error
    // until all outstanding work is done.
    Future<> all_finished = Future<>::Make();
    util::Mutex mutex;
    // A flag cleared when the caller firsts asks for a future.  Used to start polling.
    bool first;
    // A flag set when an error arrives, prevents us from issuing new requests.
    bool broken;
    // A flag set when the outer subscription has been exhausted.  Prevents us from
    // pulling it further (even though it would be generally harmless) and lets us know we
    // are finishing up.
    bool source_exhausted;
    // The number of futures that we have requested from either the outer or inner
    // subscriptions that have not yet completed.  We cannot mark all_finished until this
    // reaches 0.  This will never be greater than max_subscriptions
    int outstanding_requests;
    // The number of running subscriptions.  We ramp this up to `max_subscriptions` as
    // soon as the first item is requested and then it stays at that level (each exhausted
    // inner subscription is replaced by a new inner subscription) until the outer
    // subscription is exhausted at which point this descends to 0 (and source_exhausted)
    // is then set to true.
    int num_running_subscriptions;
    // If an error arrives, and the caller hasn't asked for that item, we store the error
    // here.  It is analagous to delivered_jobs but for errors instead of finished
    // results.
    Status final_error;
  };

  struct InnerCallback {
    InnerCallback(std::shared_ptr<State> state, std::size_t index, bool recursive = false)
        : state(std::move(state)), index(index), recursive(recursive) {}

    void operator()(const Result<T>& maybe_next_ref) {
      // An item has been delivered by one of the inner subscriptions
      Future<T> next_fut;
      const Result<T>* maybe_next = &maybe_next_ref;

      // When an item is delivered (and the caller has asked for it) we grab the
      // next item from the inner subscription.  To avoid this behavior leading to an
      // infinite loop (this can happen if the caller's callback asks for the next item)
      // we use a while loop.
      while (true) {
        Future<T> sink;
        bool sub_finished = maybe_next->ok() && IsIterationEnd(**maybe_next);
        bool pull_next_sub = false;
        bool was_broken = false;
        bool should_mark_gen_complete = false;
        bool should_mark_final_error = false;
        {
          auto guard = state->mutex.Lock();
          if (state->broken) {
            // We've errored out previously so ignore the result.  If anyone was waiting
            // for this they will get IterationEnd when we purge
            was_broken = true;
          } else {
            if (!sub_finished) {
              // There is a result to deliver.  Either we can deliver it now or we will
              // queue it up
              if (state->waiting_jobs.empty()) {
                state->delivered_jobs.push_back(std::make_shared<DeliveredJob>(
                    state->active_subscriptions[index], *maybe_next, index));
              } else {
                sink = std::move(*state->waiting_jobs.front());
                state->waiting_jobs.pop_front();
              }
            }

            // If this is the first error then we transition the state to a broken state
            if (!maybe_next->ok()) {
              should_mark_final_error = true;
              state->SignalErrorUnlocked(guard);
            }
          }

          // If we finished this inner subscription then we need to grab a new inner
          // subscription to take its spot.  If we can't (because we're broken or
          // exhausted) then we aren't going to be starting any new futures and so
          // the number of running subscriptions drops.
          pull_next_sub = sub_finished && !state->source_exhausted && !was_broken;
          if (sub_finished && !pull_next_sub) {
            state->num_running_subscriptions--;
          }
          // There are three situations we won't pull again.  If an error occurred or we
          // are already finished or if no one was waiting for our result and so we queued
          // it up.  We will decrement outstanding_requests and possibly mark the
          // generator completed.
          if (state->broken || (!sink.is_valid() && !sub_finished) ||
              (sub_finished && state->source_exhausted)) {
            if (state->MarkTaskFinishedUnlocked(guard)) {
              should_mark_gen_complete = true;
            }
          }
        }

        // Now we have given up the lock and we can take all the actions we decided we
        // need to take.
        if (should_mark_final_error) {
          state->MarkFinalError(maybe_next->status(), std::move(sink));
        }

        if (should_mark_gen_complete) {
          state->MarkFinishedAndPurge();
        }

        // An error occurred elsewhere so there is no need to mark any future
        // finished (will happen during the purge) or pull from anything
        if (was_broken) {
          return;
        }

        if (pull_next_sub) {
          if (recursive) {
            was_empty = true;
            return;
          }
          // We pulled an end token so we need to start a new subscription
          // in our spot
          state->PullSource().AddCallback(OuterCallback{state, index});
        } else if (sink.is_valid()) {
          // We pulled a valid result and there was someone waiting for it
          // so lets fetch the next result from our subscription
          sink.MarkFinished(*maybe_next);
          next_fut = state->active_subscriptions[index]();
          if (next_fut.TryAddCallback([this]() { return InnerCallback(state, index); })) {
            return;
          }
          // Already completed.  Avoid very deep recursion by looping
          // here instead of relying on the callback.
          maybe_next = &next_fut.result();
          continue;
        }
        // else: We pulled a valid result but no one was waiting for it so
        // we can just stop.
        return;
      }
    }
    // Shared generator state (keeps State alive for the callback's lifetime).
    std::shared_ptr<State> state;
    // Slot in active_subscriptions this callback services.
    std::size_t index;
    // True when invoked synchronously from OuterCallback (see was_empty).
    bool recursive;
    // Output flag: set when the subscription was immediately empty during a
    // recursive invocation, telling OuterCallback to pull the next subscription.
    bool was_empty = false;
  };

  struct OuterCallback {
    // Invoked when the outer subscription yields the next inner generator.
    void operator()(const Result<AsyncGenerator<T>>& initial_maybe_next) {
      Result<AsyncGenerator<T>> maybe_next = initial_maybe_next;
      while (true) {
        // We have been given a new inner subscription
        bool should_continue = false;
        bool should_mark_gen_complete = false;
        bool should_deliver_error = false;
        bool source_exhausted = maybe_next.ok() && IsIterationEnd(*maybe_next);
        Future<T> error_sink;
        {
          auto guard = state->mutex.Lock();
          if (!maybe_next.ok() || source_exhausted || state->broken) {
            // If here then we will not pull any more from the outer source
            if (!state->broken && !maybe_next.ok()) {
              state->SignalErrorUnlocked(guard);
              // If here then we are the first error so we need to deliver it
              should_deliver_error = true;
              if (!state->waiting_jobs.empty()) {
                error_sink = std::move(*state->waiting_jobs.front());
                state->waiting_jobs.pop_front();
              }
            }
            if (source_exhausted) {
              state->source_exhausted = true;
              state->num_running_subscriptions--;
            }
            if (state->MarkTaskFinishedUnlocked(guard)) {
              should_mark_gen_complete = true;
            }
          } else {
            state->active_subscriptions[index] = *maybe_next;
            should_continue = true;
          }
        }
        if (should_deliver_error) {
          state->MarkFinalError(maybe_next.status(), std::move(error_sink));
        }
        if (should_mark_gen_complete) {
          state->MarkFinishedAndPurge();
        }
        if (should_continue) {
          // There is a possibility that a large sequence of immediately available inner
          // callbacks could lead to a stack overflow.  To avoid this we need to
          // synchronously loop through inner/outer callbacks until we either find an
          // unfinished future or we find an actual item to deliver.
          Future<T> next_item = (*maybe_next)();
          if (!next_item.TryAddCallback([this] { return InnerCallback(state, index); })) {
            // By setting recursive to true we signal to the inner callback that, if it is
            // empty, instead of adding a new outer callback, it should just immediately
            // return, flagging was_empty so that we know we need to check the next
            // subscription.
            InnerCallback immediate_inner(state, index, /*recursive=*/true);
            immediate_inner(next_item.result());
            if (immediate_inner.was_empty) {
              Future<AsyncGenerator<T>> next_source = state->PullSource();
              if (next_source.TryAddCallback([this] {
                    return OuterCallback{state, index};
                  })) {
                // We hit an unfinished future so we can stop looping
                return;
              }
              // The current subscription was immediately and synchronously empty
              // and we were able to synchronously pull the next subscription so we
              // can keep looping.
              maybe_next = next_source.result();
              continue;
            }
          }
        }
        return;
      }
    }
    // Shared generator state.
    std::shared_ptr<State> state;
    // Slot in active_subscriptions this callback fills.
    std::size_t index;
  };

  std::shared_ptr<State> state_;
};
1439
+
1440
+ /// \brief Create a generator that takes in a stream of generators and pulls from up to
1441
+ /// max_subscriptions at a time
1442
+ ///
1443
+ /// Note: This may deliver items out of sequence. For example, items from the third
1444
+ /// AsyncGenerator generated by the source may be emitted before some items from the first
1445
+ /// AsyncGenerator generated by the source.
1446
+ ///
1447
+ /// This generator will pull from source async-reentrantly unless max_subscriptions is 1
1448
+ /// This generator will not pull from the individual subscriptions reentrantly. Add
1449
+ /// readahead to the individual subscriptions if that is desired.
1450
+ /// This generator is async-reentrant
1451
+ ///
1452
+ /// This generator may queue up to max_subscriptions instances of T
1453
+ template <typename T>
1454
+ AsyncGenerator<T> MakeMergedGenerator(AsyncGenerator<AsyncGenerator<T>> source,
1455
+ int max_subscriptions) {
1456
+ return MergedGenerator<T>(std::move(source), max_subscriptions);
1457
+ }
1458
+
1459
+ template <typename T>
1460
+ Result<AsyncGenerator<T>> MakeSequencedMergedGenerator(
1461
+ AsyncGenerator<AsyncGenerator<T>> source, int max_subscriptions) {
1462
+ if (max_subscriptions < 0) {
1463
+ return Status::Invalid("max_subscriptions must be a positive integer");
1464
+ }
1465
+ if (max_subscriptions == 1) {
1466
+ return Status::Invalid("Use MakeConcatenatedGenerator if max_subscriptions is 1");
1467
+ }
1468
+ AsyncGenerator<AsyncGenerator<T>> autostarting_source = MakeMappedGenerator(
1469
+ std::move(source),
1470
+ [](const AsyncGenerator<T>& sub) { return MakeAutoStartingGenerator(sub); });
1471
+ AsyncGenerator<AsyncGenerator<T>> sub_readahead =
1472
+ MakeSerialReadaheadGenerator(std::move(autostarting_source), max_subscriptions - 1);
1473
+ return MakeConcatenatedGenerator(std::move(sub_readahead));
1474
+ }
1475
+
1476
+ /// \brief Create a generator that takes in a stream of generators and pulls from each
1477
+ /// one in sequence.
1478
+ ///
1479
+ /// This generator is async-reentrant but will never pull from source reentrantly and
1480
+ /// will never pull from any subscription reentrantly.
1481
+ ///
1482
+ /// This generator may queue 1 instance of T
1483
+ ///
1484
+ /// TODO: Could potentially make a bespoke implementation instead of MergedGenerator that
1485
+ /// forwards async-reentrant requests instead of buffering them (which is what
1486
+ /// MergedGenerator does)
1487
+ template <typename T>
1488
+ AsyncGenerator<T> MakeConcatenatedGenerator(AsyncGenerator<AsyncGenerator<T>> source) {
1489
+ return MergedGenerator<T>(std::move(source), 1);
1490
+ }
1491
+
1492
/// \brief A value paired with its position in a stream
template <typename T>
struct Enumerated {
  // The item itself
  T value;
  // Zero-based position of the item in the stream (negative for the end token)
  int index;
  // True if this is the last item in the stream
  bool last;
};
1498
+
1499
/// \brief IterationTraits specialization allowing Enumerated<T> to be used as a
/// generator item; the end token is flagged by a negative index.
template <typename T>
struct IterationTraits<Enumerated<T>> {
  // The terminal sentinel: an end-of-iteration value with index -1
  static Enumerated<T> End() { return Enumerated<T>{IterationEnd<T>(), -1, false}; }
  // An item is the sentinel iff its index is negative
  static bool IsEnd(const Enumerated<T>& val) { return val.index < 0; }
};
1504
+
1505
/// \see MakeEnumeratedGenerator
template <typename T>
class EnumeratingGenerator {
 public:
  /// \param source the generator to enumerate
  /// \param initial_value the first item, pulled in advance by the caller so this
  ///        generator always has one item buffered ahead (needed to detect `last`)
  EnumeratingGenerator(AsyncGenerator<T> source, T initial_value)
      : state_(std::make_shared<State>(std::move(source), std::move(initial_value))) {}

  Future<Enumerated<T>> operator()() {
    if (state_->finished) {
      return AsyncGeneratorEnd<Enumerated<T>>();
    } else {
      // Capture the shared state (not `this`) so the continuation remains valid
      // if this generator instance is copied or destroyed.
      auto state = state_;
      return state->source().Then([state](const T& next) {
        // Deliver the buffered previous item; seeing the end token now tells us
        // that buffered item was the last one.
        auto finished = IsIterationEnd<T>(next);
        auto prev = Enumerated<T>{state->prev_value, state->prev_index, finished};
        state->prev_value = next;
        state->prev_index++;
        state->finished = finished;
        return prev;
      });
    }
  }

 private:
  struct State {
    State(AsyncGenerator<T> source, T initial_value)
        : source(std::move(source)), prev_value(std::move(initial_value)), prev_index(0) {
      // The pre-pulled first item may itself already be the end token.
      finished = IsIterationEnd<T>(prev_value);
    }

    AsyncGenerator<T> source;
    // The buffered item waiting to be delivered on the next pull.
    T prev_value;
    // Index that will be assigned to prev_value when it is delivered.
    int prev_index;
    // True once the source's end token has been observed.
    bool finished;
  };

  std::shared_ptr<State> state_;
};
1543
+
1544
/// Wrap items from a source generator with positional information
///
/// When used with MakeMergedGenerator and MakeSequencingGenerator this allows items to be
/// processed in a "first-available" fashion and later resequenced which can reduce the
/// impact of sources with erratic performance (e.g. a filesystem where some items may
/// take longer to read than others).
///
/// TODO(ARROW-12371) Would require this generator be async-reentrant
///
/// \see MakeSequencingGenerator for an example of putting items back in order
///
/// This generator is not async-reentrant
///
/// This generator buffers one item (so it knows which item is the last item)
template <typename T>
AsyncGenerator<Enumerated<T>> MakeEnumeratedGenerator(AsyncGenerator<T> source) {
  // Pull the first item eagerly so the EnumeratingGenerator always has one
  // "previous" value buffered; FutureFirstGenerator adapts the resulting
  // future-of-generator back into a plain generator.
  return FutureFirstGenerator<Enumerated<T>>(
      source().Then([source](const T& initial_value) -> AsyncGenerator<Enumerated<T>> {
        return EnumeratingGenerator<T>(std::move(source), initial_value);
      }));
}
1565
+
1566
+ /// \see MakeTransferredGenerator
1567
+ template <typename T>
1568
+ class TransferringGenerator {
1569
+ public:
1570
+ explicit TransferringGenerator(AsyncGenerator<T> source, internal::Executor* executor)
1571
+ : source_(std::move(source)), executor_(executor) {}
1572
+
1573
+ Future<T> operator()() { return executor_->Transfer(source_()); }
1574
+
1575
+ private:
1576
+ AsyncGenerator<T> source_;
1577
+ internal::Executor* executor_;
1578
+ };
1579
+
1580
/// \brief Transfer a future to an underlying executor.
///
/// Continuations run on the returned future will be run on the given executor
/// if they cannot be run synchronously.
///
/// This is often needed to move computation off I/O threads or other external
/// completion sources and back on to the CPU executor so the I/O thread can
/// stay busy and focused on I/O
///
/// Keep in mind that continuations called on an already completed future will
/// always be run synchronously and so no transfer will happen in that case.
///
/// This generator is async reentrant if the source is
///
/// This generator will not queue
template <typename T>
AsyncGenerator<T> MakeTransferredGenerator(AsyncGenerator<T> source,
                                           internal::Executor* executor) {
  // Thin factory: all the work happens inside TransferringGenerator::operator().
  return TransferringGenerator<T>(std::move(source), executor);
}
1600
+
1601
/// \see MakeBackgroundGenerator
///
/// Pulls items from a (possibly blocking) Iterator<T> on an I/O executor and buffers
/// them in a bounded queue.  The worker task stops when the queue reaches `max_q` and
/// is restarted once the consumer drains it down to `q_restart`.  All shared state is
/// protected by State::mutex; futures are completed outside the lock.
template <typename T>
class BackgroundGenerator {
 public:
  explicit BackgroundGenerator(Iterator<T> it, internal::Executor* io_executor, int max_q,
                               int q_restart)
      : state_(std::make_shared<State>(io_executor, std::move(it), max_q, q_restart)),
        cleanup_(std::make_shared<Cleanup>(state_.get())) {}

  // Consumer entry point.  Either pops a buffered item, or (if the queue is empty and
  // the source is not finished) parks a `waiting_future` for the worker to fulfill.
  Future<T> operator()() {
    auto guard = state_->mutex.Lock();
    Future<T> waiting_future;
    if (state_->queue.empty()) {
      if (state_->finished) {
        return AsyncGeneratorEnd<T>();
      } else {
        waiting_future = Future<T>::Make();
        state_->waiting_future = waiting_future;
      }
    } else {
      auto next = Future<T>::MakeFinished(std::move(state_->queue.front()));
      state_->queue.pop();
      if (state_->NeedsRestart()) {
        return state_->RestartTask(state_, std::move(guard), std::move(next));
      }
      return next;
    }
    // This should only trigger the very first time this method is called
    if (state_->NeedsRestart()) {
      return state_->RestartTask(state_, std::move(guard), std::move(waiting_future));
    }
    return waiting_future;
  }

 protected:
  // Sentinel meaning "no worker thread is currently running" for the deadlock assert.
  static constexpr uint64_t kUnlikelyThreadId{std::numeric_limits<uint64_t>::max()};

  struct State {
    State(internal::Executor* io_executor, Iterator<T> it, int max_q, int q_restart)
        : io_executor(io_executor),
          max_q(max_q),
          q_restart(q_restart),
          it(std::move(it)),
          reading(false),
          finished(false),
          should_shutdown(false) {}

    void ClearQueue() {
      while (!queue.empty()) {
        queue.pop();
      }
    }

    // A valid task_finished future doubles as the "worker task exists" flag.
    bool TaskIsRunning() const { return task_finished.is_valid(); }

    bool NeedsRestart() const {
      return !finished && !reading && static_cast<int>(queue.size()) <= q_restart;
    }

    // Must be called with `guard` held on this->mutex; spawns a fresh worker task.
    void DoRestartTask(std::shared_ptr<State> state, util::Mutex::Guard guard) {
      // If we get here we are actually going to start a new task so let's create a
      // task_finished future for it
      state->task_finished = Future<>::Make();
      state->reading = true;
      auto spawn_status = io_executor->Spawn(
          [state]() { BackgroundGenerator::WorkerTask(std::move(state)); });
      if (!spawn_status.ok()) {
        // If we can't spawn a new task then send an error to the consumer (either via a
        // waiting future or the queue) and mark ourselves finished
        state->finished = true;
        state->task_finished = Future<>();
        if (waiting_future.has_value()) {
          auto to_deliver = std::move(waiting_future.value());
          waiting_future.reset();
          guard.Unlock();
          to_deliver.MarkFinished(spawn_status);
        } else {
          ClearQueue();
          queue.push(spawn_status);
        }
      }
    }

    // Restart the worker, waiting for any still-running task to clean up first.
    // Returns `next` (possibly chained behind the cleanup) back to the consumer.
    Future<T> RestartTask(std::shared_ptr<State> state, util::Mutex::Guard guard,
                          Future<T> next) {
      if (TaskIsRunning()) {
        // If the task is still cleaning up we need to wait for it to finish before
        // restarting. We also want to block the consumer until we've restarted the
        // reader to avoid multiple restarts
        return task_finished.Then([state, next]() {
          // This may appear dangerous (recursive mutex) but we should be guaranteed the
          // outer guard has been released by this point. We know...
          // * task_finished is not already finished (it would be invalid in that case)
          // * task_finished will not be marked complete until we've given up the mutex
          auto guard_ = state->mutex.Lock();
          state->DoRestartTask(state, std::move(guard_));
          return next;
        });
      }
      // Otherwise we can restart immediately
      DoRestartTask(std::move(state), std::move(guard));
      return next;
    }

    internal::Executor* io_executor;
    const int max_q;
    const int q_restart;
    Iterator<T> it;
    std::atomic<uint64_t> worker_thread_id{kUnlikelyThreadId};

    // If true, the task is actively pumping items from the queue and does not need a
    // restart
    bool reading;
    // Set to true when a terminal item arrives
    bool finished;
    // Signal to the background task to end early because consumers have given up on it
    bool should_shutdown;
    // If the queue is empty, the consumer will create a waiting future and wait for it
    std::queue<Result<T>> queue;
    std::optional<Future<T>> waiting_future;
    // Every background task is given a future to complete when it is entirely finished
    // processing and ready for the next task to start or for State to be destroyed
    Future<> task_finished;
    util::Mutex mutex;
  };

  // Cleanup task that will be run when all consumer references to the generator are lost
  struct Cleanup {
    explicit Cleanup(State* state) : state(state) {}
    ~Cleanup() {
      /// TODO: Once ARROW-13109 is available then we can be force consumers to spawn and
      /// there is no need to perform this check.
      ///
      /// It's a deadlock if we enter cleanup from
      /// the worker thread but it can happen if the consumer doesn't transfer away
      assert(state->worker_thread_id.load() != ::arrow::internal::GetThreadId());
      Future<> finish_fut;
      {
        auto lock = state->mutex.Lock();
        if (!state->TaskIsRunning()) {
          return;
        }
        // Signal the current task to stop and wait for it to finish
        state->should_shutdown = true;
        finish_fut = state->task_finished;
      }
      // Using future as a condition variable here
      Status st = finish_fut.status();
      ARROW_UNUSED(st);
    }
    State* state;
  };

  // Body of the background task: pump the iterator until the queue fills, a terminal
  // item (end/error) arrives, or shutdown is requested.
  static void WorkerTask(std::shared_ptr<State> state) {
    state->worker_thread_id.store(::arrow::internal::GetThreadId());
    // We need to capture the state to read while outside the mutex
    bool reading = true;
    while (reading) {
      auto next = state->it.Next();
      // Need to capture state->waiting_future inside the mutex to mark finished outside
      Future<T> waiting_future;
      {
        auto guard = state->mutex.Lock();

        if (state->should_shutdown) {
          state->finished = true;
          break;
        }

        if (!next.ok() || IsIterationEnd<T>(*next)) {
          // Terminal item. Mark finished to true, send this last item, and quit
          state->finished = true;
          if (!next.ok()) {
            state->ClearQueue();
          }
        }
        // At this point we are going to send an item. Either we will add it to the
        // queue or deliver it to a waiting future.
        if (state->waiting_future.has_value()) {
          waiting_future = std::move(state->waiting_future.value());
          state->waiting_future.reset();
        } else {
          state->queue.push(std::move(next));
          // We just filled up the queue so it is time to quit. We may need to notify
          // a cleanup task so we transition to Quitting
          if (static_cast<int>(state->queue.size()) >= state->max_q) {
            state->reading = false;
          }
        }
        reading = state->reading && !state->finished;
      }
      // This should happen outside the mutex. Presumably there is a
      // transferring generator on the other end that will quickly transfer any
      // callbacks off of this thread so we can continue looping. Still, best not to
      // rely on that
      if (waiting_future.is_valid()) {
        waiting_future.MarkFinished(next);
      }
    }
    // Once we've sent our last item we can notify any waiters that we are done and so
    // either state can be cleaned up or a new background task can be started
    Future<> task_finished;
    {
      auto guard = state->mutex.Lock();
      // After we give up the mutex state can be safely deleted. We will no longer
      // reference it. We can safely transition to idle now.
      task_finished = state->task_finished;
      state->task_finished = Future<>();
      state->worker_thread_id.store(kUnlikelyThreadId);
    }
    task_finished.MarkFinished();
  }

  std::shared_ptr<State> state_;
  // state_ is held by both the generator and the background thread so it won't be cleaned
  // up when all consumer references are relinquished. cleanup_ is only held by the
  // generator so it will be destructed when the last consumer reference is gone. We use
  // this to cleanup / stop the background generator in case the consuming end stops
  // listening (e.g. due to a downstream error)
  std::shared_ptr<Cleanup> cleanup_;
};
1822
+
1823
// Default queue bounds for MakeBackgroundGenerator (see parameter docs below).
constexpr int kDefaultBackgroundMaxQ = 32;
constexpr int kDefaultBackgroundQRestart = 16;

/// \brief Create an AsyncGenerator<T> by iterating over an Iterator<T> on a background
/// thread
///
/// The parameter max_q and q_restart control queue size and background thread task
/// management. If the background task is fast you typically don't want it creating a
/// thread task for every item. Instead the background thread will run until it fills
/// up a readahead queue.
///
/// Once the queue has filled up the background thread task will terminate (allowing other
/// I/O tasks to use the thread). Once the queue has been drained enough (specified by
/// q_restart) then the background thread task will be restarted. If q_restart is too low
/// then you may exhaust the queue waiting for the background thread task to start running
/// again. If it is too high then it will be constantly stopping and restarting the
/// background queue task
///
/// The "background thread" is a logical thread and will run as tasks on the io_executor.
/// This thread may stop and start when the queue fills up but there will only be one
/// active background thread task at any given time. You MUST transfer away from this
/// background generator. Otherwise there could be a race condition if a callback on the
/// background thread deletes the last consumer reference to the background generator. You
/// can transfer onto the same executor as the background thread, it is only necessary to
/// create a new thread task, not to switch executors.
///
/// This generator is not async-reentrant
///
/// This generator will queue up to max_q blocks
template <typename T>
static Result<AsyncGenerator<T>> MakeBackgroundGenerator(
    Iterator<T> iterator, internal::Executor* io_executor,
    int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart) {
  // A restart threshold above the queue capacity could never be reached; reject it.
  if (max_q < q_restart) {
    return Status::Invalid("max_q must be >= q_restart");
  }
  return BackgroundGenerator<T>(std::move(iterator), io_executor, max_q, q_restart);
}
1861
+
1862
+ /// \brief Create an AsyncGenerator<T> by iterating over an Iterator<T> synchronously
1863
+ ///
1864
+ /// This should only be used if you know the source iterator does not involve any
1865
+ /// I/O (or other blocking calls). Otherwise a CPU thread will be blocked and, depending
1866
+ /// on the complexity of the iterator, it may lead to deadlock.
1867
+ ///
1868
+ /// If you are not certain if there will be I/O then it is better to use
1869
+ /// MakeBackgroundGenerator. If helpful you can think of this as the AsyncGenerator
1870
+ /// equivalent of Future::MakeFinished
1871
+ ///
1872
+ /// It is impossible to call this in an async-reentrant manner since the returned
1873
+ /// future will be completed by the time it is polled.
1874
+ ///
1875
+ /// This generator does not queue
1876
+ template <typename T>
1877
+ static Result<AsyncGenerator<T>> MakeBlockingGenerator(
1878
+ std::shared_ptr<Iterator<T>> iterator) {
1879
+ return [it = std::move(iterator)]() mutable -> Future<T> {
1880
+ return Future<T>::MakeFinished(it->Next());
1881
+ };
1882
+ }
1883
+
1884
/// \brief Overload taking the iterator by value.
template <typename T>
static Result<AsyncGenerator<T>> MakeBlockingGenerator(Iterator<T> iterator) {
  // Wrap the move-only Iterator in a shared_ptr so the generator (a copyable
  // std::function) can own it.
  return MakeBlockingGenerator(std::make_shared<Iterator<T>>(std::move(iterator)));
}
1888
+
1889
/// \see MakeGeneratorIterator
template <typename T>
class GeneratorIterator {
 public:
  explicit GeneratorIterator(AsyncGenerator<T> source) : source_(std::move(source)) {}

  // Blocks the calling thread until the next future from the generator completes.
  Result<T> Next() { return source_().result(); }

 private:
  AsyncGenerator<T> source_;
};
1900
+
1901
/// \brief Convert an AsyncGenerator<T> to an Iterator<T> which blocks until each future
/// is finished
template <typename T>
Iterator<T> MakeGeneratorIterator(AsyncGenerator<T> source) {
  return Iterator<T>(GeneratorIterator<T>(std::move(source)));
}
1907
+
1908
/// \brief Add readahead to an iterator using a background thread.
///
/// Under the hood this converts the iterator to a generator using
/// MakeBackgroundGenerator (whose internal queue of up to readahead_queue_size items
/// provides the readahead) and then converts back to an iterator using
/// MakeGeneratorIterator.
template <typename T>
Result<Iterator<T>> MakeReadaheadIterator(Iterator<T> it, int readahead_queue_size) {
  // Dedicated single-thread pool: the background generator allows only one active
  // worker task at a time anyway.
  ARROW_ASSIGN_OR_RAISE(auto io_executor, internal::ThreadPool::Make(1));
  auto max_q = readahead_queue_size;
  // Restart the background task once the queue is half drained.
  auto q_restart = std::max(1, max_q / 2);
  ARROW_ASSIGN_OR_RAISE(
      auto background_generator,
      MakeBackgroundGenerator(std::move(it), io_executor.get(), max_q, q_restart));
  // Capture io_executor to keep it alive as long as owned_bg_generator is still
  // referenced
  AsyncGenerator<T> owned_bg_generator = [io_executor, background_generator]() {
    return background_generator();
  };
  return MakeGeneratorIterator(std::move(owned_bg_generator));
}
1929
+
1930
+ /// \brief Make a generator that returns a single pre-generated future
1931
+ ///
1932
+ /// This generator is async-reentrant.
1933
+ template <typename T>
1934
+ std::function<Future<T>()> MakeSingleFutureGenerator(Future<T> future) {
1935
+ assert(future.is_valid());
1936
+ auto state = std::make_shared<Future<T>>(std::move(future));
1937
+ return [state]() -> Future<T> {
1938
+ auto fut = std::move(*state);
1939
+ if (fut.is_valid()) {
1940
+ return fut;
1941
+ } else {
1942
+ return AsyncGeneratorEnd<T>();
1943
+ }
1944
+ };
1945
+ }
1946
+
1947
/// \brief Make a generator that immediately ends.
///
/// This generator is async-reentrant.
template <typename T>
std::function<Future<T>()> MakeEmptyGenerator() {
  // Stateless: every call yields the terminal item.
  return []() -> Future<T> { return AsyncGeneratorEnd<T>(); };
}
1954
+
1955
+ /// \brief Make a generator that always fails with a given error
1956
+ ///
1957
+ /// This generator is async-reentrant.
1958
+ template <typename T>
1959
+ AsyncGenerator<T> MakeFailingGenerator(Status st) {
1960
+ assert(!st.ok());
1961
+ auto state = std::make_shared<Status>(std::move(st));
1962
+ return [state]() -> Future<T> {
1963
+ auto st = std::move(*state);
1964
+ if (!st.ok()) {
1965
+ return st;
1966
+ } else {
1967
+ return AsyncGeneratorEnd<T>();
1968
+ }
1969
+ };
1970
+ }
1971
+
1972
/// \brief Make a generator that always fails with a given error
///
/// This overload allows inferring the return type from the argument.
template <typename T>
AsyncGenerator<T> MakeFailingGenerator(const Result<T>& result) {
  // Delegates to the Status overload; `result` is expected to hold an error.
  return MakeFailingGenerator<T>(result.status());
}
1979
+
1980
/// \brief Prepend initial_values onto a generator
///
/// This generator is async-reentrant but will buffer requests and will not
/// pull from following_values async-reentrantly.
template <typename T>
AsyncGenerator<T> MakeGeneratorStartsWith(std::vector<T> initial_values,
                                          AsyncGenerator<T> following_values) {
  // Build a generator-of-generators {initial_values..., following_values} and
  // flatten it so the initial values are exhausted before the source is pulled.
  auto initial_values_vec_gen = MakeVectorGenerator(std::move(initial_values));
  auto gen_gen = MakeVectorGenerator<AsyncGenerator<T>>(
      {std::move(initial_values_vec_gen), std::move(following_values)});
  return MakeConcatenatedGenerator(std::move(gen_gen));
}
1992
+
1993
/// \see MakeCancellable
template <typename T>
struct CancellableGenerator {
  Future<T> operator()() {
    if (stop_token.IsStopRequested()) {
      // Poll() yields the cancellation Status, which converts to a failed future.
      return stop_token.Poll();
    }
    return source();
  }

  AsyncGenerator<T> source;
  StopToken stop_token;
};
2005
+
2006
/// \brief Allow an async generator to be cancelled
///
/// This generator is async-reentrant
template <typename T>
AsyncGenerator<T> MakeCancellable(AsyncGenerator<T> source, StopToken stop_token) {
  // Cancellation is checked on each pull; an in-flight future is not interrupted.
  return CancellableGenerator<T>{std::move(source), std::move(stop_token)};
}
2013
+
2014
+ template <typename T>
2015
+ class DefaultIfEmptyGenerator {
2016
+ public:
2017
+ DefaultIfEmptyGenerator(AsyncGenerator<T> source, T or_value)
2018
+ : state_(std::make_shared<State>(std::move(source), std::move(or_value))) {}
2019
+
2020
+ Future<T> operator()() {
2021
+ if (state_->first) {
2022
+ state_->first = false;
2023
+ struct {
2024
+ T or_value;
2025
+
2026
+ Result<T> operator()(const T& value) {
2027
+ if (IterationTraits<T>::IsEnd(value)) {
2028
+ return std::move(or_value);
2029
+ }
2030
+ return value;
2031
+ }
2032
+ } Continuation;
2033
+ Continuation.or_value = std::move(state_->or_value);
2034
+ return state_->source().Then(std::move(Continuation));
2035
+ }
2036
+ return state_->source();
2037
+ }
2038
+
2039
+ private:
2040
+ struct State {
2041
+ AsyncGenerator<T> source;
2042
+ T or_value;
2043
+ bool first;
2044
+ State(AsyncGenerator<T> source_, T or_value_)
2045
+ : source(std::move(source_)), or_value(std::move(or_value_)), first(true) {}
2046
+ };
2047
+ std::shared_ptr<State> state_;
2048
+ };
2049
+
2050
/// \brief If the generator is empty, return the given value, else
/// forward the values from the generator.
///
/// This generator is async-reentrant.
template <typename T>
AsyncGenerator<T> MakeDefaultIfEmptyGenerator(AsyncGenerator<T> source, T or_value) {
  return DefaultIfEmptyGenerator<T>(std::move(source), std::move(or_value));
}
2058
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+
22
+ #include "arrow/type_fwd.h"
23
+
24
namespace arrow {

/// An async generator is a callable that yields a future for each successive value
/// and signals completion with a terminal (IterationEnd) item.
template <typename T>
using AsyncGenerator = std::function<Future<T>()>;

// Forward declarations for the generator implementations defined in
// arrow/util/async_generator.h, so other headers can name these types without
// including that (heavy) header.

template <typename T, typename V>
class MappingGenerator;

template <typename T, typename ComesAfter, typename IsNext>
class SequencingGenerator;

template <typename T, typename V>
class TransformingGenerator;

template <typename T>
class SerialReadaheadGenerator;

template <typename T>
class ReadaheadGenerator;

template <typename T>
class PushGenerator;

template <typename T>
class MergedGenerator;

template <typename T>
struct Enumerated;

template <typename T>
class EnumeratingGenerator;

template <typename T>
class TransferringGenerator;

template <typename T>
class BackgroundGenerator;

template <typename T>
class GeneratorIterator;

template <typename T>
struct CancellableGenerator;

template <typename T>
class DefaultIfEmptyGenerator;

}  // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+ #include <string_view>
22
+
23
+ #include "arrow/util/visibility.h"
24
+
25
namespace arrow {
namespace util {

/// \brief Encode the given bytes as base64 text.
ARROW_EXPORT
std::string base64_encode(std::string_view s);

/// \brief Decode base64 text back into the raw bytes it represents.
ARROW_EXPORT
std::string base64_decode(std::string_view s);

}  // namespace util
}  // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string_view>
21
+ #include <utility>
22
+
23
+ #include "arrow/type.h"
24
+ #include "arrow/util/span.h"
25
+
26
+ namespace arrow::util {
27
+
28
/// \brief Build an inline (short-string) binary view from raw bytes.
///
/// Caller must ensure size <= BinaryViewType::kInlineSize; bytes beyond `size`
/// are zero-initialized as the columnar spec requires.
inline BinaryViewType::c_type ToInlineBinaryView(const void* data, int32_t size) {
  // Small string: inlined. Bytes beyond size are zeroed
  BinaryViewType::c_type out;
  out.inlined = {size, {}};
  memcpy(&out.inlined.data, data, size);
  return out;
}
35
+
36
/// \brief Convenience overload building an inline view from a string_view.
inline BinaryViewType::c_type ToInlineBinaryView(std::string_view v) {
  return ToInlineBinaryView(v.data(), static_cast<int32_t>(v.size()));
}
39
+
40
/// \brief Build a binary view, inlining the value when it fits and otherwise
/// storing a {buffer_index, offset} reference plus the value's prefix bytes.
inline BinaryViewType::c_type ToBinaryView(const void* data, int32_t size,
                                           int32_t buffer_index, int32_t offset) {
  if (size <= BinaryViewType::kInlineSize) {
    return ToInlineBinaryView(data, size);
  }

  // Large string: store index/offset.
  BinaryViewType::c_type out;
  out.ref = {size, {}, buffer_index, offset};
  // The first kPrefixSize bytes are duplicated in the view for fast comparisons.
  memcpy(&out.ref.prefix, data, sizeof(out.ref.prefix));
  return out;
}
52
+
53
/// \brief Convenience overload building a view from a string_view.
inline BinaryViewType::c_type ToBinaryView(std::string_view v, int32_t buffer_index,
                                           int32_t offset) {
  return ToBinaryView(v.data(), static_cast<int32_t>(v.size()), buffer_index, offset);
}
57
+
58
/// \brief Resolve a binary view to the bytes it refers to.
///
/// For out-of-line views the bytes live in data_buffers[v.ref.buffer_index]; the
/// returned string_view is only valid while `v` and those buffers stay alive,
/// hence the deleted rvalue overload below.
template <typename BufferPtr>
std::string_view FromBinaryView(const BinaryViewType::c_type& v,
                                const BufferPtr* data_buffers) {
  auto* data = v.is_inline() ? v.inlined.data.data()
                             : data_buffers[v.ref.buffer_index]->data() + v.ref.offset;
  return {reinterpret_cast<const char*>(data), static_cast<size_t>(v.size())};
}
// Deleted: binding a string_view to a temporary view's inline storage would dangle.
template <typename BufferPtr>
std::string_view FromBinaryView(BinaryViewType::c_type&&, const BufferPtr*) = delete;
67
+
68
/// \brief Compare two binary views (possibly backed by different buffer sets)
/// for byte equality.
template <typename BufferPtr>
bool EqualBinaryView(BinaryViewType::c_type l, BinaryViewType::c_type r,
                     const BufferPtr* l_buffers, const BufferPtr* r_buffers) {
  // The first word of a view packs the size and the 4-byte prefix; comparing it
  // rejects most unequal pairs with a single 64-bit compare.
  int64_t l_size_and_prefix, r_size_and_prefix;
  memcpy(&l_size_and_prefix, &l, sizeof(l_size_and_prefix));
  memcpy(&r_size_and_prefix, &r, sizeof(r_size_and_prefix));

  if (l_size_and_prefix != r_size_and_prefix) return false;

  if (l.is_inline()) {
    // The columnar spec mandates that the inlined part be zero-padded, so we can compare
    // a word at a time regardless of the exact size.
    int64_t l_inlined, r_inlined;
    memcpy(&l_inlined, l.inline_data() + BinaryViewType::kPrefixSize, sizeof(l_inlined));
    memcpy(&r_inlined, r.inline_data() + BinaryViewType::kPrefixSize, sizeof(r_inlined));
    return l_inlined == r_inlined;
  }

  // Sizes are equal and this is not inline, therefore both are out
  // of line and have kPrefixSize first in common.
  const uint8_t* l_data = l_buffers[l.ref.buffer_index]->data() + l.ref.offset;
  const uint8_t* r_data = r_buffers[r.ref.buffer_index]->data() + r.ref.offset;
  return memcmp(l_data + BinaryViewType::kPrefixSize,
                r_data + BinaryViewType::kPrefixSize,
                l.size() - BinaryViewType::kPrefixSize) == 0;
}
94
+
95
+ } // namespace arrow::util
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h ADDED
@@ -0,0 +1,466 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+ #include <array>
22
+ #include <bitset>
23
+ #include <cassert>
24
+ #include <cstdint>
25
+ #include <cstring>
26
+ #include <memory>
27
+ #include <string>
28
+ #include <string_view>
29
+ #include <utility>
30
+
31
+ #include "arrow/buffer.h"
32
+ #include "arrow/util/bit_util.h"
33
+ #include "arrow/util/bitmap_ops.h"
34
+ #include "arrow/util/bitmap_reader.h"
35
+ #include "arrow/util/bitmap_writer.h"
36
+ #include "arrow/util/compare.h"
37
+ #include "arrow/util/endian.h"
38
+ #include "arrow/util/functional.h"
39
+ #include "arrow/util/span.h"
40
+ #include "arrow/util/string_builder.h"
41
+ #include "arrow/util/visibility.h"
42
+
43
+ namespace arrow {
44
+
45
+ class BooleanArray;
46
+
47
+ namespace internal {
48
+
49
/// \brief A non-owning view onto a range of bits within a buffer.
///
/// A Bitmap is described by a data pointer, a bit offset and a bit length.
/// If constructed from a mutable buffer (or a non-const pointer), the bits
/// may also be modified through the view.
///
/// NOTE(review): the Buffer-based constructor stores only the raw data
/// pointer(s); the Bitmap does NOT keep the Buffer alive, so the caller must
/// ensure the buffer outlives the Bitmap.
class ARROW_EXPORT Bitmap : public util::ToStringOstreamable<Bitmap>,
                            public util::EqualityComparable<Bitmap> {
 public:
  Bitmap() = default;

  /// \brief View `length` bits of `buffer` starting at bit `offset`.
  /// The view is writable only if the buffer is mutable.
  Bitmap(const std::shared_ptr<Buffer>& buffer, int64_t offset, int64_t length)
      : data_(buffer->data()), offset_(offset), length_(length) {
    if (buffer->is_mutable()) {
      mutable_data_ = buffer->mutable_data();
    }
  }

  /// \brief Read-only view over raw memory.
  Bitmap(const void* data, int64_t offset, int64_t length)
      : data_(reinterpret_cast<const uint8_t*>(data)), offset_(offset), length_(length) {}

  /// \brief Writable view over raw memory.
  Bitmap(void* data, int64_t offset, int64_t length)
      : data_(reinterpret_cast<const uint8_t*>(data)),
        mutable_data_(reinterpret_cast<uint8_t*>(data)),
        offset_(offset),
        length_(length) {}

  /// \brief View of this bitmap starting at bit `offset`, preserving mutability.
  Bitmap Slice(int64_t offset) const {
    if (mutable_data_ != NULLPTR) {
      return {mutable_data_, offset_ + offset, length_ - offset};
    } else {
      return {data_, offset_ + offset, length_ - offset};
    }
  }

  /// \brief View of `length` bits of this bitmap starting at bit `offset`,
  /// preserving mutability.
  Bitmap Slice(int64_t offset, int64_t length) const {
    if (mutable_data_ != NULLPTR) {
      return {mutable_data_, offset_ + offset, length};
    } else {
      return {data_, offset_ + offset, length};
    }
  }

  std::string ToString() const;

  bool Equals(const Bitmap& other) const;

  std::string Diff(const Bitmap& other) const;

  /// \brief Read bit `i` (relative to this view's offset). No bounds checking
  /// is performed.
  bool GetBit(int64_t i) const { return bit_util::GetBit(data_, i + offset_); }

  bool operator[](int64_t i) const { return GetBit(i); }

  /// \brief Write bit `i`. Requires a writable view (mutable_data_ non-null);
  /// note this is `const` because it does not change the view itself.
  void SetBitTo(int64_t i, bool v) const {
    bit_util::SetBitTo(mutable_data_, i + offset_, v);
  }

  /// \brief Set every bit in the view to `v`. Requires a writable view.
  void SetBitsTo(bool v) { bit_util::SetBitsTo(mutable_data_, offset_, length_, v); }

  void CopyFrom(const Bitmap& other);
  void CopyFromInverted(const Bitmap& other);

  /// \brief Visit bits from each bitmap as bitset<N>
  ///
  /// All bitmaps must have identical length.
  template <size_t N, typename Visitor>
  static void VisitBits(const Bitmap (&bitmaps)[N], Visitor&& visitor) {
    int64_t bit_length = BitLength(bitmaps, N);
    std::bitset<N> bits;
    for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) {
      for (size_t i = 0; i < N; ++i) {
        bits[i] = bitmaps[i].GetBit(bit_i);
      }
      visitor(bits);
    }
  }

  /// \brief Visit bits from each bitmap as bitset<N>
  ///
  /// All bitmaps must have identical length.
  template <size_t N, typename Visitor>
  static void VisitBits(const std::array<Bitmap, N>& bitmaps, Visitor&& visitor) {
    int64_t bit_length = BitLength(bitmaps);
    std::bitset<N> bits;
    for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) {
      for (size_t i = 0; i < N; ++i) {
        bits[i] = bitmaps[i].GetBit(bit_i);
      }
      visitor(bits);
    }
  }

  /// \brief Visit words of bits from each bitmap as array<Word, N>
  ///
  /// All bitmaps must have identical length. The first bit in a visited bitmap
  /// may be offset within the first visited word, but words will otherwise contain
  /// densely packed bits loaded from the bitmap. That offset within the first word is
  /// returned.
  ///
  /// TODO(bkietz) allow for early termination
  // NOTE: this function is efficient on 3+ sufficiently large bitmaps.
  // It also has a large prolog / epilog overhead and should be used
  // carefully in other cases.
  // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid
  // and BitmapUInt64Reader.
  template <size_t N, typename Visitor,
            typename Word = typename std::decay<
                internal::call_traits::argument_type<0, Visitor&&>>::type::value_type>
  static int64_t VisitWords(const Bitmap (&bitmaps_arg)[N], Visitor&& visitor) {
    constexpr int64_t kBitWidth = sizeof(Word) * 8;

    // local, mutable variables which will be sliced/decremented to represent consumption:
    Bitmap bitmaps[N];
    int64_t offsets[N];
    int64_t bit_length = BitLength(bitmaps_arg, N);
    util::span<const Word> words[N];
    for (size_t i = 0; i < N; ++i) {
      bitmaps[i] = bitmaps_arg[i];
      offsets[i] = bitmaps[i].template word_offset<Word>();
      assert(offsets[i] >= 0 && offsets[i] < kBitWidth);
      words[i] = bitmaps[i].template words<Word>();
    }

    // Advance every slice past `consumed_bits` bits and refresh the cached
    // word spans/offsets accordingly.
    auto consume = [&](int64_t consumed_bits) {
      for (size_t i = 0; i < N; ++i) {
        bitmaps[i] = bitmaps[i].Slice(consumed_bits, bit_length - consumed_bits);
        offsets[i] = bitmaps[i].template word_offset<Word>();
        assert(offsets[i] >= 0 && offsets[i] < kBitWidth);
        words[i] = bitmaps[i].template words<Word>();
      }
      bit_length -= consumed_bits;
    };

    std::array<Word, N> visited_words;
    visited_words.fill(0);

    if (bit_length <= kBitWidth * 2) {
      // bitmaps fit into one or two words so don't bother with optimization
      while (bit_length > 0) {
        auto leading_bits = std::min(bit_length, kBitWidth);
        SafeLoadWords(bitmaps, 0, leading_bits, false, &visited_words);
        visitor(visited_words);
        consume(leading_bits);
      }
      return 0;
    }

    int64_t max_offset = *std::max_element(offsets, offsets + N);
    int64_t min_offset = *std::min_element(offsets, offsets + N);
    if (max_offset > 0) {
      // consume leading bits so that at least one bitmap becomes word-aligned
      auto leading_bits = kBitWidth - min_offset;
      SafeLoadWords(bitmaps, 0, leading_bits, true, &visited_words);
      visitor(visited_words);
      consume(leading_bits);
    }
    assert(*std::min_element(offsets, offsets + N) == 0);

    int64_t whole_word_count = bit_length / kBitWidth;
    assert(whole_word_count >= 1);

    if (min_offset == max_offset) {
      // all offsets were identical, all leading bits have been consumed
      assert(
          std::all_of(offsets, offsets + N, [](int64_t offset) { return offset == 0; }));

      for (int64_t word_i = 0; word_i < whole_word_count; ++word_i) {
        for (size_t i = 0; i < N; ++i) {
          visited_words[i] = words[i][word_i];
        }
        visitor(visited_words);
      }
      consume(whole_word_count * kBitWidth);
    } else {
      // leading bits from potentially incomplete words have been consumed

      // word_i such that words[i][word_i] and words[i][word_i + 1] lie entirely
      // within the bitmap for all i
      for (int64_t word_i = 0; word_i < whole_word_count - 1; ++word_i) {
        for (size_t i = 0; i < N; ++i) {
          if (offsets[i] == 0) {
            visited_words[i] = words[i][word_i];
          } else {
            // stitch a full word together from two adjacent words, shifting
            // in little-endian bit order
            auto words0 = bit_util::ToLittleEndian(words[i][word_i]);
            auto words1 = bit_util::ToLittleEndian(words[i][word_i + 1]);
            visited_words[i] = bit_util::FromLittleEndian(
                (words0 >> offsets[i]) | (words1 << (kBitWidth - offsets[i])));
          }
        }
        visitor(visited_words);
      }
      consume((whole_word_count - 1) * kBitWidth);

      SafeLoadWords(bitmaps, 0, kBitWidth, false, &visited_words);

      visitor(visited_words);
      consume(kBitWidth);
    }

    // load remaining bits
    if (bit_length > 0) {
      SafeLoadWords(bitmaps, 0, bit_length, false, &visited_words);
      visitor(visited_words);
    }

    return min_offset;
  }

  /// \brief Drive a word-at-a-time read/transform/write loop over pre-built
  /// readers and writers; shared implementation for VisitWordsAndWrite.
  template <size_t N, size_t M, typename ReaderT, typename WriterT, typename Visitor,
            typename Word = typename std::decay<
                internal::call_traits::argument_type<0, Visitor&&>>::type::value_type>
  static void RunVisitWordsAndWriteLoop(int64_t bit_length,
                                        std::array<ReaderT, N>& readers,
                                        std::array<WriterT, M>& writers,
                                        Visitor&& visitor) {
    constexpr int64_t kBitWidth = sizeof(Word) * 8;

    std::array<Word, N> visited_words;
    std::array<Word, M> output_words;

    // every reader will have same number of words, since they are same length'ed
    // TODO($JIRA) this will be inefficient in some cases. When there are offsets beyond
    // Word boundary, every Word would have to be created from 2 adjoining Words
    auto n_words = readers[0].words();
    bit_length -= n_words * kBitWidth;
    while (n_words--) {
      // first collect all words to visited_words array
      for (size_t i = 0; i < N; i++) {
        visited_words[i] = readers[i].NextWord();
      }
      visitor(visited_words, &output_words);
      for (size_t i = 0; i < M; i++) {
        writers[i].PutNextWord(output_words[i]);
      }
    }

    // every reader will have same number of trailing bytes, because of the above reason
    // tailing portion could be more than one word! (ref: BitmapWordReader constructor)
    // remaining full/ partial words to write

    if (bit_length) {
      // convert the word visitor lambda to a byte_visitor
      auto byte_visitor = [&](const std::array<uint8_t, N>& in,
                              std::array<uint8_t, M>* out) {
        std::array<Word, N> in_words;
        std::array<Word, M> out_words;
        std::copy(in.begin(), in.end(), in_words.begin());
        visitor(in_words, &out_words);
        for (size_t i = 0; i < M; i++) {
          out->at(i) = static_cast<uint8_t>(out_words[i]);
        }
      };

      std::array<uint8_t, N> visited_bytes;
      std::array<uint8_t, M> output_bytes;
      int n_bytes = readers[0].trailing_bytes();
      while (n_bytes--) {
        visited_bytes.fill(0);
        output_bytes.fill(0);
        int valid_bits;
        for (size_t i = 0; i < N; i++) {
          visited_bytes[i] = readers[i].NextTrailingByte(valid_bits);
        }
        byte_visitor(visited_bytes, &output_bytes);
        for (size_t i = 0; i < M; i++) {
          writers[i].PutNextTrailingByte(output_bytes[i], valid_bits);
        }
      }
    }
  }

  /// \brief Visit words of bits from each input bitmap as array<Word, N> and collects
  /// outputs to an array<Word, M>, to be written into the output bitmaps accordingly.
  ///
  /// All bitmaps must have identical length. The first bit in a visited bitmap
  /// may be offset within the first visited word, but words will otherwise contain
  /// densely packed bits loaded from the bitmap. That offset within the first word is
  /// returned.
  /// Visitor is expected to have the following signature
  ///     [](const std::array<Word, N>& in_words, std::array<Word, M>* out_words){...}
  ///
  // NOTE: this function is efficient on 3+ sufficiently large bitmaps.
  // It also has a large prolog / epilog overhead and should be used
  // carefully in other cases.
  // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid
  // and BitmapUInt64Reader.
  template <size_t N, size_t M, typename Visitor,
            typename Word = typename std::decay<
                internal::call_traits::argument_type<0, Visitor&&>>::type::value_type>
  static void VisitWordsAndWrite(const std::array<Bitmap, N>& bitmaps_arg,
                                 std::array<Bitmap, M>* out_bitmaps_arg,
                                 Visitor&& visitor) {
    int64_t bit_length = BitLength(bitmaps_arg);
    assert(bit_length == BitLength(*out_bitmaps_arg));

    // if both input and output bitmaps have no byte offset, then use special template
    if (std::all_of(bitmaps_arg.begin(), bitmaps_arg.end(),
                    [](const Bitmap& b) { return b.offset_ % 8 == 0; }) &&
        std::all_of(out_bitmaps_arg->begin(), out_bitmaps_arg->end(),
                    [](const Bitmap& b) { return b.offset_ % 8 == 0; })) {
      std::array<BitmapWordReader<Word, /*may_have_byte_offset=*/false>, N> readers;
      for (size_t i = 0; i < N; ++i) {
        const Bitmap& in_bitmap = bitmaps_arg[i];
        readers[i] = BitmapWordReader<Word, /*may_have_byte_offset=*/false>(
            in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_);
      }

      std::array<BitmapWordWriter<Word, /*may_have_byte_offset=*/false>, M> writers;
      for (size_t i = 0; i < M; ++i) {
        const Bitmap& out_bitmap = out_bitmaps_arg->at(i);
        writers[i] = BitmapWordWriter<Word, /*may_have_byte_offset=*/false>(
            out_bitmap.mutable_data_, out_bitmap.offset_, out_bitmap.length_);
      }

      RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor);
    } else {
      std::array<BitmapWordReader<Word>, N> readers;
      for (size_t i = 0; i < N; ++i) {
        const Bitmap& in_bitmap = bitmaps_arg[i];
        readers[i] =
            BitmapWordReader<Word>(in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_);
      }

      std::array<BitmapWordWriter<Word>, M> writers;
      for (size_t i = 0; i < M; ++i) {
        const Bitmap& out_bitmap = out_bitmaps_arg->at(i);
        writers[i] = BitmapWordWriter<Word>(out_bitmap.mutable_data_, out_bitmap.offset_,
                                            out_bitmap.length_);
      }

      RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor);
    }
  }

  const uint8_t* data() const { return data_; }
  uint8_t* mutable_data() { return mutable_data_; }

  /// offset of first bit relative to buffer().data()
  int64_t offset() const { return offset_; }

  /// number of bits in this Bitmap
  int64_t length() const { return length_; }

  /// span of all bytes which contain any bit in this Bitmap
  util::span<const uint8_t> bytes() const {
    auto byte_offset = offset_ / 8;
    auto byte_count = bit_util::CeilDiv(offset_ + length_, 8) - byte_offset;
    return {data_ + byte_offset, static_cast<size_t>(byte_count)};
  }

 private:
  /// span of all Words which contain any bit in this Bitmap
  ///
  /// For example, given Word=uint16_t and a bitmap spanning bits [20, 36)
  /// words() would span bits [16, 48).
  ///
  /// 0       16      32     48     64
  /// |-------|-------|------|------| (buffer)
  ///          [       ]             (bitmap)
  ///         |-------|------|       (returned words)
  ///
  /// \warning The words may contain bytes which lie outside the buffer or are
  /// uninitialized.
  template <typename Word>
  util::span<const Word> words() const {
    // Round the byte range outward to Word-aligned addresses.
    auto bytes_addr = reinterpret_cast<intptr_t>(bytes().data());
    auto words_addr = bytes_addr - bytes_addr % sizeof(Word);
    auto word_byte_count =
        bit_util::RoundUpToPowerOf2(static_cast<int64_t>(bytes_addr + bytes().size()),
                                    static_cast<int64_t>(sizeof(Word))) -
        words_addr;
    return {reinterpret_cast<const Word*>(words_addr),
            static_cast<size_t>(word_byte_count / sizeof(Word))};
  }

  /// offset of first bit relative to words<Word>().data()
  template <typename Word>
  int64_t word_offset() const {
    return offset_ + 8 * (reinterpret_cast<intptr_t>(data_) -
                          reinterpret_cast<intptr_t>(words<Word>().data()));
  }

  /// load words from bitmaps bitwise
  ///
  /// Loads `out_length` bits from each bitmap (starting at `offset`) into the
  /// corresponding out word, bit by bit. If `set_trailing_bits` is true the
  /// loaded bits are placed at the high end of the word instead of the low end.
  template <size_t N, typename Word>
  static void SafeLoadWords(const Bitmap (&bitmaps)[N], int64_t offset,
                            int64_t out_length, bool set_trailing_bits,
                            std::array<Word, N>* out) {
    out->fill(0);

    int64_t out_offset = set_trailing_bits ? sizeof(Word) * 8 - out_length : 0;

    Bitmap slices[N], out_bitmaps[N];
    for (size_t i = 0; i < N; ++i) {
      slices[i] = bitmaps[i].Slice(offset, out_length);
      out_bitmaps[i] = Bitmap(&out->at(i), out_offset, out_length);
    }

    int64_t bit_i = 0;
    Bitmap::VisitBits(slices, [&](std::bitset<N> bits) {
      for (size_t i = 0; i < N; ++i) {
        out_bitmaps[i].SetBitTo(bit_i, bits[i]);
      }
      ++bit_i;
    });
  }

  /// assert bitmaps have identical length and return that length
  static int64_t BitLength(const Bitmap* bitmaps, size_t N);

  /// assert bitmaps have identical length and return that length
  template <size_t N>
  static int64_t BitLength(const std::array<Bitmap, N>& bitmaps) {
    for (size_t i = 1; i < N; ++i) {
      assert(bitmaps[i].length() == bitmaps[0].length());
    }
    return bitmaps[0].length();
  }

  const uint8_t* data_ = NULLPTR;     // start of the underlying buffer (not owned)
  uint8_t* mutable_data_ = NULLPTR;   // non-null only for writable views
  int64_t offset_ = 0, length_ = 0;   // bit offset into data_, and bit count
};
464
+
465
+ } // namespace internal
466
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <vector>
23
+
24
+ #include "arrow/result.h"
25
+ #include "arrow/type_fwd.h"
26
+ #include "arrow/util/visibility.h"
27
+
28
namespace arrow {
namespace internal {

/// \brief Generate a bitmap with all positions set to `value` except for the
/// one found at `straggler_pos`.
ARROW_EXPORT
Result<std::shared_ptr<Buffer>> BitmapAllButOne(MemoryPool* pool, int64_t length,
                                                int64_t straggler_pos, bool value = true);

/// \brief Convert a vector of bytes to a packed bitmap buffer
ARROW_EXPORT
Result<std::shared_ptr<Buffer>> BytesToBits(const std::vector<uint8_t>&,
                                            MemoryPool* pool = default_memory_pool());

}  // namespace internal
}  // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+
23
+ #include "arrow/buffer.h"
24
+ #include "arrow/memory_pool.h"
25
+ #include "arrow/result.h"
26
+ #include "arrow/util/bit_util.h"
27
+ #include "arrow/util/visibility.h"
28
+
29
+ namespace arrow {
30
+ namespace internal {
31
+
32
+ // A std::generate() like function to write sequential bits into a bitmap area.
33
+ // Bits preceding the bitmap area are preserved, bits following the bitmap
34
+ // area may be clobbered.
35
+
36
+ template <class Generator>
37
+ void GenerateBits(uint8_t* bitmap, int64_t start_offset, int64_t length, Generator&& g) {
38
+ if (length == 0) {
39
+ return;
40
+ }
41
+ uint8_t* cur = bitmap + start_offset / 8;
42
+ uint8_t bit_mask = bit_util::kBitmask[start_offset % 8];
43
+ uint8_t current_byte = *cur & bit_util::kPrecedingBitmask[start_offset % 8];
44
+
45
+ for (int64_t index = 0; index < length; ++index) {
46
+ const bool bit = g();
47
+ current_byte = bit ? (current_byte | bit_mask) : current_byte;
48
+ bit_mask = static_cast<uint8_t>(bit_mask << 1);
49
+ if (bit_mask == 0) {
50
+ bit_mask = 1;
51
+ *cur++ = current_byte;
52
+ current_byte = 0;
53
+ }
54
+ }
55
+ if (bit_mask != 1) {
56
+ *cur++ = current_byte;
57
+ }
58
+ }
59
+
60
+ // Like GenerateBits(), but unrolls its main loop for higher performance.
61
+
62
+ template <class Generator>
63
+ void GenerateBitsUnrolled(uint8_t* bitmap, int64_t start_offset, int64_t length,
64
+ Generator&& g) {
65
+ static_assert(std::is_same<decltype(std::declval<Generator>()()), bool>::value,
66
+ "Functor passed to GenerateBitsUnrolled must return bool");
67
+
68
+ if (length == 0) {
69
+ return;
70
+ }
71
+ uint8_t current_byte;
72
+ uint8_t* cur = bitmap + start_offset / 8;
73
+ const uint64_t start_bit_offset = start_offset % 8;
74
+ uint8_t bit_mask = bit_util::kBitmask[start_bit_offset];
75
+ int64_t remaining = length;
76
+
77
+ if (bit_mask != 0x01) {
78
+ current_byte = *cur & bit_util::kPrecedingBitmask[start_bit_offset];
79
+ while (bit_mask != 0 && remaining > 0) {
80
+ current_byte |= g() * bit_mask;
81
+ bit_mask = static_cast<uint8_t>(bit_mask << 1);
82
+ --remaining;
83
+ }
84
+ *cur++ = current_byte;
85
+ }
86
+
87
+ int64_t remaining_bytes = remaining / 8;
88
+ uint8_t out_results[8];
89
+ while (remaining_bytes-- > 0) {
90
+ for (int i = 0; i < 8; ++i) {
91
+ out_results[i] = g();
92
+ }
93
+ *cur++ = static_cast<uint8_t>(out_results[0] | out_results[1] << 1 |
94
+ out_results[2] << 2 | out_results[3] << 3 |
95
+ out_results[4] << 4 | out_results[5] << 5 |
96
+ out_results[6] << 6 | out_results[7] << 7);
97
+ }
98
+
99
+ int64_t remaining_bits = remaining % 8;
100
+ if (remaining_bits) {
101
+ current_byte = 0;
102
+ bit_mask = 0x01;
103
+ while (remaining_bits-- > 0) {
104
+ current_byte |= g() * bit_mask;
105
+ bit_mask = static_cast<uint8_t>(bit_mask << 1);
106
+ }
107
+ *cur++ = current_byte;
108
+ }
109
+ }
110
+
111
+ } // namespace internal
112
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+
22
+ #include "arrow/util/bit_util.h"
23
+ #include "arrow/util/bitmap_reader.h"
24
+
25
+ namespace arrow {
26
+ namespace internal {
27
+
28
+ // A function that visits each bit in a bitmap and calls a visitor function with a
29
+ // boolean representation of that bit. This is intended to be analogous to
30
+ // GenerateBits.
31
+ template <class Visitor>
32
+ void VisitBits(const uint8_t* bitmap, int64_t start_offset, int64_t length,
33
+ Visitor&& visit) {
34
+ BitmapReader reader(bitmap, start_offset, length);
35
+ for (int64_t index = 0; index < length; ++index) {
36
+ visit(reader.IsSet());
37
+ reader.Next();
38
+ }
39
+ }
40
+
41
// Like VisitBits(), but unrolls its main loop for better performance.
template <class Visitor>
void VisitBitsUnrolled(const uint8_t* bitmap, int64_t start_offset, int64_t length,
                       Visitor&& visit) {
  if (length == 0) {
    return;
  }

  // Start by visiting any bits preceding the first full byte.
  int64_t num_bits_before_full_bytes =
      bit_util::RoundUpToMultipleOf8(start_offset) - start_offset;
  // Truncate num_bits_before_full_bytes if it is greater than length.
  if (num_bits_before_full_bytes > length) {
    num_bits_before_full_bytes = length;
  }
  // Use the non loop-unrolled VisitBits since we don't want to add branches
  VisitBits<Visitor>(bitmap, start_offset, num_bits_before_full_bytes, visit);

  // Shift the start pointer to the first full byte and compute the
  // number of full bytes to be read.
  const uint8_t* first_full_byte = bitmap + bit_util::CeilDiv(start_offset, 8);
  const int64_t num_full_bytes = (length - num_bits_before_full_bytes) / 8;

  // Iterate over each full byte of the input bitmap and call the visitor in
  // a loop-unrolled manner.
  for (int64_t byte_index = 0; byte_index < num_full_bytes; ++byte_index) {
    // Get the current bit-packed byte value from the bitmap.
    const uint8_t byte = *(first_full_byte + byte_index);

    // Execute the visitor function on each bit of the current byte
    // (unrolled to avoid a per-bit loop).
    visit(bit_util::GetBitFromByte(byte, 0));
    visit(bit_util::GetBitFromByte(byte, 1));
    visit(bit_util::GetBitFromByte(byte, 2));
    visit(bit_util::GetBitFromByte(byte, 3));
    visit(bit_util::GetBitFromByte(byte, 4));
    visit(bit_util::GetBitFromByte(byte, 5));
    visit(bit_util::GetBitFromByte(byte, 6));
    visit(bit_util::GetBitFromByte(byte, 7));
  }

  // Visit any leftover bits in the last (partial) byte.
  const int64_t num_bits_after_full_bytes = (length - num_bits_before_full_bytes) % 8;
  VisitBits<Visitor>(first_full_byte + num_full_bytes, 0, num_bits_after_full_bytes,
                     visit);
}
86
+
87
+ } // namespace internal
88
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <cstring>
22
+
23
+ #include "arrow/util/bit_util.h"
24
+ #include "arrow/util/endian.h"
25
+ #include "arrow/util/macros.h"
26
+
27
+ namespace arrow {
28
+ namespace internal {
29
+
30
class BitmapWriter {
  // A sequential bitwise writer that preserves surrounding bit values.
  //
  // Usage: call Set()/Clear() to choose the current bit's value, Next() to
  // advance, and Finish() once at the end to flush the last partial byte.

 public:
  /// \brief Position the writer at bit `start_offset` of `bitmap`, ready to
  /// write up to `length` bits.
  BitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length)
      : bitmap_(bitmap), position_(0), length_(length) {
    byte_offset_ = start_offset / 8;
    bit_mask_ = bit_util::kBitmask[start_offset % 8];
    if (length > 0) {
      // Preload the first byte so bits outside the written range survive.
      current_byte_ = bitmap[byte_offset_];
    } else {
      current_byte_ = 0;
    }
  }

  // Set the current bit to 1 (does not advance).
  void Set() { current_byte_ |= bit_mask_; }

  // Set the current bit to 0 (does not advance).
  void Clear() { current_byte_ &= bit_mask_ ^ 0xFF; }

  void Next() {
    bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1);
    ++position_;
    if (bit_mask_ == 0) {
      // Finished this byte, need advancing
      bit_mask_ = 0x01;
      bitmap_[byte_offset_++] = current_byte_;
      if (ARROW_PREDICT_TRUE(position_ < length_)) {
        // Preload the next byte to preserve its not-yet-written bits.
        current_byte_ = bitmap_[byte_offset_];
      }
    }
  }

  void Finish() {
    // Store the current byte if we didn't go past the bitmap storage
    if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) {
      bitmap_[byte_offset_] = current_byte_;
    }
  }

  int64_t position() const { return position_; }

 private:
  uint8_t* bitmap_;       // output bitmap (not owned)
  int64_t position_;      // number of bits advanced so far
  int64_t length_;        // total number of bits to write

  uint8_t current_byte_;  // in-progress byte, flushed on byte boundaries
  uint8_t bit_mask_;      // single-bit mask selecting the current bit
  int64_t byte_offset_;   // index of current_byte_ within bitmap_
};
80
+
81
class FirstTimeBitmapWriter {
  // Like BitmapWriter, but any bit values *following* the bits written
  // might be clobbered. It is hence faster than BitmapWriter, and can
  // also avoid false positives with Valgrind.
  //
  // Clear() is a no-op: bits are assumed to start out unset, so only Set()
  // needs to touch memory.

 public:
  /// \brief Position the writer at bit `start_offset` of `bitmap`, ready to
  /// write up to `length` bits. Bits preceding start_offset are preserved.
  FirstTimeBitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length)
      : bitmap_(bitmap), position_(0), length_(length) {
    current_byte_ = 0;
    byte_offset_ = start_offset / 8;
    bit_mask_ = bit_util::kBitmask[start_offset % 8];
    if (length > 0) {
      // Keep only the bits preceding start_offset; following bits may be
      // clobbered (see class comment).
      current_byte_ =
          bitmap[byte_offset_] & bit_util::kPrecedingBitmask[start_offset % 8];
    } else {
      current_byte_ = 0;
    }
  }

  /// Appends number_of_bits from word to valid_bits and valid_bits_offset.
  ///
  /// \param[in] word The LSB bitmap to append. Any bits past number_of_bits are assumed
  /// to be unset (i.e. 0).
  /// \param[in] number_of_bits The number of bits to append from word.
  void AppendWord(uint64_t word, int64_t number_of_bits) {
    if (ARROW_PREDICT_FALSE(number_of_bits == 0)) {
      return;
    }

    // Location that the first byte needs to be written to.
    uint8_t* append_position = bitmap_ + byte_offset_;

    // Update state variables except for current_byte_ here.
    position_ += number_of_bits;
    int64_t bit_offset = bit_util::CountTrailingZeros(static_cast<uint32_t>(bit_mask_));
    bit_mask_ = bit_util::kBitmask[(bit_offset + number_of_bits) % 8];
    byte_offset_ += (bit_offset + number_of_bits) / 8;

    if (bit_offset != 0) {
      // We are in the middle of the byte. This code updates the byte and shifts
      // bits appropriately within word so it can be memcpy'd below.
      int64_t bits_to_carry = 8 - bit_offset;
      // Carry over bits from word to current_byte_. We assume any extra bits in word
      // unset so no additional accounting is needed for when number_of_bits <
      // bits_to_carry.
      current_byte_ |= (word & bit_util::kPrecedingBitmask[bits_to_carry]) << bit_offset;
      // Check if everything is transferred into current_byte_.
      if (ARROW_PREDICT_FALSE(number_of_bits < bits_to_carry)) {
        return;
      }
      *append_position = current_byte_;
      append_position++;
      // Move the carry bits off of word.
      word = word >> bits_to_carry;
      number_of_bits -= bits_to_carry;
    }
    word = bit_util::ToLittleEndian(word);
    int64_t bytes_for_word = ::arrow::bit_util::BytesForBits(number_of_bits);
    std::memcpy(append_position, &word, bytes_for_word);
    // At this point, the previous current_byte_ has been written to bitmap_.
    // The new current_byte_ is either the last relevant byte in 'word'
    // or cleared if the new position is byte aligned (i.e. a fresh byte).
    if (bit_mask_ == 0x1) {
      current_byte_ = 0;
    } else {
      current_byte_ = *(append_position + bytes_for_word - 1);
    }
  }

  // Set the current bit to 1 (does not advance).
  void Set() { current_byte_ |= bit_mask_; }

  // No-op: bits are assumed unset until Set() is called (see class comment).
  void Clear() {}

  void Next() {
    bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1);
    ++position_;
    if (bit_mask_ == 0) {
      // Finished this byte, need advancing
      bit_mask_ = 0x01;
      bitmap_[byte_offset_++] = current_byte_;
      current_byte_ = 0;
    }
  }

  void Finish() {
    // Store the current byte if we didn't go past the bitmap storage
    if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) {
      bitmap_[byte_offset_] = current_byte_;
    }
  }

  int64_t position() const { return position_; }

 private:
  uint8_t* bitmap_;       // output bitmap (not owned)
  int64_t position_;      // number of bits advanced so far
  int64_t length_;        // total number of bits to write

  uint8_t current_byte_;  // in-progress byte, flushed on byte boundaries
  uint8_t bit_mask_;      // single-bit mask selecting the current bit
  int64_t byte_offset_;   // index of current_byte_ within bitmap_
};
183
+
184
// A writer that stores whole Words (and trailing bytes) into a bitmap that may
// start at an arbitrary bit offset. When `may_have_byte_offset` is false the
// offset handling is compiled out entirely.
template <typename Word, bool may_have_byte_offset = true>
class BitmapWordWriter {
 public:
  BitmapWordWriter() = default;
  // Position the writer at bit `offset` of `bitmap`, with `length` bits of
  // capacity. Preloads the first word/byte so bits preceding `offset` are
  // preserved when writes straddle it.
  BitmapWordWriter(uint8_t* bitmap, int64_t offset, int64_t length)
      : offset_(static_cast<int64_t>(may_have_byte_offset) * (offset % 8)),
        bitmap_(bitmap + offset / 8),
        bitmap_end_(bitmap_ + bit_util::BytesForBits(offset_ + length)),
        mask_((1U << offset_) - 1) {
    if (offset_) {
      if (length >= static_cast<int>(sizeof(Word) * 8)) {
        current_data.word_ = load<Word>(bitmap_);
      } else if (length > 0) {
        current_data.epi.byte_ = load<uint8_t>(bitmap_);
      }
    }
  }

  // Store the next full word of bits, rotating it across the bit offset when
  // necessary so that surrounding (unused) bits are left untouched.
  void PutNextWord(Word word) {
    if (may_have_byte_offset && offset_) {
      // split one word into two adjacent words, don't touch unused bits
      //               |<------ word ----->|
      //               +-----+-------------+
      //               |  A  |      B      |
      //               +-----+-------------+
      //                  |         |
      //                  v         v       offset
      // +-------------+-----+-------------+-----+
      // |     ---     |  A  |      B      | --- |
      // +-------------+-----+-------------+-----+
      // |<------ next ----->|<---- current ---->|
      word = (word << offset_) | (word >> (sizeof(Word) * 8 - offset_));
      Word next_word = load<Word>(bitmap_ + sizeof(Word));
      current_data.word_ = (current_data.word_ & mask_) | (word & ~mask_);
      next_word = (next_word & ~mask_) | (word & mask_);
      store<Word>(bitmap_, current_data.word_);
      store<Word>(bitmap_ + sizeof(Word), next_word);
      current_data.word_ = next_word;
    } else {
      store<Word>(bitmap_, word);
    }
    bitmap_ += sizeof(Word);
  }

  // Store a trailing byte holding `valid_bits` bits (1..8). A full byte takes
  // the fast path mirroring PutNextWord; a partial byte falls back to a
  // bit-by-bit BitmapWriter so bits past the end are preserved.
  void PutNextTrailingByte(uint8_t byte, int valid_bits) {
    if (valid_bits == 8) {
      if (may_have_byte_offset && offset_) {
        byte = (byte << offset_) | (byte >> (8 - offset_));
        uint8_t next_byte = load<uint8_t>(bitmap_ + 1);
        current_data.epi.byte_ = (current_data.epi.byte_ & mask_) | (byte & ~mask_);
        next_byte = (next_byte & ~mask_) | (byte & mask_);
        store<uint8_t>(bitmap_, current_data.epi.byte_);
        store<uint8_t>(bitmap_ + 1, next_byte);
        current_data.epi.byte_ = next_byte;
      } else {
        store<uint8_t>(bitmap_, byte);
      }
      ++bitmap_;
    } else {
      assert(valid_bits > 0);
      assert(valid_bits < 8);
      assert(bitmap_ + bit_util::BytesForBits(offset_ + valid_bits) <= bitmap_end_);
      internal::BitmapWriter writer(bitmap_, offset_, valid_bits);
      for (int i = 0; i < valid_bits; ++i) {
        (byte & 0x01) ? writer.Set() : writer.Clear();
        writer.Next();
        byte >>= 1;
      }
      writer.Finish();
    }
  }

 private:
  int64_t offset_;             // bit offset within the first byte (0 if aligned)
  uint8_t* bitmap_;            // current write position (not owned)

  const uint8_t* bitmap_end_;  // one past the last writable byte (for asserts)
  uint64_t mask_;              // low `offset_` bits set; selects carried-over bits
  // Cached copy of the word/byte currently straddled by the offset. The union
  // places epi.byte_ at the word's lowest-addressed significant byte on both
  // endiannesses.
  union {
    Word word_;
    struct {
#if ARROW_LITTLE_ENDIAN == 0
      uint8_t padding_bytes_[sizeof(Word) - 1];
#endif
      uint8_t byte_;
    } epi;
  } current_data;

  // Checked, alignment-safe load in little-endian bit order.
  template <typename DType>
  DType load(const uint8_t* bitmap) {
    assert(bitmap + sizeof(DType) <= bitmap_end_);
    return bit_util::ToLittleEndian(util::SafeLoadAs<DType>(bitmap));
  }

  // Checked, alignment-safe store in little-endian bit order.
  template <typename DType>
  void store(uint8_t* bitmap, DType data) {
    assert(bitmap + sizeof(DType) <= bitmap_end_);
    util::SafeStore(bitmap, bit_util::FromLittleEndian(data));
  }
};
284
+
285
+ } // namespace internal
286
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+ #include <array>
22
+ #include <bitset>
23
+ #include <cassert>
24
+ #include <cstdint>
25
+ #include <cstring>
26
+ #include <memory>
27
+ #include <string>
28
+ #include <string_view>
29
+ #include <type_traits>
30
+ #include <utility>
31
+ #include <vector>
32
+
33
+ #include "arrow/buffer.h"
34
+ #include "arrow/memory_pool.h"
35
+ #include "arrow/result.h"
36
+ #include "arrow/type_fwd.h"
37
+ #include "arrow/util/bit_util.h"
38
+ #include "arrow/util/compare.h"
39
+ #include "arrow/util/functional.h"
40
+ #include "arrow/util/macros.h"
41
+ #include "arrow/util/string_builder.h"
42
+ #include "arrow/util/type_traits.h"
43
+ #include "arrow/util/visibility.h"
44
+
45
+ namespace arrow {
46
+ namespace internal {
47
+
48
+ /// \brief Store a stack of bitsets efficiently. The top bitset may be
49
+ /// accessed and its bits may be modified, but it may not be resized.
50
+ class BitsetStack {
51
+ public:
52
+ using reference = typename std::vector<bool>::reference;
53
+
54
+ /// \brief push a bitset onto the stack
55
+ /// \param size number of bits in the next bitset
56
+ /// \param value initial value for bits in the pushed bitset
57
+ void Push(int size, bool value) {
58
+ offsets_.push_back(bit_count());
59
+ bits_.resize(bit_count() + size, value);
60
+ }
61
+
62
+ /// \brief number of bits in the bitset at the top of the stack
63
+ int TopSize() const {
64
+ if (offsets_.size() == 0) return 0;
65
+ return bit_count() - offsets_.back();
66
+ }
67
+
68
+ /// \brief pop a bitset off the stack
69
+ void Pop() {
70
+ bits_.resize(offsets_.back());
71
+ offsets_.pop_back();
72
+ }
73
+
74
+ /// \brief get the value of a bit in the top bitset
75
+ /// \param i index of the bit to access
76
+ bool operator[](int i) const { return bits_[offsets_.back() + i]; }
77
+
78
+ /// \brief get a mutable reference to a bit in the top bitset
79
+ /// \param i index of the bit to access
80
+ reference operator[](int i) { return bits_[offsets_.back() + i]; }
81
+
82
+ private:
83
+ int bit_count() const { return static_cast<int>(bits_.size()); }
84
+ std::vector<bool> bits_;
85
+ std::vector<int> offsets_;
86
+ };
87
+
88
+ } // namespace internal
89
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/util/endian.h"
21
+ #include "arrow/util/visibility.h"
22
+
23
+ #include <stdint.h>
24
+
25
+ namespace arrow {
26
+ namespace internal {
27
+
28
+ ARROW_EXPORT
29
+ int unpack32(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
30
+ ARROW_EXPORT
31
+ int unpack64(const uint8_t* in, uint64_t* out, int batch_size, int num_bits);
32
+
33
+ } // namespace internal
34
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <stdint.h>
21
+
22
+ namespace arrow {
23
+ namespace internal {
24
+
25
+ int unpack32_avx512(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
26
+
27
+ } // namespace internal
28
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <stdint.h>
21
+
22
+ namespace arrow {
23
+ namespace internal {
24
+
25
+ int unpack32_neon(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
26
+
27
+ } // namespace internal
28
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <type_traits>
22
+ #include <utility>
23
+
24
+ namespace arrow {
25
+ namespace internal {
26
+
27
+ template <typename OutputType, typename InputType>
28
+ inline OutputType checked_cast(InputType&& value) {
29
+ static_assert(std::is_class<typename std::remove_pointer<
30
+ typename std::remove_reference<InputType>::type>::type>::value,
31
+ "checked_cast input type must be a class");
32
+ static_assert(std::is_class<typename std::remove_pointer<
33
+ typename std::remove_reference<OutputType>::type>::type>::value,
34
+ "checked_cast output type must be a class");
35
+ #ifdef NDEBUG
36
+ return static_cast<OutputType>(value);
37
+ #else
38
+ return dynamic_cast<OutputType>(value);
39
+ #endif
40
+ }
41
+
42
+ template <class T, class U>
43
+ std::shared_ptr<T> checked_pointer_cast(std::shared_ptr<U> r) noexcept {
44
+ #ifdef NDEBUG
45
+ return std::static_pointer_cast<T>(std::move(r));
46
+ #else
47
+ return std::dynamic_pointer_cast<T>(std::move(r));
48
+ #endif
49
+ }
50
+
51
+ template <class T, class U>
52
+ std::unique_ptr<T> checked_pointer_cast(std::unique_ptr<U> r) noexcept {
53
+ #ifdef NDEBUG
54
+ return std::unique_ptr<T>(static_cast<T*>(r.release()));
55
+ #else
56
+ return std::unique_ptr<T>(dynamic_cast<T*>(r.release()));
57
+ #endif
58
+ }
59
+
60
+ } // namespace internal
61
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <limits>
22
+ #include <memory>
23
+ #include <optional>
24
+ #include <string>
25
+
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/util/type_fwd.h"
29
+ #include "arrow/util/visibility.h"
30
+
31
+ namespace arrow {
32
+ namespace util {
33
+
34
+ constexpr int kUseDefaultCompressionLevel = std::numeric_limits<int>::min();
35
+
36
+ /// \brief Streaming compressor interface
37
+ ///
38
+ class ARROW_EXPORT Compressor {
39
+ public:
40
+ virtual ~Compressor() = default;
41
+
42
+ struct CompressResult {
43
+ int64_t bytes_read;
44
+ int64_t bytes_written;
45
+ };
46
+ struct FlushResult {
47
+ int64_t bytes_written;
48
+ bool should_retry;
49
+ };
50
+ struct EndResult {
51
+ int64_t bytes_written;
52
+ bool should_retry;
53
+ };
54
+
55
+ /// \brief Compress some input.
56
+ ///
57
+ /// If bytes_read is 0 on return, then a larger output buffer should be supplied.
58
+ virtual Result<CompressResult> Compress(int64_t input_len, const uint8_t* input,
59
+ int64_t output_len, uint8_t* output) = 0;
60
+
61
+ /// \brief Flush part of the compressed output.
62
+ ///
63
+ /// If should_retry is true on return, Flush() should be called again
64
+ /// with a larger buffer.
65
+ virtual Result<FlushResult> Flush(int64_t output_len, uint8_t* output) = 0;
66
+
67
+ /// \brief End compressing, doing whatever is necessary to end the stream.
68
+ ///
69
+ /// If should_retry is true on return, End() should be called again
70
+ /// with a larger buffer. Otherwise, the Compressor should not be used anymore.
71
+ ///
72
+ /// End() implies Flush().
73
+ virtual Result<EndResult> End(int64_t output_len, uint8_t* output) = 0;
74
+
75
+ // XXX add methods for buffer size heuristics?
76
+ };
77
+
78
+ /// \brief Streaming decompressor interface
79
+ ///
80
+ class ARROW_EXPORT Decompressor {
81
+ public:
82
+ virtual ~Decompressor() = default;
83
+
84
+ struct DecompressResult {
85
+ // XXX is need_more_output necessary? (Brotli?)
86
+ int64_t bytes_read;
87
+ int64_t bytes_written;
88
+ bool need_more_output;
89
+ };
90
+
91
+ /// \brief Decompress some input.
92
+ ///
93
+ /// If need_more_output is true on return, a larger output buffer needs
94
+ /// to be supplied.
95
+ virtual Result<DecompressResult> Decompress(int64_t input_len, const uint8_t* input,
96
+ int64_t output_len, uint8_t* output) = 0;
97
+
98
+ /// \brief Return whether the compressed stream is finished.
99
+ ///
100
+ /// This is a heuristic. If true is returned, then it is guaranteed
101
+ /// that the stream is finished. If false is returned, however, it may
102
+ /// simply be that the underlying library isn't able to provide the information.
103
+ virtual bool IsFinished() = 0;
104
+
105
+ /// \brief Reinitialize decompressor, making it ready for a new compressed stream.
106
+ virtual Status Reset() = 0;
107
+
108
+ // XXX add methods for buffer size heuristics?
109
+ };
110
+
111
+ /// \brief Compression codec options
112
+ class ARROW_EXPORT CodecOptions {
113
+ public:
114
+ explicit CodecOptions(int compression_level = kUseDefaultCompressionLevel)
115
+ : compression_level(compression_level) {}
116
+
117
+ virtual ~CodecOptions() = default;
118
+
119
+ int compression_level;
120
+ };
121
+
122
+ // ----------------------------------------------------------------------
123
+ // GZip codec options implementation
124
+
125
+ enum class GZipFormat {
126
+ ZLIB,
127
+ DEFLATE,
128
+ GZIP,
129
+ };
130
+
131
+ class ARROW_EXPORT GZipCodecOptions : public CodecOptions {
132
+ public:
133
+ GZipFormat gzip_format = GZipFormat::GZIP;
134
+ std::optional<int> window_bits;
135
+ };
136
+
137
+ // ----------------------------------------------------------------------
138
+ // brotli codec options implementation
139
+
140
+ class ARROW_EXPORT BrotliCodecOptions : public CodecOptions {
141
+ public:
142
+ std::optional<int> window_bits;
143
+ };
144
+
145
+ /// \brief Compression codec
146
+ class ARROW_EXPORT Codec {
147
+ public:
148
+ virtual ~Codec() = default;
149
+
150
+ /// \brief Return special value to indicate that a codec implementation
151
+ /// should use its default compression level
152
+ static int UseDefaultCompressionLevel();
153
+
154
+ /// \brief Return a string name for compression type
155
+ static const std::string& GetCodecAsString(Compression::type t);
156
+
157
+ /// \brief Return compression type for name (all lower case)
158
+ static Result<Compression::type> GetCompressionType(const std::string& name);
159
+
160
+ /// \brief Create a codec for the given compression algorithm with CodecOptions
161
+ static Result<std::unique_ptr<Codec>> Create(
162
+ Compression::type codec, const CodecOptions& codec_options = CodecOptions{});
163
+
164
+ /// \brief Create a codec for the given compression algorithm
165
+ static Result<std::unique_ptr<Codec>> Create(Compression::type codec,
166
+ int compression_level);
167
+
168
+ /// \brief Return true if support for indicated codec has been enabled
169
+ static bool IsAvailable(Compression::type codec);
170
+
171
+ /// \brief Return true if indicated codec supports setting a compression level
172
+ static bool SupportsCompressionLevel(Compression::type codec);
173
+
174
+ /// \brief Return the smallest supported compression level for the codec
175
+ /// Note: This function creates a temporary Codec instance
176
+ static Result<int> MinimumCompressionLevel(Compression::type codec);
177
+
178
+ /// \brief Return the largest supported compression level for the codec
179
+ /// Note: This function creates a temporary Codec instance
180
+ static Result<int> MaximumCompressionLevel(Compression::type codec);
181
+
182
+ /// \brief Return the default compression level
183
+ /// Note: This function creates a temporary Codec instance
184
+ static Result<int> DefaultCompressionLevel(Compression::type codec);
185
+
186
+ /// \brief Return the smallest supported compression level
187
+ virtual int minimum_compression_level() const = 0;
188
+
189
+ /// \brief Return the largest supported compression level
190
+ virtual int maximum_compression_level() const = 0;
191
+
192
+ /// \brief Return the default compression level
193
+ virtual int default_compression_level() const = 0;
194
+
195
+ /// \brief One-shot decompression function
196
+ ///
197
+ /// output_buffer_len must be correct and therefore be obtained in advance.
198
+ /// The actual decompressed length is returned.
199
+ ///
200
+ /// \note One-shot decompression is not always compatible with streaming
201
+ /// compression. Depending on the codec (e.g. LZ4), different formats may
202
+ /// be used.
203
+ virtual Result<int64_t> Decompress(int64_t input_len, const uint8_t* input,
204
+ int64_t output_buffer_len,
205
+ uint8_t* output_buffer) = 0;
206
+
207
+ /// \brief One-shot compression function
208
+ ///
209
+ /// output_buffer_len must first have been computed using MaxCompressedLen().
210
+ /// The actual compressed length is returned.
211
+ ///
212
+ /// \note One-shot compression is not always compatible with streaming
213
+ /// decompression. Depending on the codec (e.g. LZ4), different formats may
214
+ /// be used.
215
+ virtual Result<int64_t> Compress(int64_t input_len, const uint8_t* input,
216
+ int64_t output_buffer_len, uint8_t* output_buffer) = 0;
217
+
218
+ virtual int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) = 0;
219
+
220
+ /// \brief Create a streaming compressor instance
221
+ virtual Result<std::shared_ptr<Compressor>> MakeCompressor() = 0;
222
+
223
+ /// \brief Create a streaming compressor instance
224
+ virtual Result<std::shared_ptr<Decompressor>> MakeDecompressor() = 0;
225
+
226
+ /// \brief This Codec's compression type
227
+ virtual Compression::type compression_type() const = 0;
228
+
229
+ /// \brief The name of this Codec's compression type
230
+ const std::string& name() const { return GetCodecAsString(compression_type()); }
231
+
232
+ /// \brief This Codec's compression level, if applicable
233
+ virtual int compression_level() const { return UseDefaultCompressionLevel(); }
234
+
235
+ private:
236
+ /// \brief Initializes the codec's resources.
237
+ virtual Status Init();
238
+ };
239
+
240
+ } // namespace util
241
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h ADDED
@@ -0,0 +1,411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include <memory>
19
+ #include <string>
20
+ #include <utility>
21
+ #include <vector>
22
+
23
+ #include "arrow/array.h"
24
+ #include "arrow/chunked_array.h"
25
+ #include "arrow/status.h"
26
+ #include "arrow/type.h"
27
+ #include "arrow/type_traits.h"
28
+ #include "arrow/util/checked_cast.h"
29
+ #include "arrow/visit_type_inline.h"
30
+
31
+ namespace arrow {
32
+ namespace internal {
33
+
34
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
35
+ static Result<std::unique_ptr<BaseConverter>> MakeConverter(
36
+ std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options,
37
+ MemoryPool* pool);
38
+
39
+ template <typename Input, typename Options>
40
+ class Converter {
41
+ public:
42
+ using Self = Converter<Input, Options>;
43
+ using InputType = Input;
44
+ using OptionsType = Options;
45
+
46
+ virtual ~Converter() = default;
47
+
48
+ Status Construct(std::shared_ptr<DataType> type, OptionsType options,
49
+ MemoryPool* pool) {
50
+ type_ = std::move(type);
51
+ options_ = std::move(options);
52
+ return Init(pool);
53
+ }
54
+
55
+ virtual Status Append(InputType value) { return Status::NotImplemented("Append"); }
56
+
57
+ virtual Status Extend(InputType values, int64_t size, int64_t offset = 0) {
58
+ return Status::NotImplemented("Extend");
59
+ }
60
+
61
+ virtual Status ExtendMasked(InputType values, InputType mask, int64_t size,
62
+ int64_t offset = 0) {
63
+ return Status::NotImplemented("ExtendMasked");
64
+ }
65
+
66
+ const std::shared_ptr<ArrayBuilder>& builder() const { return builder_; }
67
+
68
+ const std::shared_ptr<DataType>& type() const { return type_; }
69
+
70
+ OptionsType options() const { return options_; }
71
+
72
+ bool may_overflow() const { return may_overflow_; }
73
+
74
+ bool rewind_on_overflow() const { return rewind_on_overflow_; }
75
+
76
+ virtual Status Reserve(int64_t additional_capacity) {
77
+ return builder_->Reserve(additional_capacity);
78
+ }
79
+
80
+ Status AppendNull() { return builder_->AppendNull(); }
81
+
82
+ virtual Result<std::shared_ptr<Array>> ToArray() { return builder_->Finish(); }
83
+
84
+ virtual Result<std::shared_ptr<Array>> ToArray(int64_t length) {
85
+ ARROW_ASSIGN_OR_RAISE(auto arr, this->ToArray());
86
+ return arr->Slice(0, length);
87
+ }
88
+
89
+ virtual Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() {
90
+ ARROW_ASSIGN_OR_RAISE(auto array, ToArray());
91
+ std::vector<std::shared_ptr<Array>> chunks = {std::move(array)};
92
+ return std::make_shared<ChunkedArray>(chunks);
93
+ }
94
+
95
+ protected:
96
+ virtual Status Init(MemoryPool* pool) { return Status::OK(); }
97
+
98
+ std::shared_ptr<DataType> type_;
99
+ std::shared_ptr<ArrayBuilder> builder_;
100
+ OptionsType options_;
101
+ bool may_overflow_ = false;
102
+ bool rewind_on_overflow_ = false;
103
+ };
104
+
105
+ template <typename ArrowType, typename BaseConverter>
106
+ class PrimitiveConverter : public BaseConverter {
107
+ public:
108
+ using BuilderType = typename TypeTraits<ArrowType>::BuilderType;
109
+
110
+ protected:
111
+ Status Init(MemoryPool* pool) override {
112
+ this->builder_ = std::make_shared<BuilderType>(this->type_, pool);
113
+ // Narrow variable-sized binary types may overflow
114
+ this->may_overflow_ = is_binary_like(this->type_->id());
115
+ primitive_type_ = checked_cast<const ArrowType*>(this->type_.get());
116
+ primitive_builder_ = checked_cast<BuilderType*>(this->builder_.get());
117
+ return Status::OK();
118
+ }
119
+
120
+ const ArrowType* primitive_type_;
121
+ BuilderType* primitive_builder_;
122
+ };
123
+
124
+ template <typename ArrowType, typename BaseConverter,
125
+ template <typename...> class ConverterTrait>
126
+ class ListConverter : public BaseConverter {
127
+ public:
128
+ using BuilderType = typename TypeTraits<ArrowType>::BuilderType;
129
+ using ConverterType = typename ConverterTrait<ArrowType>::type;
130
+
131
+ protected:
132
+ Status Init(MemoryPool* pool) override {
133
+ list_type_ = checked_cast<const ArrowType*>(this->type_.get());
134
+ ARROW_ASSIGN_OR_RAISE(value_converter_,
135
+ (MakeConverter<BaseConverter, ConverterTrait>(
136
+ list_type_->value_type(), this->options_, pool)));
137
+ this->builder_ =
138
+ std::make_shared<BuilderType>(pool, value_converter_->builder(), this->type_);
139
+ list_builder_ = checked_cast<BuilderType*>(this->builder_.get());
140
+ // Narrow list types may overflow
141
+ this->may_overflow_ = this->rewind_on_overflow_ =
142
+ sizeof(typename ArrowType::offset_type) < sizeof(int64_t);
143
+ return Status::OK();
144
+ }
145
+
146
+ const ArrowType* list_type_;
147
+ BuilderType* list_builder_;
148
+ std::unique_ptr<BaseConverter> value_converter_;
149
+ };
150
+
151
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
152
+ class StructConverter : public BaseConverter {
153
+ public:
154
+ using ConverterType = typename ConverterTrait<StructType>::type;
155
+
156
+ Status Reserve(int64_t additional_capacity) override {
157
+ ARROW_RETURN_NOT_OK(this->builder_->Reserve(additional_capacity));
158
+ for (const auto& child : children_) {
159
+ ARROW_RETURN_NOT_OK(child->Reserve(additional_capacity));
160
+ }
161
+ return Status::OK();
162
+ }
163
+
164
+ protected:
165
+ Status Init(MemoryPool* pool) override {
166
+ std::unique_ptr<BaseConverter> child_converter;
167
+ std::vector<std::shared_ptr<ArrayBuilder>> child_builders;
168
+
169
+ struct_type_ = checked_cast<const StructType*>(this->type_.get());
170
+ for (const auto& field : struct_type_->fields()) {
171
+ ARROW_ASSIGN_OR_RAISE(child_converter,
172
+ (MakeConverter<BaseConverter, ConverterTrait>(
173
+ field->type(), this->options_, pool)));
174
+ this->may_overflow_ |= child_converter->may_overflow();
175
+ this->rewind_on_overflow_ = this->may_overflow_;
176
+ child_builders.push_back(child_converter->builder());
177
+ children_.push_back(std::move(child_converter));
178
+ }
179
+
180
+ this->builder_ =
181
+ std::make_shared<StructBuilder>(this->type_, pool, std::move(child_builders));
182
+ struct_builder_ = checked_cast<StructBuilder*>(this->builder_.get());
183
+
184
+ return Status::OK();
185
+ }
186
+
187
+ const StructType* struct_type_;
188
+ StructBuilder* struct_builder_;
189
+ std::vector<std::unique_ptr<BaseConverter>> children_;
190
+ };
191
+
192
+ template <typename ValueType, typename BaseConverter>
193
+ class DictionaryConverter : public BaseConverter {
194
+ public:
195
+ using BuilderType = DictionaryBuilder<ValueType>;
196
+
197
+ protected:
198
+ Status Init(MemoryPool* pool) override {
199
+ std::unique_ptr<ArrayBuilder> builder;
200
+ ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, this->type_, NULLPTR, &builder));
201
+ this->builder_ = std::move(builder);
202
+ this->may_overflow_ = false;
203
+ dict_type_ = checked_cast<const DictionaryType*>(this->type_.get());
204
+ value_type_ = checked_cast<const ValueType*>(dict_type_->value_type().get());
205
+ value_builder_ = checked_cast<BuilderType*>(this->builder_.get());
206
+ return Status::OK();
207
+ }
208
+
209
+ const DictionaryType* dict_type_;
210
+ const ValueType* value_type_;
211
+ BuilderType* value_builder_;
212
+ };
213
+
214
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
215
+ struct MakeConverterImpl {
216
+ template <typename T, typename ConverterType = typename ConverterTrait<T>::type>
217
+ Status Visit(const T&) {
218
+ out.reset(new ConverterType());
219
+ return out->Construct(std::move(type), std::move(options), pool);
220
+ }
221
+
222
+ Status Visit(const DictionaryType& t) {
223
+ switch (t.value_type()->id()) {
224
+ #define DICTIONARY_CASE(TYPE) \
225
+ case TYPE::type_id: \
226
+ out = std::make_unique< \
227
+ typename ConverterTrait<DictionaryType>::template dictionary_type<TYPE>>(); \
228
+ break;
229
+ DICTIONARY_CASE(BooleanType);
230
+ DICTIONARY_CASE(Int8Type);
231
+ DICTIONARY_CASE(Int16Type);
232
+ DICTIONARY_CASE(Int32Type);
233
+ DICTIONARY_CASE(Int64Type);
234
+ DICTIONARY_CASE(UInt8Type);
235
+ DICTIONARY_CASE(UInt16Type);
236
+ DICTIONARY_CASE(UInt32Type);
237
+ DICTIONARY_CASE(UInt64Type);
238
+ DICTIONARY_CASE(FloatType);
239
+ DICTIONARY_CASE(DoubleType);
240
+ DICTIONARY_CASE(BinaryType);
241
+ DICTIONARY_CASE(StringType);
242
+ DICTIONARY_CASE(FixedSizeBinaryType);
243
+ #undef DICTIONARY_CASE
244
+ default:
245
+ return Status::NotImplemented("DictionaryArray converter for type ", t.ToString(),
246
+ " not implemented");
247
+ }
248
+ return out->Construct(std::move(type), std::move(options), pool);
249
+ }
250
+
251
+ Status Visit(const DataType& t) { return Status::NotImplemented(t.name()); }
252
+
253
+ std::shared_ptr<DataType> type;
254
+ typename BaseConverter::OptionsType options;
255
+ MemoryPool* pool;
256
+ std::unique_ptr<BaseConverter> out;
257
+ };
258
+
259
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
260
+ static Result<std::unique_ptr<BaseConverter>> MakeConverter(
261
+ std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options,
262
+ MemoryPool* pool) {
263
+ MakeConverterImpl<BaseConverter, ConverterTrait> visitor{
264
+ std::move(type), std::move(options), pool, NULLPTR};
265
+ ARROW_RETURN_NOT_OK(VisitTypeInline(*visitor.type, &visitor));
266
+ return std::move(visitor.out);
267
+ }
268
+
269
+ template <typename Converter>
270
+ class Chunker {
271
+ public:
272
+ using InputType = typename Converter::InputType;
273
+
274
+ explicit Chunker(std::unique_ptr<Converter> converter)
275
+ : converter_(std::move(converter)) {}
276
+
277
+ Status Reserve(int64_t additional_capacity) {
278
+ ARROW_RETURN_NOT_OK(converter_->Reserve(additional_capacity));
279
+ reserved_ += additional_capacity;
280
+ return Status::OK();
281
+ }
282
+
283
+ Status AppendNull() {
284
+ auto status = converter_->AppendNull();
285
+ if (ARROW_PREDICT_FALSE(status.IsCapacityError())) {
286
+ if (converter_->builder()->length() == 0) {
287
+ // Builder length == 0 means the individual element is too large to append.
288
+ // In this case, no need to try again.
289
+ return status;
290
+ }
291
+ ARROW_RETURN_NOT_OK(FinishChunk());
292
+ return converter_->AppendNull();
293
+ }
294
+ ++length_;
295
+ return status;
296
+ }
297
+
298
+ Status Append(InputType value) {
299
+ auto status = converter_->Append(value);
300
+ if (ARROW_PREDICT_FALSE(status.IsCapacityError())) {
301
+ if (converter_->builder()->length() == 0) {
302
+ return status;
303
+ }
304
+ ARROW_RETURN_NOT_OK(FinishChunk());
305
+ return Append(value);
306
+ }
307
+ ++length_;
308
+ return status;
309
+ }
310
+
311
+ Status Extend(InputType values, int64_t size, int64_t offset = 0) {
312
+ while (offset < size) {
313
+ auto length_before = converter_->builder()->length();
314
+ auto status = converter_->Extend(values, size, offset);
315
+ auto length_after = converter_->builder()->length();
316
+ auto num_converted = length_after - length_before;
317
+
318
+ offset += num_converted;
319
+ length_ += num_converted;
320
+
321
+ if (status.IsCapacityError()) {
322
+ if (converter_->builder()->length() == 0) {
323
+ // Builder length == 0 means the individual element is too large to append.
324
+ // In this case, no need to try again.
325
+ return status;
326
+ } else if (converter_->rewind_on_overflow()) {
327
+ // The list-like and binary-like conversion paths may raise a capacity error,
328
+ // we need to handle them differently. While the binary-like converters check
329
+ // the capacity before append/extend the list-like converters just check after
330
+ // append/extend. Thus depending on the implementation semantics we may need
331
+ // to rewind (slice) the output chunk by one.
332
+ length_ -= 1;
333
+ offset -= 1;
334
+ }
335
+ ARROW_RETURN_NOT_OK(FinishChunk());
336
+ } else if (!status.ok()) {
337
+ return status;
338
+ }
339
+ }
340
+ return Status::OK();
341
+ }
342
+
343
+ Status ExtendMasked(InputType values, InputType mask, int64_t size,
344
+ int64_t offset = 0) {
345
+ while (offset < size) {
346
+ auto length_before = converter_->builder()->length();
347
+ auto status = converter_->ExtendMasked(values, mask, size, offset);
348
+ auto length_after = converter_->builder()->length();
349
+ auto num_converted = length_after - length_before;
350
+
351
+ offset += num_converted;
352
+ length_ += num_converted;
353
+
354
+ if (status.IsCapacityError()) {
355
+ if (converter_->builder()->length() == 0) {
356
+ // Builder length == 0 means the individual element is too large to append.
357
+ // In this case, no need to try again.
358
+ return status;
359
+ } else if (converter_->rewind_on_overflow()) {
360
+ // The list-like and binary-like conversion paths may raise a capacity error,
361
+ // we need to handle them differently. While the binary-like converters check
362
+ // the capacity before append/extend the list-like converters just check after
363
+ // append/extend. Thus depending on the implementation semantics we may need
364
+ // to rewind (slice) the output chunk by one.
365
+ length_ -= 1;
366
+ offset -= 1;
367
+ }
368
+ ARROW_RETURN_NOT_OK(FinishChunk());
369
+ } else if (!status.ok()) {
370
+ return status;
371
+ }
372
+ }
373
+ return Status::OK();
374
+ }
375
+
376
+ Status FinishChunk() {
377
+ ARROW_ASSIGN_OR_RAISE(auto chunk, converter_->ToArray(length_));
378
+ chunks_.push_back(chunk);
379
+ // Reserve space for the remaining items.
380
+ // Besides being an optimization, it is also required if the converter's
381
+ // implementation relies on unsafe builder methods in converter->Append().
382
+ auto remaining = reserved_ - length_;
383
+ Reset();
384
+ return Reserve(remaining);
385
+ }
386
+
387
+ Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() {
388
+ ARROW_RETURN_NOT_OK(FinishChunk());
389
+ return std::make_shared<ChunkedArray>(chunks_);
390
+ }
391
+
392
+ protected:
393
+ void Reset() {
394
+ converter_->builder()->Reset();
395
+ length_ = 0;
396
+ reserved_ = 0;
397
+ }
398
+
399
+ int64_t length_ = 0;
400
+ int64_t reserved_ = 0;
401
+ std::unique_ptr<Converter> converter_;
402
+ std::vector<std::shared_ptr<Array>> chunks_;
403
+ };
404
+
405
+ template <typename T>
406
+ static Result<std::unique_ptr<Chunker<T>>> MakeChunker(std::unique_ptr<T> converter) {
407
+ return std::make_unique<Chunker<T>>(std::move(converter));
408
+ }
409
+
410
+ } // namespace internal
411
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #ifndef ARROW_COUNTING_SEMAPHORE_H
19
+ #define ARROW_COUNTING_SEMAPHORE_H
20
+
21
+ #include <memory>
22
+
23
+ #include "arrow/status.h"
24
+
25
+ namespace arrow {
26
+ namespace util {
27
+
28
+ /// \brief Simple mutex-based counting semaphore with timeout
29
+ class ARROW_EXPORT CountingSemaphore {
30
+ public:
31
+ /// \brief Create an instance with initial_avail starting permits
32
+ ///
33
+ /// \param[in] initial_avail The semaphore will start with this many permits available
34
+ /// \param[in] timeout_seconds A timeout to be applied to all operations. Operations
35
+ /// will return Status::Invalid if this timeout elapses
36
+ explicit CountingSemaphore(uint32_t initial_avail = 0, double timeout_seconds = 10);
37
+ ~CountingSemaphore();
38
+ /// \brief Block until num_permits permits are available
39
+ Status Acquire(uint32_t num_permits);
40
+ /// \brief Make num_permits permits available
41
+ Status Release(uint32_t num_permits);
42
+ /// \brief Wait until num_waiters are waiting on permits
43
+ ///
44
+ /// This method is non-standard but useful in unit tests to ensure sequencing
45
+ Status WaitForWaiters(uint32_t num_waiters);
46
+ /// \brief Immediately time out any waiters
47
+ ///
48
+ /// This method will return Status::OK only if there were no waiters to time out.
49
+ /// Once closed any operation on this instance will return an invalid status.
50
+ Status Close();
51
+
52
+ private:
53
+ class Impl;
54
+ std::unique_ptr<Impl> impl_;
55
+ };
56
+
57
+ } // namespace util
58
+ } // namespace arrow
59
+
60
+ #endif // ARROW_COUNTING_SEMAPHORE_H
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // From Apache Impala (incubating) as of 2016-01-29. Pared down to a minimal
19
+ // set of functions needed for Apache Arrow / Apache parquet-cpp
20
+
21
+ #pragma once
22
+
23
+ #include <cstdint>
24
+ #include <memory>
25
+ #include <string>
26
+
27
+ #include "arrow/util/macros.h"
28
+ #include "arrow/util/visibility.h"
29
+
30
+ namespace arrow {
31
+ namespace internal {
32
+
33
+ /// CpuInfo is an interface to query for cpu information at runtime. The caller can
34
+ /// ask for the sizes of the caches and what hardware features are supported.
35
+ /// On Linux, this information is pulled from a couple of sys files (/proc/cpuinfo and
36
+ /// /sys/devices)
37
+ class ARROW_EXPORT CpuInfo {
38
+ public:
39
+ ~CpuInfo();
40
+
41
+ /// x86 features
42
+ static constexpr int64_t SSSE3 = (1LL << 0);
43
+ static constexpr int64_t SSE4_1 = (1LL << 1);
44
+ static constexpr int64_t SSE4_2 = (1LL << 2);
45
+ static constexpr int64_t POPCNT = (1LL << 3);
46
+ static constexpr int64_t AVX = (1LL << 4);
47
+ static constexpr int64_t AVX2 = (1LL << 5);
48
+ static constexpr int64_t AVX512F = (1LL << 6);
49
+ static constexpr int64_t AVX512CD = (1LL << 7);
50
+ static constexpr int64_t AVX512VL = (1LL << 8);
51
+ static constexpr int64_t AVX512DQ = (1LL << 9);
52
+ static constexpr int64_t AVX512BW = (1LL << 10);
53
+ static constexpr int64_t AVX512 = AVX512F | AVX512CD | AVX512VL | AVX512DQ | AVX512BW;
54
+ static constexpr int64_t BMI1 = (1LL << 11);
55
+ static constexpr int64_t BMI2 = (1LL << 12);
56
+
57
+ /// Arm features
58
+ static constexpr int64_t ASIMD = (1LL << 32);
59
+
60
+ /// Cache enums for L1 (data), L2 and L3
61
+ enum class CacheLevel { L1 = 0, L2, L3, Last = L3 };
62
+
63
+ /// CPU vendors
64
+ enum class Vendor { Unknown, Intel, AMD };
65
+
66
+ static const CpuInfo* GetInstance();
67
+
68
+ /// Returns all the flags for this cpu
69
+ int64_t hardware_flags() const;
70
+
71
+ /// Returns the number of cores (including hyper-threaded) on this machine.
72
+ int num_cores() const;
73
+
74
+ /// Returns the vendor of the cpu.
75
+ Vendor vendor() const;
76
+
77
+ /// Returns the model name of the cpu (e.g. Intel i7-2600)
78
+ const std::string& model_name() const;
79
+
80
+ /// Returns the size of the cache in KB at this cache level
81
+ int64_t CacheSize(CacheLevel level) const;
82
+
83
+ /// \brief Returns whether or not the given feature is enabled.
84
+ ///
85
+ /// IsSupported() is true iff IsDetected() is also true and the feature
86
+ /// wasn't disabled by the user (for example by setting the ARROW_USER_SIMD_LEVEL
87
+ /// environment variable).
88
+ bool IsSupported(int64_t flags) const;
89
+
90
+ /// Returns whether or not the given feature is available on the CPU.
91
+ bool IsDetected(int64_t flags) const;
92
+
93
+ /// Determine if the CPU meets the minimum CPU requirements and if not, issue an error
94
+ /// and terminate.
95
+ void VerifyCpuRequirements() const;
96
+
97
+ /// Toggle a hardware feature on and off. It is not valid to turn on a feature
98
+ /// that the underlying hardware cannot support. This is useful for testing.
99
+ void EnableFeature(int64_t flag, bool enable);
100
+
101
+ bool HasEfficientBmi2() const {
102
+ // BMI2 (pext, pdep) is only efficient on Intel X86 processors.
103
+ return vendor() == Vendor::Intel && IsSupported(BMI2);
104
+ }
105
+
106
+ private:
107
+ CpuInfo();
108
+
109
+ struct Impl;
110
+ std::unique_ptr<Impl> impl_;
111
+ };
112
+
113
+ } // namespace internal
114
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <iosfwd>
22
+ #include <limits>
23
+ #include <string>
24
+ #include <string_view>
25
+ #include <utility>
26
+
27
+ #include "arrow/result.h"
28
+ #include "arrow/status.h"
29
+ #include "arrow/type_fwd.h"
30
+ #include "arrow/util/basic_decimal.h"
31
+
32
+ namespace arrow {
33
+
34
+ /// Represents a signed 128-bit integer in two's complement.
35
+ /// Calculations wrap around and overflow is ignored.
36
+ /// The max decimal precision that can be safely represented is
37
+ /// 38 significant digits.
38
+ ///
39
+ /// For a discussion of the algorithms, look at Knuth's volume 2,
40
+ /// Semi-numerical Algorithms section 4.3.1.
41
+ ///
42
+ /// Adapted from the Apache ORC C++ implementation
43
+ ///
44
+ /// The implementation is split into two parts :
45
+ ///
46
+ /// 1. BasicDecimal128
47
+ /// - can be safely compiled to IR without references to libstdc++.
48
+ /// 2. Decimal128
49
+ /// - has additional functionality on top of BasicDecimal128 to deal with
50
+ /// strings and streams.
51
+ class ARROW_EXPORT Decimal128 : public BasicDecimal128 {
52
+ public:
53
+ /// \cond FALSE
54
+ // (need to avoid a duplicate definition in Sphinx)
55
+ using BasicDecimal128::BasicDecimal128;
56
+ /// \endcond
57
+
58
+ /// \brief constructor creates a Decimal128 from a BasicDecimal128.
59
+ constexpr Decimal128(const BasicDecimal128& value) noexcept // NOLINT runtime/explicit
60
+ : BasicDecimal128(value) {}
61
+
62
+ /// \brief Parse the number from a base 10 string representation.
63
+ explicit Decimal128(const std::string& value);
64
+
65
+ /// \brief Empty constructor creates a Decimal128 with a value of 0.
66
+ // This is required on some older compilers.
67
+ constexpr Decimal128() noexcept : BasicDecimal128() {}
68
+
69
+ /// Divide this number by right and return the result.
70
+ ///
71
+ /// This operation is not destructive.
72
+ /// The answer rounds to zero. Signs work like:
73
+ /// 21 / 5 -> 4, 1
74
+ /// -21 / 5 -> -4, -1
75
+ /// 21 / -5 -> -4, 1
76
+ /// -21 / -5 -> 4, -1
77
+ /// \param[in] divisor the number to divide by
78
+ /// \return the pair of the quotient and the remainder
79
+ Result<std::pair<Decimal128, Decimal128>> Divide(const Decimal128& divisor) const {
80
+ std::pair<Decimal128, Decimal128> result;
81
+ auto dstatus = BasicDecimal128::Divide(divisor, &result.first, &result.second);
82
+ ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus));
83
+ return result;
84
+ }
85
+
86
+ /// \brief Convert the Decimal128 value to a base 10 decimal string with the given
87
+ /// scale.
88
+ std::string ToString(int32_t scale) const;
89
+
90
+ /// \brief Convert the value to an integer string
91
+ std::string ToIntegerString() const;
92
+
93
+ /// \brief Cast this value to an int64_t.
94
+ explicit operator int64_t() const;
95
+
96
+ /// \brief Convert a decimal string to a Decimal128 value, optionally including
97
+ /// precision and scale if they're passed in and not null.
98
+ static Status FromString(std::string_view s, Decimal128* out, int32_t* precision,
99
+ int32_t* scale = NULLPTR);
100
+ static Status FromString(const std::string& s, Decimal128* out, int32_t* precision,
101
+ int32_t* scale = NULLPTR);
102
+ static Status FromString(const char* s, Decimal128* out, int32_t* precision,
103
+ int32_t* scale = NULLPTR);
104
+ static Result<Decimal128> FromString(std::string_view s);
105
+ static Result<Decimal128> FromString(const std::string& s);
106
+ static Result<Decimal128> FromString(const char* s);
107
+
108
+ static Result<Decimal128> FromReal(double real, int32_t precision, int32_t scale);
109
+ static Result<Decimal128> FromReal(float real, int32_t precision, int32_t scale);
110
+
111
+ /// \brief Convert from a big-endian byte representation. The length must be
112
+ /// between 1 and 16.
113
+ /// \return error status if the length is an invalid value
114
+ static Result<Decimal128> FromBigEndian(const uint8_t* data, int32_t length);
115
+
116
+ /// \brief Convert Decimal128 from one scale to another
117
+ Result<Decimal128> Rescale(int32_t original_scale, int32_t new_scale) const {
118
+ Decimal128 out;
119
+ auto dstatus = BasicDecimal128::Rescale(original_scale, new_scale, &out);
120
+ ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus));
121
+ return out;
122
+ }
123
+
124
+ /// \brief Convert to a signed integer
125
+ template <typename T, typename = internal::EnableIfIsOneOf<T, int32_t, int64_t>>
126
+ Result<T> ToInteger() const {
127
+ constexpr auto min_value = std::numeric_limits<T>::min();
128
+ constexpr auto max_value = std::numeric_limits<T>::max();
129
+ const auto& self = *this;
130
+ if (self < min_value || self > max_value) {
131
+ return Status::Invalid("Invalid cast from Decimal128 to ", sizeof(T),
132
+ " byte integer");
133
+ }
134
+ return static_cast<T>(low_bits());
135
+ }
136
+
137
+ /// \brief Convert to a signed integer
138
+ template <typename T, typename = internal::EnableIfIsOneOf<T, int32_t, int64_t>>
139
+ Status ToInteger(T* out) const {
140
+ return ToInteger<T>().Value(out);
141
+ }
142
+
143
+ /// \brief Convert to a floating-point number (scaled)
144
+ float ToFloat(int32_t scale) const;
145
+ /// \brief Convert to a floating-point number (scaled)
146
+ double ToDouble(int32_t scale) const;
147
+
148
+ /// \brief Convert to a floating-point number (scaled)
149
+ template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
150
+ T ToReal(int32_t scale) const {
151
+ static_assert(std::is_same_v<T, float> || std::is_same_v<T, double>,
152
+ "Unexpected floating-point type");
153
+ if constexpr (std::is_same_v<T, float>) {
154
+ return ToFloat(scale);
155
+ } else {
156
+ return ToDouble(scale);
157
+ }
158
+ }
159
+
160
+ ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os,
161
+ const Decimal128& decimal);
162
+
163
+ private:
164
+ /// Converts internal error code to Status
165
+ Status ToArrowStatus(DecimalStatus dstatus) const;
166
+ };
167
+
168
+ /// Represents a signed 256-bit integer in two's complement.
169
+ /// The max decimal precision that can be safely represented is
170
+ /// 76 significant digits.
171
+ ///
172
+ /// The implementation is split into two parts :
173
+ ///
174
+ /// 1. BasicDecimal256
175
+ /// - can be safely compiled to IR without references to libstdc++.
176
+ /// 2. Decimal256
177
+ /// - (TODO) has additional functionality on top of BasicDecimal256 to deal with
178
+ /// strings and streams.
179
+ class ARROW_EXPORT Decimal256 : public BasicDecimal256 {
180
+ public:
181
+ /// \cond FALSE
182
+ // (need to avoid a duplicate definition in Sphinx)
183
+ using BasicDecimal256::BasicDecimal256;
184
+ /// \endcond
185
+
186
+ /// \brief constructor creates a Decimal256 from a BasicDecimal256.
187
+ constexpr Decimal256(const BasicDecimal256& value) noexcept // NOLINT(runtime/explicit)
188
+ : BasicDecimal256(value) {}
189
+
190
+ /// \brief Parse the number from a base 10 string representation.
191
+ explicit Decimal256(const std::string& value);
192
+
193
+ /// \brief Empty constructor creates a Decimal256 with a value of 0.
194
+ // This is required on some older compilers.
195
+ constexpr Decimal256() noexcept : BasicDecimal256() {}
196
+
197
+ /// \brief Convert the Decimal256 value to a base 10 decimal string with the given
198
+ /// scale.
199
+ std::string ToString(int32_t scale) const;
200
+
201
+ /// \brief Convert the value to an integer string
202
+ std::string ToIntegerString() const;
203
+
204
+ /// \brief Convert a decimal string to a Decimal256 value, optionally including
205
+ /// precision and scale if they're passed in and not null.
206
+ static Status FromString(std::string_view s, Decimal256* out, int32_t* precision,
207
+ int32_t* scale = NULLPTR);
208
+ static Status FromString(const std::string& s, Decimal256* out, int32_t* precision,
209
+ int32_t* scale = NULLPTR);
210
+ static Status FromString(const char* s, Decimal256* out, int32_t* precision,
211
+ int32_t* scale = NULLPTR);
212
+ static Result<Decimal256> FromString(std::string_view s);
213
+ static Result<Decimal256> FromString(const std::string& s);
214
+ static Result<Decimal256> FromString(const char* s);
215
+
216
+ /// \brief Convert Decimal256 from one scale to another
217
+ Result<Decimal256> Rescale(int32_t original_scale, int32_t new_scale) const {
218
+ Decimal256 out;
219
+ auto dstatus = BasicDecimal256::Rescale(original_scale, new_scale, &out);
220
+ ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus));
221
+ return out;
222
+ }
223
+
224
+ /// Divide this number by right and return the result.
225
+ ///
226
+ /// This operation is not destructive.
227
+ /// The answer rounds to zero. Signs work like:
228
+ /// 21 / 5 -> 4, 1
229
+ /// -21 / 5 -> -4, -1
230
+ /// 21 / -5 -> -4, 1
231
+ /// -21 / -5 -> 4, -1
232
+ /// \param[in] divisor the number to divide by
233
+ /// \return the pair of the quotient and the remainder
234
+ Result<std::pair<Decimal256, Decimal256>> Divide(const Decimal256& divisor) const {
235
+ std::pair<Decimal256, Decimal256> result;
236
+ auto dstatus = BasicDecimal256::Divide(divisor, &result.first, &result.second);
237
+ ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus));
238
+ return result;
239
+ }
240
+
241
+ /// \brief Convert from a big-endian byte representation. The length must be
242
+ /// between 1 and 32.
243
+ /// \return error status if the length is an invalid value
244
+ static Result<Decimal256> FromBigEndian(const uint8_t* data, int32_t length);
245
+
246
+ static Result<Decimal256> FromReal(double real, int32_t precision, int32_t scale);
247
+ static Result<Decimal256> FromReal(float real, int32_t precision, int32_t scale);
248
+
249
+ /// \brief Convert to a floating-point number (scaled).
250
+ /// May return infinity in case of overflow.
251
+ float ToFloat(int32_t scale) const;
252
+ /// \brief Convert to a floating-point number (scaled)
253
+ double ToDouble(int32_t scale) const;
254
+
255
+ /// \brief Convert to a floating-point number (scaled)
256
+ template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
257
+ T ToReal(int32_t scale) const {
258
+ static_assert(std::is_same_v<T, float> || std::is_same_v<T, double>,
259
+ "Unexpected floating-point type");
260
+ if constexpr (std::is_same_v<T, float>) {
261
+ return ToFloat(scale);
262
+ } else {
263
+ return ToDouble(scale);
264
+ }
265
+ }
266
+
267
+ ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os,
268
+ const Decimal256& decimal);
269
+
270
+ private:
271
+ /// Converts internal error code to Status
272
+ Status ToArrowStatus(DecimalStatus dstatus) const;
273
+ };
274
+
275
+ /// For an integer type, return the max number of decimal digits
276
+ /// (=minimal decimal precision) it can represent.
277
+ inline Result<int32_t> MaxDecimalDigitsForInteger(Type::type type_id) {
278
+ switch (type_id) {
279
+ case Type::INT8:
280
+ case Type::UINT8:
281
+ return 3;
282
+ case Type::INT16:
283
+ case Type::UINT16:
284
+ return 5;
285
+ case Type::INT32:
286
+ case Type::UINT32:
287
+ return 10;
288
+ case Type::INT64:
289
+ return 19;
290
+ case Type::UINT64:
291
+ return 20;
292
+ default:
293
+ break;
294
+ }
295
+ return Status::Invalid("Not an integer type: ", type_id);
296
+ }
297
+
298
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <string_view>
23
+
24
+ #include "arrow/status.h"
25
+ #include "arrow/util/macros.h"
26
+ #include "arrow/util/visibility.h"
27
+
28
+ namespace arrow {
29
+
30
+ class Buffer;
31
+
32
+ class ARROW_EXPORT BoundaryFinder {
33
+ public:
34
+ BoundaryFinder() = default;
35
+
36
+ virtual ~BoundaryFinder();
37
+
38
+ /// \brief Find the position of the first delimiter inside block
39
+ ///
40
+ /// `partial` is taken to be the beginning of the block, and `block`
41
+ /// its continuation. Also, `partial` doesn't contain a delimiter.
42
+ ///
43
+ /// The returned `out_pos` is relative to `block`'s start and should point
44
+ /// to the first character after the first delimiter.
45
+ /// `out_pos` will be -1 if no delimiter is found.
46
+ virtual Status FindFirst(std::string_view partial, std::string_view block,
47
+ int64_t* out_pos) = 0;
48
+
49
+ /// \brief Find the position of the last delimiter inside block
50
+ ///
51
+ /// The returned `out_pos` is relative to `block`'s start and should point
52
+ /// to the first character after the last delimiter.
53
+ /// `out_pos` will be -1 if no delimiter is found.
54
+ virtual Status FindLast(std::string_view block, int64_t* out_pos) = 0;
55
+
56
+ /// \brief Find the position of the Nth delimiter inside the block
57
+ ///
58
+ /// `partial` is taken to be the beginning of the block, and `block`
59
+ /// its continuation. Also, `partial` doesn't contain a delimiter.
60
+ ///
61
+ /// The returned `out_pos` is relative to `block`'s start and should point
62
+ /// to the first character after the first delimiter.
63
+ /// `out_pos` will be -1 if no delimiter is found.
64
+ ///
65
+ /// The returned `num_found` is the number of delimiters actually found
66
+ virtual Status FindNth(std::string_view partial, std::string_view block, int64_t count,
67
+ int64_t* out_pos, int64_t* num_found) = 0;
68
+
69
+ static constexpr int64_t kNoDelimiterFound = -1;
70
+
71
+ protected:
72
+ ARROW_DISALLOW_COPY_AND_ASSIGN(BoundaryFinder);
73
+ };
74
+
75
+ ARROW_EXPORT
76
+ std::shared_ptr<BoundaryFinder> MakeNewlineBoundaryFinder();
77
+
78
+ /// \brief A reusable block-based chunker for delimited data
79
+ ///
80
+ /// The chunker takes a block of delimited data and helps carve a sub-block
81
+ /// which begins and ends on delimiters (suitable for consumption by parsers
82
+ /// which can only parse whole objects).
83
+ class ARROW_EXPORT Chunker {
84
+ public:
85
+ explicit Chunker(std::shared_ptr<BoundaryFinder> delimiter);
86
+ ~Chunker();
87
+
88
+ /// \brief Carve up a chunk in a block of data to contain only whole objects
89
+ ///
90
+ /// Pre-conditions:
91
+ /// - `block` is the start of a valid block of delimited data
92
+ /// (i.e. starts just after a delimiter)
93
+ ///
94
+ /// Post-conditions:
95
+ /// - block == whole + partial
96
+ /// - `whole` is a valid block of delimited data
97
+ /// (i.e. starts just after a delimiter and ends with a delimiter)
98
+ /// - `partial` doesn't contain an entire delimited object
99
+ /// (IOW: `partial` is generally small)
100
+ ///
101
+ /// This method will look for the last delimiter in `block` and may
102
+ /// therefore be costly.
103
+ ///
104
+ /// \param[in] block data to be chunked
105
+ /// \param[out] whole subrange of block containing whole delimited objects
106
+ /// \param[out] partial subrange of block starting with a partial delimited object
107
+ Status Process(std::shared_ptr<Buffer> block, std::shared_ptr<Buffer>* whole,
108
+ std::shared_ptr<Buffer>* partial);
109
+
110
+ /// \brief Carve the completion of a partial object out of a block
111
+ ///
112
+ /// Pre-conditions:
113
+ /// - `partial` is the start of a valid block of delimited data
114
+ /// (i.e. starts just after a delimiter)
115
+ /// - `block` follows `partial` in file order
116
+ ///
117
+ /// Post-conditions:
118
+ /// - block == completion + rest
119
+ /// - `partial + completion` is a valid block of delimited data
120
+ /// (i.e. starts just after a delimiter and ends with a delimiter)
121
+ /// - `completion` doesn't contain an entire delimited object
122
+ /// (IOW: `completion` is generally small)
123
+ ///
124
+ /// This method will look for the first delimiter in `block` and should
125
+ /// therefore be reasonably cheap.
126
+ ///
127
+ /// \param[in] partial incomplete delimited data
128
+ /// \param[in] block delimited data following partial
129
+ /// \param[out] completion subrange of block containing the completion of partial
130
+ /// \param[out] rest subrange of block containing what completion does not cover
131
+ Status ProcessWithPartial(std::shared_ptr<Buffer> partial,
132
+ std::shared_ptr<Buffer> block,
133
+ std::shared_ptr<Buffer>* completion,
134
+ std::shared_ptr<Buffer>* rest);
135
+
136
+ /// \brief Like ProcessWithPartial, but for the last block of a file
137
+ ///
138
+ /// This method allows for a final delimited object without a trailing delimiter
139
+ /// (ProcessWithPartial would return an error in that case).
140
+ ///
141
+ /// Pre-conditions:
142
+ /// - `partial` is the start of a valid block of delimited data
143
+ /// - `block` follows `partial` in file order and is the last data block
144
+ ///
145
+ /// Post-conditions:
146
+ /// - block == completion + rest
147
+ /// - `partial + completion` is a valid block of delimited data
148
+ /// - `completion` doesn't contain an entire delimited object
149
+ /// (IOW: `completion` is generally small)
150
+ ///
151
+ Status ProcessFinal(std::shared_ptr<Buffer> partial, std::shared_ptr<Buffer> block,
152
+ std::shared_ptr<Buffer>* completion, std::shared_ptr<Buffer>* rest);
153
+
154
+ /// \brief Skip count number of rows
155
+ /// Pre-conditions:
156
+ /// - `partial` is the start of a valid block of delimited data
157
+ /// (i.e. starts just after a delimiter)
158
+ /// - `block` follows `partial` in file order
159
+ ///
160
+ /// Post-conditions:
161
+ /// - `count` is updated to indicate the number of rows that still need to be skipped
162
+ /// - If `count` is > 0 then `rest` is an incomplete block that should be a future
163
+ /// `partial`
164
+ /// - Else `rest` could be one or more valid blocks of delimited data which need to be
165
+ /// parsed
166
+ ///
167
+ /// \param[in] partial incomplete delimited data
168
+ /// \param[in] block delimited data following partial
169
+ /// \param[in] final whether this is the final chunk
170
+ /// \param[in,out] count number of rows that need to be skipped
171
+ /// \param[out] rest subrange of block containing what was not skipped
172
+ Status ProcessSkip(std::shared_ptr<Buffer> partial, std::shared_ptr<Buffer> block,
173
+ bool final, int64_t* count, std::shared_ptr<Buffer>* rest);
174
+
175
+ protected:
176
+ ARROW_DISALLOW_COPY_AND_ASSIGN(Chunker);
177
+
178
+ std::shared_ptr<BoundaryFinder> boundary_finder_;
179
+ };
180
+
181
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/dict_util.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/array/data.h"
21
+
22
+ namespace arrow {
23
+ namespace dict_util {
24
+
25
+ int64_t LogicalNullCount(const ArraySpan& span);
26
+
27
+ } // namespace dict_util
28
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <utility>
21
+ #include <vector>
22
+
23
+ #include "arrow/status.h"
24
+ #include "arrow/util/cpu_info.h"
25
+
26
+ namespace arrow {
27
+ namespace internal {
28
+
29
+ enum class DispatchLevel : int {
30
+ // These dispatch levels, corresponding to instruction set features,
31
+ // are sorted in increasing order of preference.
32
+ NONE = 0,
33
+ SSE4_2,
34
+ AVX2,
35
+ AVX512,
36
+ NEON,
37
+ MAX
38
+ };
39
+
40
+ /*
41
+ A facility for dynamic dispatch according to available DispatchLevel.
42
+
43
+ Typical use:
44
+
45
+ static void my_function_default(...);
46
+ static void my_function_avx2(...);
47
+
48
+ struct MyDynamicFunction {
49
+ using FunctionType = decltype(&my_function_default);
50
+
51
+ static std::vector<std::pair<DispatchLevel, FunctionType>> implementations() {
52
+ return {
53
+ { DispatchLevel::NONE, my_function_default }
54
+ #if defined(ARROW_HAVE_RUNTIME_AVX2)
55
+ , { DispatchLevel::AVX2, my_function_avx2 }
56
+ #endif
57
+ };
58
+ }
59
+ };
60
+
61
+ void my_function(...) {
62
+ static DynamicDispatch<MyDynamicFunction> dispatch;
63
+ return dispatch.func(...);
64
+ }
65
+ */
66
+ template <typename DynamicFunction>
67
+ class DynamicDispatch {
68
+ protected:
69
+ using FunctionType = typename DynamicFunction::FunctionType;
70
+ using Implementation = std::pair<DispatchLevel, FunctionType>;
71
+
72
+ public:
73
+ DynamicDispatch() { Resolve(DynamicFunction::implementations()); }
74
+
75
+ FunctionType func = {};
76
+
77
+ protected:
78
+ // Use the Implementation with the highest DispatchLevel
79
+ void Resolve(const std::vector<Implementation>& implementations) {
80
+ Implementation cur{DispatchLevel::NONE, {}};
81
+
82
+ for (const auto& impl : implementations) {
83
+ if (impl.first >= cur.first && IsSupported(impl.first)) {
84
+ // Higher (or same) level than current
85
+ cur = impl;
86
+ }
87
+ }
88
+
89
+ if (!cur.second) {
90
+ Status::Invalid("No appropriate implementation found").Abort();
91
+ }
92
+ func = cur.second;
93
+ }
94
+
95
+ private:
96
+ bool IsSupported(DispatchLevel level) const {
97
+ static const auto cpu_info = arrow::internal::CpuInfo::GetInstance();
98
+
99
+ switch (level) {
100
+ case DispatchLevel::NONE:
101
+ return true;
102
+ case DispatchLevel::SSE4_2:
103
+ return cpu_info->IsSupported(CpuInfo::SSE4_2);
104
+ case DispatchLevel::AVX2:
105
+ return cpu_info->IsSupported(CpuInfo::AVX2);
106
+ case DispatchLevel::AVX512:
107
+ return cpu_info->IsSupported(CpuInfo::AVX512);
108
+ default:
109
+ return false;
110
+ }
111
+ }
112
+ };
113
+
114
+ } // namespace internal
115
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h ADDED
@@ -0,0 +1,656 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This is a private header for number-to-string formatting utilities
19
+
20
+ #pragma once
21
+
22
+ #include <array>
23
+ #include <cassert>
24
+ #include <chrono>
25
+ #include <limits>
26
+ #include <memory>
27
+ #include <string>
28
+ #include <string_view>
29
+ #include <type_traits>
30
+ #include <utility>
31
+
32
+ #include "arrow/status.h"
33
+ #include "arrow/type.h"
34
+ #include "arrow/type_traits.h"
35
+ #include "arrow/util/double_conversion.h"
36
+ #include "arrow/util/macros.h"
37
+ #include "arrow/util/string.h"
38
+ #include "arrow/util/time.h"
39
+ #include "arrow/util/visibility.h"
40
+ #include "arrow/vendored/datetime.h"
41
+
42
+ namespace arrow {
43
+ namespace internal {
44
+
45
/// \brief The entry point for conversion to strings.
///
/// Specializations expose a `value_type` alias and an
/// `operator()(value, Appender&&)` that forwards the formatted text to the
/// appender as a std::string_view (or std::string).
template <typename ARROW_TYPE, typename Enable = void>
class StringFormatter;

// SFINAE detector: true iff StringFormatter<T> is defined, i.e. it exposes
// a `value_type` alias.
template <typename T>
struct is_formattable {
  template <typename U, typename = typename StringFormatter<U>::value_type>
  static std::true_type Test(U*);

  template <typename U>
  static std::false_type Test(...);

  static constexpr bool value = decltype(Test<T>(NULLPTR))::value;
};

// Enabler alias for types that have a StringFormatter specialization.
template <typename T, typename R = void>
using enable_if_formattable = enable_if_t<is_formattable<T>::value, R>;

// The type an Appender callable returns when invoked with a string_view;
// formatters propagate this so appenders may return Status, void, etc.
template <typename Appender>
using Return = decltype(std::declval<Appender>()(std::string_view{}));
65
+
66
+ /////////////////////////////////////////////////////////////////////////
67
+ // Boolean formatting
68
+
69
+ template <>
70
+ class StringFormatter<BooleanType> {
71
+ public:
72
+ explicit StringFormatter(const DataType* = NULLPTR) {}
73
+
74
+ using value_type = bool;
75
+
76
+ template <typename Appender>
77
+ Return<Appender> operator()(bool value, Appender&& append) {
78
+ if (value) {
79
+ const char string[] = "true";
80
+ return append(std::string_view(string));
81
+ } else {
82
+ const char string[] = "false";
83
+ return append(std::string_view(string));
84
+ }
85
+ }
86
+ };
87
+
88
+ /////////////////////////////////////////////////////////////////////////
89
+ // Decimals formatting
90
+
91
// Shared implementation for decimal formatters: delegates to the decimal
// value's own ToString(), using the scale taken from the array's DataType.
template <typename ARROW_TYPE>
class DecimalToStringFormatterMixin {
 public:
  explicit DecimalToStringFormatterMixin(const DataType* type)
      : scale_(static_cast<const ARROW_TYPE*>(type)->scale()) {}

  using value_type = typename TypeTraits<ARROW_TYPE>::CType;

  template <typename Appender>
  Return<Appender> operator()(const value_type& value, Appender&& append) {
    return append(value.ToString(scale_));
  }

 private:
  // Decimal scale (number of fractional digits), from the type at
  // construction time.
  int32_t scale_;
};

template <>
class StringFormatter<Decimal128Type>
    : public DecimalToStringFormatterMixin<Decimal128Type> {
  using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin;
};

template <>
class StringFormatter<Decimal256Type>
    : public DecimalToStringFormatterMixin<Decimal256Type> {
  using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin;
};
119
+
120
+ /////////////////////////////////////////////////////////////////////////
121
+ // Integer formatting
122
+
123
+ namespace detail {
124
+
125
// A 2x100 direct table mapping integers in [0..99] to their decimal
// representations ("00", "01", ..., "99"); lets the formatters emit two
// digits per table lookup. Defined in the corresponding .cc file.
ARROW_EXPORT extern const char digit_pairs[];
127
+
128
// Based on fmtlib's format_int class:
// Digits are written right-to-left into a stack-allocated buffer.
// \pre *cursor points to the byte after the one that will be written.
// \post *cursor points to the byte that was written.
inline void FormatOneChar(char c, char** cursor) {
  char*& position = *cursor;
  --position;
  *position = c;
}
133
+
134
// Write a single decimal digit; `value` must be in [0, 9].
template <typename Int>
void FormatOneDigit(Int value, char** cursor) {
  assert(value >= 0 && value <= 9);
  FormatOneChar(static_cast<char>('0' + value), cursor);
}

// Write exactly two decimal digits (zero-padded) via one digit_pairs lookup;
// `value` must be in [0, 99].
// GH-35662: I don't know why but the following combination causes SEGV:
// * template implementation without inline
// * MinGW
// * Release build
template <typename Int>
inline void FormatTwoDigits(Int value, char** cursor) {
  assert(value >= 0, value <= 99);
  auto digit_pair = &digit_pairs[value * 2];
  FormatOneChar(digit_pair[1], cursor);
  FormatOneChar(digit_pair[0], cursor);
}

// Write all decimal digits of a non-negative value (no leading zeroes),
// two at a time where possible.
template <typename Int>
void FormatAllDigits(Int value, char** cursor) {
  assert(value >= 0);
  while (value >= 100) {
    FormatTwoDigits(value % 100, cursor);
    value /= 100;
  }

  if (value >= 10) {
    FormatTwoDigits(value, cursor);
  } else {
    FormatOneDigit(value, cursor);
  }
}

// Like FormatAllDigits, but left-pads with `pad_char` so that exactly `pad`
// bytes are written (assumes the digits fit within `pad` bytes).
template <typename Int>
void FormatAllDigitsLeftPadded(Int value, size_t pad, char pad_char, char** cursor) {
  auto end = *cursor - pad;
  FormatAllDigits(value, cursor);
  while (*cursor > end) {
    FormatOneChar(pad_char, cursor);
  }
}
175
+
176
// Return a view over the bytes written so far: from `cursor` (inclusive)
// up to the end of `buffer`. The view borrows from `buffer`; it must not
// outlive it.
template <size_t BUFFER_SIZE>
std::string_view ViewDigitBuffer(const std::array<char, BUFFER_SIZE>& buffer,
                                 char* cursor) {
  const auto length = static_cast<size_t>(buffer.data() + BUFFER_SIZE - cursor);
  return std::string_view{cursor, length};
}
182
+
183
// Absolute value returned in the corresponding unsigned type.
// Safe for the most negative value of Int: the negation is performed in
// unsigned arithmetic, so there is no signed overflow (UB).
template <typename Int, typename UInt = typename std::make_unsigned<Int>::type>
constexpr UInt Abs(Int value) {
  const UInt unsigned_value = static_cast<UInt>(value);
  return value < 0 ? static_cast<UInt>(UInt(0) - unsigned_value) : unsigned_value;
}
187
+
188
// Number of decimal digits needed to print the (non-negative) value.
// Usable in constant expressions (compile-time buffer sizing).
template <typename Int>
constexpr size_t Digits10(Int value) {
  size_t count = 1;
  while (value > 9) {
    value = static_cast<Int>(value / 10);
    ++count;
  }
  return count;
}
192
+
193
+ } // namespace detail
194
+
195
+ template <typename ARROW_TYPE>
196
+ class IntToStringFormatterMixin {
197
+ public:
198
+ explicit IntToStringFormatterMixin(const DataType* = NULLPTR) {}
199
+
200
+ using value_type = typename ARROW_TYPE::c_type;
201
+
202
+ template <typename Appender>
203
+ Return<Appender> operator()(value_type value, Appender&& append) {
204
+ constexpr size_t buffer_size =
205
+ detail::Digits10(std::numeric_limits<value_type>::max()) + 1;
206
+
207
+ std::array<char, buffer_size> buffer;
208
+ char* cursor = buffer.data() + buffer_size;
209
+ detail::FormatAllDigits(detail::Abs(value), &cursor);
210
+ if (value < 0) {
211
+ detail::FormatOneChar('-', &cursor);
212
+ }
213
+ return append(detail::ViewDigitBuffer(buffer, cursor));
214
+ }
215
+ };
216
+
217
// Concrete integer formatters: each one inherits the mixin unchanged.
template <>
class StringFormatter<Int8Type> : public IntToStringFormatterMixin<Int8Type> {
  using IntToStringFormatterMixin::IntToStringFormatterMixin;
};

template <>
class StringFormatter<Int16Type> : public IntToStringFormatterMixin<Int16Type> {
  using IntToStringFormatterMixin::IntToStringFormatterMixin;
};

template <>
class StringFormatter<Int32Type> : public IntToStringFormatterMixin<Int32Type> {
  using IntToStringFormatterMixin::IntToStringFormatterMixin;
};

template <>
class StringFormatter<Int64Type> : public IntToStringFormatterMixin<Int64Type> {
  using IntToStringFormatterMixin::IntToStringFormatterMixin;
};

template <>
class StringFormatter<UInt8Type> : public IntToStringFormatterMixin<UInt8Type> {
  using IntToStringFormatterMixin::IntToStringFormatterMixin;
};

template <>
class StringFormatter<UInt16Type> : public IntToStringFormatterMixin<UInt16Type> {
  using IntToStringFormatterMixin::IntToStringFormatterMixin;
};

template <>
class StringFormatter<UInt32Type> : public IntToStringFormatterMixin<UInt32Type> {
  using IntToStringFormatterMixin::IntToStringFormatterMixin;
};

template <>
class StringFormatter<UInt64Type> : public IntToStringFormatterMixin<UInt64Type> {
  using IntToStringFormatterMixin::IntToStringFormatterMixin;
};
256
+
257
+ /////////////////////////////////////////////////////////////////////////
258
+ // Floating-point formatting
259
+
260
// Floating-point formatter. The pimpl (`Impl`) keeps the underlying
// conversion machinery out of this header (the file includes
// arrow/util/double_conversion.h; presumably the parameters below map onto
// double-conversion's DoubleToStringConverter — confirm in the .cc file).
class ARROW_EXPORT FloatToStringFormatter {
 public:
  // Default formatting options.
  FloatToStringFormatter();
  // Fully parameterized constructor; see the note above about the intended
  // meaning of these options.
  FloatToStringFormatter(int flags, const char* inf_symbol, const char* nan_symbol,
                         char exp_character, int decimal_in_shortest_low,
                         int decimal_in_shortest_high,
                         int max_leading_padding_zeroes_in_precision_mode,
                         int max_trailing_padding_zeroes_in_precision_mode);
  ~FloatToStringFormatter();

  // Returns the number of characters written
  int FormatFloat(float v, char* out_buffer, int out_size);
  int FormatFloat(double v, char* out_buffer, int out_size);
  // NOTE(review): the uint16_t overload presumably carries a half-float
  // (binary16) bit pattern, matching HalfFloatType's c_type — confirm.
  int FormatFloat(uint16_t v, char* out_buffer, int out_size);

 protected:
  struct Impl;
  std::unique_ptr<Impl> impl_;
};
279
+
280
// Shared implementation for float/double/half-float formatters: formats
// into a fixed-size stack buffer, then hands a view of it to the appender.
template <typename ARROW_TYPE>
class FloatToStringFormatterMixin : public FloatToStringFormatter {
 public:
  using value_type = typename ARROW_TYPE::c_type;

  // Stack buffer size for one formatted value (presumed large enough for
  // any output the formatter produces).
  static constexpr int buffer_size = 50;

  explicit FloatToStringFormatterMixin(const DataType* = NULLPTR) {}

  // Forward all formatting options to the base class.
  FloatToStringFormatterMixin(int flags, const char* inf_symbol, const char* nan_symbol,
                              char exp_character, int decimal_in_shortest_low,
                              int decimal_in_shortest_high,
                              int max_leading_padding_zeroes_in_precision_mode,
                              int max_trailing_padding_zeroes_in_precision_mode)
      : FloatToStringFormatter(flags, inf_symbol, nan_symbol, exp_character,
                               decimal_in_shortest_low, decimal_in_shortest_high,
                               max_leading_padding_zeroes_in_precision_mode,
                               max_trailing_padding_zeroes_in_precision_mode) {}

  template <typename Appender>
  Return<Appender> operator()(value_type value, Appender&& append) {
    char buffer[buffer_size];
    int size = FormatFloat(value, buffer, buffer_size);
    return append(std::string_view(buffer, size));
  }
};
306
+
307
// Concrete floating-point formatters: each one inherits the mixin unchanged.
template <>
class StringFormatter<HalfFloatType> : public FloatToStringFormatterMixin<HalfFloatType> {
 public:
  using FloatToStringFormatterMixin::FloatToStringFormatterMixin;
};

template <>
class StringFormatter<FloatType> : public FloatToStringFormatterMixin<FloatType> {
 public:
  using FloatToStringFormatterMixin::FloatToStringFormatterMixin;
};

template <>
class StringFormatter<DoubleType> : public FloatToStringFormatterMixin<DoubleType> {
 public:
  using FloatToStringFormatterMixin::FloatToStringFormatterMixin;
};
324
+
325
+ /////////////////////////////////////////////////////////////////////////
326
+ // Temporal formatting
327
+
328
+ namespace detail {
329
+
330
// Buffer size for a "[-]YYYY[Y]-MM-DD" date.
constexpr size_t BufferSizeYYYY_MM_DD() {
  // "-"? "99999-12-31"
  return 1 + detail::Digits10(99999) + 1 + detail::Digits10(12) + 1 +
         detail::Digits10(31);
}

// Write (right-to-left) a date as "[-]YYYY[Y]-MM-DD".
// Years are zero-padded to at least four digits; |year| must be <= 99999.
inline void FormatYYYY_MM_DD(arrow_vendored::date::year_month_day ymd, char** cursor) {
  FormatTwoDigits(static_cast<unsigned>(ymd.day()), cursor);
  FormatOneChar('-', cursor);
  FormatTwoDigits(static_cast<unsigned>(ymd.month()), cursor);
  FormatOneChar('-', cursor);
  auto year = static_cast<int>(ymd.year());
  const auto is_neg_year = year < 0;
  year = std::abs(year);
  assert(year <= 99999);
  FormatTwoDigits(year % 100, cursor);
  year /= 100;
  FormatTwoDigits(year % 100, cursor);
  if (year >= 100) {
    FormatOneDigit(year / 100, cursor);
  }
  if (is_neg_year) {
    FormatOneChar('-', cursor);
  }
}

// Buffer size for "HH:MM:SS[.fff...]" at Duration's resolution.
template <typename Duration>
constexpr size_t BufferSizeHH_MM_SS() {
  // "23:59:59" ("." "9"+)?
  return detail::Digits10(23) + 1 + detail::Digits10(59) + 1 + detail::Digits10(59) + 1 +
         detail::Digits10(Duration::period::den) - 1;
}

// Write (right-to-left) a time of day as "HH:MM:SS[.fff...]"; the
// subsecond part is emitted (zero-padded) only when Duration is
// finer-grained than seconds.
template <typename Duration>
void FormatHH_MM_SS(arrow_vendored::date::hh_mm_ss<Duration> hms, char** cursor) {
  constexpr size_t subsecond_digits = Digits10(Duration::period::den) - 1;
  if (subsecond_digits != 0) {
    FormatAllDigitsLeftPadded(hms.subseconds().count(), subsecond_digits, '0', cursor);
    FormatOneChar('.', cursor);
  }
  FormatTwoDigits(hms.seconds().count(), cursor);
  FormatOneChar(':', cursor);
  FormatTwoDigits(hms.minutes().count(), cursor);
  FormatOneChar(':', cursor);
  FormatTwoDigits(hms.hours().count(), cursor);
}

// Some out-of-bound datetime values would result in erroneous printing
// because of silent integer wraparound in the `arrow_vendored::date` library.
//
// To avoid such misprinting, we must therefore check the bounds explicitly.
// The bounds correspond to start of year -32767 and end of year 32767,
// respectively (-32768 is an invalid year value in `arrow_vendored::date`).
//
// Note these values are the same as documented for C++20:
// https://en.cppreference.com/w/cpp/chrono/year_month_day/operator_days
template <typename Unit>
bool IsDateTimeInRange(Unit duration) {
  constexpr Unit kMinIncl =
      std::chrono::duration_cast<Unit>(arrow_vendored::date::days{-12687428});
  constexpr Unit kMaxExcl =
      std::chrono::duration_cast<Unit>(arrow_vendored::date::days{11248738});
  return duration >= kMinIncl && duration < kMaxExcl;
}

// IsDateTimeInRange() specialization for nanoseconds: a 64-bit number of
// nanoseconds cannot represent years outside of the [-32767, 32767]
// range, and the {kMinIncl, kMaxExcl} constants above would overflow.
constexpr bool IsDateTimeInRange(std::chrono::nanoseconds duration) { return true; }

// Whether the duration is a valid time of day, i.e. within [0, 24h).
template <typename Unit>
bool IsTimeInRange(Unit duration) {
  constexpr Unit kMinIncl = std::chrono::duration_cast<Unit>(std::chrono::seconds{0});
  constexpr Unit kMaxExcl = std::chrono::duration_cast<Unit>(std::chrono::seconds{86400});
  return duration >= kMinIncl && duration < kMaxExcl;
}

// Fallback rendering for values the date library cannot print safely.
template <typename RawValue, typename Appender>
Return<Appender> FormatOutOfRange(RawValue&& raw_value, Appender&& append) {
  // XXX locale-sensitive but good enough for now
  std::string formatted = "<value out of range: " + ToChars(raw_value) + ">";
  return append(std::move(formatted));
}

// The Unix epoch (1970-01-01) as a day-resolution time point.
const auto kEpoch = arrow_vendored::date::sys_days{arrow_vendored::date::jan / 1 / 1970};
415
+
416
+ } // namespace detail
417
+
418
// Durations are formatted as their raw integer count.
template <>
class StringFormatter<DurationType> : public IntToStringFormatterMixin<DurationType> {
  using IntToStringFormatterMixin::IntToStringFormatterMixin;
};

// Shared "YYYY-MM-DD" rendering for both date types.
class DateToStringFormatterMixin {
 public:
  explicit DateToStringFormatterMixin(const DataType* = NULLPTR) {}

 protected:
  // Format a day count since the Unix epoch as "YYYY-MM-DD".
  // Callers must have validated the range (see IsDateTimeInRange).
  template <typename Appender>
  Return<Appender> FormatDays(arrow_vendored::date::days since_epoch, Appender&& append) {
    arrow_vendored::date::sys_days timepoint_days{since_epoch};

    constexpr size_t buffer_size = detail::BufferSizeYYYY_MM_DD();

    std::array<char, buffer_size> buffer;
    char* cursor = buffer.data() + buffer_size;

    detail::FormatYYYY_MM_DD(arrow_vendored::date::year_month_day{timepoint_days},
                             &cursor);
    return append(detail::ViewDigitBuffer(buffer, cursor));
  }
};

// Date32 values are days since the epoch.
template <>
class StringFormatter<Date32Type> : public DateToStringFormatterMixin {
 public:
  using value_type = typename Date32Type::c_type;

  using DateToStringFormatterMixin::DateToStringFormatterMixin;

  template <typename Appender>
  Return<Appender> operator()(value_type value, Appender&& append) {
    const auto since_epoch = arrow_vendored::date::days{value};
    if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) {
      // Print a placeholder instead of a wrapped-around date.
      return detail::FormatOutOfRange(value, append);
    }
    return FormatDays(since_epoch, std::forward<Appender>(append));
  }
};

// Date64 values are milliseconds since the epoch; the sub-day part is
// truncated before printing.
template <>
class StringFormatter<Date64Type> : public DateToStringFormatterMixin {
 public:
  using value_type = typename Date64Type::c_type;

  using DateToStringFormatterMixin::DateToStringFormatterMixin;

  template <typename Appender>
  Return<Appender> operator()(value_type value, Appender&& append) {
    const auto since_epoch = std::chrono::milliseconds{value};
    if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) {
      return detail::FormatOutOfRange(value, append);
    }
    return FormatDays(std::chrono::duration_cast<arrow_vendored::date::days>(since_epoch),
                      std::forward<Appender>(append));
  }
};
477
+
478
// Formats timestamps as "YYYY-MM-DD HH:MM:SS[.fff...]" at the type's unit
// resolution. No timezone conversion is performed here; a trailing 'Z' is
// emitted whenever the type carries a (non-empty) timezone string.
template <>
class StringFormatter<TimestampType> {
 public:
  using value_type = int64_t;

  explicit StringFormatter(const DataType* type)
      : unit_(checked_cast<const TimestampType&>(*type).unit()),
        timezone_(checked_cast<const TimestampType&>(*type).timezone()) {}

  // Format `value` interpreted as a count of `Duration` units since epoch.
  template <typename Duration, typename Appender>
  Return<Appender> operator()(Duration, value_type value, Appender&& append) {
    using arrow_vendored::date::days;

    const Duration since_epoch{value};
    if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) {
      return detail::FormatOutOfRange(value, append);
    }

    const auto timepoint = detail::kEpoch + since_epoch;
    // Round days towards zero
    // (the naive approach of using arrow_vendored::date::floor() would
    // result in UB for very large negative timestamps, similarly as
    // https://github.com/HowardHinnant/date/issues/696)
    auto timepoint_days = std::chrono::time_point_cast<days>(timepoint);
    Duration since_midnight;
    if (timepoint_days <= timepoint) {
      // Year >= 1970
      since_midnight = timepoint - timepoint_days;
    } else {
      // Year < 1970: the cast rounded up, so step back one day and take the
      // positive remainder into that day.
      since_midnight = days(1) - (timepoint_days - timepoint);
      timepoint_days -= days(1);
    }

    // YYYY_MM_DD " " HH_MM_SS "Z"?
    constexpr size_t buffer_size =
        detail::BufferSizeYYYY_MM_DD() + 1 + detail::BufferSizeHH_MM_SS<Duration>() + 1;

    std::array<char, buffer_size> buffer;
    char* cursor = buffer.data() + buffer_size;

    // Written right-to-left: optional 'Z', time, space, date.
    if (timezone_.size() > 0) {
      detail::FormatOneChar('Z', &cursor);
    }
    detail::FormatHH_MM_SS(arrow_vendored::date::make_time(since_midnight), &cursor);
    detail::FormatOneChar(' ', &cursor);
    detail::FormatYYYY_MM_DD(timepoint_days, &cursor);
    return append(detail::ViewDigitBuffer(buffer, cursor));
  }

  // Dispatch on the runtime time unit, then format.
  template <typename Appender>
  Return<Appender> operator()(value_type value, Appender&& append) {
    return util::VisitDuration(unit_, *this, value, std::forward<Appender>(append));
  }

 private:
  TimeUnit::type unit_;       // resolution of incoming values
  std::string timezone_;      // non-empty => append trailing 'Z'
};
537
+
538
// Formats time-of-day types (Time32/Time64) as "HH:MM:SS[.fff...]" at the
// type's unit resolution.
template <typename T>
class StringFormatter<T, enable_if_time<T>> {
 public:
  using value_type = typename T::c_type;

  explicit StringFormatter(const DataType* type)
      : unit_(checked_cast<const T&>(*type).unit()) {}

  // Format `count` interpreted as `Duration` units since midnight.
  template <typename Duration, typename Appender>
  Return<Appender> operator()(Duration, value_type count, Appender&& append) {
    const Duration since_midnight{count};
    if (!ARROW_PREDICT_TRUE(detail::IsTimeInRange(since_midnight))) {
      // Outside [0, 24h): print a placeholder instead of a wrapped time.
      return detail::FormatOutOfRange(count, append);
    }

    constexpr size_t buffer_size = detail::BufferSizeHH_MM_SS<Duration>();

    std::array<char, buffer_size> buffer;
    char* cursor = buffer.data() + buffer_size;

    detail::FormatHH_MM_SS(arrow_vendored::date::make_time(since_midnight), &cursor);
    return append(detail::ViewDigitBuffer(buffer, cursor));
  }

  // Dispatch on the runtime time unit, then format.
  template <typename Appender>
  Return<Appender> operator()(value_type value, Appender&& append) {
    return util::VisitDuration(unit_, *this, value, std::forward<Appender>(append));
  }

 private:
  TimeUnit::type unit_;  // resolution of incoming values
};
570
+
571
// Month intervals are formatted as "<months>M", e.g. "-3M".
template <>
class StringFormatter<MonthIntervalType> {
 public:
  using value_type = MonthIntervalType::c_type;

  explicit StringFormatter(const DataType*) {}

  template <typename Appender>
  Return<Appender> operator()(value_type interval, Appender&& append) {
    constexpr size_t buffer_size =
        /*'m'*/ 3 + /*negative signs*/ 1 +
        /*months*/ detail::Digits10(std::numeric_limits<value_type>::max());
    std::array<char, buffer_size> buffer;
    char* cursor = buffer.data() + buffer_size;

    // Written right-to-left: unit suffix, digits, sign.
    detail::FormatOneChar('M', &cursor);
    detail::FormatAllDigits(detail::Abs(interval), &cursor);
    if (interval < 0) detail::FormatOneChar('-', &cursor);

    return append(detail::ViewDigitBuffer(buffer, cursor));
  }
};

// Day-time intervals are formatted as "<days>d<milliseconds>ms".
template <>
class StringFormatter<DayTimeIntervalType> {
 public:
  using value_type = DayTimeIntervalType::DayMilliseconds;

  explicit StringFormatter(const DataType*) {}

  template <typename Appender>
  Return<Appender> operator()(value_type interval, Appender&& append) {
    constexpr size_t buffer_size =
        /*d, ms*/ 3 + /*negative signs*/ 2 +
        /*days/milliseconds*/ 2 * detail::Digits10(std::numeric_limits<int32_t>::max());
    std::array<char, buffer_size> buffer;
    char* cursor = buffer.data() + buffer_size;

    // Written right-to-left: "...ms" component first, then "...d".
    detail::FormatOneChar('s', &cursor);
    detail::FormatOneChar('m', &cursor);
    detail::FormatAllDigits(detail::Abs(interval.milliseconds), &cursor);
    if (interval.milliseconds < 0) detail::FormatOneChar('-', &cursor);

    detail::FormatOneChar('d', &cursor);
    detail::FormatAllDigits(detail::Abs(interval.days), &cursor);
    if (interval.days < 0) detail::FormatOneChar('-', &cursor);

    return append(detail::ViewDigitBuffer(buffer, cursor));
  }
};

// Month-day-nano intervals are formatted as "<months>M<days>d<nanos>ns".
template <>
class StringFormatter<MonthDayNanoIntervalType> {
 public:
  using value_type = MonthDayNanoIntervalType::MonthDayNanos;

  explicit StringFormatter(const DataType*) {}

  template <typename Appender>
  Return<Appender> operator()(value_type interval, Appender&& append) {
    constexpr size_t buffer_size =
        /*m, d, ns*/ 4 + /*negative signs*/ 3 +
        /*months/days*/ 2 * detail::Digits10(std::numeric_limits<int32_t>::max()) +
        /*nanoseconds*/ detail::Digits10(std::numeric_limits<int64_t>::max());
    std::array<char, buffer_size> buffer;
    char* cursor = buffer.data() + buffer_size;

    // Written right-to-left: "...ns", then "...d", then "...M".
    detail::FormatOneChar('s', &cursor);
    detail::FormatOneChar('n', &cursor);
    detail::FormatAllDigits(detail::Abs(interval.nanoseconds), &cursor);
    if (interval.nanoseconds < 0) detail::FormatOneChar('-', &cursor);

    detail::FormatOneChar('d', &cursor);
    detail::FormatAllDigits(detail::Abs(interval.days), &cursor);
    if (interval.days < 0) detail::FormatOneChar('-', &cursor);

    detail::FormatOneChar('M', &cursor);
    detail::FormatAllDigits(detail::Abs(interval.months), &cursor);
    if (interval.months < 0) detail::FormatOneChar('-', &cursor);

    return append(detail::ViewDigitBuffer(buffer, cursor));
  }
};
654
+
655
+ } // namespace internal
656
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/future.h ADDED
@@ -0,0 +1,882 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cmath>
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <optional>
25
+ #include <type_traits>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "arrow/result.h"
30
+ #include "arrow/status.h"
31
+ #include "arrow/type_fwd.h"
32
+ #include "arrow/type_traits.h"
33
+ #include "arrow/util/config.h"
34
+ #include "arrow/util/functional.h"
35
+ #include "arrow/util/macros.h"
36
+ #include "arrow/util/tracing.h"
37
+ #include "arrow/util/type_fwd.h"
38
+ #include "arrow/util/visibility.h"
39
+
40
+ namespace arrow {
41
+
42
// Forward declaration; presumably specialized later in this header to map a
// value type to its corresponding Future type — confirm in the full file.
template <typename>
struct EnsureFuture;
44
+
45
+ namespace detail {
46
+
47
// Trait: true iff T is a Future<...> instantiation.
template <typename>
struct is_future : std::false_type {};

template <typename T>
struct is_future<Future<T>> : std::true_type {};

// result_of<Fn(Args...)>: the type of invoking Fn with Args, defined only
// when the call expression is well-formed (SFINAE-friendly, unlike the
// removed std::result_of).
template <typename Signature, typename Enable = void>
struct result_of;

template <typename Fn, typename... A>
struct result_of<Fn(A...),
                 internal::void_t<decltype(std::declval<Fn>()(std::declval<A>()...))>> {
  using type = decltype(std::declval<Fn>()(std::declval<A>()...));
};

template <typename Signature>
using result_of_t = typename result_of<Signature>::type;

// Helper to find the synchronous counterpart for a Future
template <typename T>
struct SyncType {
  using type = Result<T>;
};

// An empty future's synchronous counterpart is a plain Status.
template <>
struct SyncType<internal::Empty> {
  using type = Status;
};

// Trait: whether a callable's first argument is (decayed) Status.
template <typename Fn>
using first_arg_is_status =
    std::is_same<typename std::decay<internal::call_traits::argument_type<0, Fn>>::type,
                 Status>;

// Selects Then when Fn takes no arguments, Else otherwise.
template <typename Fn, typename Then, typename Else,
          typename Count = internal::call_traits::argument_count<Fn>>
using if_has_no_args = typename std::conditional<Count::value == 0, Then, Else>::type;
84
+
85
/// Creates a callback that can be added to a future to mark a `dest` future
/// finished. The four specializations handle the empty/non-empty
/// combinations of source and destination future.
template <typename Source, typename Dest, bool SourceEmpty = Source::is_empty,
          bool DestEmpty = Dest::is_empty>
struct MarkNextFinished {};

/// If the source and dest are both empty we can pass on the status
template <typename Source, typename Dest>
struct MarkNextFinished<Source, Dest, true, true> {
  void operator()(const Status& status) && { next.MarkFinished(status); }
  Dest next;
};

/// If the source is not empty but the dest is then we can take the
/// status out of the result
template <typename Source, typename Dest>
struct MarkNextFinished<Source, Dest, false, true> {
  void operator()(const Result<typename Source::ValueType>& res) && {
    next.MarkFinished(internal::Empty::ToResult(res.status()));
  }
  Dest next;
};

/// If neither are empty we pass on the result
template <typename Source, typename Dest>
struct MarkNextFinished<Source, Dest, false, false> {
  void operator()(const Result<typename Source::ValueType>& res) && {
    next.MarkFinished(res);
  }
  Dest next;
};
115
+
116
+ /// Helper that contains information about how to apply a continuation
117
+ struct ContinueFuture {
118
+ template <typename Return>
119
+ struct ForReturnImpl;
120
+
121
+ template <typename Return>
122
+ using ForReturn = typename ForReturnImpl<Return>::type;
123
+
124
+ template <typename Signature>
125
+ using ForSignature = ForReturn<result_of_t<Signature>>;
126
+
127
+ // If the callback returns void then we return Future<> that always finishes OK.
128
+ template <typename ContinueFunc, typename... Args,
129
+ typename ContinueResult = result_of_t<ContinueFunc && (Args && ...)>,
130
+ typename NextFuture = ForReturn<ContinueResult>>
131
+ typename std::enable_if<std::is_void<ContinueResult>::value>::type operator()(
132
+ NextFuture next, ContinueFunc&& f, Args&&... a) const {
133
+ std::forward<ContinueFunc>(f)(std::forward<Args>(a)...);
134
+ next.MarkFinished();
135
+ }
136
+
137
+ /// If the callback returns a non-future then we return Future<T>
138
+ /// and mark the future finished with the callback result. It will get promoted
139
+ /// to Result<T> as part of MarkFinished if it isn't already.
140
+ ///
141
+ /// If the callback returns Status and we return Future<> then also send the callback
142
+ /// result as-is to the destination future.
143
+ template <typename ContinueFunc, typename... Args,
144
+ typename ContinueResult = result_of_t<ContinueFunc && (Args && ...)>,
145
+ typename NextFuture = ForReturn<ContinueResult>>
146
+ typename std::enable_if<
147
+ !std::is_void<ContinueResult>::value && !is_future<ContinueResult>::value &&
148
+ (!NextFuture::is_empty || std::is_same<ContinueResult, Status>::value)>::type
149
+ operator()(NextFuture next, ContinueFunc&& f, Args&&... a) const {
150
+ next.MarkFinished(std::forward<ContinueFunc>(f)(std::forward<Args>(a)...));
151
+ }
152
+
153
+ /// If the callback returns a Result and the next future is Future<> then we mark
154
+ /// the future finished with the callback result.
155
+ ///
156
+ /// It may seem odd that the next future is Future<> when the callback returns a
157
+ /// result but this can occur if the OnFailure callback returns a result while the
158
+ /// OnSuccess callback is void/Status (e.g. you would get this calling the one-arg
159
+ /// version of Then with an OnSuccess callback that returns void)
160
+ template <typename ContinueFunc, typename... Args,
161
+ typename ContinueResult = result_of_t<ContinueFunc && (Args && ...)>,
162
+ typename NextFuture = ForReturn<ContinueResult>>
163
+ typename std::enable_if<!std::is_void<ContinueResult>::value &&
164
+ !is_future<ContinueResult>::value && NextFuture::is_empty &&
165
+ !std::is_same<ContinueResult, Status>::value>::type
166
+ operator()(NextFuture next, ContinueFunc&& f, Args&&... a) const {
167
+ next.MarkFinished(std::forward<ContinueFunc>(f)(std::forward<Args>(a)...).status());
168
+ }
169
+
170
+ /// If the callback returns a Future<T> then we return Future<T>. We create a new
171
+ /// future and add a callback to the future given to us by the user that forwards the
172
+ /// result to the future we just created
173
+ template <typename ContinueFunc, typename... Args,
174
+ typename ContinueResult = result_of_t<ContinueFunc && (Args && ...)>,
175
+ typename NextFuture = ForReturn<ContinueResult>>
176
+ typename std::enable_if<is_future<ContinueResult>::value>::type operator()(
177
+ NextFuture next, ContinueFunc&& f, Args&&... a) const {
178
+ ContinueResult signal_to_complete_next =
179
+ std::forward<ContinueFunc>(f)(std::forward<Args>(a)...);
180
+ MarkNextFinished<ContinueResult, NextFuture> callback{std::move(next)};
181
+ signal_to_complete_next.AddCallback(std::move(callback));
182
+ }
183
+
184
+ /// Helpers to conditionally ignore arguments to ContinueFunc
185
+ template <typename ContinueFunc, typename NextFuture, typename... Args>
186
+ void IgnoringArgsIf(std::true_type, NextFuture&& next, ContinueFunc&& f,
187
+ Args&&...) const {
188
+ operator()(std::forward<NextFuture>(next), std::forward<ContinueFunc>(f));
189
+ }
190
+ template <typename ContinueFunc, typename NextFuture, typename... Args>
191
+ void IgnoringArgsIf(std::false_type, NextFuture&& next, ContinueFunc&& f,
192
+ Args&&... a) const {
193
+ operator()(std::forward<NextFuture>(next), std::forward<ContinueFunc>(f),
194
+ std::forward<Args>(a)...);
195
+ }
196
+ };
197
+
198
+ /// Helper struct which tells us what kind of Future gets returned from `Then` based on
199
+ /// the return type of the OnSuccess callback
200
+ template <>
201
+ struct ContinueFuture::ForReturnImpl<void> {
202
+ using type = Future<>;
203
+ };
204
+
205
+ template <>
206
+ struct ContinueFuture::ForReturnImpl<Status> {
207
+ using type = Future<>;
208
+ };
209
+
210
+ template <typename R>
211
+ struct ContinueFuture::ForReturnImpl {
212
+ using type = Future<R>;
213
+ };
214
+
215
+ template <typename T>
216
+ struct ContinueFuture::ForReturnImpl<Result<T>> {
217
+ using type = Future<T>;
218
+ };
219
+
220
+ template <typename T>
221
+ struct ContinueFuture::ForReturnImpl<Future<T>> {
222
+ using type = Future<T>;
223
+ };
224
+
225
+ } // namespace detail
226
+
227
+ /// A Future's execution or completion status
228
+ enum class FutureState : int8_t { PENDING, SUCCESS, FAILURE };
229
+
230
+ inline bool IsFutureFinished(FutureState state) { return state != FutureState::PENDING; }
231
+
232
+ /// \brief Describe whether the callback should be scheduled or run synchronously
233
+ enum class ShouldSchedule {
234
+ /// Always run the callback synchronously (the default)
235
+ Never = 0,
236
+ /// Schedule a new task only if the future is not finished when the
237
+ /// callback is added
238
+ IfUnfinished = 1,
239
+ /// Always schedule the callback as a new task
240
+ Always = 2,
241
+ /// Schedule a new task only if it would run on an executor other than
242
+ /// the specified executor.
243
+ IfDifferentExecutor = 3,
244
+ };
245
+
246
+ /// \brief Options that control how a continuation is run
247
+ struct CallbackOptions {
248
+ /// Describe whether the callback should be run synchronously or scheduled
249
+ ShouldSchedule should_schedule = ShouldSchedule::Never;
250
+ /// If the callback is scheduled then this is the executor it should be scheduled
251
+ /// on. If this is NULL then should_schedule must be Never
252
+ internal::Executor* executor = NULLPTR;
253
+
254
+ static CallbackOptions Defaults() { return {}; }
255
+ };
256
+
257
+ // Untyped private implementation
258
+ class ARROW_EXPORT FutureImpl : public std::enable_shared_from_this<FutureImpl> {
259
+ public:
260
+ FutureImpl();
261
+ virtual ~FutureImpl() = default;
262
+
263
+ FutureState state() { return state_.load(); }
264
+
265
+ static std::unique_ptr<FutureImpl> Make();
266
+ static std::unique_ptr<FutureImpl> MakeFinished(FutureState state);
267
+
268
+ #ifdef ARROW_WITH_OPENTELEMETRY
269
+ void SetSpan(util::tracing::Span* span) { span_ = span; }
270
+ #endif
271
+
272
+ // Future API
273
+ void MarkFinished();
274
+ void MarkFailed();
275
+ void Wait();
276
+ bool Wait(double seconds);
277
+ template <typename ValueType>
278
+ Result<ValueType>* CastResult() const {
279
+ return static_cast<Result<ValueType>*>(result_.get());
280
+ }
281
+
282
+ using Callback = internal::FnOnce<void(const FutureImpl& impl)>;
283
+ void AddCallback(Callback callback, CallbackOptions opts);
284
+ bool TryAddCallback(const std::function<Callback()>& callback_factory,
285
+ CallbackOptions opts);
286
+
287
+ std::atomic<FutureState> state_{FutureState::PENDING};
288
+
289
+ // Type erased storage for arbitrary results
290
+ // XXX small objects could be stored inline instead of boxed in a pointer
291
+ using Storage = std::unique_ptr<void, void (*)(void*)>;
292
+ Storage result_{NULLPTR, NULLPTR};
293
+
294
+ struct CallbackRecord {
295
+ Callback callback;
296
+ CallbackOptions options;
297
+ };
298
+ std::vector<CallbackRecord> callbacks_;
299
+ #ifdef ARROW_WITH_OPENTELEMETRY
300
+ util::tracing::Span* span_ = NULLPTR;
301
+ #endif
302
+ };
303
+
304
+ // ---------------------------------------------------------------------
305
+ // Public API
306
+
307
+ /// \brief EXPERIMENTAL A std::future-like class with more functionality.
308
+ ///
309
+ /// A Future represents the results of a past or future computation.
310
+ /// The Future API has two sides: a producer side and a consumer side.
311
+ ///
312
+ /// The producer API allows creating a Future and setting its result or
313
+ /// status, possibly after running a computation function.
314
+ ///
315
+ /// The consumer API allows querying a Future's current state, wait for it
316
+ /// to complete, and composing futures with callbacks.
317
+ template <typename T>
318
+ class [[nodiscard]] Future {
319
+ public:
320
+ using ValueType = T;
321
+ using SyncType = typename detail::SyncType<T>::type;
322
+ static constexpr bool is_empty = std::is_same<T, internal::Empty>::value;
323
+ // The default constructor creates an invalid Future. Use Future::Make()
324
+ // for a valid Future. This constructor is mostly for the convenience
325
+ // of being able to presize a vector of Futures.
326
+ Future() = default;
327
+
328
+ #ifdef ARROW_WITH_OPENTELEMETRY
329
+ void SetSpan(util::tracing::Span* span) { impl_->SetSpan(span); }
330
+ #endif
331
+
332
+ // Consumer API
333
+
334
+ bool is_valid() const { return impl_ != NULLPTR; }
335
+
336
+ /// \brief Return the Future's current state
337
+ ///
338
+ /// A return value of PENDING is only indicative, as the Future can complete
339
+ /// concurrently. A return value of FAILURE or SUCCESS is definitive, though.
340
+ FutureState state() const {
341
+ CheckValid();
342
+ return impl_->state();
343
+ }
344
+
345
+ /// \brief Whether the Future is finished
346
+ ///
347
+ /// A false return value is only indicative, as the Future can complete
348
+ /// concurrently. A true return value is definitive, though.
349
+ bool is_finished() const {
350
+ CheckValid();
351
+ return IsFutureFinished(impl_->state());
352
+ }
353
+
354
+ /// \brief Wait for the Future to complete and return its Result
355
+ const Result<ValueType>& result() const& {
356
+ Wait();
357
+ return *GetResult();
358
+ }
359
+
360
+ /// \brief Returns an rvalue to the result. This method is potentially unsafe
361
+ ///
362
+ /// The future is not the unique owner of the result, copies of a future will
363
+ /// also point to the same result. You must make sure that no other copies
364
+ /// of the future exist. Attempts to add callbacks after you move the result
365
+ /// will result in undefined behavior.
366
+ Result<ValueType>&& MoveResult() {
367
+ Wait();
368
+ return std::move(*GetResult());
369
+ }
370
+
371
+ /// \brief Wait for the Future to complete and return its Status
372
+ const Status& status() const { return result().status(); }
373
+
374
+ /// \brief Future<T> is convertible to Future<>, which views only the
375
+ /// Status of the original. Marking the returned Future Finished is not supported.
376
+ explicit operator Future<>() const {
377
+ Future<> status_future;
378
+ status_future.impl_ = impl_;
379
+ return status_future;
380
+ }
381
+
382
+ /// \brief Wait for the Future to complete
383
+ void Wait() const {
384
+ CheckValid();
385
+ impl_->Wait();
386
+ }
387
+
388
+ /// \brief Wait for the Future to complete, or for the timeout to expire
389
+ ///
390
+ /// `true` is returned if the Future completed, `false` if the timeout expired.
391
+ /// Note a `false` value is only indicative, as the Future can complete
392
+ /// concurrently.
393
+ bool Wait(double seconds) const {
394
+ CheckValid();
395
+ return impl_->Wait(seconds);
396
+ }
397
+
398
+ // Producer API
399
+
400
+ /// \brief Producer API: mark Future finished
401
+ ///
402
+ /// The Future's result is set to `res`.
403
+ void MarkFinished(Result<ValueType> res) { DoMarkFinished(std::move(res)); }
404
+
405
+ /// \brief Mark a Future<> completed with the provided Status.
406
+ template <typename E = ValueType, typename = typename std::enable_if<
407
+ std::is_same<E, internal::Empty>::value>::type>
408
+ void MarkFinished(Status s = Status::OK()) {
409
+ return DoMarkFinished(E::ToResult(std::move(s)));
410
+ }
411
+
412
+ /// \brief Producer API: instantiate a valid Future
413
+ ///
414
+ /// The Future's state is initialized with PENDING. If you are creating a future with
415
+ /// this method you must ensure that future is eventually completed (with success or
416
+ /// failure). Creating a future, returning it, and never completing the future can lead
417
+ /// to memory leaks (for example, see Loop).
418
+ static Future Make() {
419
+ Future fut;
420
+ fut.impl_ = FutureImpl::Make();
421
+ return fut;
422
+ }
423
+
424
+ /// \brief Producer API: instantiate a finished Future
425
+ static Future<ValueType> MakeFinished(Result<ValueType> res) {
426
+ Future<ValueType> fut;
427
+ fut.InitializeFromResult(std::move(res));
428
+ return fut;
429
+ }
430
+
431
+ /// \brief Make a finished Future<> with the provided Status.
432
+ template <typename E = ValueType, typename = typename std::enable_if<
433
+ std::is_same<E, internal::Empty>::value>::type>
434
+ static Future<> MakeFinished(Status s = Status::OK()) {
435
+ return MakeFinished(E::ToResult(std::move(s)));
436
+ }
437
+
438
+ struct WrapResultOnComplete {
439
+ template <typename OnComplete>
440
+ struct Callback {
441
+ void operator()(const FutureImpl& impl) && {
442
+ std::move(on_complete)(*impl.CastResult<ValueType>());
443
+ }
444
+ OnComplete on_complete;
445
+ };
446
+ };
447
+
448
+ struct WrapStatusyOnComplete {
449
+ template <typename OnComplete>
450
+ struct Callback {
451
+ static_assert(std::is_same<internal::Empty, ValueType>::value,
452
+ "Only callbacks for Future<> should accept Status and not Result");
453
+
454
+ void operator()(const FutureImpl& impl) && {
455
+ std::move(on_complete)(impl.CastResult<ValueType>()->status());
456
+ }
457
+ OnComplete on_complete;
458
+ };
459
+ };
460
+
461
+ template <typename OnComplete>
462
+ using WrapOnComplete = typename std::conditional<
463
+ detail::first_arg_is_status<OnComplete>::value, WrapStatusyOnComplete,
464
+ WrapResultOnComplete>::type::template Callback<OnComplete>;
465
+
466
+ /// \brief Consumer API: Register a callback to run when this future completes
467
+ ///
468
+ /// The callback should receive the result of the future (const Result<T>&)
469
+ /// For a void or statusy future this should be (const Status&)
470
+ ///
471
+ /// There is no guarantee to the order in which callbacks will run. In
472
+ /// particular, callbacks added while the future is being marked complete
473
+ /// may be executed immediately, ahead of, or even the same time as, other
474
+ /// callbacks that have been previously added.
475
+ ///
476
+ /// WARNING: callbacks may hold arbitrary references, including cyclic references.
477
+ /// Since callbacks will only be destroyed after they are invoked, this can lead to
478
+ /// memory leaks if a Future is never marked finished (abandoned):
479
+ ///
480
+ /// {
481
+ /// auto fut = Future<>::Make();
482
+ /// fut.AddCallback([fut]() {});
483
+ /// }
484
+ ///
485
+ /// In this example `fut` falls out of scope but is not destroyed because it holds a
486
+ /// cyclic reference to itself through the callback.
487
+ template <typename OnComplete, typename Callback = WrapOnComplete<OnComplete>>
488
+ void AddCallback(OnComplete on_complete,
489
+ CallbackOptions opts = CallbackOptions::Defaults()) const {
490
+ // We know impl_ will not be dangling when invoking callbacks because at least one
491
+ // thread will be waiting for MarkFinished to return. Thus it's safe to keep a
492
+ // weak reference to impl_ here
493
+ impl_->AddCallback(Callback{std::move(on_complete)}, opts);
494
+ }
495
+
496
+ /// \brief Overload of AddCallback that will return false instead of running
497
+ /// synchronously
498
+ ///
499
+ /// This overload will guarantee the callback is never run synchronously. If the future
500
+ /// is already finished then it will simply return false. This can be useful to avoid
501
+ /// stack overflow in a situation where you have recursive Futures. For an example
502
+ /// see the Loop function
503
+ ///
504
+ /// Takes in a callback factory function to allow moving callbacks (the factory function
505
+ /// will only be called if the callback can successfully be added)
506
+ ///
507
+ /// Returns true if a callback was actually added and false if the callback failed
508
+ /// to add because the future was marked complete.
509
+ template <typename CallbackFactory,
510
+ typename OnComplete = detail::result_of_t<CallbackFactory()>,
511
+ typename Callback = WrapOnComplete<OnComplete>>
512
+ bool TryAddCallback(CallbackFactory callback_factory,
513
+ CallbackOptions opts = CallbackOptions::Defaults()) const {
514
+ return impl_->TryAddCallback([&]() { return Callback{callback_factory()}; }, opts);
515
+ }
516
+
517
+ template <typename OnSuccess, typename OnFailure>
518
+ struct ThenOnComplete {
519
+ static constexpr bool has_no_args =
520
+ internal::call_traits::argument_count<OnSuccess>::value == 0;
521
+
522
+ using ContinuedFuture = detail::ContinueFuture::ForSignature<
523
+ detail::if_has_no_args<OnSuccess, OnSuccess && (), OnSuccess && (const T&)>>;
524
+
525
+ static_assert(
526
+ std::is_same<detail::ContinueFuture::ForSignature<OnFailure && (const Status&)>,
527
+ ContinuedFuture>::value,
528
+ "OnSuccess and OnFailure must continue with the same future type");
529
+
530
+ struct DummyOnSuccess {
531
+ void operator()(const T&);
532
+ };
533
+ using OnSuccessArg = typename std::decay<internal::call_traits::argument_type<
534
+ 0, detail::if_has_no_args<OnSuccess, DummyOnSuccess, OnSuccess>>>::type;
535
+
536
+ static_assert(
537
+ !std::is_same<OnSuccessArg, typename EnsureResult<OnSuccessArg>::type>::value,
538
+ "OnSuccess' argument should not be a Result");
539
+
540
+ void operator()(const Result<T>& result) && {
541
+ detail::ContinueFuture continue_future;
542
+ if (ARROW_PREDICT_TRUE(result.ok())) {
543
+ // move on_failure to a(n immediately destroyed) temporary to free its resources
544
+ ARROW_UNUSED(OnFailure(std::move(on_failure)));
545
+ continue_future.IgnoringArgsIf(
546
+ detail::if_has_no_args<OnSuccess, std::true_type, std::false_type>{},
547
+ std::move(next), std::move(on_success), result.ValueOrDie());
548
+ } else {
549
+ ARROW_UNUSED(OnSuccess(std::move(on_success)));
550
+ continue_future(std::move(next), std::move(on_failure), result.status());
551
+ }
552
+ }
553
+
554
+ OnSuccess on_success;
555
+ OnFailure on_failure;
556
+ ContinuedFuture next;
557
+ };
558
+
559
+ template <typename OnSuccess>
560
+ struct PassthruOnFailure {
561
+ using ContinuedFuture = detail::ContinueFuture::ForSignature<
562
+ detail::if_has_no_args<OnSuccess, OnSuccess && (), OnSuccess && (const T&)>>;
563
+
564
+ Result<typename ContinuedFuture::ValueType> operator()(const Status& s) { return s; }
565
+ };
566
+
567
+ /// \brief Consumer API: Register a continuation to run when this future completes
568
+ ///
569
+ /// The continuation will run in the same thread that called MarkFinished (whatever
570
+ /// callback is registered with this function will run before MarkFinished returns).
571
+ /// Avoid long-running callbacks in favor of submitting a task to an Executor and
572
+ /// returning the future.
573
+ ///
574
+ /// Two callbacks are supported:
575
+ /// - OnSuccess, called with the result (const ValueType&) on successful completion.
576
+ /// for an empty future this will be called with nothing ()
577
+ /// - OnFailure, called with the error (const Status&) on failed completion.
578
+ /// This callback is optional and defaults to a passthru of any errors.
579
+ ///
580
+ /// Then() returns a Future whose ValueType is derived from the return type of the
581
+ /// callbacks. If a callback returns:
582
+ /// - void, a Future<> will be returned which will completes successfully as soon
583
+ /// as the callback runs.
584
+ /// - Status, a Future<> will be returned which will complete with the returned Status
585
+ /// as soon as the callback runs.
586
+ /// - V or Result<V>, a Future<V> will be returned which will complete with the result
587
+ /// of invoking the callback as soon as the callback runs.
588
+ /// - Future<V>, a Future<V> will be returned which will be marked complete when the
589
+ /// future returned by the callback completes (and will complete with the same
590
+ /// result).
591
+ ///
592
+ /// The continued Future type must be the same for both callbacks.
593
+ ///
594
+ /// Note that OnFailure can swallow errors, allowing continued Futures to successfully
595
+ /// complete even if this Future fails.
596
+ ///
597
+ /// If this future is already completed then the callback will be run immediately
598
+ /// and the returned future may already be marked complete.
599
+ ///
600
+ /// See AddCallback for general considerations when writing callbacks.
601
+ template <typename OnSuccess, typename OnFailure = PassthruOnFailure<OnSuccess>,
602
+ typename OnComplete = ThenOnComplete<OnSuccess, OnFailure>,
603
+ typename ContinuedFuture = typename OnComplete::ContinuedFuture>
604
+ ContinuedFuture Then(OnSuccess on_success, OnFailure on_failure = {},
605
+ CallbackOptions options = CallbackOptions::Defaults()) const {
606
+ auto next = ContinuedFuture::Make();
607
+ AddCallback(OnComplete{std::forward<OnSuccess>(on_success),
608
+ std::forward<OnFailure>(on_failure), next},
609
+ options);
610
+ return next;
611
+ }
612
+
613
+ /// \brief Implicit constructor to create a finished future from a value
614
+ Future(ValueType val) : Future() { // NOLINT runtime/explicit
615
+ impl_ = FutureImpl::MakeFinished(FutureState::SUCCESS);
616
+ SetResult(std::move(val));
617
+ }
618
+
619
+ /// \brief Implicit constructor to create a future from a Result, enabling use
620
+ /// of macros like ARROW_ASSIGN_OR_RAISE.
621
+ Future(Result<ValueType> res) : Future() { // NOLINT runtime/explicit
622
+ if (ARROW_PREDICT_TRUE(res.ok())) {
623
+ impl_ = FutureImpl::MakeFinished(FutureState::SUCCESS);
624
+ } else {
625
+ impl_ = FutureImpl::MakeFinished(FutureState::FAILURE);
626
+ }
627
+ SetResult(std::move(res));
628
+ }
629
+
630
+ /// \brief Implicit constructor to create a future from a Status, enabling use
631
+ /// of macros like ARROW_RETURN_NOT_OK.
632
+ Future(Status s) // NOLINT runtime/explicit
633
+ : Future(Result<ValueType>(std::move(s))) {}
634
+
635
+ protected:
636
+ void InitializeFromResult(Result<ValueType> res) {
637
+ if (ARROW_PREDICT_TRUE(res.ok())) {
638
+ impl_ = FutureImpl::MakeFinished(FutureState::SUCCESS);
639
+ } else {
640
+ impl_ = FutureImpl::MakeFinished(FutureState::FAILURE);
641
+ }
642
+ SetResult(std::move(res));
643
+ }
644
+
645
+ void Initialize() { impl_ = FutureImpl::Make(); }
646
+
647
+ Result<ValueType>* GetResult() const { return impl_->CastResult<ValueType>(); }
648
+
649
+ void SetResult(Result<ValueType> res) {
650
+ impl_->result_ = {new Result<ValueType>(std::move(res)),
651
+ [](void* p) { delete static_cast<Result<ValueType>*>(p); }};
652
+ }
653
+
654
+ void DoMarkFinished(Result<ValueType> res) {
655
+ SetResult(std::move(res));
656
+
657
+ if (ARROW_PREDICT_TRUE(GetResult()->ok())) {
658
+ impl_->MarkFinished();
659
+ } else {
660
+ impl_->MarkFailed();
661
+ }
662
+ }
663
+
664
+ void CheckValid() const {
665
+ #ifndef NDEBUG
666
+ if (!is_valid()) {
667
+ Status::Invalid("Invalid Future (default-initialized?)").Abort();
668
+ }
669
+ #endif
670
+ }
671
+
672
+ explicit Future(std::shared_ptr<FutureImpl> impl) : impl_(std::move(impl)) {}
673
+
674
+ std::shared_ptr<FutureImpl> impl_;
675
+
676
+ friend struct detail::ContinueFuture;
677
+
678
+ template <typename U>
679
+ friend class Future;
680
+ friend class WeakFuture<T>;
681
+
682
+ FRIEND_TEST(FutureRefTest, ChainRemoved);
683
+ FRIEND_TEST(FutureRefTest, TailRemoved);
684
+ FRIEND_TEST(FutureRefTest, HeadRemoved);
685
+ };
686
+
687
+ template <typename T>
688
+ typename Future<T>::SyncType FutureToSync(const Future<T>& fut) {
689
+ return fut.result();
690
+ }
691
+
692
+ template <>
693
+ inline typename Future<internal::Empty>::SyncType FutureToSync<internal::Empty>(
694
+ const Future<internal::Empty>& fut) {
695
+ return fut.status();
696
+ }
697
+
698
+ template <>
699
+ inline Future<>::Future(Status s) : Future(internal::Empty::ToResult(std::move(s))) {}
700
+
701
+ template <typename T>
702
+ class WeakFuture {
703
+ public:
704
+ explicit WeakFuture(const Future<T>& future) : impl_(future.impl_) {}
705
+
706
+ Future<T> get() { return Future<T>{impl_.lock()}; }
707
+
708
+ private:
709
+ std::weak_ptr<FutureImpl> impl_;
710
+ };
711
+
712
+ /// \defgroup future-utilities Functions for working with Futures
713
+ /// @{
714
+
715
+ /// If a Result<Future> holds an error instead of a Future, construct a finished Future
716
+ /// holding that error.
717
+ template <typename T>
718
+ static Future<T> DeferNotOk(Result<Future<T>> maybe_future) {
719
+ if (ARROW_PREDICT_FALSE(!maybe_future.ok())) {
720
+ return Future<T>::MakeFinished(std::move(maybe_future).status());
721
+ }
722
+ return std::move(maybe_future).MoveValueUnsafe();
723
+ }
724
+
725
+ /// \brief Create a Future which completes when all of `futures` complete.
726
+ ///
727
+ /// The future's result is a vector of the results of `futures`.
728
+ /// Note that this future will never be marked "failed"; failed results
729
+ /// will be stored in the result vector alongside successful results.
730
+ template <typename T>
731
+ Future<std::vector<Result<T>>> All(std::vector<Future<T>> futures) {
732
+ struct State {
733
+ explicit State(std::vector<Future<T>> f)
734
+ : futures(std::move(f)), n_remaining(futures.size()) {}
735
+
736
+ std::vector<Future<T>> futures;
737
+ std::atomic<size_t> n_remaining;
738
+ };
739
+
740
+ if (futures.size() == 0) {
741
+ return {std::vector<Result<T>>{}};
742
+ }
743
+
744
+ auto state = std::make_shared<State>(std::move(futures));
745
+
746
+ auto out = Future<std::vector<Result<T>>>::Make();
747
+ for (const Future<T>& future : state->futures) {
748
+ future.AddCallback([state, out](const Result<T>&) mutable {
749
+ if (state->n_remaining.fetch_sub(1) != 1) return;
750
+
751
+ std::vector<Result<T>> results(state->futures.size());
752
+ for (size_t i = 0; i < results.size(); ++i) {
753
+ results[i] = state->futures[i].result();
754
+ }
755
+ out.MarkFinished(std::move(results));
756
+ });
757
+ }
758
+ return out;
759
+ }
760
+
761
+ /// \brief Create a Future which completes when all of `futures` complete.
762
+ ///
763
+ /// The future will be marked complete if all `futures` complete
764
+ /// successfully. Otherwise, it will be marked failed with the status of
765
+ /// the first failing future.
766
+ ARROW_EXPORT
767
+ Future<> AllComplete(const std::vector<Future<>>& futures);
768
+
769
+ /// \brief Create a Future which completes when all of `futures` complete.
770
+ ///
771
+ /// The future will finish with an ok status if all `futures` finish with
772
+ /// an ok status. Otherwise, it will be marked failed with the status of
773
+ /// one of the failing futures.
774
+ ///
775
+ /// Unlike AllComplete this Future will not complete immediately when a
776
+ /// failure occurs. It will wait until all futures have finished.
777
+ ARROW_EXPORT
778
+ Future<> AllFinished(const std::vector<Future<>>& futures);
779
+
780
+ /// @}
781
+
782
+ struct Continue {
783
+ template <typename T>
784
+ operator std::optional<T>() && { // NOLINT explicit
785
+ return {};
786
+ }
787
+ };
788
+
789
+ template <typename T = internal::Empty>
790
+ std::optional<T> Break(T break_value = {}) {
791
+ return std::optional<T>{std::move(break_value)};
792
+ }
793
+
794
+ template <typename T = internal::Empty>
795
+ using ControlFlow = std::optional<T>;
796
+
797
+ /// \brief Loop through an asynchronous sequence
798
+ ///
799
+ /// \param[in] iterate A generator of Future<ControlFlow<BreakValue>>. On completion
800
+ /// of each yielded future the resulting ControlFlow will be examined. A Break will
801
+ /// terminate the loop, while a Continue will re-invoke `iterate`.
802
+ ///
803
+ /// \return A future which will complete when a Future returned by iterate completes with
804
+ /// a Break
805
+ template <typename Iterate,
806
+ typename Control = typename detail::result_of_t<Iterate()>::ValueType,
807
+ typename BreakValueType = typename Control::value_type>
808
+ Future<BreakValueType> Loop(Iterate iterate) {
809
+ struct Callback {
810
+ bool CheckForTermination(const Result<Control>& control_res) {
811
+ if (!control_res.ok()) {
812
+ break_fut.MarkFinished(control_res.status());
813
+ return true;
814
+ }
815
+ if (control_res->has_value()) {
816
+ break_fut.MarkFinished(**control_res);
817
+ return true;
818
+ }
819
+ return false;
820
+ }
821
+
822
+ void operator()(const Result<Control>& maybe_control) && {
823
+ if (CheckForTermination(maybe_control)) return;
824
+
825
+ auto control_fut = iterate();
826
+ while (true) {
827
+ if (control_fut.TryAddCallback([this]() { return *this; })) {
828
+ // Adding a callback succeeded; control_fut was not finished
829
+ // and we must wait to CheckForTermination.
830
+ return;
831
+ }
832
+ // Adding a callback failed; control_fut was finished and we
833
+ // can CheckForTermination immediately. This also avoids recursion and potential
834
+ // stack overflow.
835
+ if (CheckForTermination(control_fut.result())) return;
836
+
837
+ control_fut = iterate();
838
+ }
839
+ }
840
+
841
+ Iterate iterate;
842
+
843
+ // If the future returned by control_fut is never completed then we will be hanging on
844
+ // to break_fut forever even if the listener has given up listening on it. Instead we
845
+ // rely on the fact that a producer (the caller of Future<>::Make) is always
846
+ // responsible for completing the futures they create.
847
+ // TODO: Could avoid this kind of situation with "future abandonment" similar to mesos
848
+ Future<BreakValueType> break_fut;
849
+ };
850
+
851
+ auto break_fut = Future<BreakValueType>::Make();
852
+ auto control_fut = iterate();
853
+ control_fut.AddCallback(Callback{std::move(iterate), break_fut});
854
+
855
+ return break_fut;
856
+ }
857
+
858
+ inline Future<> ToFuture(Status status) {
859
+ return Future<>::MakeFinished(std::move(status));
860
+ }
861
+
862
+ template <typename T>
863
+ Future<T> ToFuture(T value) {
864
+ return Future<T>::MakeFinished(std::move(value));
865
+ }
866
+
867
+ template <typename T>
868
+ Future<T> ToFuture(Result<T> maybe_value) {
869
+ return Future<T>::MakeFinished(std::move(maybe_value));
870
+ }
871
+
872
+ template <typename T>
873
+ Future<T> ToFuture(Future<T> fut) {
874
+ return fut;
875
+ }
876
+
877
+ template <typename T>
878
+ struct EnsureFuture {
879
+ using type = decltype(ToFuture(std::declval<T>()));
880
+ };
881
+
882
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h ADDED
@@ -0,0 +1,944 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Private header, not to be exported
19
+
20
+ #pragma once
21
+
22
+ #include <algorithm>
23
+ #include <cassert>
24
+ #include <cmath>
25
+ #include <cstdint>
26
+ #include <cstring>
27
+ #include <limits>
28
+ #include <memory>
29
+ #include <string>
30
+ #include <type_traits>
31
+ #include <utility>
32
+ #include <vector>
33
+
34
+ #include "arrow/array/builder_binary.h"
35
+ #include "arrow/buffer_builder.h"
36
+ #include "arrow/result.h"
37
+ #include "arrow/status.h"
38
+ #include "arrow/type_fwd.h"
39
+ #include "arrow/type_traits.h"
40
+ #include "arrow/util/bit_util.h"
41
+ #include "arrow/util/bitmap_builders.h"
42
+ #include "arrow/util/endian.h"
43
+ #include "arrow/util/logging.h"
44
+ #include "arrow/util/macros.h"
45
+ #include "arrow/util/ubsan.h"
46
+
47
+ #define XXH_INLINE_ALL
48
+
49
+ #include "arrow/vendored/xxhash.h" // IWYU pragma: keep
50
+
51
+ namespace arrow {
52
+ namespace internal {
53
+
54
// XXX would it help to have a 32-bit hash value on large datasets?
// The hash value type used throughout the hashing utilities.
typedef uint64_t hash_t;

// Notes about the choice of a hash function.
// - XXH3 is extremely fast on most data sizes, from small to huge;
//   faster even than HW CRC-based hashing schemes
// - our custom hash function for tiny values (< 16 bytes) is still
//   significantly faster (~30%), at least on this machine and compiler

// Hash `length` bytes starting at `data`.  The AlgNum template parameter
// (0 or 1) selects one of two independent hash families, allowing two
// uncorrelated hashes of the same key (defined further below).
template <uint64_t AlgNum>
inline hash_t ComputeStringHash(const void* data, int64_t length);

/// \brief A hash function for bitmaps that can handle offsets and lengths in
/// terms of number of bits. The hash only depends on the bits actually hashed.
///
/// It's the caller's responsibility to ensure that bits_offset + num_bits are
/// readable from the bitmap.
///
/// \pre bits_offset >= 0
/// \pre num_bits >= 0
/// \pre (bits_offset + num_bits + 7) / 8 <= readable length in bytes from bitmap
///
/// \param bitmap The pointer to the bitmap.
/// \param seed The seed for the hash function (useful when chaining hash functions).
/// \param bits_offset The offset in bits relative to the start of the bitmap.
/// \param num_bits The number of bits after the offset to be hashed.
ARROW_EXPORT hash_t ComputeBitmapHash(const uint8_t* bitmap, hash_t seed,
                                      int64_t bits_offset, int64_t num_bits);
82
+
83
// Fallback hash/compare helpers for arbitrary scalar types; specializations
// below override ComputeHash and/or CompareScalars for specific families.
template <typename Scalar, uint64_t AlgNum>
struct ScalarHelperBase {
  // Default equality: operator== on the values.
  static bool CompareScalars(Scalar u, Scalar v) { return u == v; }

  static hash_t ComputeHash(const Scalar& value) {
    // Generic hash computation for scalars. Simply apply the string hash
    // to the bit representation of the value.

    // XXX in the case of FP values, we'd like equal values to have the same hash,
    // even if they have different bit representations...
    return ComputeStringHash<AlgNum>(&value, sizeof(value));
  }
};
96
+
97
// Primary template: inherit the generic behavior; the Enable parameter is a
// SFINAE hook for the partial specializations below.
template <typename Scalar, uint64_t AlgNum = 0, typename Enable = void>
struct ScalarHelper : public ScalarHelperBase<Scalar, AlgNum> {};
99
+
100
+ template <typename Scalar, uint64_t AlgNum>
101
+ struct ScalarHelper<Scalar, AlgNum, enable_if_t<std::is_integral<Scalar>::value>>
102
+ : public ScalarHelperBase<Scalar, AlgNum> {
103
+ // ScalarHelper specialization for integers
104
+
105
+ static hash_t ComputeHash(const Scalar& value) {
106
+ // Faster hash computation for integers.
107
+
108
+ // Two of xxhash's prime multipliers (which are chosen for their
109
+ // bit dispersion properties)
110
+ static constexpr uint64_t multipliers[] = {11400714785074694791ULL,
111
+ 14029467366897019727ULL};
112
+
113
+ // Multiplying by the prime number mixes the low bits into the high bits,
114
+ // then byte-swapping (which is a single CPU instruction) allows the
115
+ // combined high and low bits to participate in the initial hash table index.
116
+ auto h = static_cast<hash_t>(value);
117
+ return bit_util::ByteSwap(multipliers[AlgNum] * h);
118
+ }
119
+ };
120
+
121
+ template <typename Scalar, uint64_t AlgNum>
122
+ struct ScalarHelper<Scalar, AlgNum,
123
+ enable_if_t<std::is_same<std::string_view, Scalar>::value>>
124
+ : public ScalarHelperBase<Scalar, AlgNum> {
125
+ // ScalarHelper specialization for std::string_view
126
+
127
+ static hash_t ComputeHash(std::string_view value) {
128
+ return ComputeStringHash<AlgNum>(value.data(), static_cast<int64_t>(value.size()));
129
+ }
130
+ };
131
+
132
+ template <typename Scalar, uint64_t AlgNum>
133
+ struct ScalarHelper<Scalar, AlgNum, enable_if_t<std::is_floating_point<Scalar>::value>>
134
+ : public ScalarHelperBase<Scalar, AlgNum> {
135
+ // ScalarHelper specialization for reals
136
+
137
+ static bool CompareScalars(Scalar u, Scalar v) {
138
+ if (std::isnan(u)) {
139
+ // XXX should we do a bit-precise comparison?
140
+ return std::isnan(v);
141
+ }
142
+ return u == v;
143
+ }
144
+ };
145
+
146
// Hash `length` bytes at `data`.  Small keys (<= 16 bytes) use a custom
// fast path built from the integer ScalarHelper hashes; larger keys fall
// back to XXH3 with a hard-coded secret selected by AlgNum.
template <uint64_t AlgNum = 0>
hash_t ComputeStringHash(const void* data, int64_t length) {
  if (ARROW_PREDICT_TRUE(length <= 16)) {
    // Specialize for small hash strings, as they are quite common as
    // hash table keys. Even XXH3 isn't quite as fast.
    auto p = reinterpret_cast<const uint8_t*>(data);
    auto n = static_cast<uint32_t>(length);
    if (n <= 8) {
      if (n <= 3) {
        if (n == 0) {
          // Arbitrary non-zero hash for the empty string (zero is the
          // hash-table sentinel).
          return 1U;
        }
        // Pack length + first/middle/last byte into one 32-bit word and
        // hash that.  (For n in {1,2,3} these three bytes cover the key.)
        uint32_t x = (n << 24) ^ (p[0] << 16) ^ (p[n / 2] << 8) ^ p[n - 1];
        return ScalarHelper<uint32_t, AlgNum>::ComputeHash(x);
      }
      // 4 <= length <= 8
      // We can read the string as two overlapping 32-bit ints, apply
      // different hash functions to each of them in parallel, then XOR
      // the results
      uint32_t x, y;
      hash_t hx, hy;
      x = util::SafeLoadAs<uint32_t>(p + n - 4);
      y = util::SafeLoadAs<uint32_t>(p);
      hx = ScalarHelper<uint32_t, AlgNum>::ComputeHash(x);
      hy = ScalarHelper<uint32_t, AlgNum ^ 1>::ComputeHash(y);
      // XOR in the length so keys that are prefixes of each other differ.
      return n ^ hx ^ hy;
    }
    // 8 <= length <= 16
    // Apply the same principle as above
    uint64_t x, y;
    hash_t hx, hy;
    x = util::SafeLoadAs<uint64_t>(p + n - 8);
    y = util::SafeLoadAs<uint64_t>(p);
    hx = ScalarHelper<uint64_t, AlgNum>::ComputeHash(x);
    hy = ScalarHelper<uint64_t, AlgNum ^ 1>::ComputeHash(y);
    return n ^ hx ^ hy;
  }

#if XXH3_SECRET_SIZE_MIN != 136
#error XXH3_SECRET_SIZE_MIN changed, please fix kXxh3Secrets
#endif

  // XXH3_64bits_withSeed generates a secret based on the seed, which is too slow.
  // Instead, we use hard-coded random secrets. To maximize cache efficiency,
  // they reuse the same memory area.
  static constexpr unsigned char kXxh3Secrets[XXH3_SECRET_SIZE_MIN + 1] = {
      0xe7, 0x8b, 0x13, 0xf9, 0xfc, 0xb5, 0x8e, 0xef, 0x81, 0x48, 0x2c, 0xbf, 0xf9, 0x9f,
      0xc1, 0x1e, 0x43, 0x6d, 0xbf, 0xa6, 0x6d, 0xb5, 0x72, 0xbc, 0x97, 0xd8, 0x61, 0x24,
      0x0f, 0x12, 0xe3, 0x05, 0x21, 0xf7, 0x5c, 0x66, 0x67, 0xa5, 0x65, 0x03, 0x96, 0x26,
      0x69, 0xd8, 0x29, 0x20, 0xf8, 0xc7, 0xb0, 0x3d, 0xdd, 0x7d, 0x18, 0xa0, 0x60, 0x75,
      0x92, 0xa4, 0xce, 0xba, 0xc0, 0x77, 0xf4, 0xac, 0xb7, 0x03, 0x53, 0xf0, 0x98, 0xce,
      0xe6, 0x2b, 0x20, 0xc7, 0x82, 0x91, 0xab, 0xbf, 0x68, 0x5c, 0x62, 0x4d, 0x33, 0xa3,
      0xe1, 0xb3, 0xff, 0x97, 0x54, 0x4c, 0x44, 0x34, 0xb5, 0xb9, 0x32, 0x4c, 0x75, 0x42,
      0x89, 0x53, 0x94, 0xd4, 0x9f, 0x2b, 0x76, 0x4d, 0x4e, 0xe6, 0xfa, 0x15, 0x3e, 0xc1,
      0xdb, 0x71, 0x4b, 0x2c, 0x94, 0xf5, 0xfc, 0x8c, 0x89, 0x4b, 0xfb, 0xc1, 0x82, 0xa5,
      0x6a, 0x53, 0xf9, 0x4a, 0xba, 0xce, 0x1f, 0xc0, 0x97, 0x1a, 0x87};

  // The two secrets are the same buffer shifted by one byte, so both fit in
  // the same cache lines (hence the "+ 1" in the array size above).
  static_assert(AlgNum < 2, "AlgNum too large");
  static constexpr auto secret = kXxh3Secrets + AlgNum;
  return XXH3_64bits_withSecret(data, static_cast<size_t>(length), secret,
                                XXH3_SECRET_SIZE_MIN);
}
208
+
209
+ // XXX add a HashEq<ArrowType> struct with both hash and compare functions?
210
+
211
+ // ----------------------------------------------------------------------
212
+ // An open-addressing insert-only hash table (no deletes)
213
+
214
// An open-addressing, insert-only hash table mapping a 64-bit hash to a
// Payload.  Key storage and comparison are delegated to the caller through
// cmp_func, so the table itself never sees the keys.
template <typename Payload>
class HashTable {
 public:
  // A slot whose hash equals kSentinel is empty; FixHash() remaps genuine
  // zero hashes so they never collide with the sentinel.
  static constexpr hash_t kSentinel = 0ULL;
  // Upsizing keeps size_ * kLoadFactor < capacity_, i.e. load factor < 1/2.
  static constexpr int64_t kLoadFactor = 2UL;

  struct Entry {
    hash_t h;
    Payload payload;

    // An entry is valid if the hash is different from the sentinel value
    operator bool() const { return h != kSentinel; }
  };

  HashTable(MemoryPool* pool, uint64_t capacity) : entries_builder_(pool) {
    DCHECK_NE(pool, nullptr);
    // Minimum of 32 elements
    capacity = std::max<uint64_t>(capacity, 32UL);
    // Power-of-two capacity lets us mask instead of modulo when probing.
    capacity_ = bit_util::NextPower2(capacity);
    capacity_mask_ = capacity_ - 1;
    size_ = 0;

    DCHECK_OK(UpsizeBuffer(capacity_));
  }

  // Lookup with non-linear probing
  // cmp_func should have signature bool(const Payload*).
  // Return a (Entry*, found) pair.
  template <typename CmpFunc>
  std::pair<Entry*, bool> Lookup(hash_t h, CmpFunc&& cmp_func) {
    auto p = Lookup<DoCompare, CmpFunc>(h, entries_, capacity_mask_,
                                        std::forward<CmpFunc>(cmp_func));
    return {&entries_[p.first], p.second};
  }

  template <typename CmpFunc>
  std::pair<const Entry*, bool> Lookup(hash_t h, CmpFunc&& cmp_func) const {
    auto p = Lookup<DoCompare, CmpFunc>(h, entries_, capacity_mask_,
                                        std::forward<CmpFunc>(cmp_func));
    return {&entries_[p.first], p.second};
  }

  // Insert `payload` into `entry`, which must be the empty slot returned by
  // a previous failed Lookup() with the same hash `h`.
  Status Insert(Entry* entry, hash_t h, const Payload& payload) {
    // Ensure entry is empty before inserting
    assert(!*entry);
    entry->h = FixHash(h);
    entry->payload = payload;
    ++size_;

    if (ARROW_PREDICT_FALSE(NeedUpsizing())) {
      // Resize less frequently since it is expensive
      return Upsize(capacity_ * kLoadFactor * 2);
    }
    return Status::OK();
  }

  uint64_t size() const { return size_; }

  // Visit all non-empty entries in the table
  // The visit_func should have signature void(const Entry*)
  template <typename VisitFunc>
  void VisitEntries(VisitFunc&& visit_func) const {
    for (uint64_t i = 0; i < capacity_; i++) {
      const auto& entry = entries_[i];
      if (entry) {
        visit_func(&entry);
      }
    }
  }

 protected:
  // NoCompare is for when the value is known not to exist in the table
  // (used during rehashing), so cmp_func is never invoked.
  enum CompareKind { DoCompare, NoCompare };

  // The workhorse lookup function
  template <CompareKind CKind, typename CmpFunc>
  std::pair<uint64_t, bool> Lookup(hash_t h, const Entry* entries, uint64_t size_mask,
                                   CmpFunc&& cmp_func) const {
    static constexpr uint8_t perturb_shift = 5;

    uint64_t index, perturb;
    const Entry* entry;

    h = FixHash(h);
    index = h & size_mask;
    perturb = (h >> perturb_shift) + 1U;

    while (true) {
      entry = &entries[index];
      if (CompareEntry<CKind, CmpFunc>(h, entry, std::forward<CmpFunc>(cmp_func))) {
        // Found
        return {index, true};
      }
      if (entry->h == kSentinel) {
        // Empty slot
        return {index, false};
      }

      // Perturbation logic inspired from CPython's set / dict object.
      // The goal is that all 64 bits of the unmasked hash value eventually
      // participate in the probing sequence, to minimize clustering.
      index = (index + perturb) & size_mask;
      perturb = (perturb >> perturb_shift) + 1U;
    }
  }

  template <CompareKind CKind, typename CmpFunc>
  bool CompareEntry(hash_t h, const Entry* entry, CmpFunc&& cmp_func) const {
    if (CKind == NoCompare) {
      return false;
    } else {
      // Cheap hash comparison first; cmp_func only on a hash match.
      return entry->h == h && cmp_func(&entry->payload);
    }
  }

  bool NeedUpsizing() const {
    // Keep the load factor <= 1/2
    return size_ * kLoadFactor >= capacity_;
  }

  // (Re)allocate the entries array with `capacity` slots, all zeroed
  // (i.e. all marked empty via the sentinel hash).
  Status UpsizeBuffer(uint64_t capacity) {
    RETURN_NOT_OK(entries_builder_.Resize(capacity));
    entries_ = entries_builder_.mutable_data();
    memset(static_cast<void*>(entries_), 0, capacity * sizeof(Entry));

    return Status::OK();
  }

  // Grow to `new_capacity` (a power of two) and rehash every live entry.
  Status Upsize(uint64_t new_capacity) {
    assert(new_capacity > capacity_);
    uint64_t new_mask = new_capacity - 1;
    assert((new_capacity & new_mask) == 0);  // it's a power of two

    // Stash old entries and seal builder, effectively resetting the Buffer
    const Entry* old_entries = entries_;
    ARROW_ASSIGN_OR_RAISE(auto previous, entries_builder_.FinishWithLength(capacity_));
    // Allocate new buffer
    RETURN_NOT_OK(UpsizeBuffer(new_capacity));

    for (uint64_t i = 0; i < capacity_; i++) {
      const auto& entry = old_entries[i];
      if (entry) {
        // Dummy compare function will not be called
        auto p = Lookup<NoCompare>(entry.h, entries_, new_mask,
                                   [](const Payload*) { return false; });
        // Lookup<NoCompare> (and CompareEntry<NoCompare>) ensure that an
        // empty slots is always returned
        assert(!p.second);
        entries_[p.first] = entry;
      }
    }
    capacity_ = new_capacity;
    capacity_mask_ = new_mask;

    return Status::OK();
  }

  // Remap a hash equal to the sentinel to an arbitrary non-sentinel value.
  hash_t FixHash(hash_t h) const { return (h == kSentinel) ? 42U : h; }

  // The number of slots available in the hash table array.
  uint64_t capacity_;
  uint64_t capacity_mask_;
  // The number of used slots in the hash table array.
  uint64_t size_;

  Entry* entries_;
  TypedBufferBuilder<Entry> entries_builder_;
};
382
+
383
+ // XXX typedef memo_index_t int32_t ?
384
+
385
+ constexpr int32_t kKeyNotFound = -1;
386
+
387
+ // ----------------------------------------------------------------------
388
+ // A base class for memoization table.
389
+
390
// Abstract interface shared by all memoization tables; exposes only the
// number of memoized entries.
class MemoTable {
 public:
  virtual ~MemoTable() = default;

  // Number of entries (1 + the largest memo index handed out so far).
  virtual int32_t size() const = 0;
};
396
+
397
+ // ----------------------------------------------------------------------
398
+ // A memoization table for memory-cheap scalar values.
399
+
400
+ // The memoization table remembers and allows to look up the insertion
401
+ // index for each key.
402
+
403
// Memoization table for memory-cheap scalars, backed by HashTable.
// Maps each distinct value to its insertion index ("memo index"); null is
// tracked separately via null_index_.
template <typename Scalar, template <class> class HashTableTemplateType = HashTable>
class ScalarMemoTable : public MemoTable {
 public:
  explicit ScalarMemoTable(MemoryPool* pool, int64_t entries = 0)
      : hash_table_(pool, static_cast<uint64_t>(entries)) {}

  // Return the memo index of `value`, or kKeyNotFound if absent.
  int32_t Get(const Scalar& value) const {
    // Capture by value: Scalar is cheap to copy by design of this table.
    auto cmp_func = [value](const Payload* payload) -> bool {
      return ScalarHelper<Scalar, 0>::CompareScalars(payload->value, value);
    };
    hash_t h = ComputeHash(value);
    auto p = hash_table_.Lookup(h, cmp_func);
    if (p.second) {
      return p.first->payload.memo_index;
    } else {
      return kKeyNotFound;
    }
  }

  // Look up `value`, inserting it if absent.  Exactly one of `on_found` /
  // `on_not_found` is invoked with the memo index, which is also stored in
  // `*out_memo_index`.
  template <typename Func1, typename Func2>
  Status GetOrInsert(const Scalar& value, Func1&& on_found, Func2&& on_not_found,
                     int32_t* out_memo_index) {
    auto cmp_func = [value](const Payload* payload) -> bool {
      return ScalarHelper<Scalar, 0>::CompareScalars(value, payload->value);
    };
    hash_t h = ComputeHash(value);
    auto p = hash_table_.Lookup(h, cmp_func);
    int32_t memo_index;
    if (p.second) {
      memo_index = p.first->payload.memo_index;
      on_found(memo_index);
    } else {
      // New value: its memo index is the current size (null included).
      memo_index = size();
      RETURN_NOT_OK(hash_table_.Insert(p.first, h, {value, memo_index}));
      on_not_found(memo_index);
    }
    *out_memo_index = memo_index;
    return Status::OK();
  }

  // Convenience overload with no-op callbacks.
  Status GetOrInsert(const Scalar& value, int32_t* out_memo_index) {
    return GetOrInsert(
        value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index);
  }

  // Return the memo index of null, or kKeyNotFound if never inserted.
  int32_t GetNull() const { return null_index_; }

  // Look up null, inserting it if absent (callback semantics as above).
  template <typename Func1, typename Func2>
  int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) {
    int32_t memo_index = GetNull();
    if (memo_index != kKeyNotFound) {
      on_found(memo_index);
    } else {
      null_index_ = memo_index = size();
      on_not_found(memo_index);
    }
    return memo_index;
  }

  int32_t GetOrInsertNull() {
    return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {});
  }

  // The number of entries in the memo table +1 if null was added.
  // (which is also 1 + the largest memo index)
  int32_t size() const override {
    return static_cast<int32_t>(hash_table_.size()) + (GetNull() != kKeyNotFound);
  }

  // Copy values starting from index `start` into `out_data`
  // (out_data must hold size() - start elements).
  void CopyValues(int32_t start, Scalar* out_data) const {
    hash_table_.VisitEntries([=](const HashTableEntry* entry) {
      int32_t index = entry->payload.memo_index - start;
      if (index >= 0) {
        out_data[index] = entry->payload.value;
      }
    });
    // Zero-initialize the null entry
    if (null_index_ != kKeyNotFound) {
      int32_t index = null_index_ - start;
      if (index >= 0) {
        out_data[index] = Scalar{};
      }
    }
  }

  void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); }

 protected:
  struct Payload {
    Scalar value;
    int32_t memo_index;
  };

  using HashTableType = HashTableTemplateType<Payload>;
  using HashTableEntry = typename HashTableType::Entry;
  HashTableType hash_table_;
  int32_t null_index_ = kKeyNotFound;

  hash_t ComputeHash(const Scalar& value) const {
    return ScalarHelper<Scalar, 0>::ComputeHash(value);
  }

 public:
  // defined here so that `HashTableType` is visible
  // Merge entries from `other_table` into `this->hash_table_`.
  // NOTE(review): insertion errors inside the visitor are only DCHECKed,
  // not propagated — see the TODO below.
  Status MergeTable(const ScalarMemoTable& other_table) {
    const HashTableType& other_hashtable = other_table.hash_table_;

    other_hashtable.VisitEntries([this](const HashTableEntry* other_entry) {
      int32_t unused;
      DCHECK_OK(this->GetOrInsert(other_entry->payload.value, &unused));
    });
    // TODO: ARROW-17074 - implement proper error handling
    return Status::OK();
  }
};
520
+
521
+ // ----------------------------------------------------------------------
522
+ // A memoization table for small scalar values, using direct indexing
523
+
524
// Traits describing how a "small" scalar type maps onto a dense index range
// [0, cardinality) for use by SmallScalarMemoTable's direct-addressed array.
template <typename Scalar, typename Enable = void>
struct SmallScalarTraits {};

template <>
struct SmallScalarTraits<bool> {
  static constexpr int32_t cardinality = 2;

  static uint32_t AsIndex(bool value) { return value ? 1 : 0; }
};

template <typename Scalar>
struct SmallScalarTraits<Scalar, enable_if_t<std::is_integral<Scalar>::value>> {
  using Unsigned = typename std::make_unsigned<Scalar>::type;

  // e.g. 256 for 8-bit integers.  (SmallScalarMemoTable static_asserts that
  // this stays <= 256, so only 1-byte integer types are actually usable.)
  static constexpr int32_t cardinality = 1U + std::numeric_limits<Unsigned>::max();

  // Reinterpret as unsigned so negative values map onto the upper half of
  // the index range.
  static uint32_t AsIndex(Scalar value) { return static_cast<Unsigned>(value); }
};
542
+
543
+ template <typename Scalar, template <class> class HashTableTemplateType = HashTable>
544
+ class SmallScalarMemoTable : public MemoTable {
545
+ public:
546
+ explicit SmallScalarMemoTable(MemoryPool* pool, int64_t entries = 0) {
547
+ std::fill(value_to_index_, value_to_index_ + cardinality + 1, kKeyNotFound);
548
+ index_to_value_.reserve(cardinality);
549
+ }
550
+
551
+ int32_t Get(const Scalar value) const {
552
+ auto value_index = AsIndex(value);
553
+ return value_to_index_[value_index];
554
+ }
555
+
556
+ template <typename Func1, typename Func2>
557
+ Status GetOrInsert(const Scalar value, Func1&& on_found, Func2&& on_not_found,
558
+ int32_t* out_memo_index) {
559
+ auto value_index = AsIndex(value);
560
+ auto memo_index = value_to_index_[value_index];
561
+ if (memo_index == kKeyNotFound) {
562
+ memo_index = static_cast<int32_t>(index_to_value_.size());
563
+ index_to_value_.push_back(value);
564
+ value_to_index_[value_index] = memo_index;
565
+ DCHECK_LT(memo_index, cardinality + 1);
566
+ on_not_found(memo_index);
567
+ } else {
568
+ on_found(memo_index);
569
+ }
570
+ *out_memo_index = memo_index;
571
+ return Status::OK();
572
+ }
573
+
574
+ Status GetOrInsert(const Scalar value, int32_t* out_memo_index) {
575
+ return GetOrInsert(
576
+ value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index);
577
+ }
578
+
579
+ int32_t GetNull() const { return value_to_index_[cardinality]; }
580
+
581
+ template <typename Func1, typename Func2>
582
+ int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) {
583
+ auto memo_index = GetNull();
584
+ if (memo_index == kKeyNotFound) {
585
+ memo_index = value_to_index_[cardinality] = size();
586
+ index_to_value_.push_back(0);
587
+ on_not_found(memo_index);
588
+ } else {
589
+ on_found(memo_index);
590
+ }
591
+ return memo_index;
592
+ }
593
+
594
+ int32_t GetOrInsertNull() {
595
+ return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {});
596
+ }
597
+
598
+ // The number of entries in the memo table
599
+ // (which is also 1 + the largest memo index)
600
+ int32_t size() const override { return static_cast<int32_t>(index_to_value_.size()); }
601
+
602
+ // Merge entries from `other_table` into `this`.
603
+ Status MergeTable(const SmallScalarMemoTable& other_table) {
604
+ for (const Scalar& other_val : other_table.index_to_value_) {
605
+ int32_t unused;
606
+ RETURN_NOT_OK(this->GetOrInsert(other_val, &unused));
607
+ }
608
+ return Status::OK();
609
+ }
610
+
611
+ // Copy values starting from index `start` into `out_data`
612
+ void CopyValues(int32_t start, Scalar* out_data) const {
613
+ DCHECK_GE(start, 0);
614
+ DCHECK_LE(static_cast<size_t>(start), index_to_value_.size());
615
+ int64_t offset = start * static_cast<int32_t>(sizeof(Scalar));
616
+ memcpy(out_data, index_to_value_.data() + offset, (size() - start) * sizeof(Scalar));
617
+ }
618
+
619
+ void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); }
620
+
621
+ const std::vector<Scalar>& values() const { return index_to_value_; }
622
+
623
+ protected:
624
+ static constexpr auto cardinality = SmallScalarTraits<Scalar>::cardinality;
625
+ static_assert(cardinality <= 256, "cardinality too large for direct-addressed table");
626
+
627
+ uint32_t AsIndex(Scalar value) const {
628
+ return SmallScalarTraits<Scalar>::AsIndex(value);
629
+ }
630
+
631
+ // The last index is reserved for the null element.
632
+ int32_t value_to_index_[cardinality + 1];
633
+ std::vector<Scalar> index_to_value_;
634
+ };
635
+
636
+ // ----------------------------------------------------------------------
637
+ // A memoization table for variable-sized binary data.
638
+
639
+ template <typename BinaryBuilderT>
640
+ class BinaryMemoTable : public MemoTable {
641
+ public:
642
+ using builder_offset_type = typename BinaryBuilderT::offset_type;
643
+ explicit BinaryMemoTable(MemoryPool* pool, int64_t entries = 0,
644
+ int64_t values_size = -1)
645
+ : hash_table_(pool, static_cast<uint64_t>(entries)), binary_builder_(pool) {
646
+ const int64_t data_size = (values_size < 0) ? entries * 4 : values_size;
647
+ DCHECK_OK(binary_builder_.Resize(entries));
648
+ DCHECK_OK(binary_builder_.ReserveData(data_size));
649
+ }
650
+
651
+ int32_t Get(const void* data, builder_offset_type length) const {
652
+ hash_t h = ComputeStringHash<0>(data, length);
653
+ auto p = Lookup(h, data, length);
654
+ if (p.second) {
655
+ return p.first->payload.memo_index;
656
+ } else {
657
+ return kKeyNotFound;
658
+ }
659
+ }
660
+
661
+ int32_t Get(std::string_view value) const {
662
+ return Get(value.data(), static_cast<builder_offset_type>(value.length()));
663
+ }
664
+
665
+ template <typename Func1, typename Func2>
666
+ Status GetOrInsert(const void* data, builder_offset_type length, Func1&& on_found,
667
+ Func2&& on_not_found, int32_t* out_memo_index) {
668
+ hash_t h = ComputeStringHash<0>(data, length);
669
+ auto p = Lookup(h, data, length);
670
+ int32_t memo_index;
671
+ if (p.second) {
672
+ memo_index = p.first->payload.memo_index;
673
+ on_found(memo_index);
674
+ } else {
675
+ memo_index = size();
676
+ // Insert string value
677
+ RETURN_NOT_OK(binary_builder_.Append(static_cast<const char*>(data), length));
678
+ // Insert hash entry
679
+ RETURN_NOT_OK(
680
+ hash_table_.Insert(const_cast<HashTableEntry*>(p.first), h, {memo_index}));
681
+
682
+ on_not_found(memo_index);
683
+ }
684
+ *out_memo_index = memo_index;
685
+ return Status::OK();
686
+ }
687
+
688
+ template <typename Func1, typename Func2>
689
+ Status GetOrInsert(std::string_view value, Func1&& on_found, Func2&& on_not_found,
690
+ int32_t* out_memo_index) {
691
+ return GetOrInsert(value.data(), static_cast<builder_offset_type>(value.length()),
692
+ std::forward<Func1>(on_found), std::forward<Func2>(on_not_found),
693
+ out_memo_index);
694
+ }
695
+
696
+ Status GetOrInsert(const void* data, builder_offset_type length,
697
+ int32_t* out_memo_index) {
698
+ return GetOrInsert(
699
+ data, length, [](int32_t i) {}, [](int32_t i) {}, out_memo_index);
700
+ }
701
+
702
+ Status GetOrInsert(std::string_view value, int32_t* out_memo_index) {
703
+ return GetOrInsert(value.data(), static_cast<builder_offset_type>(value.length()),
704
+ out_memo_index);
705
+ }
706
+
707
+ int32_t GetNull() const { return null_index_; }
708
+
709
+ template <typename Func1, typename Func2>
710
+ int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) {
711
+ int32_t memo_index = GetNull();
712
+ if (memo_index == kKeyNotFound) {
713
+ memo_index = null_index_ = size();
714
+ DCHECK_OK(binary_builder_.AppendNull());
715
+ on_not_found(memo_index);
716
+ } else {
717
+ on_found(memo_index);
718
+ }
719
+ return memo_index;
720
+ }
721
+
722
+ int32_t GetOrInsertNull() {
723
+ return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {});
724
+ }
725
+
726
+ // The number of entries in the memo table
727
+ // (which is also 1 + the largest memo index)
728
+ int32_t size() const override {
729
+ return static_cast<int32_t>(hash_table_.size() + (GetNull() != kKeyNotFound));
730
+ }
731
+
732
+ int64_t values_size() const { return binary_builder_.value_data_length(); }
733
+
734
+ // Copy (n + 1) offsets starting from index `start` into `out_data`
735
+ template <class Offset>
736
+ void CopyOffsets(int32_t start, Offset* out_data) const {
737
+ DCHECK_LE(start, size());
738
+
739
+ const builder_offset_type* offsets = binary_builder_.offsets_data();
740
+ const builder_offset_type delta =
741
+ start < binary_builder_.length() ? offsets[start] : 0;
742
+ for (int32_t i = start; i < size(); ++i) {
743
+ const builder_offset_type adjusted_offset = offsets[i] - delta;
744
+ Offset cast_offset = static_cast<Offset>(adjusted_offset);
745
+ assert(static_cast<builder_offset_type>(cast_offset) ==
746
+ adjusted_offset); // avoid truncation
747
+ *out_data++ = cast_offset;
748
+ }
749
+
750
+ // Copy last value since BinaryBuilder only materializes it on in Finish()
751
+ *out_data = static_cast<Offset>(binary_builder_.value_data_length() - delta);
752
+ }
753
+
754
+ template <class Offset>
755
+ void CopyOffsets(Offset* out_data) const {
756
+ CopyOffsets(0, out_data);
757
+ }
758
+
759
+ // Copy values starting from index `start` into `out_data`
760
+ void CopyValues(int32_t start, uint8_t* out_data) const {
761
+ CopyValues(start, -1, out_data);
762
+ }
763
+
764
+ // Same as above, but check output size in debug mode
765
+ void CopyValues(int32_t start, int64_t out_size, uint8_t* out_data) const {
766
+ DCHECK_LE(start, size());
767
+
768
+ // The absolute byte offset of `start` value in the binary buffer.
769
+ const builder_offset_type offset = binary_builder_.offset(start);
770
+ const auto length = binary_builder_.value_data_length() - static_cast<size_t>(offset);
771
+
772
+ if (out_size != -1) {
773
+ assert(static_cast<int64_t>(length) <= out_size);
774
+ }
775
+
776
+ auto view = binary_builder_.GetView(start);
777
+ memcpy(out_data, view.data(), length);
778
+ }
779
+
780
+ void CopyValues(uint8_t* out_data) const { CopyValues(0, -1, out_data); }
781
+
782
+ void CopyValues(int64_t out_size, uint8_t* out_data) const {
783
+ CopyValues(0, out_size, out_data);
784
+ }
785
+
786
+ void CopyFixedWidthValues(int32_t start, int32_t width_size, int64_t out_size,
787
+ uint8_t* out_data) const {
788
+ // This method exists to cope with the fact that the BinaryMemoTable does
789
+ // not know the fixed width when inserting the null value. The data
790
+ // buffer hold a zero length string for the null value (if found).
791
+ //
792
+ // Thus, the method will properly inject an empty value of the proper width
793
+ // in the output buffer.
794
+ //
795
+ if (start >= size()) {
796
+ return;
797
+ }
798
+
799
+ int32_t null_index = GetNull();
800
+ if (null_index < start) {
801
+ // Nothing to skip, proceed as usual.
802
+ CopyValues(start, out_size, out_data);
803
+ return;
804
+ }
805
+
806
+ builder_offset_type left_offset = binary_builder_.offset(start);
807
+
808
+ // Ensure that the data length is exactly missing width_size bytes to fit
809
+ // in the expected output (n_values * width_size).
810
+ #ifndef NDEBUG
811
+ int64_t data_length = values_size() - static_cast<size_t>(left_offset);
812
+ assert(data_length + width_size == out_size);
813
+ ARROW_UNUSED(data_length);
814
+ #endif
815
+
816
+ auto in_data = binary_builder_.value_data() + left_offset;
817
+ // The null use 0-length in the data, slice the data in 2 and skip by
818
+ // width_size in out_data. [part_1][width_size][part_2]
819
+ auto null_data_offset = binary_builder_.offset(null_index);
820
+ auto left_size = null_data_offset - left_offset;
821
+ if (left_size > 0) {
822
+ memcpy(out_data, in_data + left_offset, left_size);
823
+ }
824
+ // Zero-initialize the null entry
825
+ memset(out_data + left_size, 0, width_size);
826
+
827
+ auto right_size = values_size() - static_cast<size_t>(null_data_offset);
828
+ if (right_size > 0) {
829
+ // skip the null fixed size value.
830
+ auto out_offset = left_size + width_size;
831
+ assert(out_data + out_offset + right_size == out_data + out_size);
832
+ memcpy(out_data + out_offset, in_data + null_data_offset, right_size);
833
+ }
834
+ }
835
+
836
+ // Visit the stored values in insertion order.
837
+ // The visitor function should have the signature `void(std::string_view)`
838
+ // or `void(const std::string_view&)`.
839
+ template <typename VisitFunc>
840
+ void VisitValues(int32_t start, VisitFunc&& visit) const {
841
+ for (int32_t i = start; i < size(); ++i) {
842
+ visit(binary_builder_.GetView(i));
843
+ }
844
+ }
845
+
846
+ protected:
847
+ struct Payload {
848
+ int32_t memo_index;
849
+ };
850
+
851
+ using HashTableType = HashTable<Payload>;
852
+ using HashTableEntry = typename HashTable<Payload>::Entry;
853
+ HashTableType hash_table_;
854
+ BinaryBuilderT binary_builder_;
855
+
856
+ int32_t null_index_ = kKeyNotFound;
857
+
858
+ std::pair<const HashTableEntry*, bool> Lookup(hash_t h, const void* data,
859
+ builder_offset_type length) const {
860
+ auto cmp_func = [&](const Payload* payload) {
861
+ std::string_view lhs = binary_builder_.GetView(payload->memo_index);
862
+ std::string_view rhs(static_cast<const char*>(data), length);
863
+ return lhs == rhs;
864
+ };
865
+ return hash_table_.Lookup(h, cmp_func);
866
+ }
867
+
868
+ public:
869
+ Status MergeTable(const BinaryMemoTable& other_table) {
870
+ other_table.VisitValues(0, [this](std::string_view other_value) {
871
+ int32_t unused;
872
+ DCHECK_OK(this->GetOrInsert(other_value, &unused));
873
+ });
874
+ return Status::OK();
875
+ }
876
+ };
877
+
878
// Type traits mapping an Arrow type to the memo-table implementation used
// to dictionary-encode values of that type.
template <typename T, typename Enable = void>
struct HashTraits {};

// Booleans use the small-scalar table.
template <>
struct HashTraits<BooleanType> {
  using MemoTableType = SmallScalarMemoTable<bool>;
};

// 8-bit integers also use the small-scalar table (small value domain).
template <typename T>
struct HashTraits<T, enable_if_8bit_int<T>> {
  using c_type = typename T::c_type;
  using MemoTableType = SmallScalarMemoTable<typename T::c_type>;
};

// Other C-type-backed primitive types use the generic scalar hash table.
template <typename T>
struct HashTraits<T, enable_if_t<has_c_type<T>::value && !is_8bit_int<T>::value>> {
  using c_type = typename T::c_type;
  using MemoTableType = ScalarMemoTable<c_type, HashTable>;
};

// Variable-size binary-like types (excluding large variants) memoize
// through a BinaryBuilder-backed table.
template <typename T>
struct HashTraits<T, enable_if_t<has_string_view<T>::value &&
                                 !std::is_base_of<LargeBinaryType, T>::value>> {
  using MemoTableType = BinaryMemoTable<BinaryBuilder>;
};

// Decimals are memoized via their binary representation.
template <typename T>
struct HashTraits<T, enable_if_decimal<T>> {
  using MemoTableType = BinaryMemoTable<BinaryBuilder>;
};

// Large binary-like types use a LargeBinaryBuilder-backed table.
template <typename T>
struct HashTraits<T, enable_if_t<std::is_base_of<LargeBinaryType, T>::value>> {
  using MemoTableType = BinaryMemoTable<LargeBinaryBuilder>;
};
913
+
914
+ template <typename MemoTableType>
915
+ static inline Status ComputeNullBitmap(MemoryPool* pool, const MemoTableType& memo_table,
916
+ int64_t start_offset, int64_t* null_count,
917
+ std::shared_ptr<Buffer>* null_bitmap) {
918
+ int64_t dict_length = static_cast<int64_t>(memo_table.size()) - start_offset;
919
+ int64_t null_index = memo_table.GetNull();
920
+
921
+ *null_count = 0;
922
+ *null_bitmap = nullptr;
923
+
924
+ if (null_index != kKeyNotFound && null_index >= start_offset) {
925
+ null_index -= start_offset;
926
+ *null_count = 1;
927
+ ARROW_ASSIGN_OR_RAISE(*null_bitmap,
928
+ internal::BitmapAllButOne(pool, dict_length, null_index));
929
+ }
930
+
931
+ return Status::OK();
932
+ }
933
+
934
+ struct StringViewHash {
935
+ // std::hash compatible hasher for use with std::unordered_*
936
+ // (the std::hash specialization provided by nonstd constructs std::string
937
+ // temporaries then invokes std::hash<std::string> against those)
938
+ hash_t operator()(std::string_view value) const {
939
+ return ComputeStringHash<0>(value.data(), static_cast<int64_t>(value.size()));
940
+ }
941
+ };
942
+
943
+ } // namespace internal
944
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <type_traits>
22
+
23
+ #include "arrow/status.h"
24
+
25
+ #include "arrow/util/visibility.h"
26
+
27
+ namespace arrow {
28
+
29
+ class DataType;
30
+ struct ArraySpan;
31
+ struct Scalar;
32
+
33
+ namespace internal {
34
+
35
+ ARROW_EXPORT
36
+ uint8_t DetectUIntWidth(const uint64_t* values, int64_t length, uint8_t min_width = 1);
37
+
38
+ ARROW_EXPORT
39
+ uint8_t DetectUIntWidth(const uint64_t* values, const uint8_t* valid_bytes,
40
+ int64_t length, uint8_t min_width = 1);
41
+
42
+ ARROW_EXPORT
43
+ uint8_t DetectIntWidth(const int64_t* values, int64_t length, uint8_t min_width = 1);
44
+
45
+ ARROW_EXPORT
46
+ uint8_t DetectIntWidth(const int64_t* values, const uint8_t* valid_bytes, int64_t length,
47
+ uint8_t min_width = 1);
48
+
49
+ ARROW_EXPORT
50
+ void DowncastInts(const int64_t* source, int8_t* dest, int64_t length);
51
+
52
+ ARROW_EXPORT
53
+ void DowncastInts(const int64_t* source, int16_t* dest, int64_t length);
54
+
55
+ ARROW_EXPORT
56
+ void DowncastInts(const int64_t* source, int32_t* dest, int64_t length);
57
+
58
+ ARROW_EXPORT
59
+ void DowncastInts(const int64_t* source, int64_t* dest, int64_t length);
60
+
61
+ ARROW_EXPORT
62
+ void DowncastUInts(const uint64_t* source, uint8_t* dest, int64_t length);
63
+
64
+ ARROW_EXPORT
65
+ void DowncastUInts(const uint64_t* source, uint16_t* dest, int64_t length);
66
+
67
+ ARROW_EXPORT
68
+ void DowncastUInts(const uint64_t* source, uint32_t* dest, int64_t length);
69
+
70
+ ARROW_EXPORT
71
+ void DowncastUInts(const uint64_t* source, uint64_t* dest, int64_t length);
72
+
73
+ ARROW_EXPORT
74
+ void UpcastInts(const int32_t* source, int64_t* dest, int64_t length);
75
+
76
/// \brief Copy `length` integers from `source` to `dest`.
/// This overload is selected when the output type is not wider than the
/// input type and delegates to DowncastInts().
template <typename InputInt, typename OutputInt>
inline std::enable_if_t<(sizeof(InputInt) >= sizeof(OutputInt))> CastInts(
    const InputInt* source, OutputInt* dest, int64_t length) {
  DowncastInts(source, dest, length);
}

/// \brief Copy `length` integers from `source` to `dest`.
/// This overload is selected when the output type is wider than the input
/// type and delegates to UpcastInts().
template <typename InputInt, typename OutputInt>
inline std::enable_if_t<(sizeof(InputInt) < sizeof(OutputInt))> CastInts(
    const InputInt* source, OutputInt* dest, int64_t length) {
  UpcastInts(source, dest, length);
}
87
+
88
+ template <typename InputInt, typename OutputInt>
89
+ ARROW_EXPORT void TransposeInts(const InputInt* source, OutputInt* dest, int64_t length,
90
+ const int32_t* transpose_map);
91
+
92
+ ARROW_EXPORT
93
+ Status TransposeInts(const DataType& src_type, const DataType& dest_type,
94
+ const uint8_t* src, uint8_t* dest, int64_t src_offset,
95
+ int64_t dest_offset, int64_t length, const int32_t* transpose_map);
96
+
97
+ /// \brief Do vectorized boundschecking of integer-type array indices. The
98
+ /// indices must be nonnegative and strictly less than the passed upper
99
+ /// limit (which is usually the length of an array that is being indexed-into).
100
+ ARROW_EXPORT
101
+ Status CheckIndexBounds(const ArraySpan& values, uint64_t upper_limit);
102
+
103
+ /// \brief Boundscheck integer values to determine if they are all between the
104
+ /// passed upper and lower limits (inclusive). Upper and lower bounds must be
105
+ /// the same type as the data and are not currently casted.
106
+ ARROW_EXPORT
107
+ Status CheckIntegersInRange(const ArraySpan& values, const Scalar& bound_lower,
108
+ const Scalar& bound_upper);
109
+
110
+ /// \brief Use CheckIntegersInRange to determine whether the passed integers
111
+ /// can fit safely in the passed integer type. This helps quickly determine if
112
+ /// integer narrowing (e.g. int64->int32) is safe to do.
113
+ ARROW_EXPORT
114
+ Status IntegersCanFit(const ArraySpan& values, const DataType& target_type);
115
+
116
+ /// \brief Convenience for boundschecking a single Scalar value
117
+ ARROW_EXPORT
118
+ Status IntegersCanFit(const Scalar& value, const DataType& target_type);
119
+
120
/// Upcast an integer to the largest possible width (currently 64 bits)

/// Signed integral values widen to int64_t (value-preserving).
template <typename Integer>
std::enable_if_t<std::is_integral<Integer>::value && std::is_signed<Integer>::value,
                 int64_t>
UpcastInt(Integer v) {
  return static_cast<int64_t>(v);
}

/// Unsigned integral values widen to uint64_t (value-preserving).
template <typename Integer>
std::enable_if_t<std::is_integral<Integer>::value && std::is_unsigned<Integer>::value,
                 uint64_t>
UpcastInt(Integer v) {
  return static_cast<uint64_t>(v);
}
135
+
136
+ } // namespace internal
137
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <limits>
22
+ #include <type_traits>
23
+
24
+ #include "arrow/status.h"
25
+ #include "arrow/util/macros.h"
26
+ #include "arrow/util/visibility.h"
27
+
28
+ // "safe-math.h" includes <intsafe.h> from the Windows headers.
29
+ #include "arrow/util/windows_compatibility.h"
30
+ #include "arrow/vendored/portable-snippets/safe-math.h"
31
+ // clang-format off (avoid include reordering)
32
+ #include "arrow/util/windows_fixup.h"
33
+ // clang-format on
34
+
35
+ namespace arrow {
36
+ namespace internal {
37
+
38
// Define functions AddWithOverflow, SubtractWithOverflow, MultiplyWithOverflow
// and DivideWithOverflow with the signature `bool(T u, T v, T* out)` where T
// is an integer type.  On overflow, these functions return true.  Otherwise,
// false is returned and `out` is updated with the result of the operation.
// The arithmetic is delegated to the vendored portable-snippets safe-math
// helpers (psnip_safe_*); their success flag is negated so that `true`
// means overflow here.

#define OP_WITH_OVERFLOW(_func_name, _psnip_op, _type, _psnip_type)           \
  [[nodiscard]] static inline bool _func_name(_type u, _type v, _type* out) { \
    return !psnip_safe_##_psnip_type##_##_psnip_op(out, u, v);                \
  }

// Instantiate one overload per fixed-width integer type.
#define OPS_WITH_OVERFLOW(_func_name, _psnip_op)            \
  OP_WITH_OVERFLOW(_func_name, _psnip_op, int8_t, int8)     \
  OP_WITH_OVERFLOW(_func_name, _psnip_op, int16_t, int16)   \
  OP_WITH_OVERFLOW(_func_name, _psnip_op, int32_t, int32)   \
  OP_WITH_OVERFLOW(_func_name, _psnip_op, int64_t, int64)   \
  OP_WITH_OVERFLOW(_func_name, _psnip_op, uint8_t, uint8)   \
  OP_WITH_OVERFLOW(_func_name, _psnip_op, uint16_t, uint16) \
  OP_WITH_OVERFLOW(_func_name, _psnip_op, uint32_t, uint32) \
  OP_WITH_OVERFLOW(_func_name, _psnip_op, uint64_t, uint64)

OPS_WITH_OVERFLOW(AddWithOverflow, add)
OPS_WITH_OVERFLOW(SubtractWithOverflow, sub)
OPS_WITH_OVERFLOW(MultiplyWithOverflow, mul)
OPS_WITH_OVERFLOW(DivideWithOverflow, div)

// The generator macros are implementation details; don't leak them.
#undef OP_WITH_OVERFLOW
#undef OPS_WITH_OVERFLOW
65
+
66
// Define function NegateWithOverflow with the signature `bool(T u, T* out)`
// where T is a signed integer type.  On overflow, these functions return
// true.  Otherwise, false is returned and `out` is updated with the result
// of the operation.

#define UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, _type, _psnip_type) \
  [[nodiscard]] static inline bool _func_name(_type u, _type* out) {      \
    return !psnip_safe_##_psnip_type##_##_psnip_op(out, u);               \
  }

// Only signed instantiations are generated (see comment above: T is a
// signed integer type).
#define SIGNED_UNARY_OPS_WITH_OVERFLOW(_func_name, _psnip_op)   \
  UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int8_t, int8)   \
  UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int16_t, int16) \
  UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int32_t, int32) \
  UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int64_t, int64)

SIGNED_UNARY_OPS_WITH_OVERFLOW(NegateWithOverflow, neg)

// The generator macros are implementation details; don't leak them.
#undef UNARY_OP_WITH_OVERFLOW
#undef SIGNED_UNARY_OPS_WITH_OVERFLOW
86
+
87
/// Signed addition with well-defined behaviour on overflow (as unsigned)
template <typename SignedInt>
SignedInt SafeSignedAdd(SignedInt u, SignedInt v) {
  // Add in the unsigned domain, where wraparound is well-defined, then
  // convert the bit pattern back to the signed type.
  using UnsignedInt = typename std::make_unsigned<SignedInt>::type;
  const auto lhs = static_cast<UnsignedInt>(u);
  const auto rhs = static_cast<UnsignedInt>(v);
  return static_cast<SignedInt>(lhs + rhs);
}
94
+
95
/// Signed subtraction with well-defined behaviour on overflow (as unsigned)
template <typename SignedInt>
SignedInt SafeSignedSubtract(SignedInt u, SignedInt v) {
  // Subtract in the unsigned domain (modular arithmetic), then convert the
  // bit pattern back to the signed type.
  using UnsignedInt = typename std::make_unsigned<SignedInt>::type;
  const auto lhs = static_cast<UnsignedInt>(u);
  const auto rhs = static_cast<UnsignedInt>(v);
  return static_cast<SignedInt>(lhs - rhs);
}
102
+
103
/// Signed negation with well-defined behaviour on overflow (as unsigned)
template <typename SignedInt>
SignedInt SafeSignedNegate(SignedInt u) {
  // Negate modulo 2^width on the unsigned counterpart (0 - x is equivalent
  // to ~x + 1), so negating the minimum value wraps instead of being UB.
  using UnsignedInt = typename std::make_unsigned<SignedInt>::type;
  const auto bits = static_cast<UnsignedInt>(u);
  return static_cast<SignedInt>(static_cast<UnsignedInt>(0) - bits);
}
109
+
110
/// Signed left shift with well-defined behaviour on negative numbers or overflow
template <typename SignedInt, typename Shift>
SignedInt SafeLeftShift(SignedInt u, Shift shift) {
  // Shift the unsigned bit pattern (well-defined, modulo 2^width) and
  // reinterpret the result as signed.
  using UnsignedInt = typename std::make_unsigned<SignedInt>::type;
  const auto bits = static_cast<UnsignedInt>(u);
  return static_cast<SignedInt>(static_cast<UnsignedInt>(bits << shift));
}
116
+
117
+ } // namespace internal
118
+ } // namespace arrow
valley/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h ADDED
@@ -0,0 +1,452 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #ifndef _WIN32
21
+ #define ARROW_HAVE_SIGACTION 1
22
+ #endif
23
+
24
+ #include <atomic>
25
+ #include <memory>
26
+ #include <optional>
27
+ #include <string>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ #if ARROW_HAVE_SIGACTION
32
+ #include <csignal> // Needed for struct sigaction
33
+ #endif
34
+
35
+ #include "arrow/result.h"
36
+ #include "arrow/status.h"
37
+ #include "arrow/type_fwd.h"
38
+ #include "arrow/util/macros.h"
39
+ #include "arrow/util/windows_fixup.h"
40
+
41
+ namespace arrow::internal {
42
+
43
+ // NOTE: 8-bit path strings on Windows are encoded using UTF-8.
44
+ // Using MBCS would fail encoding some paths.
45
+
46
+ #if defined(_WIN32)
47
+ using NativePathString = std::wstring;
48
+ #else
49
+ using NativePathString = std::string;
50
+ #endif
51
+
52
/// \brief A file path held in the platform's native string type
/// (std::wstring on Windows, std::string elsewhere — see NativePathString),
/// with conversion and path-manipulation helpers.
class ARROW_EXPORT PlatformFilename {
 public:
  struct Impl;

  ~PlatformFilename();
  PlatformFilename();
  PlatformFilename(const PlatformFilename&);
  PlatformFilename(PlatformFilename&&);
  PlatformFilename& operator=(const PlatformFilename&);
  PlatformFilename& operator=(PlatformFilename&&);
  explicit PlatformFilename(NativePathString path);
  explicit PlatformFilename(const NativePathString::value_type* path);

  /// The path in the platform's native string type.
  const NativePathString& ToNative() const;
  /// The path as an 8-bit std::string (UTF-8 encoded on Windows, per the
  /// note at the top of this header).
  std::string ToString() const;

  /// The parent path of this path.
  PlatformFilename Parent() const;
  /// The "real" resolved form of this path.
  /// NOTE(review): presumably canonicalized/symlink-resolved — confirm in
  /// the implementation file.
  Result<PlatformFilename> Real() const;

  // These functions can fail for character encoding reasons.
  static Result<PlatformFilename> FromString(std::string_view file_name);
  Result<PlatformFilename> Join(std::string_view child_name) const;

  /// Join with an already-encoded child path (cannot fail on encoding).
  PlatformFilename Join(const PlatformFilename& child_name) const;

  bool operator==(const PlatformFilename& other) const;
  bool operator!=(const PlatformFilename& other) const;

  // Made public to avoid the proliferation of friend declarations.
  const Impl* impl() const { return impl_.get(); }

 private:
  std::unique_ptr<Impl> impl_;

  explicit PlatformFilename(Impl impl);
};
88
+
89
+ /// Create a directory if it doesn't exist.
90
+ ///
91
+ /// Return whether the directory was created.
92
+ ARROW_EXPORT
93
+ Result<bool> CreateDir(const PlatformFilename& dir_path);
94
+
95
+ /// Create a directory and its parents if it doesn't exist.
96
+ ///
97
+ /// Return whether the directory was created.
98
+ ARROW_EXPORT
99
+ Result<bool> CreateDirTree(const PlatformFilename& dir_path);
100
+
101
+ /// Delete a directory's contents (but not the directory itself) if it exists.
102
+ ///
103
+ /// Return whether the directory existed.
104
+ ARROW_EXPORT
105
+ Result<bool> DeleteDirContents(const PlatformFilename& dir_path,
106
+ bool allow_not_found = true);
107
+
108
+ /// Delete a directory tree if it exists.
109
+ ///
110
+ /// Return whether the directory existed.
111
+ ARROW_EXPORT
112
+ Result<bool> DeleteDirTree(const PlatformFilename& dir_path, bool allow_not_found = true);
113
+
114
+ // Non-recursively list the contents of the given directory.
115
+ // The returned names are the children's base names, not including dir_path.
116
+ ARROW_EXPORT
117
+ Result<std::vector<PlatformFilename>> ListDir(const PlatformFilename& dir_path);
118
+
119
+ /// Delete a file if it exists.
120
+ ///
121
+ /// Return whether the file existed.
122
+ ARROW_EXPORT
123
+ Result<bool> DeleteFile(const PlatformFilename& file_path, bool allow_not_found = true);
124
+
125
+ /// Return whether a file exists.
126
+ ARROW_EXPORT
127
+ Result<bool> FileExists(const PlatformFilename& path);
128
+
129
// TODO expose this more publicly to make it available from io/file.h?
/// A RAII wrapper for a file descriptor.
///
/// The underlying file descriptor is automatically closed on destruction.
/// Moving is supported with well-defined semantics.
/// Furthermore, closing is idempotent.
class ARROW_EXPORT FileDescriptor {
 public:
  FileDescriptor() = default;
  /// Wrap an already-open descriptor; this object takes ownership.
  explicit FileDescriptor(int fd) : fd_(fd) {}
  FileDescriptor(FileDescriptor&&);
  FileDescriptor& operator=(FileDescriptor&&);

  ~FileDescriptor();

  /// Close the underlying descriptor (idempotent).
  Status Close();

  /// May return -1 if closed or default-initialized
  int fd() const { return fd_.load(); }

  /// Detach and return the underlying file descriptor
  int Detach();

  /// Whether the stored descriptor is the -1 sentinel (closed, detached
  /// or never set).
  bool closed() const { return fd_.load() == -1; }

 protected:
  static void CloseFromDestructor(int fd);

  // -1 is the "no descriptor" sentinel.  Atomic — presumably so fd() and
  // closed() can be read concurrently with Close()/Detach(); confirm
  // against the implementation.
  std::atomic<int> fd_{-1};
};
159
+
160
+ /// Open a file for reading and return a file descriptor.
161
+ ARROW_EXPORT
162
+ Result<FileDescriptor> FileOpenReadable(const PlatformFilename& file_name);
163
+
164
+ /// Open a file for writing and return a file descriptor.
165
+ ARROW_EXPORT
166
+ Result<FileDescriptor> FileOpenWritable(const PlatformFilename& file_name,
167
+ bool write_only = true, bool truncate = true,
168
+ bool append = false);
169
+
170
+ /// Read from current file position. Return number of bytes read.
171
+ ARROW_EXPORT
172
+ Result<int64_t> FileRead(int fd, uint8_t* buffer, int64_t nbytes);
173
+ /// Read from given file position. Return number of bytes read.
174
+ ARROW_EXPORT
175
+ Result<int64_t> FileReadAt(int fd, uint8_t* buffer, int64_t position, int64_t nbytes);
176
+
177
+ ARROW_EXPORT
178
+ Status FileWrite(int fd, const uint8_t* buffer, const int64_t nbytes);
179
+ ARROW_EXPORT
180
+ Status FileTruncate(int fd, const int64_t size);
181
+
182
+ ARROW_EXPORT
183
+ Status FileSeek(int fd, int64_t pos);
184
+ ARROW_EXPORT
185
+ Status FileSeek(int fd, int64_t pos, int whence);
186
+ ARROW_EXPORT
187
+ Result<int64_t> FileTell(int fd);
188
+ ARROW_EXPORT
189
+ Result<int64_t> FileGetSize(int fd);
190
+
191
+ ARROW_EXPORT
192
+ Status FileClose(int fd);
193
+
194
/// A pair of pipe file descriptors (presumably `rfd` = read end,
/// `wfd` = write end, as produced by CreatePipe() below).
struct Pipe {
  FileDescriptor rfd;
  FileDescriptor wfd;

  // Closes both ends; the two Statuses are combined via Status's
  // operator& (defined in status.h).
  Status Close() { return rfd.Close() & wfd.Close(); }
};
200
+
201
+ ARROW_EXPORT
202
+ Result<Pipe> CreatePipe();
203
+
204
+ ARROW_EXPORT
205
+ Status SetPipeFileDescriptorNonBlocking(int fd);
206
+
207
/// \brief A wakeup mechanism: one thread blocks in Wait() until another
/// context (optionally a signal handler) calls Send() or Shutdown().
class ARROW_EXPORT SelfPipe {
 public:
  /// Create a self-pipe.  If `signal_safe` is true, Send() is usable from
  /// a signal handler.
  static Result<std::shared_ptr<SelfPipe>> Make(bool signal_safe);
  virtual ~SelfPipe();

  /// \brief Wait for a wakeup.
  ///
  /// Status::Invalid is returned if the pipe has been shutdown.
  /// Otherwise the next sent payload is returned.
  virtual Result<uint64_t> Wait() = 0;

  /// \brief Wake up the pipe by sending a payload.
  ///
  /// This method is async-signal-safe if `signal_safe` was set to true.
  virtual void Send(uint64_t payload) = 0;

  /// \brief Wake up the pipe and shut it down.
  virtual Status Shutdown() = 0;
};
226
+
227
+ ARROW_EXPORT
228
+ int64_t GetPageSize();
229
+
230
+ struct MemoryRegion {
231
+ void* addr;
232
+ size_t size;
233
+ };
234
+
235
+ ARROW_EXPORT
236
+ Status MemoryMapRemap(void* addr, size_t old_size, size_t new_size, int fildes,
237
+ void** new_addr);
238
+ ARROW_EXPORT
239
+ Status MemoryAdviseWillNeed(const std::vector<MemoryRegion>& regions);
240
+
241
+ ARROW_EXPORT
242
+ Result<std::string> GetEnvVar(const char* name);
243
+ ARROW_EXPORT
244
+ Result<std::string> GetEnvVar(const std::string& name);
245
+ ARROW_EXPORT
246
+ Result<NativePathString> GetEnvVarNative(const char* name);
247
+ ARROW_EXPORT
248
+ Result<NativePathString> GetEnvVarNative(const std::string& name);
249
+
250
+ ARROW_EXPORT
251
+ Status SetEnvVar(const char* name, const char* value);
252
+ ARROW_EXPORT
253
+ Status SetEnvVar(const std::string& name, const std::string& value);
254
+ ARROW_EXPORT
255
+ Status DelEnvVar(const char* name);
256
+ ARROW_EXPORT
257
+ Status DelEnvVar(const std::string& name);
258
+
259
+ ARROW_EXPORT
260
+ std::string ErrnoMessage(int errnum);
261
+ #if _WIN32
262
+ ARROW_EXPORT
263
+ std::string WinErrorMessage(int errnum);
264
+ #endif
265
+
266
+ ARROW_EXPORT
267
+ std::shared_ptr<StatusDetail> StatusDetailFromErrno(int errnum);
268
+ ARROW_EXPORT
269
+ std::optional<int> ErrnoFromStatusDetail(const StatusDetail& detail);
270
+ #if _WIN32
271
+ ARROW_EXPORT
272
+ std::shared_ptr<StatusDetail> StatusDetailFromWinError(int errnum);
273
+ #endif
274
+ ARROW_EXPORT
275
+ std::shared_ptr<StatusDetail> StatusDetailFromSignal(int signum);
276
+
277
/// Build a Status with the given code from an errno value.  The errno is
/// attached as a StatusDetail (via StatusDetailFromErrno) so it can later
/// be recovered with ErrnoFromStatus().
template <typename... Args>
Status StatusFromErrno(int errnum, StatusCode code, Args&&... args) {
  return Status::FromDetailAndArgs(code, StatusDetailFromErrno(errnum),
                                   std::forward<Args>(args)...);
}

/// Convenience wrapper: StatusFromErrno() with StatusCode::IOError.
template <typename... Args>
Status IOErrorFromErrno(int errnum, Args&&... args) {
  return StatusFromErrno(errnum, StatusCode::IOError, std::forward<Args>(args)...);
}
287
+
288
#if _WIN32
/// Build a Status with the given code from a Windows error code, attached
/// as a StatusDetail (via StatusDetailFromWinError).
template <typename... Args>
Status StatusFromWinError(int errnum, StatusCode code, Args&&... args) {
  return Status::FromDetailAndArgs(code, StatusDetailFromWinError(errnum),
                                   std::forward<Args>(args)...);
}

/// Convenience wrapper: StatusFromWinError() with StatusCode::IOError.
template <typename... Args>
Status IOErrorFromWinError(int errnum, Args&&... args) {
  return StatusFromWinError(errnum, StatusCode::IOError, std::forward<Args>(args)...);
}
#endif
300
+
301
/// Build a Status with the given code from a signal number, attached as a
/// StatusDetail (via StatusDetailFromSignal) so it can later be recovered
/// with SignalFromStatus().
template <typename... Args>
Status StatusFromSignal(int signum, StatusCode code, Args&&... args) {
  return Status::FromDetailAndArgs(code, StatusDetailFromSignal(signum),
                                   std::forward<Args>(args)...);
}

/// Convenience wrapper: StatusFromSignal() with StatusCode::Cancelled.
template <typename... Args>
Status CancelledFromSignal(int signum, Args&&... args) {
  return StatusFromSignal(signum, StatusCode::Cancelled, std::forward<Args>(args)...);
}
311
+
312
+ ARROW_EXPORT
313
+ int ErrnoFromStatus(const Status&);
314
+
315
+ // Always returns 0 on non-Windows platforms (for Python).
316
+ ARROW_EXPORT
317
+ int WinErrorFromStatus(const Status&);
318
+
319
+ ARROW_EXPORT
320
+ int SignalFromStatus(const Status&);
321
+
322
/// \brief A temporary directory created under the system temporary dir.
/// NOTE(review): the destructor presumably removes the directory and its
/// contents — confirm in the implementation file.
class ARROW_EXPORT TemporaryDir {
 public:
  ~TemporaryDir();

  /// '/'-terminated path to the temporary dir
  const PlatformFilename& path() { return path_; }

  /// Create a temporary subdirectory in the system temporary dir,
  /// named starting with `prefix`.
  static Result<std::unique_ptr<TemporaryDir>> Make(const std::string& prefix);

 private:
  PlatformFilename path_;

  // Construction only through Make().
  explicit TemporaryDir(PlatformFilename&&);
};
338
+
339
/// \brief A value type describing a signal handler configuration:
/// a plain callback, or a full sigaction on platforms that have it.
class ARROW_EXPORT SignalHandler {
 public:
  using Callback = void (*)(int);

  SignalHandler();
  explicit SignalHandler(Callback cb);
#if ARROW_HAVE_SIGACTION
  explicit SignalHandler(const struct sigaction& sa);
#endif

  /// The plain function callback for this handler.
  Callback callback() const;
#if ARROW_HAVE_SIGACTION
  /// The full sigaction (only on platforms providing sigaction()).
  const struct sigaction& action() const;
#endif

 protected:
#if ARROW_HAVE_SIGACTION
  // Storing the full sigaction allows to restore the entire signal handling
  // configuration.
  struct sigaction sa_;
#else
  Callback cb_;
#endif
};
363
+
364
+ /// \brief Return the current handler for the given signal number.
365
+ ARROW_EXPORT
366
+ Result<SignalHandler> GetSignalHandler(int signum);
367
+
368
+ /// \brief Set a new handler for the given signal number.
369
+ ///
370
+ /// The old signal handler is returned.
371
+ ARROW_EXPORT
372
+ Result<SignalHandler> SetSignalHandler(int signum, const SignalHandler& handler);
373
+
374
+ /// \brief Reinstate the signal handler
375
+ ///
376
+ /// For use in signal handlers. This is needed on platforms without sigaction()
377
+ /// such as Windows, as the default signal handler is restored there as
378
+ /// soon as a signal is raised.
379
+ ARROW_EXPORT
380
+ void ReinstateSignalHandler(int signum, SignalHandler::Callback handler);
381
+
382
+ /// \brief Send a signal to the current process
383
+ ///
384
+ /// The thread which will receive the signal is unspecified.
385
+ ARROW_EXPORT
386
+ Status SendSignal(int signum);
387
+
388
+ /// \brief Send a signal to the given thread
389
+ ///
390
+ /// This function isn't supported on Windows.
391
+ ARROW_EXPORT
392
+ Status SendSignalToThread(int signum, uint64_t thread_id);
393
+
394
+ /// \brief Get an unpredictable random seed
395
+ ///
396
+ /// This function may be slightly costly, so should only be used to initialize
397
+ /// a PRNG, not to generate a large amount of random numbers.
398
+ /// It is better to use this function rather than std::random_device, unless
399
+ /// absolutely necessary (e.g. to generate a cryptographic secret).
400
+ ARROW_EXPORT
401
+ int64_t GetRandomSeed();
402
+
403
+ /// \brief Get the current thread id
404
+ ///
405
+ /// In addition to having the same properties as std::thread, the returned value
406
+ /// is a regular integer value, which is more convenient than an opaque type.
407
+ ARROW_EXPORT
408
+ uint64_t GetThreadId();
409
+
410
+ /// \brief Get the current memory used by the current process in bytes
411
+ ///
412
+ /// This function supports Windows, Linux, and Mac and will return 0 otherwise
413
+ ARROW_EXPORT
414
+ int64_t GetCurrentRSS();
415
+
416
+ /// \brief Get the total memory available to the system in bytes
417
+ ///
418
+ /// This function supports Windows, Linux, and Mac and will return 0 otherwise
419
+ ARROW_EXPORT
420
+ int64_t GetTotalMemoryBytes();
421
+
422
+ /// \brief Load a dynamic library
423
+ ///
424
+ /// This wraps dlopen() except on Windows, where LoadLibrary() is called.
425
+ /// These two platforms handle absolute paths consistently; relative paths
426
+ /// or the library's bare name may be handled but inconsistently.
427
+ ///
428
+ /// \return An opaque handle for the dynamic library, which can be used for
429
+ /// subsequent symbol lookup. Nullptr will never be returned; instead
430
+ /// an error will be raised.
431
+ ARROW_EXPORT Result<void*> LoadDynamicLibrary(const PlatformFilename& path);
432
+
433
+ /// \brief Load a dynamic library
434
+ ///
435
+ /// An overload taking null terminated string.
436
+ ARROW_EXPORT Result<void*> LoadDynamicLibrary(const char* path);
437
+
438
+ /// \brief Retrieve a symbol by name from a library handle.
439
+ ///
440
+ /// This wraps dlsym() except on Windows, where GetProcAddress() is called.
441
+ ///
442
+ /// \return The address associated with the named symbol. Nullptr will never be
443
+ /// returned; instead an error will be raised.
444
+ ARROW_EXPORT Result<void*> GetSymbol(void* handle, const char* name);
445
+
446
/// \brief Typed convenience wrapper around GetSymbol(): look up `name` in
/// `handle` and cast the resulting address to `T*`.  Propagates any lookup
/// error from GetSymbol().
template <typename T>
Result<T*> GetSymbolAs(void* handle, const char* name) {
  ARROW_ASSIGN_OR_RAISE(void* sym, GetSymbol(handle, name));
  return reinterpret_cast<T*>(sym);
}
451
+
452
+ } // namespace arrow::internal