ZTWHHH commited on
Commit
371872c
·
verified ·
1 Parent(s): ab5f07d

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. videollama2/lib/python3.10/site-packages/torch/include/ATen/CachedTensorUtils.h +24 -0
  2. videollama2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h +321 -0
  3. videollama2/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h +21 -0
  4. videollama2/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapTransforms.h +183 -0
  5. videollama2/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h +215 -0
  6. videollama2/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h +186 -0
  7. videollama2/lib/python3.10/site-packages/torch/include/ATen/Parallel.h +160 -0
  8. videollama2/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h +52 -0
  9. videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h +137 -0
  10. videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h +75 -0
  11. videollama2/lib/python3.10/site-packages/torch/include/ATen/ceil_div.h +24 -0
  12. videollama2/lib/python3.10/site-packages/torch/include/ATen/jit_macros.h +7 -0
  13. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h +160 -0
  14. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h +57 -0
  15. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h +48 -0
  16. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h +819 -0
  17. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h +103 -0
  18. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h +23 -0
  19. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h +226 -0
  20. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h +31 -0
  21. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h +50 -0
  22. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/adapter.h +323 -0
  23. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/options.h +120 -0
  24. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/tensorflow/convert.h +128 -0
  25. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h +887 -0
  26. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h +51 -0
  27. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h +53 -0
  28. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h +709 -0
  29. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h +489 -0
  30. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h +81 -0
  31. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h +753 -0
  32. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h +120 -0
  33. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h +126 -0
  34. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h +58 -0
  35. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h +215 -0
  36. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/api.h +39 -0
  37. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset.h +481 -0
  38. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset_writer.h +103 -0
  39. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h +275 -0
  40. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_base.h +495 -0
  41. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_csv.h +144 -0
  42. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_ipc.h +123 -0
  43. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_json.h +98 -0
  44. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_orc.h +75 -0
  45. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/parquet_encryption_config.h +75 -0
  46. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/partition.h +432 -0
  47. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/pch.h +27 -0
  48. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/plan.h +33 -0
  49. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/projector.h +32 -0
  50. vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/scanner.h +583 -0
videollama2/lib/python3.10/site-packages/torch/include/ATen/CachedTensorUtils.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+
5
+ namespace at::caching {
6
+
7
+ // Some systems (just cudagraphs currently) will persist a static tensor output
8
+ // whose TensorImpl does not change across iterations. For these tensors caching
9
+ // dtype conversions is invalid. Additionally, there will be an extra reference
10
+ // count to these cached tensors that would prevent buffer inplacing and other
11
+ // checks on tensor uniqueness. If we are not using these systems the enabled
12
+ // flag will be false and we will avoid the hash lookup.
13
+
14
+ TORCH_API bool is_cached_tensor(const at::Tensor& t);
15
+ TORCH_API void add_cached_tensor(const at::Tensor& t);
16
+ TORCH_API void remove_cached_tensor(const at::Tensor& t);
17
+ TORCH_API void set_cached_tensors_enabled(bool enable);
18
+
19
+ // For gradient buffer stealing we will adjust the use count of tensors
20
+ // which are persisted by cudagraphs, just as we need to adjust reference
21
+ // count of tensors with hooks.
22
+ TORCH_API size_t adjusted_use_count(const at::Tensor& t);
23
+
24
+ } // namespace at::caching
videollama2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h ADDED
@@ -0,0 +1,321 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
12
+ #error This change adds a dependency on all pytorch operators, meaning the \
13
+ file will need to be re-compiled every time an operator is changed or added. \
14
+ Consider including a specific operator from \
15
+ <ATen/ops/{my_operator}_compositeexplicitautogradnonfunctional_dispatch.h>. \
16
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
17
+ #endif
18
+
19
+ #include <ATen/ops/_addmm_activation_compositeexplicitautogradnonfunctional_dispatch.h>
20
+ #include <ATen/ops/_conj_copy_compositeexplicitautogradnonfunctional_dispatch.h>
21
+ #include <ATen/ops/_convert_indices_from_coo_to_csr_compositeexplicitautogradnonfunctional_dispatch.h>
22
+ #include <ATen/ops/_convert_indices_from_csr_to_coo_compositeexplicitautogradnonfunctional_dispatch.h>
23
+ #include <ATen/ops/_fw_primal_copy_compositeexplicitautogradnonfunctional_dispatch.h>
24
+ #include <ATen/ops/_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
25
+ #include <ATen/ops/_linalg_det_compositeexplicitautogradnonfunctional_dispatch.h>
26
+ #include <ATen/ops/_linalg_eigh_compositeexplicitautogradnonfunctional_dispatch.h>
27
+ #include <ATen/ops/_linalg_slogdet_compositeexplicitautogradnonfunctional_dispatch.h>
28
+ #include <ATen/ops/_linalg_solve_ex_compositeexplicitautogradnonfunctional_dispatch.h>
29
+ #include <ATen/ops/_linalg_svd_compositeexplicitautogradnonfunctional_dispatch.h>
30
+ #include <ATen/ops/_log_softmax_compositeexplicitautogradnonfunctional_dispatch.h>
31
+ #include <ATen/ops/_log_softmax_backward_data_compositeexplicitautogradnonfunctional_dispatch.h>
32
+ #include <ATen/ops/_make_dual_copy_compositeexplicitautogradnonfunctional_dispatch.h>
33
+ #include <ATen/ops/_neg_view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
34
+ #include <ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautogradnonfunctional_dispatch.h>
35
+ #include <ATen/ops/_reshape_alias_copy_compositeexplicitautogradnonfunctional_dispatch.h>
36
+ #include <ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h>
37
+ #include <ATen/ops/_softmax_backward_data_compositeexplicitautogradnonfunctional_dispatch.h>
38
+ #include <ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautogradnonfunctional_dispatch.h>
39
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
40
+ #include <ATen/ops/_trilinear_compositeexplicitautogradnonfunctional_dispatch.h>
41
+ #include <ATen/ops/_upsample_bicubic2d_aa_compositeexplicitautogradnonfunctional_dispatch.h>
42
+ #include <ATen/ops/_upsample_bicubic2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h>
43
+ #include <ATen/ops/_upsample_bilinear2d_aa_compositeexplicitautogradnonfunctional_dispatch.h>
44
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h>
45
+ #include <ATen/ops/_upsample_nearest_exact1d_compositeexplicitautogradnonfunctional_dispatch.h>
46
+ #include <ATen/ops/_upsample_nearest_exact1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
47
+ #include <ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h>
48
+ #include <ATen/ops/_upsample_nearest_exact2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
49
+ #include <ATen/ops/_upsample_nearest_exact3d_compositeexplicitautogradnonfunctional_dispatch.h>
50
+ #include <ATen/ops/_upsample_nearest_exact3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
51
+ #include <ATen/ops/_values_copy_compositeexplicitautogradnonfunctional_dispatch.h>
52
+ #include <ATen/ops/acos_compositeexplicitautogradnonfunctional_dispatch.h>
53
+ #include <ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h>
54
+ #include <ATen/ops/adaptive_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
55
+ #include <ATen/ops/adaptive_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
56
+ #include <ATen/ops/adaptive_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
57
+ #include <ATen/ops/adaptive_max_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
58
+ #include <ATen/ops/add_compositeexplicitautogradnonfunctional_dispatch.h>
59
+ #include <ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h>
60
+ #include <ATen/ops/addcmul_compositeexplicitautogradnonfunctional_dispatch.h>
61
+ #include <ATen/ops/addmm_compositeexplicitautogradnonfunctional_dispatch.h>
62
+ #include <ATen/ops/addmv_compositeexplicitautogradnonfunctional_dispatch.h>
63
+ #include <ATen/ops/alias_copy_compositeexplicitautogradnonfunctional_dispatch.h>
64
+ #include <ATen/ops/all_compositeexplicitautogradnonfunctional_dispatch.h>
65
+ #include <ATen/ops/amax_compositeexplicitautogradnonfunctional_dispatch.h>
66
+ #include <ATen/ops/amin_compositeexplicitautogradnonfunctional_dispatch.h>
67
+ #include <ATen/ops/aminmax_compositeexplicitautogradnonfunctional_dispatch.h>
68
+ #include <ATen/ops/any_compositeexplicitautogradnonfunctional_dispatch.h>
69
+ #include <ATen/ops/argmax_compositeexplicitautogradnonfunctional_dispatch.h>
70
+ #include <ATen/ops/argmin_compositeexplicitautogradnonfunctional_dispatch.h>
71
+ #include <ATen/ops/as_strided_compositeexplicitautogradnonfunctional_dispatch.h>
72
+ #include <ATen/ops/as_strided_copy_compositeexplicitautogradnonfunctional_dispatch.h>
73
+ #include <ATen/ops/as_strided_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
74
+ #include <ATen/ops/asin_compositeexplicitautogradnonfunctional_dispatch.h>
75
+ #include <ATen/ops/asinh_compositeexplicitautogradnonfunctional_dispatch.h>
76
+ #include <ATen/ops/atan_compositeexplicitautogradnonfunctional_dispatch.h>
77
+ #include <ATen/ops/atan2_compositeexplicitautogradnonfunctional_dispatch.h>
78
+ #include <ATen/ops/atanh_compositeexplicitautogradnonfunctional_dispatch.h>
79
+ #include <ATen/ops/avg_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
80
+ #include <ATen/ops/avg_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
81
+ #include <ATen/ops/avg_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
82
+ #include <ATen/ops/avg_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
83
+ #include <ATen/ops/baddbmm_compositeexplicitautogradnonfunctional_dispatch.h>
84
+ #include <ATen/ops/bernoulli_compositeexplicitautogradnonfunctional_dispatch.h>
85
+ #include <ATen/ops/bitwise_and_compositeexplicitautogradnonfunctional_dispatch.h>
86
+ #include <ATen/ops/bitwise_left_shift_compositeexplicitautogradnonfunctional_dispatch.h>
87
+ #include <ATen/ops/bitwise_not_compositeexplicitautogradnonfunctional_dispatch.h>
88
+ #include <ATen/ops/bitwise_or_compositeexplicitautogradnonfunctional_dispatch.h>
89
+ #include <ATen/ops/bitwise_right_shift_compositeexplicitautogradnonfunctional_dispatch.h>
90
+ #include <ATen/ops/bitwise_xor_compositeexplicitautogradnonfunctional_dispatch.h>
91
+ #include <ATen/ops/bmm_compositeexplicitautogradnonfunctional_dispatch.h>
92
+ #include <ATen/ops/cat_compositeexplicitautogradnonfunctional_dispatch.h>
93
+ #include <ATen/ops/ccol_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
94
+ #include <ATen/ops/ceil_compositeexplicitautogradnonfunctional_dispatch.h>
95
+ #include <ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h>
96
+ #include <ATen/ops/clamp_max_compositeexplicitautogradnonfunctional_dispatch.h>
97
+ #include <ATen/ops/clamp_min_compositeexplicitautogradnonfunctional_dispatch.h>
98
+ #include <ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
99
+ #include <ATen/ops/copy_compositeexplicitautogradnonfunctional_dispatch.h>
100
+ #include <ATen/ops/copysign_compositeexplicitautogradnonfunctional_dispatch.h>
101
+ #include <ATen/ops/cos_compositeexplicitautogradnonfunctional_dispatch.h>
102
+ #include <ATen/ops/cosh_compositeexplicitautogradnonfunctional_dispatch.h>
103
+ #include <ATen/ops/crow_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
104
+ #include <ATen/ops/cumprod_compositeexplicitautogradnonfunctional_dispatch.h>
105
+ #include <ATen/ops/cumsum_compositeexplicitautogradnonfunctional_dispatch.h>
106
+ #include <ATen/ops/detach_copy_compositeexplicitautogradnonfunctional_dispatch.h>
107
+ #include <ATen/ops/diag_embed_compositeexplicitautogradnonfunctional_dispatch.h>
108
+ #include <ATen/ops/diagonal_copy_compositeexplicitautogradnonfunctional_dispatch.h>
109
+ #include <ATen/ops/diagonal_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
110
+ #include <ATen/ops/digamma_compositeexplicitautogradnonfunctional_dispatch.h>
111
+ #include <ATen/ops/div_compositeexplicitautogradnonfunctional_dispatch.h>
112
+ #include <ATen/ops/elu_compositeexplicitautogradnonfunctional_dispatch.h>
113
+ #include <ATen/ops/elu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
114
+ #include <ATen/ops/eq_compositeexplicitautogradnonfunctional_dispatch.h>
115
+ #include <ATen/ops/erf_compositeexplicitautogradnonfunctional_dispatch.h>
116
+ #include <ATen/ops/erfc_compositeexplicitautogradnonfunctional_dispatch.h>
117
+ #include <ATen/ops/erfinv_compositeexplicitautogradnonfunctional_dispatch.h>
118
+ #include <ATen/ops/exp_compositeexplicitautogradnonfunctional_dispatch.h>
119
+ #include <ATen/ops/exp2_compositeexplicitautogradnonfunctional_dispatch.h>
120
+ #include <ATen/ops/expand_copy_compositeexplicitautogradnonfunctional_dispatch.h>
121
+ #include <ATen/ops/expm1_compositeexplicitautogradnonfunctional_dispatch.h>
122
+ #include <ATen/ops/floor_compositeexplicitautogradnonfunctional_dispatch.h>
123
+ #include <ATen/ops/fmax_compositeexplicitautogradnonfunctional_dispatch.h>
124
+ #include <ATen/ops/fmin_compositeexplicitautogradnonfunctional_dispatch.h>
125
+ #include <ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h>
126
+ #include <ATen/ops/frac_compositeexplicitautogradnonfunctional_dispatch.h>
127
+ #include <ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
128
+ #include <ATen/ops/fractional_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
129
+ #include <ATen/ops/fractional_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
130
+ #include <ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h>
131
+ #include <ATen/ops/gcd_compositeexplicitautogradnonfunctional_dispatch.h>
132
+ #include <ATen/ops/ge_compositeexplicitautogradnonfunctional_dispatch.h>
133
+ #include <ATen/ops/gelu_compositeexplicitautogradnonfunctional_dispatch.h>
134
+ #include <ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
135
+ #include <ATen/ops/glu_compositeexplicitautogradnonfunctional_dispatch.h>
136
+ #include <ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h>
137
+ #include <ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h>
138
+ #include <ATen/ops/hardshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h>
139
+ #include <ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h>
140
+ #include <ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h>
141
+ #include <ATen/ops/heaviside_compositeexplicitautogradnonfunctional_dispatch.h>
142
+ #include <ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h>
143
+ #include <ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h>
144
+ #include <ATen/ops/igamma_compositeexplicitautogradnonfunctional_dispatch.h>
145
+ #include <ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h>
146
+ #include <ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h>
147
+ #include <ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h>
148
+ #include <ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h>
149
+ #include <ATen/ops/index_reduce_compositeexplicitautogradnonfunctional_dispatch.h>
150
+ #include <ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
151
+ #include <ATen/ops/isin_compositeexplicitautogradnonfunctional_dispatch.h>
152
+ #include <ATen/ops/isneginf_compositeexplicitautogradnonfunctional_dispatch.h>
153
+ #include <ATen/ops/isposinf_compositeexplicitautogradnonfunctional_dispatch.h>
154
+ #include <ATen/ops/lcm_compositeexplicitautogradnonfunctional_dispatch.h>
155
+ #include <ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h>
156
+ #include <ATen/ops/leaky_relu_compositeexplicitautogradnonfunctional_dispatch.h>
157
+ #include <ATen/ops/leaky_relu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
158
+ #include <ATen/ops/lerp_compositeexplicitautogradnonfunctional_dispatch.h>
159
+ #include <ATen/ops/lgamma_compositeexplicitautogradnonfunctional_dispatch.h>
160
+ #include <ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h>
161
+ #include <ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h>
162
+ #include <ATen/ops/linalg_cross_compositeexplicitautogradnonfunctional_dispatch.h>
163
+ #include <ATen/ops/linalg_inv_ex_compositeexplicitautogradnonfunctional_dispatch.h>
164
+ #include <ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h>
165
+ #include <ATen/ops/linalg_ldl_solve_compositeexplicitautogradnonfunctional_dispatch.h>
166
+ #include <ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h>
167
+ #include <ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h>
168
+ #include <ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h>
169
+ #include <ATen/ops/linalg_pinv_compositeexplicitautogradnonfunctional_dispatch.h>
170
+ #include <ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h>
171
+ #include <ATen/ops/linalg_vector_norm_compositeexplicitautogradnonfunctional_dispatch.h>
172
+ #include <ATen/ops/log_compositeexplicitautogradnonfunctional_dispatch.h>
173
+ #include <ATen/ops/log10_compositeexplicitautogradnonfunctional_dispatch.h>
174
+ #include <ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h>
175
+ #include <ATen/ops/log2_compositeexplicitautogradnonfunctional_dispatch.h>
176
+ #include <ATen/ops/logaddexp_compositeexplicitautogradnonfunctional_dispatch.h>
177
+ #include <ATen/ops/logaddexp2_compositeexplicitautogradnonfunctional_dispatch.h>
178
+ #include <ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h>
179
+ #include <ATen/ops/logsumexp_compositeexplicitautogradnonfunctional_dispatch.h>
180
+ #include <ATen/ops/lt_compositeexplicitautogradnonfunctional_dispatch.h>
181
+ #include <ATen/ops/lu_unpack_compositeexplicitautogradnonfunctional_dispatch.h>
182
+ #include <ATen/ops/max_compositeexplicitautogradnonfunctional_dispatch.h>
183
+ #include <ATen/ops/max_pool2d_with_indices_compositeexplicitautogradnonfunctional_dispatch.h>
184
+ #include <ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h>
185
+ #include <ATen/ops/maximum_compositeexplicitautogradnonfunctional_dispatch.h>
186
+ #include <ATen/ops/mean_compositeexplicitautogradnonfunctional_dispatch.h>
187
+ #include <ATen/ops/min_compositeexplicitautogradnonfunctional_dispatch.h>
188
+ #include <ATen/ops/minimum_compositeexplicitautogradnonfunctional_dispatch.h>
189
+ #include <ATen/ops/mish_compositeexplicitautogradnonfunctional_dispatch.h>
190
+ #include <ATen/ops/mm_compositeexplicitautogradnonfunctional_dispatch.h>
191
+ #include <ATen/ops/mse_loss_compositeexplicitautogradnonfunctional_dispatch.h>
192
+ #include <ATen/ops/mul_compositeexplicitautogradnonfunctional_dispatch.h>
193
+ #include <ATen/ops/narrow_copy_compositeexplicitautogradnonfunctional_dispatch.h>
194
+ #include <ATen/ops/ne_compositeexplicitautogradnonfunctional_dispatch.h>
195
+ #include <ATen/ops/neg_compositeexplicitautogradnonfunctional_dispatch.h>
196
+ #include <ATen/ops/new_empty_strided_compositeexplicitautogradnonfunctional_dispatch.h>
197
+ #include <ATen/ops/nextafter_compositeexplicitautogradnonfunctional_dispatch.h>
198
+ #include <ATen/ops/nll_loss_backward_compositeexplicitautogradnonfunctional_dispatch.h>
199
+ #include <ATen/ops/nll_loss_forward_compositeexplicitautogradnonfunctional_dispatch.h>
200
+ #include <ATen/ops/norm_compositeexplicitautogradnonfunctional_dispatch.h>
201
+ #include <ATen/ops/permute_copy_compositeexplicitautogradnonfunctional_dispatch.h>
202
+ #include <ATen/ops/pixel_shuffle_compositeexplicitautogradnonfunctional_dispatch.h>
203
+ #include <ATen/ops/pixel_unshuffle_compositeexplicitautogradnonfunctional_dispatch.h>
204
+ #include <ATen/ops/polygamma_compositeexplicitautogradnonfunctional_dispatch.h>
205
+ #include <ATen/ops/pow_compositeexplicitautogradnonfunctional_dispatch.h>
206
+ #include <ATen/ops/prod_compositeexplicitautogradnonfunctional_dispatch.h>
207
+ #include <ATen/ops/reciprocal_compositeexplicitautogradnonfunctional_dispatch.h>
208
+ #include <ATen/ops/reflection_pad1d_compositeexplicitautogradnonfunctional_dispatch.h>
209
+ #include <ATen/ops/reflection_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
210
+ #include <ATen/ops/reflection_pad3d_compositeexplicitautogradnonfunctional_dispatch.h>
211
+ #include <ATen/ops/reflection_pad3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
212
+ #include <ATen/ops/remainder_compositeexplicitautogradnonfunctional_dispatch.h>
213
+ #include <ATen/ops/renorm_compositeexplicitautogradnonfunctional_dispatch.h>
214
+ #include <ATen/ops/replication_pad1d_compositeexplicitautogradnonfunctional_dispatch.h>
215
+ #include <ATen/ops/replication_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
216
+ #include <ATen/ops/replication_pad2d_compositeexplicitautogradnonfunctional_dispatch.h>
217
+ #include <ATen/ops/replication_pad3d_compositeexplicitautogradnonfunctional_dispatch.h>
218
+ #include <ATen/ops/round_compositeexplicitautogradnonfunctional_dispatch.h>
219
+ #include <ATen/ops/row_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
220
+ #include <ATen/ops/rsqrt_compositeexplicitautogradnonfunctional_dispatch.h>
221
+ #include <ATen/ops/scatter_compositeexplicitautogradnonfunctional_dispatch.h>
222
+ #include <ATen/ops/scatter_add_compositeexplicitautogradnonfunctional_dispatch.h>
223
+ #include <ATen/ops/scatter_reduce_compositeexplicitautogradnonfunctional_dispatch.h>
224
+ #include <ATen/ops/select_backward_compositeexplicitautogradnonfunctional_dispatch.h>
225
+ #include <ATen/ops/select_copy_compositeexplicitautogradnonfunctional_dispatch.h>
226
+ #include <ATen/ops/select_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
227
+ #include <ATen/ops/sgn_compositeexplicitautogradnonfunctional_dispatch.h>
228
+ #include <ATen/ops/sigmoid_compositeexplicitautogradnonfunctional_dispatch.h>
229
+ #include <ATen/ops/sigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h>
230
+ #include <ATen/ops/sign_compositeexplicitautogradnonfunctional_dispatch.h>
231
+ #include <ATen/ops/signbit_compositeexplicitautogradnonfunctional_dispatch.h>
232
+ #include <ATen/ops/silu_compositeexplicitautogradnonfunctional_dispatch.h>
233
+ #include <ATen/ops/silu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
234
+ #include <ATen/ops/sin_compositeexplicitautogradnonfunctional_dispatch.h>
235
+ #include <ATen/ops/sinc_compositeexplicitautogradnonfunctional_dispatch.h>
236
+ #include <ATen/ops/sinh_compositeexplicitautogradnonfunctional_dispatch.h>
237
+ #include <ATen/ops/slice_copy_compositeexplicitautogradnonfunctional_dispatch.h>
238
+ #include <ATen/ops/slice_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
239
+ #include <ATen/ops/slow_conv_transpose2d_compositeexplicitautogradnonfunctional_dispatch.h>
240
+ #include <ATen/ops/smooth_l1_loss_compositeexplicitautogradnonfunctional_dispatch.h>
241
+ #include <ATen/ops/softplus_compositeexplicitautogradnonfunctional_dispatch.h>
242
+ #include <ATen/ops/softplus_backward_compositeexplicitautogradnonfunctional_dispatch.h>
243
+ #include <ATen/ops/softshrink_compositeexplicitautogradnonfunctional_dispatch.h>
244
+ #include <ATen/ops/softshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h>
245
+ #include <ATen/ops/sort_compositeexplicitautogradnonfunctional_dispatch.h>
246
+ #include <ATen/ops/special_airy_ai_compositeexplicitautogradnonfunctional_dispatch.h>
247
+ #include <ATen/ops/special_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h>
248
+ #include <ATen/ops/special_bessel_j1_compositeexplicitautogradnonfunctional_dispatch.h>
249
+ #include <ATen/ops/special_bessel_y0_compositeexplicitautogradnonfunctional_dispatch.h>
250
+ #include <ATen/ops/special_bessel_y1_compositeexplicitautogradnonfunctional_dispatch.h>
251
+ #include <ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h>
252
+ #include <ATen/ops/special_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h>
253
+ #include <ATen/ops/special_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h>
254
+ #include <ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h>
255
+ #include <ATen/ops/special_entr_compositeexplicitautogradnonfunctional_dispatch.h>
256
+ #include <ATen/ops/special_erfcx_compositeexplicitautogradnonfunctional_dispatch.h>
257
+ #include <ATen/ops/special_hermite_polynomial_h_compositeexplicitautogradnonfunctional_dispatch.h>
258
+ #include <ATen/ops/special_hermite_polynomial_he_compositeexplicitautogradnonfunctional_dispatch.h>
259
+ #include <ATen/ops/special_i0e_compositeexplicitautogradnonfunctional_dispatch.h>
260
+ #include <ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h>
261
+ #include <ATen/ops/special_i1e_compositeexplicitautogradnonfunctional_dispatch.h>
262
+ #include <ATen/ops/special_laguerre_polynomial_l_compositeexplicitautogradnonfunctional_dispatch.h>
263
+ #include <ATen/ops/special_legendre_polynomial_p_compositeexplicitautogradnonfunctional_dispatch.h>
264
+ #include <ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h>
265
+ #include <ATen/ops/special_modified_bessel_i0_compositeexplicitautogradnonfunctional_dispatch.h>
266
+ #include <ATen/ops/special_modified_bessel_i1_compositeexplicitautogradnonfunctional_dispatch.h>
267
+ #include <ATen/ops/special_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h>
268
+ #include <ATen/ops/special_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h>
269
+ #include <ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h>
270
+ #include <ATen/ops/special_scaled_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h>
271
+ #include <ATen/ops/special_scaled_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h>
272
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h>
273
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h>
274
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h>
275
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h>
276
+ #include <ATen/ops/special_spherical_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h>
277
+ #include <ATen/ops/special_xlog1py_compositeexplicitautogradnonfunctional_dispatch.h>
278
+ #include <ATen/ops/special_zeta_compositeexplicitautogradnonfunctional_dispatch.h>
279
+ #include <ATen/ops/split_copy_compositeexplicitautogradnonfunctional_dispatch.h>
280
+ #include <ATen/ops/split_with_sizes_copy_compositeexplicitautogradnonfunctional_dispatch.h>
281
+ #include <ATen/ops/sqrt_compositeexplicitautogradnonfunctional_dispatch.h>
282
+ #include <ATen/ops/squeeze_copy_compositeexplicitautogradnonfunctional_dispatch.h>
283
+ #include <ATen/ops/sub_compositeexplicitautogradnonfunctional_dispatch.h>
284
+ #include <ATen/ops/sum_compositeexplicitautogradnonfunctional_dispatch.h>
285
+ #include <ATen/ops/t_copy_compositeexplicitautogradnonfunctional_dispatch.h>
286
+ #include <ATen/ops/tan_compositeexplicitautogradnonfunctional_dispatch.h>
287
+ #include <ATen/ops/tanh_compositeexplicitautogradnonfunctional_dispatch.h>
288
+ #include <ATen/ops/tanh_backward_compositeexplicitautogradnonfunctional_dispatch.h>
289
+ #include <ATen/ops/threshold_compositeexplicitautogradnonfunctional_dispatch.h>
290
+ #include <ATen/ops/threshold_backward_compositeexplicitautogradnonfunctional_dispatch.h>
291
+ #include <ATen/ops/topk_compositeexplicitautogradnonfunctional_dispatch.h>
292
+ #include <ATen/ops/transpose_copy_compositeexplicitautogradnonfunctional_dispatch.h>
293
+ #include <ATen/ops/triangular_solve_compositeexplicitautogradnonfunctional_dispatch.h>
294
+ #include <ATen/ops/tril_compositeexplicitautogradnonfunctional_dispatch.h>
295
+ #include <ATen/ops/triu_compositeexplicitautogradnonfunctional_dispatch.h>
296
+ #include <ATen/ops/trunc_compositeexplicitautogradnonfunctional_dispatch.h>
297
+ #include <ATen/ops/unbind_copy_compositeexplicitautogradnonfunctional_dispatch.h>
298
+ #include <ATen/ops/unfold_copy_compositeexplicitautogradnonfunctional_dispatch.h>
299
+ #include <ATen/ops/unsqueeze_copy_compositeexplicitautogradnonfunctional_dispatch.h>
300
+ #include <ATen/ops/upsample_bicubic2d_compositeexplicitautogradnonfunctional_dispatch.h>
301
+ #include <ATen/ops/upsample_bicubic2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
302
+ #include <ATen/ops/upsample_bilinear2d_compositeexplicitautogradnonfunctional_dispatch.h>
303
+ #include <ATen/ops/upsample_bilinear2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
304
+ #include <ATen/ops/upsample_linear1d_compositeexplicitautogradnonfunctional_dispatch.h>
305
+ #include <ATen/ops/upsample_linear1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
306
+ #include <ATen/ops/upsample_nearest1d_compositeexplicitautogradnonfunctional_dispatch.h>
307
+ #include <ATen/ops/upsample_nearest1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
308
+ #include <ATen/ops/upsample_nearest2d_compositeexplicitautogradnonfunctional_dispatch.h>
309
+ #include <ATen/ops/upsample_nearest2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
310
+ #include <ATen/ops/upsample_nearest3d_compositeexplicitautogradnonfunctional_dispatch.h>
311
+ #include <ATen/ops/upsample_nearest3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
312
+ #include <ATen/ops/upsample_trilinear3d_compositeexplicitautogradnonfunctional_dispatch.h>
313
+ #include <ATen/ops/upsample_trilinear3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
314
+ #include <ATen/ops/values_copy_compositeexplicitautogradnonfunctional_dispatch.h>
315
+ #include <ATen/ops/view_as_complex_copy_compositeexplicitautogradnonfunctional_dispatch.h>
316
+ #include <ATen/ops/view_as_real_copy_compositeexplicitautogradnonfunctional_dispatch.h>
317
+ #include <ATen/ops/view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
318
+ #include <ATen/ops/xlogy_compositeexplicitautogradnonfunctional_dispatch.h>
319
+
320
+
321
+
videollama2/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/Tensor.h>
5
+ #include <ATen/dlpack.h>
6
+
7
+ // this convertor will:
8
+ // 1) take a Tensor object and wrap it in the DLPack tensor
9
+ // 2) take a dlpack tensor and convert it to the ATen Tensor
10
+
11
+ namespace at {
12
+
13
+ TORCH_API ScalarType toScalarType(const DLDataType& dtype);
14
+ TORCH_API DLManagedTensor* toDLPack(const Tensor& src);
15
+ TORCH_API Tensor fromDLPack(const DLManagedTensor* src);
16
+ TORCH_API Tensor
17
+ fromDLPack(const DLManagedTensor* src, std::function<void(void*)> deleter);
18
+ TORCH_API DLDataType getDLDataType(const Tensor& t);
19
+ TORCH_API DLDevice getDLContext(const Tensor& tensor, const int64_t& device_id);
20
+
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapTransforms.h ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/LegacyBatchedTensorImpl.h>
4
+ #include <ATen/core/IListRef.h>
5
+
6
+ namespace at {
7
+
8
+ // This file contains abstractions used for transforming *logical* vmap
9
+ // arguments into *physical* arguments. (Keep reading for definitions of these
10
+ // terms).
11
+
12
+ // NOTE: [Logical vs physical args]
13
+ // Consider the following vmap.
14
+ // vmap(vmap(func, in_dims=(2,)), in_dims=(0,))(torch.ones(2, 3, 4))
15
+ // This would produce a BatchedTensor wrapping a Tensor of size [2, 3, 4],
16
+ // with batch dims 0 and 2:
17
+ // BatchedTensor(ones(2, 3, 4), bdims=[(lvl=1,dim=0),(lvl=2,dim=2)])
18
+ //
19
+ // We say the *logical* view of the tensor has size [3] -- tensors inside
20
+ // `func` appear to have size [3].
21
+ // However, the *physical* underlying tensor (the one passed to vmap) has size
22
+ // [2, 3, 4].
23
+ //
24
+ // This notion of logical vs physical also extends to non-tensor arguments.
25
+ // Consider the previous tensor; let's assume the user called
26
+ // `torch.sum(tensor, dim=0)` inside of `func`. Then the logical
27
+ // dimension they are reducing over is dim 0 but the physical dim is dim 1
28
+ // (the first non-batch dimension)
29
+
30
+ // Forward declared; see NOTE: [What is a VmapPhysicalView?]
31
+ struct VmapPhysicalView;
32
+
33
+ // Most PyTorch operators take 4 or fewer inputs.
34
+ constexpr int64_t kVmapTransformStaticInputSize = 4;
35
+ using VmapPhysicalViewVec =
36
+ SmallVector<VmapPhysicalView, kVmapTransformStaticInputSize>;
37
+
38
+ // Pytorch generally advertises good performance for <= 5 dims.
39
+ // (see ATen/core/DimVector.h). We add a few extra dims (~3) for vmap
40
+ // dimensions to get 8. Adjust this number as necessary
41
+ constexpr int64_t kVmapStaticDimVecSize = 8;
42
+ using VmapDimVector = SmallVector<int64_t, kVmapStaticDimVecSize>;
43
+ using VmapSymDimVector = SmallVector<c10::SymInt, kVmapStaticDimVecSize>;
44
+
45
+ // NOTE: [What is an VmapTransform?]
46
+ // An *VmapTransform* converts logical views of tensors to physical views.
47
+ //
48
+ // Batching rules use VmapTransforms to convert logical arguments to
49
+ // physical arguments, then call one or more at:: operator that handles the
50
+ // physical arguments, and then converts the physical result back to a logical
51
+ // argument.
52
+
53
+ // VmapTransform for operators that take tensors with multiple batch dims.
54
+ // Given one or more logical views on Tensors, `logicalToPhysical`
55
+ // permutes all of the batch dims to the front of the tensor, aligns
56
+ // and expands the batch dims to match each other (according to their `level`),
57
+ // and returns a VmapPhysicalView on the tensor(s).
58
+ struct TORCH_API MultiBatchVmapTransform {
59
+ static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor);
60
+ static VmapPhysicalViewVec logicalToPhysical(ITensorListRef logical_tensors);
61
+ };
62
+
63
+ // VmapTransform for operators that broadcast all inputs.
64
+ // Given some logical views on Tensors, `logicalToPhysical`:
65
+ // - permutes all of the batch dims to the front of the tensors
66
+ // - aligns all the batch dims to the collective levels of all of the tensors.
67
+ // If a tensor does not have a batch dim for a vmap level, then it receives
68
+ // a size-one dimension for said level.
69
+ // - aligns the non-batch dims to have the same dimensionality, adding extra
70
+ // size-1 dimensions in between the batch dimensions and the non-batch
71
+ // dimensions so that the batch dimensions are lined up from the right.
72
+ //
73
+ // For example: given inputs of size (B, 2) and (B, 3, 2) where B is the batch
74
+ // dimension, BroadcastingVmapTransform returns VmapPhysicalViews that wrap
75
+ // tensors of size (B, 1, 2) and (B, 3, 2).
76
+ //
77
+ // Given inputs of size (B, 2) and (2,), BroadcastingVmapTransform returns
78
+ // VmapPhysicalViews wrapping tensors of size (B, 2) and (1, 2). We don't
79
+ // actually *need* to return a tensor of size (1, 2) for the second tensor
80
+ // because the broadcasting operation takes care of that for us, but we do
81
+ // it anyways to keep things simple.
82
+ struct TORCH_API BroadcastingVmapTransform {
83
+ static VmapPhysicalViewVec logicalToPhysical(TensorList logical_tensors);
84
+ };
85
+
86
+ // Forward declared, if you're reading this file head to toe, don't worry about
87
+ // it yet.
88
+ struct VmapPhysicalToLogicalMap;
89
+
90
+ // NOTE: [What is a VmapPhysicalView?]
91
+ // VmapPhysicalView represents a physical view on a Tensor.
92
+ //
93
+ // One can use it to further convert logical dimension indices, logical shapes,
94
+ // and more to their physical variants, or convert a new (physical) tensor into
95
+ // a logical BatchedTensor. (TODO(rzou): some of these are not yet implemented).
96
+ //
97
+ // VmapPhysicalView stores a physical tensor with all of its batch dimensions at
98
+ // the front and some levels that correspond to said batch dimensions.
99
+ //
100
+ // The levels bitset specifies which vmap levels correspond to the batch
101
+ // dimensions at the front of the tensor. In particular, the number of set bits
102
+ // corresponds to the number of batch dimensions on `tensor` and the rightmost
103
+ // bit of `levels` specifies the maximum number of nested vmaps we are in at
104
+ // this point in time.
105
+ // For example, given:
106
+ // physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5, 6), levels={1, 3})
107
+ //
108
+ // Rightmost bit of `levels` is 3 indicating the number of nested vmaps less
109
+ // than or equal to 3.
110
+ // bitset: 010100
111
+ // ^
112
+ // |
113
+ // levels: 012345
114
+ struct TORCH_API VmapPhysicalView {
115
+ VmapPhysicalView(Tensor&& tensor, std::bitset<kVmapNumLevels> levels)
116
+ : levels_(levels), tensor_(tensor) {
117
+ TORCH_INTERNAL_ASSERT(!isBatchedTensor(tensor));
118
+ }
119
+
120
+ Tensor& tensor() {
121
+ return tensor_;
122
+ }
123
+ const Tensor& tensor() const {
124
+ return tensor_;
125
+ }
126
+
127
+ // Maps logical dim indices to physical dim indices. Also does dim wrapping.
128
+ //
129
+ // For example, given:
130
+ // physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5), levels={1, 3})
131
+ //
132
+ // Then physical_view.getPhysicalDims({0, 1}) returns {2, 3}.
133
+ // This is because the size of levels tell us that the first two dimensions
134
+ // of `tensor_` are batch dimensions, so a logical dim of `n` is actually
135
+ // a physical dim of `n + 2`.
136
+ VmapDimVector getPhysicalDims(OptionalIntArrayRef logical_dims) const;
137
+ int64_t getPhysicalDim(int64_t logical_dim) const;
138
+
139
+ // Returns a VmapPhysicalToLogicalMap object. This can be used for
140
+ // mapping a physical tensor to a new logical tensor (BatchedTensor)
141
+ VmapPhysicalToLogicalMap getPhysicalToLogicalMap() const;
142
+
143
+ // Maps a logical shape to a physical shape by pre-pending the batch
144
+ // sizes to the logical shape.
145
+ VmapDimVector getPhysicalShape(IntArrayRef logical_shape) const;
146
+
147
+ int64_t numBatchDims() const;
148
+
149
+ private:
150
+ int64_t numLogicalDims() const;
151
+
152
+ std::bitset<kVmapNumLevels> levels_;
153
+ Tensor tensor_;
154
+ };
155
+
156
+ // Convenience struct used for mapping a physical tensor (a non-BatchedTensor)
157
+ // to a logical one (BatchedTensor). It holds some levels that are used to do
158
+ // the mapping and assumes that the batch dimensions in the physical tensor all
159
+ // occur at the front of the tensor.
160
+ struct TORCH_API VmapPhysicalToLogicalMap {
161
+ VmapPhysicalToLogicalMap(std::bitset<kVmapNumLevels> levels)
162
+ : levels_(levels) {}
163
+
164
+ // Maps a physical tensor to a new logical tensor (BatchedTensor).
165
+ // Assumes that all of the "batch dimensions" are at the front
166
+ // of the physical tensor. For example, given:
167
+ // - x = rank-4 Tensor with size 2, 3, 5, 7
168
+ // - levels = (2, 4)
169
+ // Returns:
170
+ // - BatchedTensor(x, bdims=[(dim=0,lvl=2), (dim=1, lvl=4)])
171
+ Tensor apply(const Tensor& physical_tensor) const;
172
+
173
+ // Given a vector of physical tensors,
174
+ // 1. maps each tensor to a new logical tensor. Assumes that all of the
175
+ // "batch dimensions" are at the front of the physical tensors.
176
+ // 2. stores the new logical tensors back into the passed-in vector. This is
177
+ // to avoid additional dynamic allocations.
178
+ void applyInplace(std::vector<Tensor>& physical_tensors) const;
179
+
180
+ std::bitset<kVmapNumLevels> levels_;
181
+ };
182
+
183
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/NamedTensor.h>
3
+ #include <ATen/TensorNames.h>
4
+ #include <ATen/WrapDimUtilsMulti.h>
5
+
6
+ #include <ATen/core/DimVector.h>
7
+ #include <ATen/core/Tensor.h>
8
+ #include <functional>
9
+
10
+ namespace at {
11
+
12
+ using NameVector = SmallVector<Dimname, kDimVectorStaticSize>;
13
+
14
+ inline bool has_names(const ITensorListRef& tensors) {
15
+ return std::any_of(tensors.begin(), tensors.end(), [](const Tensor& t) {
16
+ return t.has_names();
17
+ });
18
+ }
19
+
20
+ // Converts dim to an positional index. Errors if `dim` cannot be used to
21
+ // refer to any dimension of tensor.
22
+ TORCH_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim);
23
+ TORCH_API std::vector<int64_t> dimnames_to_positions(
24
+ const Tensor& tensor,
25
+ DimnameList dims);
26
+
27
+ // Unifies two DimnameList to produce a third. This is useful for implementing
28
+ // the named inference rule for binary broadcasting operations like add.
29
+ //
30
+ // There are three main constraints:
31
+ // 1) Check matching: Names must match positionally from the right.
32
+ // 2) Check misaligned: If a name `n` is in `names`, then it must appear at
33
+ // the same index from the right in other.
34
+ // 3) The output names are obtained by unifying the names individually from the
35
+ // right.
36
+ TORCH_API std::vector<Dimname> unify_from_right(
37
+ DimnameList names,
38
+ DimnameList other,
39
+ const char* action = "broadcast");
40
+
41
+ [[noreturn]] inline void reportNYIDimnameOverload(const char* op_name) {
42
+ TORCH_CHECK(
43
+ false,
44
+ op_name,
45
+ ": You passed a dimname (string) to this op in place of a dimension "
46
+ "index but it does not yet support this behavior. Please pass a dimension "
47
+ "index to work around this.");
48
+ }
49
+
50
+ // [NOTE] Writing name inference rules
51
+ //
52
+ // Operators that support named tensors are either composed of operations that
53
+ // support named tensors or implement some name inference rule. An op that
54
+ // implements its own name inference rule generally looks like the following:
55
+ //
56
+ // Tensor op(...) {
57
+ // perform_shape_checks(...);
58
+ // # (1)
59
+ // auto maybe_outnames = compute_outnames(...);
60
+ // auto result = [&]() {
61
+ // NoNamesGuard guard;
62
+ // return op_impl(...);
63
+ // }();
64
+ // # (2)
65
+ // propagate_names_if_nonempty(result, maybe_outnames);
66
+ //
67
+ // Each op has (1) a compute outnames step and (2) a propagate names step.
68
+ //
69
+ // compute_outnames is responsible for checking that input names match and
70
+ // determining what the output names should be. It returns either:
71
+ // - {} (if the inputs tensors are all unnamed)
72
+ // - non-empty outnames.
73
+ //
74
+ // propagate_names_if_nonempty propagates the outnames if they exist to the
75
+ // result tensors.
76
+ //
77
+ // The {} case is an optimization; if the user does not use named tensors they
78
+ // pay no perf cost for it.
79
+
80
+ namespace namedinference {
81
+
82
+ const Tensor& propagate_names_if_present_and_nonempty(
83
+ const Tensor& result,
84
+ c10::optional<DimnameList> maybe_names,
85
+ bool validate_names = false);
86
+ // Propagates `names` to `result` if `names` is not empty.
87
+ // `names` can be empty; see [NOTE] Writing name inference rules
88
+ // If `names` is not empty, `names.size()` should equal `result.dim()`.
89
+ // When in doubt, use this overload instead of the others.
90
+ TORCH_API const Tensor& propagate_names_if_nonempty(
91
+ const Tensor& result,
92
+ DimnameList maybe_names,
93
+ bool validate_names = false);
94
+
95
+ // Propagates `names` to `result`. Only use this if we are certain that there
96
+ // are names to propagate (that names is not empty).
97
+ TORCH_API const Tensor& propagate_names(
98
+ const Tensor& result,
99
+ DimnameList names,
100
+ bool validate_names = false);
101
+
102
+ // Propagates all names from src to result.
103
+ TORCH_API void propagate_names(const Tensor& result, const Tensor& src);
104
+
105
+ // Propagates all names except for those at the excluded_idxs.
106
+ TORCH_API void propagate_names_except(
107
+ const Tensor& result,
108
+ const Tensor& src,
109
+ IntArrayRef excluded_idxs);
110
+
111
+ // Used for reduction ops that have a `keepdim` arg.
112
+ TORCH_API void propagate_names_for_reduction(
113
+ const Tensor& result,
114
+ const Tensor& src,
115
+ IntArrayRef excluded_idxs,
116
+ bool keepdim);
117
+
118
+ TORCH_API void propagate_names_for_expand(
119
+ const Tensor& result,
120
+ const Tensor& self);
121
+
122
+ TORCH_API std::vector<Dimname> compute_cat_outnames(
123
+ const MaterializedITensorListRef& tensors);
124
+
125
+ TORCH_API std::vector<Dimname> compute_broadcast_outnames(
126
+ const Tensor& self,
127
+ const Tensor& other);
128
+
129
+ TORCH_API std::vector<Dimname> broadcast_to_outnames(
130
+ const Tensor& tensor,
131
+ const Tensor& reference_tensor,
132
+ const char* op_name);
133
+
134
+ TORCH_API std::vector<Dimname> compute_matmul_outnames(
135
+ const Tensor& self,
136
+ const Tensor& other);
137
+
138
+ TORCH_API std::vector<Dimname> compute_cdist_outnames(
139
+ const Tensor& self,
140
+ const Tensor& other);
141
+
142
+ TORCH_API std::vector<Dimname> compute_bmm_outnames(
143
+ const Tensor& result,
144
+ const Tensor& self,
145
+ const Tensor& other);
146
+
147
+ TORCH_API std::vector<Dimname> compute_squeeze_outnames(const Tensor& tensor);
148
+ TORCH_API std::vector<Dimname> compute_squeeze_outnames(
149
+ const Tensor& tensor,
150
+ std::bitset<dim_bitset_size> dims);
151
+
152
+ std::vector<Dimname> compute_diagonal_outnames(
153
+ const Tensor& tensor,
154
+ int64_t dim1,
155
+ int64_t dim2);
156
+
157
+ // TensorImpl* overloads for Legacy TH/THC code. Use these sparingly.
158
+
159
+ TORCH_API TensorImpl* propagate_names_if_nonempty(
160
+ TensorImpl* result,
161
+ DimnameList maybe_names,
162
+ bool validate_names = false);
163
+
164
+ TORCH_API TensorImpl* propagate_names(
165
+ TensorImpl* result,
166
+ DimnameList names,
167
+ bool validate_names = false);
168
+
169
+ TORCH_API void propagate_names(TensorImpl* result, /*const */ TensorImpl* src);
170
+
171
+ TORCH_API inline void propagate_names(
172
+ const TensorBase& result,
173
+ DimnameList names,
174
+ bool validate_names = false) {
175
+ propagate_names(result.unsafeGetTensorImpl(), names, validate_names);
176
+ }
177
+
178
+ TORCH_API inline void propagate_names_if_nonempty(
179
+ const TensorBase& result,
180
+ DimnameList names,
181
+ bool validate_names = false) {
182
+ propagate_names_if_nonempty(
183
+ result.unsafeGetTensorImpl(), names, validate_names);
184
+ }
185
+
186
+ TORCH_API inline void propagate_names(
187
+ const TensorBase& result,
188
+ const TensorBase& src) {
189
+ propagate_names(result.unsafeGetTensorImpl(), src.unsafeGetTensorImpl());
190
+ }
191
+
192
+ // result = m1 @ m2 + bias
193
+ TORCH_API std::vector<Dimname> propagate_names_for_addmm(
194
+ const Tensor& m1,
195
+ const Tensor& m2,
196
+ const Tensor& bias);
197
+
198
+ TORCH_API std::vector<Dimname> propagate_names_for_addmv(
199
+ const Tensor& mat,
200
+ const Tensor& vec,
201
+ const Tensor& bias);
202
+
203
+ TORCH_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2);
204
+
205
+ TORCH_API std::vector<Dimname> compute_baddbmm_outnames(
206
+ const Tensor& result,
207
+ const Tensor& self,
208
+ const Tensor& other,
209
+ const Tensor& bias);
210
+
211
+ TORCH_API bool are_names_equal(TensorImpl* self, TensorImpl* other);
212
+
213
+ } // namespace namedinference
214
+
215
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/MemoryFormat.h>
4
+ #include <c10/core/SymIntArrayRef.h>
5
+ #include <c10/core/TensorImpl.h>
6
+ #include <c10/util/Exception.h>
7
+
8
+ namespace at {
9
+
10
+ // An "Opaque" TensorImpl -- there are no strides and (for now)
11
+ // even data() is not supported (thus no pointer arithmetic).
12
+
13
+ // NOTE: We could allow data() in the future, but would have to ensure pointer
14
+ // arithmetic code is properly guarded.
15
+ //
16
+ // NOTE: This does not support resize_ (and other metadata-changing ops) because
17
+ // of `shallow_copy_and_detach`. We would need to define an interface to
18
+ // "shallow copy" in order to add support.
19
+
20
+ template <typename OpaqueHandle>
21
+ struct TORCH_API OpaqueTensorImpl : public TensorImpl {
22
+ // public constructor for now...
23
+ OpaqueTensorImpl(
24
+ at::DispatchKeySet key_set,
25
+ const caffe2::TypeMeta data_type,
26
+ c10::Device device,
27
+ OpaqueHandle opaque_handle,
28
+ c10::IntArrayRef sizes,
29
+ bool is_non_overlapping_and_dense = true)
30
+ : TensorImpl(key_set, data_type, device),
31
+ opaque_handle_(std::move(opaque_handle)) {
32
+ set_storage_access_should_throw();
33
+ set_custom_sizes_strides(SizesStridesPolicy::CustomStrides);
34
+ sizes_and_strides_.set_sizes(sizes);
35
+ refresh_numel();
36
+ is_non_overlapping_and_dense_ = is_non_overlapping_and_dense;
37
+ }
38
+
39
+ // Destructor doesn't call release_resources because it's
40
+ // unnecessary; don't forget to change that if needed!
41
+ void release_resources() override {
42
+ TensorImpl::release_resources();
43
+ opaque_handle_ = {};
44
+ }
45
+
46
+ void set_size(int64_t dim, int64_t new_size) override {
47
+ AT_ERROR("opaque tensors do not have set_size");
48
+ }
49
+
50
+ void set_stride(int64_t dim, int64_t new_stride) override {
51
+ AT_ERROR("opaque tensors do not have set_stride");
52
+ }
53
+
54
+ void set_storage_offset(int64_t storage_offset) override {
55
+ AT_ERROR("opaque tensors do not have set_storage_offset");
56
+ }
57
+
58
+ #ifdef DEBUG
59
+ bool has_storage() const override {
60
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
61
+ !storage_, "OpaqueTensorImpl assumes that storage_ is never set");
62
+ return false;
63
+ }
64
+ #endif
65
+
66
+ /**
67
+ * Return a TensorImpl that is a shallow-copy of this TensorImpl.
68
+ *
69
+ * For usage of `version_counter` and `allow_tensor_metadata_change`,
70
+ * see NOTE [ TensorImpl Shallow-Copying ].
71
+ */
72
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
73
+ const c10::VariableVersion& version_counter,
74
+ bool allow_tensor_metadata_change) const override {
75
+ auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
76
+ key_set(),
77
+ dtype(),
78
+ device(),
79
+ opaque_handle_,
80
+ sizes_and_strides_.sizes_arrayref());
81
+ copy_tensor_metadata(
82
+ /*src_opaque_impl=*/this,
83
+ /*dest_opaque_impl=*/impl.get(),
84
+ /*version_counter=*/version_counter,
85
+ /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
86
+ impl->refresh_numel();
87
+ return impl;
88
+ }
89
+
90
+ /**
91
+ * Return a TensorImpl that is a shallow-copy of this TensorImpl.
92
+ *
93
+ * For usage of `version_counter` and `allow_tensor_metadata_change`,
94
+ * see NOTE [ TensorImpl Shallow-Copying ].
95
+ */
96
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
97
+ c10::VariableVersion&& version_counter,
98
+ bool allow_tensor_metadata_change) const override {
99
+ auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
100
+ key_set(),
101
+ dtype(),
102
+ device(),
103
+ opaque_handle_,
104
+ sizes_and_strides_.sizes_arrayref());
105
+ copy_tensor_metadata(
106
+ /*src_opaque_impl=*/this,
107
+ /*dest_opaque_impl=*/impl.get(),
108
+ /*version_counter=*/std::move(version_counter),
109
+ /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
110
+ impl->refresh_numel();
111
+ return impl;
112
+ }
113
+
114
+ /**
115
+ * Shallow-copies data from another TensorImpl into this TensorImpl.
116
+ *
117
+ * For why this function doesn't check this TensorImpl's
118
+ * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ].
119
+ */
120
+ void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
121
+ AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set()));
122
+ auto opaque_impl =
123
+ static_cast<const OpaqueTensorImpl<OpaqueHandle>*>(impl.get());
124
+ copy_tensor_metadata(
125
+ /*src_impl=*/opaque_impl,
126
+ /*dest_impl=*/this,
127
+ /*version_counter=*/version_counter(),
128
+ /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
129
+ refresh_numel();
130
+ }
131
+
132
+ const OpaqueHandle& opaque_handle() const {
133
+ return opaque_handle_;
134
+ }
135
+
136
+ OpaqueHandle& unsafe_opaque_handle() {
137
+ return opaque_handle_;
138
+ }
139
+
140
+ protected:
141
+ /**
142
+ * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
143
+ * storage_offset) from one TensorImpl to another TensorImpl.
144
+ *
145
+ * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
146
+ * [ TensorImpl Shallow-Copying ].
147
+ */
148
+ static void copy_tensor_metadata(
149
+ const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl,
150
+ OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl,
151
+ const c10::VariableVersion& version_counter,
152
+ bool allow_tensor_metadata_change) {
153
+ TensorImpl::copy_tensor_metadata(
154
+ src_opaque_impl,
155
+ dest_opaque_impl,
156
+ version_counter,
157
+ allow_tensor_metadata_change);
158
+
159
+ // OpaqueTensorImpl-specific fields.
160
+ dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_;
161
+ }
162
+
163
+ static void copy_tensor_metadata(
164
+ const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl,
165
+ OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl,
166
+ c10::VariableVersion&& version_counter,
167
+ bool allow_tensor_metadata_change) {
168
+ TensorImpl::copy_tensor_metadata(
169
+ src_opaque_impl,
170
+ dest_opaque_impl,
171
+ std::move(version_counter),
172
+ allow_tensor_metadata_change);
173
+
174
+ // OpaqueTensorImpl-specific fields.
175
+ dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_;
176
+ }
177
+
178
+ private:
179
+ const char* tensorimpl_type_name() const override {
180
+ return "OpaqueTensorImpl";
181
+ }
182
+
183
+ OpaqueHandle opaque_handle_;
184
+ };
185
+
186
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/Parallel.h ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/Config.h>
3
+ #include <c10/macros/Macros.h>
4
+ #include <functional>
5
+ #include <string>
6
+
7
+ namespace at {
8
+
9
+ inline int64_t divup(int64_t x, int64_t y) {
10
+ return (x + y - 1) / y;
11
+ }
12
+
13
+ // Called during new thread initialization
14
+ TORCH_API void init_num_threads();
15
+
16
+ // Sets the number of threads to be used in parallel region
17
+ TORCH_API void set_num_threads(int);
18
+
19
+ // Returns the maximum number of threads that may be used in a parallel region
20
+ TORCH_API int get_num_threads();
21
+
22
+ // Returns the current thread number (starting from 0)
23
+ // in the current parallel region, or 0 in the sequential region
24
+ TORCH_API int get_thread_num();
25
+
26
+ // Checks whether the code runs in parallel region
27
+ TORCH_API bool in_parallel_region();
28
+
29
+ namespace internal {
30
+
31
+ // Initialise num_threads lazily at first parallel call
32
+ inline void lazy_init_num_threads() {
33
+ thread_local bool init = false;
34
+ if (C10_UNLIKELY(!init)) {
35
+ at::init_num_threads();
36
+ init = true;
37
+ }
38
+ }
39
+
40
+ TORCH_API void set_thread_num(int);
41
+
42
+ class TORCH_API ThreadIdGuard {
43
+ public:
44
+ ThreadIdGuard(int new_id) : old_id_(at::get_thread_num()) {
45
+ set_thread_num(new_id);
46
+ }
47
+
48
+ ~ThreadIdGuard() {
49
+ set_thread_num(old_id_);
50
+ }
51
+
52
+ private:
53
+ int old_id_;
54
+ };
55
+
56
+ } // namespace internal
57
+
58
+ /*
59
+ parallel_for
60
+
61
+ begin: index at which to start applying user function
62
+
63
+ end: index at which to stop applying user function
64
+
65
+ grain_size: number of elements per chunk. impacts the degree of parallelization
66
+
67
+ f: user function applied in parallel to the chunks, signature:
68
+ void f(int64_t begin, int64_t end)
69
+
70
+ Warning: parallel_for does NOT copy thread local
71
+ states from the current thread to the worker threads.
72
+ This means for example that Tensor operations CANNOT be used in the
73
+ body of your function, only data pointers.
74
+ */
75
+ template <class F>
76
+ inline void parallel_for(
77
+ const int64_t begin,
78
+ const int64_t end,
79
+ const int64_t grain_size,
80
+ const F& f);
81
+
82
+ /*
83
+ parallel_reduce
84
+
85
+ begin: index at which to start applying reduction
86
+
87
+ end: index at which to stop applying reduction
88
+
89
+ grain_size: number of elements per chunk. impacts number of elements in
90
+ intermediate results tensor and degree of parallelization.
91
+
92
+ ident: identity for binary combination function sf. sf(ident, x) needs to return
93
+ x.
94
+
95
+ f: function for reduction over a chunk. f needs to be of signature scalar_t
96
+ f(int64_t partial_begin, int64_t partial_end, scalar_t identifiy)
97
+
98
+ sf: function to combine two partial results. sf needs to be of signature
99
+ scalar_t sf(scalar_t x, scalar_t y)
100
+
101
+ For example, you might have a tensor of 10000 entires and want to sum together
102
+ all the elements. Parallel_reduce with a grain_size of 2500 will then allocate
103
+ an intermediate result tensor with 4 elements. Then it will execute the function
104
+ "f" you provide and pass the beginning and end index of these chunks, so
105
+ 0-2499, 2500-4999, etc. and the combination identity. It will then write out
106
+ the result from each of these chunks into the intermediate result tensor. After
107
+ that it'll reduce the partial results from each chunk into a single number using
108
+ the combination function sf and the identity ident. For a total summation this
109
+ would be "+" and 0 respectively. This is similar to tbb's approach [1], where
110
+ you need to provide a function to accumulate a subrange, a function to combine
111
+ two partial results and an identity.
112
+
113
+ Warning: parallel_reduce does NOT copy thread local
114
+ states from the current thread to the worker threads.
115
+ This means for example that Tensor operations CANNOT be used in the
116
+ body of your function, only data pointers.
117
+
118
+ [1] https://software.intel.com/en-us/node/506154
119
+ */
120
+ template <class scalar_t, class F, class SF>
121
+ inline scalar_t parallel_reduce(
122
+ const int64_t begin,
123
+ const int64_t end,
124
+ const int64_t grain_size,
125
+ const scalar_t ident,
126
+ const F& f,
127
+ const SF& sf);
128
+
129
+ // Returns a detailed string describing parallelization settings
130
+ TORCH_API std::string get_parallel_info();
131
+
132
+ // Sets number of threads used for inter-op parallelism
133
+ TORCH_API void set_num_interop_threads(int);
134
+
135
+ // Returns the number of threads used for inter-op parallelism
136
+ TORCH_API int get_num_interop_threads();
137
+
138
+ // Launches inter-op parallel task
139
+ TORCH_API void launch(std::function<void()> func);
140
+ namespace internal {
141
+ void launch_no_thread_state(std::function<void()> fn);
142
+ } // namespace internal
143
+
144
+ // Launches intra-op parallel task
145
+ TORCH_API void intraop_launch(std::function<void()> func);
146
+
147
+ // Returns number of intra-op threads used by default
148
+ TORCH_API int intraop_default_num_threads();
149
+
150
+ } // namespace at
151
+
152
+ #if AT_PARALLEL_OPENMP
153
+ #include <ATen/ParallelOpenMP.h> // IWYU pragma: keep
154
+ #elif AT_PARALLEL_NATIVE
155
+ #include <ATen/ParallelNative.h> // IWYU pragma: keep
156
+ #elif AT_PARALLEL_NATIVE_TBB
157
+ #include <ATen/ParallelNativeTBB.h> // IWYU pragma: keep
158
+ #endif
159
+
160
+ #include <ATen/Parallel-inl.h> // IWYU pragma: keep
videollama2/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <atomic>
4
+ #include <cstddef>
5
+ #include <exception>
6
+
7
+ #include <c10/util/Exception.h>
8
+
9
+ #ifdef _WIN32
10
+ #ifndef WIN32_LEAN_AND_MEAN
11
+ #define WIN32_LEAN_AND_MEAN
12
+ #endif
13
+ #endif
14
+ #include <tbb/tbb.h>
15
+
16
+ #define INTRA_OP_PARALLEL
17
+
18
+ namespace at::internal {
19
+
20
+ template <typename F>
21
+ inline void invoke_parallel(
22
+ const int64_t begin,
23
+ const int64_t end,
24
+ const int64_t grain_size,
25
+ const F& f) {
26
+ // Choose number of tasks based on grain size and number of threads.
27
+ int64_t chunk_size = divup((end - begin), get_num_threads());
28
+ // Make sure each task is at least grain_size size.
29
+ chunk_size = std::max(grain_size, chunk_size);
30
+
31
+ std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
32
+ std::exception_ptr eptr;
33
+ tbb::parallel_for(
34
+ tbb::blocked_range<int64_t>(begin, end, chunk_size),
35
+ [&eptr, &err_flag, f](const tbb::blocked_range<int64_t>& r) {
36
+ try {
37
+ internal::ThreadIdGuard tid_guard(
38
+ tbb::this_task_arena::current_thread_index());
39
+ f(r.begin(), r.end());
40
+ } catch (...) {
41
+ if (!err_flag.test_and_set()) {
42
+ eptr = std::current_exception();
43
+ }
44
+ }
45
+ },
46
+ tbb::static_partitioner{});
47
+ if (eptr) {
48
+ std::rethrow_exception(eptr);
49
+ }
50
+ }
51
+
52
+ } // namespace at::internal
videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/DimVector.h>
4
+ #include <ATen/core/Dimname.h>
5
+ #include <c10/core/TensorOptions.h>
6
+ #include <c10/util/strides.h>
7
+
8
+ namespace at {
9
+
10
+ class Tensor;
11
+
12
+ namespace impl {
13
+
14
+ // Use this to define the prototype for a meta function. There are two
15
+ // versions; one that takes one argument (just the operator name), or FUNC2
16
+ // variant that takes two arguments (operator name and overload name).
17
+ //
18
+ // Example usage:
19
+ //
20
+ // TORCH_META_FUNC2(add, Tensor) (
21
+ // const Tensor& self, const Tensor& other
22
+ // ) {
23
+ // ... compute sizes and options ...
24
+ // set_output(sizes, options);
25
+ // }
26
+ //
27
+ #define TORCH_META_FUNC(name) void structured_##name::meta
28
+ #define TORCH_META_FUNC2(name, overload) \
29
+ void structured_##name##_##overload::meta
30
+
31
+ // These are versions of TORCH_META_FUNC(2) that include a precompute_out struct
32
+ // as a return value. They should be used when the kernel in question has
33
+ // precomputed values declared in native_functions.yaml and the corresponding
34
+ // implementation should return an instance of the aforementioned struct.
35
+ #define TORCH_PRECOMPUTE_META_FUNC(name) \
36
+ structured_##name::meta_return_ty structured_##name::meta
37
+ #define TORCH_PRECOMPUTE_META_FUNC2(name, overload) \
38
+ structured_##name##_##overload::meta_return_ty \
39
+ structured_##name##_##overload::meta
40
+
41
+ // Use this to create a precompute struct in a meta function.
42
+ #define TORCH_PRECOMPUTE_STRUCT(name) structured_##name::precompute_out<>
43
+ #define TORCH_PRECOMPUTE_STRUCT2(name, overload) \
44
+ structured_##name##_##overload::precompute_out<>
45
+
46
+ // Use this to define the prototype for an implementation. This takes only
47
+ // one argument, which is the name of the dispatch key entry you're
48
+ // implementing.
49
+ //
50
+ // Example usage:
51
+ //
52
+ // TORCH_IMPL_FUNC(add_cpu) (
53
+ // Tensor& result, const Tensor& self, const Tensor& other
54
+ // ) {
55
+ // ... do the actual implementation ...
56
+ // }
57
+ //
58
+ #define TORCH_IMPL_FUNC(name) void structured_##name::impl
59
+
60
+ // Base class for all structured kernel classes. The set_output virtual
61
+ // method is varied depending whether or not the operator is
62
+ // functional/out/inplace, and could also be specialized for CPU/CUDA/etc
63
+ // (although presently it isn't).
64
+ //
65
+ // A notable subclass of this interface is TensorIteratorBase.
66
+ struct TORCH_API MetaBase {
67
+ MetaBase() = default;
68
+ MetaBase(const MetaBase&) = default;
69
+ MetaBase& operator=(const MetaBase&) = default;
70
+ MetaBase(MetaBase&&) noexcept = default;
71
+ MetaBase& operator=(MetaBase&&) noexcept = default;
72
+ virtual const Tensor& maybe_get_output(int64_t output_idx) = 0;
73
+
74
+ // Note: [set_output_*]
75
+ // See: https://github.com/pytorch/pytorch/issues/69813
76
+ // Whenever defining the output properties in the META function of a
77
+ // structured kernel (what was usually done with `set_output`), use one of
78
+ // these 3 variants, instead. In order to decide which variant to use, check
79
+ // the following decision tree:
80
+ //
81
+ // - Can the kernel you are going to implement support output tensors
82
+ // with arbitrary strides?
83
+ // |
84
+ // -- YES: `set_output_raw_strided`
85
+ // |
86
+ // -- NO: Should the output tensor strides be contiguous?
87
+ // |
88
+ // -- YES: `set_output_contiguous`
89
+ // |
90
+ // -- NO: `set_output_strided`
91
+ //
92
+ // Use this function whenever the kernel requires specific strides for the
93
+ // output. If `strides` does not match the given output strides, proxy outputs
94
+ // will be created and passed to the IMPL function.
95
+ virtual void set_output_strided(
96
+ int64_t output_idx,
97
+ IntArrayRef sizes,
98
+ IntArrayRef strides,
99
+ TensorOptions options,
100
+ DimnameList names = {}) {
101
+ TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
102
+ }
103
+
104
+ // Use this function whenever the kernel knows how to handle arbitrary strided
105
+ // outputs. This function has the same behavior as the old `set_output`: it
106
+ // will only re-stride if the given output was resized.
107
+ virtual void set_output_raw_strided(
108
+ int64_t output_idx,
109
+ IntArrayRef sizes,
110
+ IntArrayRef strides_hint,
111
+ TensorOptions options,
112
+ DimnameList names = {}) {
113
+ TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
114
+ }
115
+
116
+ // Use this function if the kernel requires contiguous strides.
117
+ // Alias for `set_output_strided`, but with contiguous strides.
118
+ void set_output_contiguous(
119
+ int64_t output_idx,
120
+ IntArrayRef sizes,
121
+ TensorOptions options,
122
+ DimnameList names = {}) {
123
+ auto strides = c10::contiguous_strides(sizes);
124
+ set_output_strided(output_idx, sizes, strides, options, names);
125
+ }
126
+
127
+ // Returns a reference to an undefined tensor if there is no presupplied
128
+ // output
129
+ const Tensor& maybe_get_output() {
130
+ return maybe_get_output(0);
131
+ }
132
+ virtual ~MetaBase() = default;
133
+ };
134
+
135
+ } // namespace impl
136
+
137
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/WrapDimUtils.h>
4
+
5
+ namespace at::namedinference {
6
+
7
+ // TensorName and TensorNames are wrappers around Dimname and DimnameList
8
+ // that contain helper functions to make writing name inference rules easier.
9
+ //
10
+ // A TensorName represents a Dimname associated with some DimnameList (from a
11
+ // Tensor). This encapsulates all the information that is needed to check if
12
+ // names *match* and to *unify* names.
13
+ //
14
+ // Definition: Two names in two tensors *match* if they are equal, or if at
15
+ // least one of them is a wildcard that can be *refined* to the other name.
16
+ //
17
+ // Definition: unify(name, other) fails if the names do not match. Otherwise,
18
+ // it returns the most refined of name and other.
19
+ //
20
+ // Here is an example of checking if two names match.
21
+ // tensor: Tensor[A, None]
22
+ // other: Tensor[A]
23
+ //
24
+ // Let's say we wish to check if tensor.names[-1] matches other.names[-1].
25
+ // None (in tensor) cannot match A (in other) because if the None were refined
26
+ // to A, `tensor` would have duplicate names [A, A]. Therefore we need to check
27
+ // tensor.names [A, None] for the existence of A.
28
// A single Dimname together with the full DimnameList it came from, which is
// what name-inference needs in order to check matching and to unify names
// (see the file-level comment above for the definitions of *match*/*unify*).
struct TORCH_API TensorName {
  // Stores origin[origin_idx]. origin_idx may be negative; it is wrapped
  // against origin.size() via maybe_wrap_dim (Python-style indexing).
  explicit TensorName(ArrayRef<Dimname> origin, int origin_idx)
      : origin_(origin),
        name_(origin[maybe_wrap_dim(
            origin_idx,
            static_cast<int64_t>(origin.size()))]),
        origin_idx_(origin_idx) {}

  // Unifies this name with `other` (returns the more refined of the two if
  // they match). op_name is only used for error reporting.
  const TensorName& unify(const TensorName& other, const char* op_name) const;
  Dimname toDimname() const;

 private:
  ArrayRef<Dimname> origin_; // full name list of the originating tensor
  Dimname name_; // the (index-wrapped) name at origin_[origin_idx_]
  int origin_idx_; // A named tensor can have at most 64 dims.

  TORCH_API friend std::ostream& operator<<(
      std::ostream& out,
      const TensorName& tensorname);
};
49
+
50
+ using TensorNameVec = SmallVector<TensorName, 10>;
51
+
52
+ struct TORCH_API TensorNames {
53
+ explicit TensorNames(ArrayRef<Dimname> names);
54
+
55
+ // Create TensorNames from names[start:end]. Each individual TensorName stores
56
+ // `names`, NOT names[start:end], because the original tensor's names are
57
+ // `names`.
58
+ explicit TensorNames(ArrayRef<Dimname> names, int64_t start, int64_t end);
59
+
60
+ // op_name is only used for error reporting.
61
+ TensorNames& unifyFromRightInplace(
62
+ const TensorNames& other,
63
+ const char* op_name = "unify");
64
+ void checkUnique(const char* op_name) const;
65
+
66
+ void append(TensorName&& name);
67
+ std::vector<Dimname> toDimnameVec() const;
68
+
69
+ private:
70
+ explicit TensorNames(TensorNameVec&& names) : names_(names){};
71
+
72
+ TensorNameVec names_;
73
+ };
74
+
75
+ } // namespace at::namedinference
videollama2/lib/python3.10/site-packages/torch/include/ATen/ceil_div.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/macros/Macros.h>
3
+ #include <type_traits>
4
+
5
+ namespace at {
6
+
7
+ /**
8
+ Computes ceil(a / b)
9
+ */
10
+ template <typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
11
+ C10_ALWAYS_INLINE C10_HOST_DEVICE T ceil_div(T a, T b) {
12
+ return (a + b - 1) / b;
13
+ }
14
+
15
+ /**
16
+ Computes ceil(a / b) * b; i.e., rounds up `a` to the next highest
17
+ multiple of b
18
+ */
19
+ template <typename T>
20
+ C10_ALWAYS_INLINE C10_HOST_DEVICE T round_up(T a, T b) {
21
+ return ceil_div(a, b) * b;
22
+ }
23
+
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/jit_macros.h ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/cuda/CUDAConfig.h>
3
+ #include <string>
4
+
5
+ // AT_USE_JITERATOR(), controls whether we jit some elementwise kernels
6
+ #define AT_USE_JITERATOR() true
7
+ #define jiterator_stringify(...) std::string(#__VA_ARGS__);
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <functional>
22
+ #include <optional>
23
+ #include <vector>
24
+
25
+ #include "arrow/compute/exec.h"
26
+ #include "arrow/result.h"
27
+
28
+ namespace arrow {
29
+ namespace acero {
30
+ namespace util {
31
+
32
+ using arrow::compute::ExecBatch;
33
+
34
+ /// \brief A container that accumulates batches until they are ready to
35
+ /// be processed.
36
+ class AccumulationQueue {
37
+ public:
38
+ AccumulationQueue() : row_count_(0) {}
39
+ ~AccumulationQueue() = default;
40
+
41
+ // We should never be copying ExecBatch around
42
+ AccumulationQueue(const AccumulationQueue&) = delete;
43
+ AccumulationQueue& operator=(const AccumulationQueue&) = delete;
44
+
45
+ AccumulationQueue(AccumulationQueue&& that);
46
+ AccumulationQueue& operator=(AccumulationQueue&& that);
47
+
48
+ void Concatenate(AccumulationQueue&& that);
49
+ void InsertBatch(ExecBatch batch);
50
+ int64_t row_count() { return row_count_; }
51
+ size_t batch_count() { return batches_.size(); }
52
+ bool empty() const { return batches_.empty(); }
53
+ void Clear();
54
+ ExecBatch& operator[](size_t i);
55
+
56
+ private:
57
+ int64_t row_count_;
58
+ std::vector<ExecBatch> batches_;
59
+ };
60
+
61
+ /// A queue that sequences incoming batches
62
+ ///
63
+ /// This can be used when a node needs to do some kind of ordered processing on
64
+ /// the stream.
65
+ ///
66
+ /// Batches can be inserted in any order. The process_callback will be called on
67
+ /// the batches, in order, without reentrant calls. For this reason the callback
68
+ /// should be quick.
69
+ ///
70
+ /// For example, in a top-n node, the process callback should determine how many
71
+ /// rows need to be delivered for the given batch, and then return a task to actually
72
+ /// deliver those rows.
73
+ class SequencingQueue {
74
+ public:
75
+ using Task = std::function<Status()>;
76
+
77
+ /// Strategy that describes how to handle items
78
+ class Processor {
79
+ public:
80
+ /// Process the batch, potentially generating a task
81
+ ///
82
+ /// This method will be called on each batch in order. Calls to this method
83
+ /// will be serialized and it will not be called reentrantly. This makes it
84
+ /// safe to do things that rely on order but minimal time should be spent here
85
+ /// to avoid becoming a bottleneck.
86
+ ///
87
+ /// \return a follow-up task that will be scheduled. The follow-up task(s) are
88
+ /// is not guaranteed to run in any particular order. If nullopt is
89
+ /// returned then nothing will be scheduled.
90
+ virtual Result<std::optional<Task>> Process(ExecBatch batch) = 0;
91
+ /// Schedule a task
92
+ virtual void Schedule(Task task) = 0;
93
+ };
94
+
95
+ virtual ~SequencingQueue() = default;
96
+
97
+ /// Insert a batch into the queue
98
+ ///
99
+ /// This will insert the batch into the queue. If this batch was the next batch
100
+ /// to deliver then this will trigger 1+ calls to the process callback to generate
101
+ /// 1+ tasks.
102
+ ///
103
+ /// The task generated by this call will be executed immediately. The remaining
104
+ /// tasks will be scheduled using the schedule callback.
105
+ ///
106
+ /// From a data pipeline perspective the sequencing queue is a "sometimes" breaker. If
107
+ /// a task arrives in order then this call will usually execute the downstream pipeline.
108
+ /// If this task arrives early then this call will only queue the data.
109
+ virtual Status InsertBatch(ExecBatch batch) = 0;
110
+
111
+ /// Create a queue
112
+ /// \param processor describes how to process the batches, must outlive the queue
113
+ static std::unique_ptr<SequencingQueue> Make(Processor* processor);
114
+ };
115
+
116
+ /// A queue that sequences incoming batches
117
+ ///
118
+ /// Unlike SequencingQueue the Process method is not expected to schedule new tasks.
119
+ ///
120
+ /// If a batch arrives and another thread is currently processing then the batch
121
+ /// will be queued and control will return. In other words, delivery of batches will
122
+ /// not block on the Process method.
123
+ ///
124
+ /// It can be helpful to think of this as if a dedicated thread is running Process as
125
+ /// batches arrive
126
+ class SerialSequencingQueue {
127
+ public:
128
+ /// Strategy that describes how to handle items
129
+ class Processor {
130
+ public:
131
+ /// Process the batch
132
+ ///
133
+ /// This method will be called on each batch in order. Calls to this method
134
+ /// will be serialized and it will not be called reentrantly. This makes it
135
+ /// safe to do things that rely on order.
136
+ ///
137
+ /// If this falls behind then data may accumulate
138
+ ///
139
+ /// TODO: Could add backpressure if needed but right now all uses of this should
140
+ /// be pretty fast and so are unlikely to block.
141
+ virtual Status Process(ExecBatch batch) = 0;
142
+ };
143
+
144
+ virtual ~SerialSequencingQueue() = default;
145
+
146
+ /// Insert a batch into the queue
147
+ ///
148
+ /// This will insert the batch into the queue. If this batch was the next batch
149
+ /// to deliver then this may trigger calls to the processor which will be run
150
+ /// as part of this call.
151
+ virtual Status InsertBatch(ExecBatch batch) = 0;
152
+
153
+ /// Create a queue
154
+ /// \param processor describes how to process the batches, must outlive the queue
155
+ static std::unique_ptr<SerialSequencingQueue> Make(Processor* processor);
156
+ };
157
+
158
+ } // namespace util
159
+ } // namespace acero
160
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <vector>
24
+
25
+ #include "arrow/acero/visibility.h"
26
+ #include "arrow/compute/api_aggregate.h"
27
+ #include "arrow/compute/type_fwd.h"
28
+ #include "arrow/result.h"
29
+ #include "arrow/type_fwd.h"
30
+
31
+ namespace arrow {
32
+ namespace acero {
33
+ namespace aggregate {
34
+
35
+ using compute::Aggregate;
36
+ using compute::default_exec_context;
37
+ using compute::ExecContext;
38
+
39
+ /// \brief Make the output schema of an aggregate node
40
+ ///
41
+ /// The output schema is determined by the aggregation kernels, which may depend on the
42
+ /// ExecContext argument. To guarantee correct results, the same ExecContext argument
43
+ /// should be used in execution.
44
+ ///
45
+ /// \param[in] input_schema the schema of the input to the node
46
+ /// \param[in] keys the grouping keys for the aggregation
47
+ /// \param[in] segment_keys the segmenting keys for the aggregation
48
+ /// \param[in] aggregates the aggregates for the aggregation
49
+ /// \param[in] exec_ctx the execution context for the aggregation
50
+ ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> MakeOutputSchema(
51
+ const std::shared_ptr<Schema>& input_schema, const std::vector<FieldRef>& keys,
52
+ const std::vector<FieldRef>& segment_keys, const std::vector<Aggregate>& aggregates,
53
+ ExecContext* exec_ctx = default_exec_context());
54
+
55
+ } // namespace aggregate
56
+ } // namespace acero
57
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <string>
22
+ #include <vector>
23
+
24
+ #include "benchmark/benchmark.h"
25
+
26
+ #include "arrow/acero/exec_plan.h"
27
+ #include "arrow/acero/test_util_internal.h"
28
+ #include "arrow/compute/exec.h"
29
+
30
+ namespace arrow {
31
+
32
+ namespace acero {
33
+
34
+ Status BenchmarkNodeOverhead(benchmark::State& state, int32_t num_batches,
35
+ int32_t batch_size, arrow::acero::BatchesWithSchema data,
36
+ std::vector<arrow::acero::Declaration>& node_declarations,
37
+ arrow::MemoryPool* pool = default_memory_pool());
38
+
39
+ Status BenchmarkIsolatedNodeOverhead(benchmark::State& state,
40
+ arrow::compute::Expression expr, int32_t num_batches,
41
+ int32_t batch_size,
42
+ arrow::acero::BatchesWithSchema data,
43
+ std::string factory_name,
44
+ arrow::acero::ExecNodeOptions& options,
45
+ arrow::MemoryPool* pool = default_memory_pool());
46
+
47
+ } // namespace acero
48
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h ADDED
@@ -0,0 +1,819 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstddef>
21
+ #include <cstdint>
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <optional>
25
+ #include <string>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "arrow/acero/type_fwd.h"
30
+ #include "arrow/acero/visibility.h"
31
+ #include "arrow/compute/api_vector.h"
32
+ #include "arrow/compute/exec.h"
33
+ #include "arrow/compute/ordering.h"
34
+ #include "arrow/type_fwd.h"
35
+ #include "arrow/util/future.h"
36
+ #include "arrow/util/macros.h"
37
+ #include "arrow/util/tracing.h"
38
+ #include "arrow/util/type_fwd.h"
39
+
40
+ namespace arrow {
41
+
42
+ using compute::ExecBatch;
43
+ using compute::ExecContext;
44
+ using compute::FunctionRegistry;
45
+ using compute::GetFunctionRegistry;
46
+ using compute::Ordering;
47
+ using compute::threaded_exec_context;
48
+
49
+ namespace acero {
50
+
51
+ /// \addtogroup acero-internals
52
+ /// @{
53
+
54
+ class ARROW_ACERO_EXPORT ExecPlan : public std::enable_shared_from_this<ExecPlan> {
55
+ public:
56
+ // This allows operators to rely on signed 16-bit indices
57
+ static const uint32_t kMaxBatchSize = 1 << 15;
58
+ using NodeVector = std::vector<ExecNode*>;
59
+
60
+ virtual ~ExecPlan() = default;
61
+
62
+ QueryContext* query_context();
63
+
64
+ /// \brief retrieve the nodes in the plan
65
+ const NodeVector& nodes() const;
66
+
67
+ /// Make an empty exec plan
68
+ static Result<std::shared_ptr<ExecPlan>> Make(
69
+ QueryOptions options, ExecContext exec_context = *threaded_exec_context(),
70
+ std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);
71
+
72
+ static Result<std::shared_ptr<ExecPlan>> Make(
73
+ ExecContext exec_context = *threaded_exec_context(),
74
+ std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);
75
+
76
+ static Result<std::shared_ptr<ExecPlan>> Make(
77
+ QueryOptions options, ExecContext* exec_context,
78
+ std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);
79
+
80
+ static Result<std::shared_ptr<ExecPlan>> Make(
81
+ ExecContext* exec_context,
82
+ std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);
83
+
84
+ ExecNode* AddNode(std::unique_ptr<ExecNode> node);
85
+
86
+ template <typename Node, typename... Args>
87
+ Node* EmplaceNode(Args&&... args) {
88
+ std::unique_ptr<Node> node{new Node{std::forward<Args>(args)...}};
89
+ auto out = node.get();
90
+ AddNode(std::move(node));
91
+ return out;
92
+ }
93
+
94
+ Status Validate();
95
+
96
+ /// \brief Start producing on all nodes
97
+ ///
98
+ /// Nodes are started in reverse topological order, such that any node
99
+ /// is started before all of its inputs.
100
+ void StartProducing();
101
+
102
+ /// \brief Stop producing on all nodes
103
+ ///
104
+ /// Triggers all sources to stop producing new data. In order to cleanly stop the plan
105
+ /// will continue to run any tasks that are already in progress. The caller should
106
+ /// still wait for `finished` to complete before destroying the plan.
107
+ void StopProducing();
108
+
109
+ /// \brief A future which will be marked finished when all tasks have finished.
110
+ Future<> finished();
111
+
112
+ /// \brief Return whether the plan has non-empty metadata
113
+ bool HasMetadata() const;
114
+
115
+ /// \brief Return the plan's attached metadata
116
+ std::shared_ptr<const KeyValueMetadata> metadata() const;
117
+
118
+ std::string ToString() const;
119
+ };
120
+
121
+ // Acero can be extended by providing custom implementations of ExecNode. The methods
122
+ // below are documented in detail and provide careful instruction on how to fulfill the
123
+ // ExecNode contract. It's suggested you familiarize yourself with the Acero
124
+ // documentation in the C++ user guide.
125
+ class ARROW_ACERO_EXPORT ExecNode {
126
+ public:
127
+ using NodeVector = std::vector<ExecNode*>;
128
+
129
+ virtual ~ExecNode() = default;
130
+
131
+ virtual const char* kind_name() const = 0;
132
+
133
+ // The number of inputs expected by this node
134
+ int num_inputs() const { return static_cast<int>(inputs_.size()); }
135
+
136
+ /// This node's predecessors in the exec plan
137
+ const NodeVector& inputs() const { return inputs_; }
138
+
139
+ /// True if the plan has no output schema (is a sink)
140
+ bool is_sink() const { return !output_schema_; }
141
+
142
+ /// \brief Labels identifying the function of each input.
143
+ const std::vector<std::string>& input_labels() const { return input_labels_; }
144
+
145
+ /// This node's successor in the exec plan
146
+ const ExecNode* output() const { return output_; }
147
+
148
+ /// The datatypes for batches produced by this node
149
+ const std::shared_ptr<Schema>& output_schema() const { return output_schema_; }
150
+
151
+ /// This node's exec plan
152
+ ExecPlan* plan() { return plan_; }
153
+
154
+ /// \brief An optional label, for display and debugging
155
+ ///
156
+ /// There is no guarantee that this value is non-empty or unique.
157
+ const std::string& label() const { return label_; }
158
+ void SetLabel(std::string label) { label_ = std::move(label); }
159
+
160
+ virtual Status Validate() const;
161
+
162
+ /// \brief the ordering of the output batches
163
+ ///
164
+ /// This does not guarantee the batches will be emitted by this node
165
+ /// in order. Instead it guarantees that the batches will have their
166
+ /// ExecBatch::index property set in a way that respects this ordering.
167
+ ///
168
+ /// In other words, given the ordering {{"x", SortOrder::Ascending}} we
169
+ /// know that all values of x in a batch with index N will be less than
170
+ /// or equal to all values of x in a batch with index N+k (assuming k > 0).
171
+ /// Furthermore, we also know that values will be sorted within a batch.
172
+ /// Any row N will have a value of x that is less than the value for
173
+ /// any row N+k.
174
+ ///
175
+ /// Note that an ordering can be both Ordering::Unordered and Ordering::Implicit.
176
+ /// A node's output should be marked Ordering::Unordered if the order is
177
+ /// non-deterministic. For example, a hash-join has no predictable output order.
178
+ ///
179
+ /// If the ordering is Ordering::Implicit then there is a meaningful order but that
180
+ /// ordering is not represented by any column in the data. The most common case for
181
+ /// this is when reading data from an in-memory table. The data has an implicit "row
182
+ /// order" which is not necessarily represented in the data set.
183
+ ///
184
+ /// A filter or project node will not modify the ordering. Nothing needs to be done
185
+ /// other than ensure the index assigned to output batches is the same as the
186
+ /// input batch that was mapped.
187
+ ///
188
+ /// Other nodes may introduce order. For example, an order-by node will emit
189
+ /// a brand new ordering independent of the input ordering.
190
+ ///
191
+ /// Finally, as described above, such as a hash-join or aggregation may may
192
+ /// destroy ordering (although these nodes could also choose to establish a
193
+ /// new ordering based on the hash keys).
194
+ ///
195
+ /// Some nodes will require an ordering. For example, a fetch node or an
196
+ /// asof join node will only function if the input data is ordered (for fetch
197
+ /// it is enough to be implicitly ordered. For an asof join the ordering must
198
+ /// be explicit and compatible with the on key.)
199
+ ///
200
+ /// Nodes that maintain ordering should be careful to avoid introducing gaps
201
+ /// in the batch index. This may require emitting empty batches in order to
202
+ /// maintain continuity.
203
+ virtual const Ordering& ordering() const;
204
+
205
+ /// Upstream API:
206
+ /// These functions are called by input nodes that want to inform this node
207
+ /// about an updated condition (a new input batch or an impending
208
+ /// end of stream).
209
+ ///
210
+ /// Implementation rules:
211
+ /// - these may be called anytime after StartProducing() has succeeded
212
+ /// (and even during or after StopProducing())
213
+ /// - these may be called concurrently
214
+ /// - these are allowed to call back into PauseProducing(), ResumeProducing()
215
+ /// and StopProducing()
216
+
217
+ /// Transfer input batch to ExecNode
218
+ ///
219
+ /// A node will typically perform some kind of operation on the batch
220
+ /// and then call InputReceived on its outputs with the result.
221
+ ///
222
+ /// Other nodes may need to accumulate some number of inputs before any
223
+ /// output can be produced. These nodes will add the batch to some kind
224
+ /// of in-memory accumulation queue and return.
225
+ virtual Status InputReceived(ExecNode* input, ExecBatch batch) = 0;
226
+
227
+ /// Mark the inputs finished after the given number of batches.
228
+ ///
229
+ /// This may be called before all inputs are received. This simply fixes
230
+ /// the total number of incoming batches for an input, so that the ExecNode
231
+ /// knows when it has received all input, regardless of order.
232
+ virtual Status InputFinished(ExecNode* input, int total_batches) = 0;
233
+
234
+ /// \brief Perform any needed initialization
235
+ ///
236
+ /// This hook performs any actions in between creation of ExecPlan and the call to
237
+ /// StartProducing. An example could be Bloom filter pushdown. The order of ExecNodes
238
+ /// that executes this method is undefined, but the calls are made synchronously.
239
+ ///
240
+ /// At this point a node can rely on all inputs & outputs (and the input schemas)
241
+ /// being well defined.
242
+ virtual Status Init();
243
+
244
+ /// Lifecycle API:
245
+ /// - start / stop to initiate and terminate production
246
+ /// - pause / resume to apply backpressure
247
+ ///
248
+ /// Implementation rules:
249
+ /// - StartProducing() should not recurse into the inputs, as it is
250
+ /// handled by ExecPlan::StartProducing()
251
+ /// - PauseProducing(), ResumeProducing(), StopProducing() may be called
252
+ /// concurrently, potentially even before the call to StartProducing
253
+ /// has finished.
254
+ /// - PauseProducing(), ResumeProducing(), StopProducing() may be called
255
+ /// by the downstream nodes' InputReceived(), InputFinished() methods
256
+ ///
257
+ /// StopProducing may be called due to an error, by the user (e.g. cancel), or
258
+ /// because a node has all the data it needs (e.g. limit, top-k on sorted data).
259
+ /// This means the method may be called multiple times and we have the following
260
+ /// additional rules
261
+ /// - StopProducing() must be idempotent
262
+ /// - StopProducing() must be forwarded to inputs (this is needed for the limit/top-k
263
+ /// case because we may not be stopping the entire plan)
264
+
265
+ // Right now, since synchronous calls happen in both directions (input to
266
+ // output and then output to input), a node must be careful to be reentrant
267
+ // against synchronous calls from its output, *and* also concurrent calls from
268
+ // other threads. The most reliable solution is to update the internal state
269
+ // first, and notify outputs only at the end.
270
+ //
271
+ // Concurrent calls to PauseProducing and ResumeProducing can be hard to sequence
272
+ // as they may travel at different speeds through the plan.
273
+ //
274
+ // For example, consider a resume that comes quickly after a pause. If the source
275
+ // receives the resume before the pause the source may think the destination is full
276
+ // and halt production which would lead to deadlock.
277
+ //
278
+ // To resolve this a counter is sent for all calls to pause/resume. Only the call with
279
+ // the highest counter value is valid. So if a call to PauseProducing(5) comes after
280
+ // a call to ResumeProducing(6) then the source should continue producing.
281
+
282
+ /// \brief Start producing
283
+ ///
284
+ /// This must only be called once.
285
+ ///
286
+ /// This is typically called automatically by ExecPlan::StartProducing().
287
+ virtual Status StartProducing() = 0;
288
+
289
+ /// \brief Pause producing temporarily
290
+ ///
291
+ /// \param output Pointer to the output that is full
292
+ /// \param counter Counter used to sequence calls to pause/resume
293
+ ///
294
+ /// This call is a hint that an output node is currently not willing
295
+ /// to receive data.
296
+ ///
297
+ /// This may be called any number of times.
298
+ /// However, the node is still free to produce data (which may be difficult
299
+ /// to prevent anyway if data is produced using multiple threads).
300
+ virtual void PauseProducing(ExecNode* output, int32_t counter) = 0;
301
+
302
+ /// \brief Resume producing after a temporary pause
303
+ ///
304
+ /// \param output Pointer to the output that is now free
305
+ /// \param counter Counter used to sequence calls to pause/resume
306
+ ///
307
+ /// This call is a hint that an output node is willing to receive data again.
308
+ ///
309
+ /// This may be called any number of times.
310
+ virtual void ResumeProducing(ExecNode* output, int32_t counter) = 0;
311
+
312
+ /// \brief Stop producing new data
313
+ ///
314
+ /// If this node is a source then the source should stop generating data
315
+ /// as quickly as possible. If this node is not a source then there is typically
316
+ /// nothing that needs to be done although a node may choose to start ignoring incoming
317
+ /// data.
318
+ ///
319
+ /// This method will be called when an error occurs in the plan
320
+ /// This method may also be called by the user if they wish to end a plan early
321
+ /// Finally, this method may be called if a node determines it no longer needs any more
322
+ /// input (for example, a limit node).
323
+ ///
324
+ /// This method may be called multiple times.
325
+ ///
326
+ /// This is not a pause. There will be no way to start the source again after this has
327
+ /// been called.
328
+ virtual Status StopProducing();
329
+
330
+ std::string ToString(int indent = 0) const;
331
+
332
+ protected:
333
+ ExecNode(ExecPlan* plan, NodeVector inputs, std::vector<std::string> input_labels,
334
+ std::shared_ptr<Schema> output_schema);
335
+
336
+ virtual Status StopProducingImpl() = 0;
337
+
338
+ /// Provide extra info to include in the string representation.
339
+ virtual std::string ToStringExtra(int indent = 0) const;
340
+
341
+ std::atomic<bool> stopped_;
342
+ ExecPlan* plan_;
343
+ std::string label_;
344
+
345
+ NodeVector inputs_;
346
+ std::vector<std::string> input_labels_;
347
+
348
+ std::shared_ptr<Schema> output_schema_;
349
+ ExecNode* output_ = NULLPTR;
350
+ };
351
+
352
/// \brief An extensible registry for factories of ExecNodes
class ARROW_ACERO_EXPORT ExecFactoryRegistry {
 public:
  /// A factory function: given the owning plan, the already-constructed input
  /// nodes, and node options, creates an ExecNode or fails with an error status.
  using Factory = std::function<Result<ExecNode*>(ExecPlan*, std::vector<ExecNode*>,
                                                  const ExecNodeOptions&)>;

  virtual ~ExecFactoryRegistry() = default;

  /// \brief Get the named factory from this registry
  ///
  /// will raise if factory_name is not found
  virtual Result<Factory> GetFactory(const std::string& factory_name) = 0;

  /// \brief Add a factory to this registry with the provided name
  ///
  /// will raise if factory_name is already in the registry
  virtual Status AddFactory(std::string factory_name, Factory factory) = 0;
};
370
+
371
/// The default registry, which includes built-in factories.
///
/// NOTE(review): callers should not delete the returned pointer — it presumably
/// refers to a long-lived global registry (confirm against the implementation).
ARROW_ACERO_EXPORT
ExecFactoryRegistry* default_exec_factory_registry();
374
+
375
+ /// \brief Construct an ExecNode using the named factory
376
+ inline Result<ExecNode*> MakeExecNode(
377
+ const std::string& factory_name, ExecPlan* plan, std::vector<ExecNode*> inputs,
378
+ const ExecNodeOptions& options,
379
+ ExecFactoryRegistry* registry = default_exec_factory_registry()) {
380
+ ARROW_ASSIGN_OR_RAISE(auto factory, registry->GetFactory(factory_name));
381
+ return factory(plan, std::move(inputs), options);
382
+ }
383
+
384
+ /// @}
385
+
386
+ /// \addtogroup acero-api
387
+ /// @{
388
+
389
/// \brief Helper class for declaring execution nodes
///
/// A Declaration represents an unconstructed ExecNode (and potentially an entire graph
/// since its inputs may also be Declarations)
///
/// A Declaration can be converted to a plan and executed using one of the
/// DeclarationToXyz methods.
///
/// For more direct control, a Declaration can be added to an existing execution
/// plan with Declaration::AddToPlan, which will recursively construct any inputs as
/// necessary.
struct ARROW_ACERO_EXPORT Declaration {
  /// An input is either an already-constructed node or a nested declaration.
  using Input = std::variant<ExecNode*, Declaration>;

  Declaration() {}

  /// \brief construct a declaration
  /// \param factory_name the name of the exec node to construct. The node must have
  ///                     been added to the exec node registry with this name.
  /// \param inputs the inputs to the node, these should be other declarations
  /// \param options options that control the behavior of the node. You must use
  ///                the appropriate subclass. For example, if `factory_name` is
  ///                "project" then `options` should be ProjectNodeOptions.
  /// \param label a label to give the node. Can be used to distinguish it from other
  ///              nodes of the same type in the plan.
  Declaration(std::string factory_name, std::vector<Input> inputs,
              std::shared_ptr<ExecNodeOptions> options, std::string label)
      : factory_name{std::move(factory_name)},
        inputs{std::move(inputs)},
        options{std::move(options)},
        label{std::move(label)} {}

  /// Convenience overload: wraps a concrete options value in a shared_ptr and
  /// delegates to the primary constructor.
  template <typename Options>
  Declaration(std::string factory_name, std::vector<Input> inputs, Options options,
              std::string label)
      : Declaration{std::move(factory_name), std::move(inputs),
                    std::shared_ptr<ExecNodeOptions>(
                        std::make_shared<Options>(std::move(options))),
                    std::move(label)} {}

  /// Convenience overload: no label.
  template <typename Options>
  Declaration(std::string factory_name, std::vector<Input> inputs, Options options)
      : Declaration{std::move(factory_name), std::move(inputs), std::move(options),
                    /*label=*/""} {}

  /// Convenience overload: no inputs (e.g. for a source node) and no label.
  template <typename Options>
  Declaration(std::string factory_name, Options options)
      : Declaration{std::move(factory_name), {}, std::move(options), /*label=*/""} {}

  /// Convenience overload: no inputs (e.g. for a source node).
  template <typename Options>
  Declaration(std::string factory_name, Options options, std::string label)
      : Declaration{std::move(factory_name), {}, std::move(options), std::move(label)} {}

  /// \brief Convenience factory for the common case of a simple sequence of nodes.
  ///
  /// Each of decls will be appended to the inputs of the subsequent declaration,
  /// and the final modified declaration will be returned.
  ///
  /// Without this convenience factory, constructing a sequence would require explicit,
  /// difficult-to-read nesting:
  ///
  ///     Declaration{"n3",
  ///                 {
  ///                     Declaration{"n2",
  ///                                 {
  ///                                     Declaration{"n1",
  ///                                                 {
  ///                                                     Declaration{"n0", N0Opts{}},
  ///                                                 },
  ///                                                 N1Opts{}},
  ///                                 },
  ///                                 N2Opts{}},
  ///                 },
  ///                 N3Opts{}};
  ///
  /// An equivalent Declaration can be constructed more tersely using Sequence:
  ///
  ///     Declaration::Sequence({
  ///         {"n0", N0Opts{}},
  ///         {"n1", N1Opts{}},
  ///         {"n2", N2Opts{}},
  ///         {"n3", N3Opts{}},
  ///     });
  static Declaration Sequence(std::vector<Declaration> decls);

  /// \brief add the declaration to an already created execution plan
  /// \param plan the plan to add the node to
  /// \param registry the registry to use to lookup the node factory
  ///
  /// This method will recursively call AddToPlan on all of the declaration's inputs.
  /// This method is only for advanced use when the DeclarationToXyz methods are not
  /// sufficient.
  ///
  /// \return the instantiated execution node
  Result<ExecNode*> AddToPlan(ExecPlan* plan, ExecFactoryRegistry* registry =
                                                  default_exec_factory_registry()) const;

  /// \brief Validate a declaration
  bool IsValid(ExecFactoryRegistry* registry = default_exec_factory_registry()) const;

  /// \brief the name of the factory to use when creating a node
  std::string factory_name;
  /// \brief the declaration's inputs
  std::vector<Input> inputs;
  /// \brief options to control the behavior of the node
  std::shared_ptr<ExecNodeOptions> options;
  /// \brief a label to give the node in the plan
  std::string label;
};
498
+
499
/// \brief How to handle unaligned buffers
///
/// See QueryOptions::unaligned_buffer_handling for a full description of each policy.
enum class UnalignedBufferHandling {
  kWarn,       ///< check source buffers and emit a warning if any are unaligned
  kIgnore,     ///< do not check whether source buffers are aligned
  kReallocate, ///< copy unaligned buffers into new, suitably aligned allocations
  kError       ///< gracefully abort the plan if an unaligned buffer is found
};

/// \brief get the default behavior of unaligned buffer handling
///
/// This is configurable via the ACERO_ALIGNMENT_HANDLING environment variable which
/// can be set to "warn", "ignore", "reallocate", or "error". If the environment
/// variable is not set, or is set to an invalid value, this will return kWarn
UnalignedBufferHandling GetDefaultUnalignedBufferHandling();
508
+
509
/// \brief plan-wide options that can be specified when executing an execution plan
struct ARROW_ACERO_EXPORT QueryOptions {
  /// \brief Should the plan use a legacy batching strategy
  ///
  /// This is currently in place only to support the Scanner::ToTable
  /// method. This method relies on batch indices from the scanner
  /// remaining consistent. This is impractical in the ExecPlan which
  /// might slice batches as needed (e.g. for a join)
  ///
  /// However, it still works for simple plans and this is the only way
  /// we have at the moment for maintaining implicit order.
  bool use_legacy_batching = false;

  /// If the output has a meaningful order then sequence the output of the plan
  ///
  /// The default behavior (std::nullopt) will sequence output batches if there
  /// is a meaningful ordering in the final node and will emit batches immediately
  /// otherwise.
  ///
  /// If explicitly set to true then plan execution will fail if there is no
  /// meaningful ordering. This can be useful to validate a query that should
  /// be emitting ordered results.
  ///
  /// If explicitly set to false then batches will be emitted immediately even if there
  /// is a meaningful ordering. This could cause batches to be emitted out of order but
  /// may offer a small decrease to latency.
  std::optional<bool> sequence_output = std::nullopt;

  /// \brief should the plan use multiple background threads for CPU-intensive work
  ///
  /// If this is false then all CPU work will be done on the calling thread. I/O tasks
  /// will still happen on the I/O executor and may be multi-threaded (but should not use
  /// significant CPU resources).
  ///
  /// Will be ignored if custom_cpu_executor is set
  bool use_threads = true;

  /// \brief custom executor to use for CPU-intensive work
  ///
  /// Must be null or remain valid for the duration of the plan. If this is null then
  /// a default thread pool will be chosen whose behavior will be controlled by
  /// the `use_threads` option.
  ::arrow::internal::Executor* custom_cpu_executor = NULLPTR;

  /// \brief custom executor to use for IO work
  ///
  /// Must be null or remain valid for the duration of the plan. If this is null then
  /// the global io thread pool will be chosen whose behavior will be controlled by
  /// the "ARROW_IO_THREADS" environment.
  ::arrow::internal::Executor* custom_io_executor = NULLPTR;

  /// \brief a memory pool to use for allocations
  ///
  /// Must remain valid for the duration of the plan.
  MemoryPool* memory_pool = default_memory_pool();

  /// \brief a function registry to use for the plan
  ///
  /// Must remain valid for the duration of the plan.
  FunctionRegistry* function_registry = GetFunctionRegistry();

  /// \brief the names of the output columns
  ///
  /// If this is empty then names will be generated based on the input columns
  ///
  /// If set then the number of names must equal the number of output columns
  std::vector<std::string> field_names;

  /// \brief Policy for unaligned buffers in source data
  ///
  /// Various compute functions and acero internals will type pun array
  /// buffers from uint8_t* to some kind of value type (e.g. we might
  /// cast to int32_t* to add two int32 arrays)
  ///
  /// If the buffer is poorly aligned (e.g. an int32 array is not aligned
  /// on a 4-byte boundary) then this is technically undefined behavior in C++.
  /// However, most modern compilers and CPUs are fairly tolerant of this
  /// behavior and nothing bad (beyond a small hit to performance) is likely
  /// to happen.
  ///
  /// Note that this only applies to source buffers. All buffers allocated internally
  /// by Acero will be suitably aligned.
  ///
  /// If this field is set to kWarn then Acero will check if any buffers are unaligned
  /// and, if they are, will emit a warning.
  ///
  /// If this field is set to kReallocate then Acero will allocate a new, suitably aligned
  /// buffer and copy the contents from the old buffer into this new buffer.
  ///
  /// If this field is set to kError then Acero will gracefully abort the plan instead.
  ///
  /// If this field is set to kIgnore then Acero will not even check if the buffers are
  /// unaligned.
  ///
  /// If this field is not set then it will be treated as kWarn unless overridden
  /// by the ACERO_ALIGNMENT_HANDLING environment variable
  std::optional<UnalignedBufferHandling> unaligned_buffer_handling;
};
606
+
607
/// \brief Calculate the output schema of a declaration
///
/// This does not actually execute the plan. This operation may fail if the
/// declaration represents an invalid plan (e.g. a project node with multiple inputs)
///
/// \param declaration A declaration describing an execution plan
/// \param function_registry The function registry to use for function execution. If null
///                          then the default function registry will be used.
///
/// \return the schema that batches would have after going through the execution plan
ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> DeclarationToSchema(
    const Declaration& declaration, FunctionRegistry* function_registry = NULLPTR);

/// \brief Create a string representation of a plan
///
/// This representation is for debug purposes only.
///
/// Conversion to a string may fail if the declaration represents an
/// invalid plan.
///
/// Use Substrait for complete serialization of plans
///
/// \param declaration A declaration describing an execution plan
/// \param function_registry The function registry to use for function execution. If null
///                          then the default function registry will be used.
///
/// \return a string representation of the plan suitable for debugging output
ARROW_ACERO_EXPORT Result<std::string> DeclarationToString(
    const Declaration& declaration, FunctionRegistry* function_registry = NULLPTR);

/// \brief Utility method to run a declaration and collect the results into a table
///
/// \param declaration A declaration describing the plan to run
/// \param use_threads If `use_threads` is false then all CPU work will be done on the
///                    calling thread. I/O tasks will still happen on the I/O executor
///                    and may be multi-threaded (but should not use significant CPU
///                    resources).
/// \param memory_pool The memory pool to use for allocations made while running the plan.
/// \param function_registry The function registry to use for function execution. If null
///                          then the default function registry will be used.
///
/// This method will add a sink node to the declaration to collect results into a
/// table. It will then create an ExecPlan from the declaration, start the exec plan,
/// block until the plan has finished, and return the created table.
ARROW_ACERO_EXPORT Result<std::shared_ptr<Table>> DeclarationToTable(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToTable accepting a QueryOptions struct
ARROW_ACERO_EXPORT Result<std::shared_ptr<Table>> DeclarationToTable(
    Declaration declaration, QueryOptions query_options);

/// \brief Asynchronous version of \see DeclarationToTable
///
/// \param declaration A declaration describing the plan to run
/// \param use_threads The behavior of use_threads is slightly different than the
///                    synchronous version since we cannot run synchronously on the
///                    calling thread. Instead, if use_threads=false then a new thread
///                    pool will be created with a single thread and this will be used for
///                    all compute work.
/// \param memory_pool The memory pool to use for allocations made while running the plan.
/// \param function_registry The function registry to use for function execution. If null
///                          then the default function registry will be used.
ARROW_ACERO_EXPORT Future<std::shared_ptr<Table>> DeclarationToTableAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToTableAsync accepting a custom exec context
///
/// The executor must be specified (cannot be null) and must be kept alive until the
/// returned future finishes.
ARROW_ACERO_EXPORT Future<std::shared_ptr<Table>> DeclarationToTableAsync(
    Declaration declaration, ExecContext custom_exec_context);
681
+
682
/// \brief a collection of exec batches with a common schema
struct BatchesWithCommonSchema {
  /// the batches, all of which conform to `schema`
  std::vector<ExecBatch> batches;
  /// the schema shared by every batch in `batches`
  std::shared_ptr<Schema> schema;
};

/// \brief Utility method to run a declaration and collect the results into ExecBatch
/// vector
///
/// \see DeclarationToTable for details on threading & execution
ARROW_ACERO_EXPORT Result<BatchesWithCommonSchema> DeclarationToExecBatches(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToExecBatches accepting a QueryOptions struct
ARROW_ACERO_EXPORT Result<BatchesWithCommonSchema> DeclarationToExecBatches(
    Declaration declaration, QueryOptions query_options);

/// \brief Asynchronous version of \see DeclarationToExecBatches
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<BatchesWithCommonSchema> DeclarationToExecBatchesAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToExecBatchesAsync accepting a custom exec context
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<BatchesWithCommonSchema> DeclarationToExecBatchesAsync(
    Declaration declaration, ExecContext custom_exec_context);

/// \brief Utility method to run a declaration and collect the results into a vector
///
/// \see DeclarationToTable for details on threading & execution
ARROW_ACERO_EXPORT Result<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatches(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToBatches accepting a QueryOptions struct
ARROW_ACERO_EXPORT Result<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatches(
    Declaration declaration, QueryOptions query_options);

/// \brief Asynchronous version of \see DeclarationToBatches
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<std::vector<std::shared_ptr<RecordBatch>>>
DeclarationToBatchesAsync(Declaration declaration, bool use_threads = true,
                          MemoryPool* memory_pool = default_memory_pool(),
                          FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToBatchesAsync accepting a custom exec context
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<std::vector<std::shared_ptr<RecordBatch>>>
DeclarationToBatchesAsync(Declaration declaration, ExecContext exec_context);
738
+
739
/// \brief Utility method to run a declaration and return results as a RecordBatchReader
///
/// If an exec context is not provided then a default exec context will be used based
/// on the value of `use_threads`. If `use_threads` is false then the CPU executor will
/// be a serial executor and all CPU work will be done on the calling thread. I/O tasks
/// will still happen on the I/O executor and may be multi-threaded.
///
/// If `use_threads` is false then all CPU work will happen during the calls to
/// RecordBatchReader::Next and no CPU work will happen in the background. If
/// `use_threads` is true then CPU work will happen on the CPU thread pool and tasks may
/// run in between calls to RecordBatchReader::Next. If the returned reader is not
/// consumed quickly enough then the plan will eventually pause as the backpressure queue
/// fills up.
///
/// If a custom exec context is provided then the value of `use_threads` will be ignored.
///
/// The returned RecordBatchReader can be closed early to cancel the computation of record
/// batches. In this case, only errors encountered by the computation may be reported. In
/// particular, no cancellation error may be reported.
ARROW_ACERO_EXPORT Result<std::unique_ptr<RecordBatchReader>> DeclarationToReader(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToReader accepting a QueryOptions struct
ARROW_ACERO_EXPORT Result<std::unique_ptr<RecordBatchReader>> DeclarationToReader(
    Declaration declaration, QueryOptions query_options);

/// \brief Utility method to run a declaration and ignore results
///
/// This can be useful when the data are consumed as part of the plan itself, for
/// example, when the plan ends with a write node.
///
/// \see DeclarationToTable for details on threading & execution
ARROW_ACERO_EXPORT Status
DeclarationToStatus(Declaration declaration, bool use_threads = true,
                    MemoryPool* memory_pool = default_memory_pool(),
                    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToStatus accepting a QueryOptions struct
ARROW_ACERO_EXPORT Status DeclarationToStatus(Declaration declaration,
                                              QueryOptions query_options);

/// \brief Asynchronous version of \see DeclarationToStatus
///
/// This can be useful when the data are consumed as part of the plan itself, for
/// example, when the plan ends with a write node.
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<> DeclarationToStatusAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToStatusAsync accepting a custom exec context
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<> DeclarationToStatusAsync(Declaration declaration,
                                                     ExecContext exec_context);

/// @}

/// \brief Wrap an ExecBatch generator in a RecordBatchReader.
///
/// The RecordBatchReader does not impose any ordering on emitted batches.
ARROW_ACERO_EXPORT
std::shared_ptr<RecordBatchReader> MakeGeneratorReader(
    std::shared_ptr<Schema>, std::function<Future<std::optional<ExecBatch>>()>,
    MemoryPool*);

/// Default value for the `max_q` parameter of MakeReaderGenerator
constexpr int kDefaultBackgroundMaxQ = 32;
/// Default value for the `q_restart` parameter of MakeReaderGenerator
constexpr int kDefaultBackgroundQRestart = 16;

/// \brief Make a generator of RecordBatchReaders
///
/// Useful as a source node for an Exec plan
ARROW_ACERO_EXPORT
Result<std::function<Future<std::optional<ExecBatch>>()>> MakeReaderGenerator(
    std::shared_ptr<RecordBatchReader> reader, arrow::internal::Executor* io_executor,
    int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart);
817
+
818
+ } // namespace acero
819
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cassert>
21
+ #include <vector>
22
+
23
+ #include "arrow/acero/options.h"
24
+ #include "arrow/acero/schema_util.h"
25
+ #include "arrow/result.h"
26
+ #include "arrow/status.h"
27
+
28
+ namespace arrow {
29
+
30
+ using compute::ExecContext;
31
+
32
+ namespace acero {
33
+
34
/// Schema bookkeeping for a hash join: holds the column projection maps for
/// both inputs (index 0 = left, index 1 = right) and helpers to validate the
/// inputs and build the output/filter schemas.
class ARROW_ACERO_EXPORT HashJoinSchema {
 public:
  /// \brief Initialize the projection maps for both inputs
  ///
  /// The set of output columns is derived from the inputs and `join_type`.
  Status Init(JoinType join_type, const Schema& left_schema,
              const std::vector<FieldRef>& left_keys, const Schema& right_schema,
              const std::vector<FieldRef>& right_keys, const Expression& filter,
              const std::string& left_field_name_prefix,
              const std::string& right_field_name_prefix);

  /// \brief Initialize the projection maps with explicit output column selections
  Status Init(JoinType join_type, const Schema& left_schema,
              const std::vector<FieldRef>& left_keys,
              const std::vector<FieldRef>& left_output, const Schema& right_schema,
              const std::vector<FieldRef>& right_keys,
              const std::vector<FieldRef>& right_output, const Expression& filter,
              const std::string& left_field_name_prefix,
              const std::string& right_field_name_prefix);

  /// \brief Check that the key/output field refs and join type are consistent
  /// with the given input schemas
  static Status ValidateSchemas(JoinType join_type, const Schema& left_schema,
                                const std::vector<FieldRef>& left_keys,
                                const std::vector<FieldRef>& left_output,
                                const Schema& right_schema,
                                const std::vector<FieldRef>& right_keys,
                                const std::vector<FieldRef>& right_output,
                                const std::string& left_field_name_prefix,
                                const std::string& right_field_name_prefix);

  /// \brief Whether any column involved in the join is dictionary-encoded
  /// (see the implementation for the exact set of columns inspected)
  bool HasDictionaries() const;

  /// \brief Whether any column involved in the join uses a large binary type
  /// (see the implementation for the exact set of columns inspected)
  bool HasLargeBinary() const;

  /// \brief Bind `filter` for evaluation during the join
  Result<Expression> BindFilter(Expression filter, const Schema& left_schema,
                                const Schema& right_schema, ExecContext* exec_context);

  /// \brief Build the output schema, using the given per-side suffixes to
  /// disambiguate field names
  std::shared_ptr<Schema> MakeOutputSchema(const std::string& left_field_name_suffix,
                                           const std::string& right_field_name_suffix);

  /// True if the left input has no payload columns
  bool LeftPayloadIsEmpty() const { return PayloadIsEmpty(0); }

  /// True if the right input has no payload columns
  bool RightPayloadIsEmpty() const { return PayloadIsEmpty(1); }

  /// Sentinel value used by the projection maps for a field with no mapping
  /// (forwarded from SchemaProjectionMaps)
  static int kMissingField() {
    return SchemaProjectionMaps<HashJoinProjection>::kMissingField;
  }

  /// Projection maps for the two inputs: proj_maps[0] = left, proj_maps[1] = right
  SchemaProjectionMaps<HashJoinProjection> proj_maps[2];

 private:
  static bool IsTypeSupported(const DataType& type);

  /// Collect, per side, the columns that `filter` references
  Status CollectFilterColumns(std::vector<FieldRef>& left_filter,
                              std::vector<FieldRef>& right_filter,
                              const Expression& filter, const Schema& left_schema,
                              const Schema& right_schema);

  /// Rewrite field references in `filter` to index into the combined filter
  /// schema (right-side fields offset by `right_filter_offset`)
  Expression RewriteFilterToUseFilterSchema(int right_filter_offset,
                                            const SchemaProjectionMap& left_to_filter,
                                            const SchemaProjectionMap& right_to_filter,
                                            const Expression& filter);

  // True if the given side (0 = left, 1 = right) has no PAYLOAD columns in its
  // projection map
  bool PayloadIsEmpty(int side) const {
    assert(side == 0 || side == 1);
    return proj_maps[side].num_cols(HashJoinProjection::PAYLOAD) == 0;
  }

  static Result<std::vector<FieldRef>> ComputePayload(const Schema& schema,
                                                      const std::vector<FieldRef>& output,
                                                      const std::vector<FieldRef>& filter,
                                                      const std::vector<FieldRef>& key);
};
101
+
102
+ } // namespace acero
103
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Often-used headers, for precompiling.
19
+ // If updating this header, please make sure you check compilation speed
20
+ // before checking in. Adding headers which are not used extremely often
21
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
22
+
23
+ #include "arrow/pch.h"
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cassert>
21
+ #include <cstdint>
22
+ #include <memory>
23
+ #include <string>
24
+ #include <vector>
25
+
26
+ #include "arrow/type.h" // for DataType, FieldRef, Field and Schema
27
+
28
+ namespace arrow {
29
+
30
+ using internal::checked_cast;
31
+
32
+ namespace acero {
33
+
34
// Identifiers for all different row schemas that are used in a join.
// Values are small contiguous ints so they can be used to index arrays
// such as HashJoinSchema::proj_maps.
enum class HashJoinProjection : int {
  INPUT = 0,    // full input schema of one join side
  KEY = 1,      // columns compared for join-key equality
  PAYLOAD = 2,  // non-key columns carried through the join
  FILTER = 3,   // columns referenced by the residual filter expression
  OUTPUT = 4    // columns emitted by the join node
};
43
+
44
// Non-owning view that translates field indices of a "source" projection into
// field indices of a "target" projection, going through the base (full)
// schema. The pointed-to arrays are owned by a SchemaProjectionMaps instance;
// this view must not outlive it.
struct SchemaProjectionMap {
  // Sentinel meaning "no corresponding field in the target projection".
  static constexpr int kMissingField = -1;
  // Number of columns in the source projection.
  int num_cols;
  // source field index -> base schema field index
  const int* source_to_base;
  // base schema field index -> target field index (kMissingField if absent)
  const int* base_to_target;
  // Map source field index `i` to the target projection's field index.
  // Asserts that the field actually exists in the target projection.
  inline int get(int i) const {
    assert(i >= 0 && i < num_cols);
    assert(source_to_base[i] != kMissingField);
    return base_to_target[source_to_base[i]];
  }
};
55
+
56
+ /// Helper class for managing different projections of the same row schema.
57
+ /// Used to efficiently map any field in one projection to a corresponding field in
58
+ /// another projection.
59
+ /// Materialized mappings are generated lazily at the time of the first access.
60
+ /// Thread-safe apart from initialization.
61
+ template <typename ProjectionIdEnum>
62
+ class SchemaProjectionMaps {
63
+ public:
64
+ static constexpr int kMissingField = -1;
65
+
66
+ Status Init(ProjectionIdEnum full_schema_handle, const Schema& schema,
67
+ const std::vector<ProjectionIdEnum>& projection_handles,
68
+ const std::vector<const std::vector<FieldRef>*>& projections) {
69
+ assert(projection_handles.size() == projections.size());
70
+ ARROW_RETURN_NOT_OK(RegisterSchema(full_schema_handle, schema));
71
+ for (size_t i = 0; i < projections.size(); ++i) {
72
+ ARROW_RETURN_NOT_OK(
73
+ RegisterProjectedSchema(projection_handles[i], *(projections[i]), schema));
74
+ }
75
+ RegisterEnd();
76
+ return Status::OK();
77
+ }
78
+
79
+ int num_cols(ProjectionIdEnum schema_handle) const {
80
+ int id = schema_id(schema_handle);
81
+ return static_cast<int>(schemas_[id].second.data_types.size());
82
+ }
83
+
84
+ bool is_empty(ProjectionIdEnum schema_handle) const {
85
+ return num_cols(schema_handle) == 0;
86
+ }
87
+
88
+ const std::string& field_name(ProjectionIdEnum schema_handle, int field_id) const {
89
+ int id = schema_id(schema_handle);
90
+ return schemas_[id].second.field_names[field_id];
91
+ }
92
+
93
+ const std::shared_ptr<DataType>& data_type(ProjectionIdEnum schema_handle,
94
+ int field_id) const {
95
+ int id = schema_id(schema_handle);
96
+ return schemas_[id].second.data_types[field_id];
97
+ }
98
+
99
+ const std::vector<std::shared_ptr<DataType>>& data_types(
100
+ ProjectionIdEnum schema_handle) const {
101
+ int id = schema_id(schema_handle);
102
+ return schemas_[id].second.data_types;
103
+ }
104
+
105
+ SchemaProjectionMap map(ProjectionIdEnum from, ProjectionIdEnum to) const {
106
+ int id_from = schema_id(from);
107
+ int id_to = schema_id(to);
108
+ SchemaProjectionMap result;
109
+ result.num_cols = num_cols(from);
110
+ result.source_to_base = mappings_[id_from].data();
111
+ result.base_to_target = inverse_mappings_[id_to].data();
112
+ return result;
113
+ }
114
+
115
+ protected:
116
+ struct FieldInfos {
117
+ std::vector<int> field_paths;
118
+ std::vector<std::string> field_names;
119
+ std::vector<std::shared_ptr<DataType>> data_types;
120
+ };
121
+
122
+ Status RegisterSchema(ProjectionIdEnum handle, const Schema& schema) {
123
+ FieldInfos out_fields;
124
+ const FieldVector& in_fields = schema.fields();
125
+ out_fields.field_paths.resize(in_fields.size());
126
+ out_fields.field_names.resize(in_fields.size());
127
+ out_fields.data_types.resize(in_fields.size());
128
+ for (size_t i = 0; i < in_fields.size(); ++i) {
129
+ const std::string& name = in_fields[i]->name();
130
+ const std::shared_ptr<DataType>& type = in_fields[i]->type();
131
+ out_fields.field_paths[i] = static_cast<int>(i);
132
+ out_fields.field_names[i] = name;
133
+ out_fields.data_types[i] = type;
134
+ }
135
+ schemas_.push_back(std::make_pair(handle, out_fields));
136
+ return Status::OK();
137
+ }
138
+
139
+ Status RegisterProjectedSchema(ProjectionIdEnum handle,
140
+ const std::vector<FieldRef>& selected_fields,
141
+ const Schema& full_schema) {
142
+ FieldInfos out_fields;
143
+ const FieldVector& in_fields = full_schema.fields();
144
+ out_fields.field_paths.resize(selected_fields.size());
145
+ out_fields.field_names.resize(selected_fields.size());
146
+ out_fields.data_types.resize(selected_fields.size());
147
+ for (size_t i = 0; i < selected_fields.size(); ++i) {
148
+ // All fields must be found in schema without ambiguity
149
+ ARROW_ASSIGN_OR_RAISE(auto match, selected_fields[i].FindOne(full_schema));
150
+ const std::string& name = in_fields[match[0]]->name();
151
+ const std::shared_ptr<DataType>& type = in_fields[match[0]]->type();
152
+ out_fields.field_paths[i] = match[0];
153
+ out_fields.field_names[i] = name;
154
+ out_fields.data_types[i] = type;
155
+ }
156
+ schemas_.push_back(std::make_pair(handle, out_fields));
157
+ return Status::OK();
158
+ }
159
+
160
+ void RegisterEnd() {
161
+ size_t size = schemas_.size();
162
+ mappings_.resize(size);
163
+ inverse_mappings_.resize(size);
164
+ int id_base = 0;
165
+ for (size_t i = 0; i < size; ++i) {
166
+ GenerateMapForProjection(static_cast<int>(i), id_base);
167
+ }
168
+ }
169
+
170
+ int schema_id(ProjectionIdEnum schema_handle) const {
171
+ for (size_t i = 0; i < schemas_.size(); ++i) {
172
+ if (schemas_[i].first == schema_handle) {
173
+ return static_cast<int>(i);
174
+ }
175
+ }
176
+ // We should never get here
177
+ assert(false);
178
+ return -1;
179
+ }
180
+
181
+ void GenerateMapForProjection(int id_proj, int id_base) {
182
+ int num_cols_proj = static_cast<int>(schemas_[id_proj].second.data_types.size());
183
+ int num_cols_base = static_cast<int>(schemas_[id_base].second.data_types.size());
184
+
185
+ std::vector<int>& mapping = mappings_[id_proj];
186
+ std::vector<int>& inverse_mapping = inverse_mappings_[id_proj];
187
+ mapping.resize(num_cols_proj);
188
+ inverse_mapping.resize(num_cols_base);
189
+
190
+ if (id_proj == id_base) {
191
+ for (int i = 0; i < num_cols_base; ++i) {
192
+ mapping[i] = inverse_mapping[i] = i;
193
+ }
194
+ } else {
195
+ const FieldInfos& fields_proj = schemas_[id_proj].second;
196
+ const FieldInfos& fields_base = schemas_[id_base].second;
197
+ for (int i = 0; i < num_cols_base; ++i) {
198
+ inverse_mapping[i] = SchemaProjectionMap::kMissingField;
199
+ }
200
+ for (int i = 0; i < num_cols_proj; ++i) {
201
+ int field_id = SchemaProjectionMap::kMissingField;
202
+ for (int j = 0; j < num_cols_base; ++j) {
203
+ if (fields_proj.field_paths[i] == fields_base.field_paths[j]) {
204
+ field_id = j;
205
+ // If there are multiple matches for the same input field,
206
+ // it will be mapped to the first match.
207
+ break;
208
+ }
209
+ }
210
+ assert(field_id != SchemaProjectionMap::kMissingField);
211
+ mapping[i] = field_id;
212
+ inverse_mapping[field_id] = i;
213
+ }
214
+ }
215
+ }
216
+
217
+ // vector used as a mapping from ProjectionIdEnum to fields
218
+ std::vector<std::pair<ProjectionIdEnum, FieldInfos>> schemas_;
219
+ std::vector<std::vector<int>> mappings_;
220
+ std::vector<std::vector<int>> inverse_mappings_;
221
+ };
222
+
223
// Convenience alias: projection maps keyed by the hash-join schema flavors.
using HashJoinProjectionMaps = SchemaProjectionMaps<HashJoinProjection>;
224
+
225
+ } // namespace acero
226
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/record_batch.h"
21
+ #include "arrow/type_traits.h"
22
+
23
namespace arrow::acero {

// Normalize the value to unsigned 64 bits while preserving the ordering of
// values. Declaration only — instantiated/defined in the corresponding .cc.
template <typename T, enable_if_t<std::is_integral<T>::value, bool> = true>
uint64_t NormalizeTime(T t);

// Read the value at (row, col) of `batch`, interpreting the column according
// to `time_type`, and return it normalized to uint64_t.
// NOTE(review): implementation not visible here — presumably funnels through
// NormalizeTime(); confirm against the .cc file.
uint64_t GetTime(const RecordBatch* batch, Type::type time_type, int col, uint64_t row);

}  // namespace arrow::acero
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
#if defined(_WIN32) || defined(__CYGWIN__)
#  if defined(_MSC_VER)
#    pragma warning(push)
// C4251: "class member needs dll-interface". Benign for the STL/template
// members exported here, so it is silenced until the matching pop below.
#    pragma warning(disable : 4251)
#  else
#    pragma GCC diagnostic ignored "-Wattributes"
#  endif

// Pick dllexport when building the Acero shared library, dllimport when
// consuming it, and nothing for a static build.
#  ifdef ARROW_ACERO_STATIC
#    define ARROW_ACERO_EXPORT
#  elif defined(ARROW_ACERO_EXPORTING)
#    define ARROW_ACERO_EXPORT __declspec(dllexport)
#  else
#    define ARROW_ACERO_EXPORT __declspec(dllimport)
#  endif

#  define ARROW_ACERO_NO_EXPORT
#else  // Not Windows
// ELF/Mach-O targets: symbols are exported via default visibility and hidden
// via the "hidden" attribute.
#  ifndef ARROW_ACERO_EXPORT
#    define ARROW_ACERO_EXPORT __attribute__((visibility("default")))
#  endif
#  ifndef ARROW_ACERO_NO_EXPORT
#    define ARROW_ACERO_NO_EXPORT __attribute__((visibility("hidden")))
#  endif
#endif  // Not-Windows

#if defined(_MSC_VER)
#  pragma warning(pop)
#endif
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/adapter.h ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <vector>
23
+
24
+ #include "arrow/adapters/orc/options.h"
25
+ #include "arrow/io/interfaces.h"
26
+ #include "arrow/memory_pool.h"
27
+ #include "arrow/record_batch.h"
28
+ #include "arrow/status.h"
29
+ #include "arrow/type.h"
30
+ #include "arrow/type_fwd.h"
31
+ #include "arrow/util/macros.h"
32
+ #include "arrow/util/visibility.h"
33
+
34
+ namespace arrow {
35
+ namespace adapters {
36
+ namespace orc {
37
+
38
/// \brief Information about an ORC stripe, as reported by
/// ORCFileReader::GetStripeInformation().
struct StripeInformation {
  /// \brief Offset of the stripe from the start of the file, in bytes
  int64_t offset;
  /// \brief Length of the stripe, in bytes
  int64_t length;
  /// \brief Number of rows in the stripe
  int64_t num_rows;
  /// \brief Index of the first row of the stripe
  /// (presumably 0-based relative to the whole file — confirm in the reader
  /// implementation)
  int64_t first_row_id;
};
49
+
50
/// \class ORCFileReader
/// \brief Read an Arrow Table or RecordBatch from an ORC file.
class ARROW_EXPORT ORCFileReader {
 public:
  ~ORCFileReader();

  /// \brief Creates a new ORC reader
  ///
  /// \param[in] file the data source
  /// \param[in] pool a MemoryPool to use for buffer allocations
  /// \return the returned reader object
  static Result<std::unique_ptr<ORCFileReader>> Open(
      const std::shared_ptr<io::RandomAccessFile>& file, MemoryPool* pool);

  /// \brief Return the schema read from the ORC file
  ///
  /// \return the returned Schema object
  Result<std::shared_ptr<Schema>> ReadSchema();

  /// \brief Read the file as a Table
  ///
  /// The table will be composed of one record batch per stripe.
  ///
  /// \return the returned Table
  Result<std::shared_ptr<Table>> Read();

  /// \brief Read the file as a Table
  ///
  /// The table will be composed of one record batch per stripe.
  ///
  /// \param[in] schema the Table schema
  /// \return the returned Table
  Result<std::shared_ptr<Table>> Read(const std::shared_ptr<Schema>& schema);

  /// \brief Read the file as a Table
  ///
  /// The table will be composed of one record batch per stripe.
  ///
  /// \param[in] include_indices the selected field indices to read
  /// \return the returned Table
  Result<std::shared_ptr<Table>> Read(const std::vector<int>& include_indices);

  /// \brief Read the file as a Table
  ///
  /// The table will be composed of one record batch per stripe.
  ///
  /// \param[in] include_names the selected field names to read
  /// \return the returned Table
  Result<std::shared_ptr<Table>> Read(const std::vector<std::string>& include_names);

  /// \brief Read the file as a Table
  ///
  /// The table will be composed of one record batch per stripe.
  ///
  /// \param[in] schema the Table schema
  /// \param[in] include_indices the selected field indices to read
  /// \return the returned Table
  Result<std::shared_ptr<Table>> Read(const std::shared_ptr<Schema>& schema,
                                      const std::vector<int>& include_indices);

  /// \brief Read a single stripe as a RecordBatch
  ///
  /// \param[in] stripe the stripe index
  /// \return the returned RecordBatch
  Result<std::shared_ptr<RecordBatch>> ReadStripe(int64_t stripe);

  /// \brief Read a single stripe as a RecordBatch
  ///
  /// \param[in] stripe the stripe index
  /// \param[in] include_indices the selected field indices to read
  /// \return the returned RecordBatch
  Result<std::shared_ptr<RecordBatch>> ReadStripe(
      int64_t stripe, const std::vector<int>& include_indices);

  /// \brief Read a single stripe as a RecordBatch
  ///
  /// \param[in] stripe the stripe index
  /// \param[in] include_names the selected field names to read
  /// \return the returned RecordBatch
  Result<std::shared_ptr<RecordBatch>> ReadStripe(
      int64_t stripe, const std::vector<std::string>& include_names);

  /// \brief Seek to the designated row. Invoking NextStripeReader() after a
  /// seek will return a stripe reader starting from the designated row.
  ///
  /// \param[in] row_number the row number to seek to
  Status Seek(int64_t row_number);

  /// \brief Get a stripe level record batch iterator.
  ///
  /// Each record batch will have up to `batch_size` rows.
  /// NextStripeReader serves as a fine-grained alternative to ReadStripe
  /// which may cause OOM issues by loading the whole stripe into memory.
  ///
  /// Note this will only read rows for the current stripe, not the entire
  /// file.
  ///
  /// \param[in] batch_size the maximum number of rows in each record batch
  /// \return the returned stripe reader
  Result<std::shared_ptr<RecordBatchReader>> NextStripeReader(int64_t batch_size);

  /// \brief Get a stripe level record batch iterator.
  ///
  /// Each record batch will have up to `batch_size` rows.
  /// NextStripeReader serves as a fine-grained alternative to ReadStripe
  /// which may cause OOM issues by loading the whole stripe into memory.
  ///
  /// Note this will only read rows for the current stripe, not the entire
  /// file.
  ///
  /// \param[in] batch_size the maximum number of rows in each record batch
  /// \param[in] include_indices the selected field indices to read
  /// \return the stripe reader
  Result<std::shared_ptr<RecordBatchReader>> NextStripeReader(
      int64_t batch_size, const std::vector<int>& include_indices);

  /// \brief Get a record batch iterator for the entire file.
  ///
  /// Each record batch will have up to `batch_size` rows.
  ///
  /// \param[in] batch_size the maximum number of rows in each record batch
  /// \param[in] include_names the selected field names to read, if not empty
  /// (otherwise all fields are read)
  /// \return the record batch iterator
  Result<std::shared_ptr<RecordBatchReader>> GetRecordBatchReader(
      int64_t batch_size, const std::vector<std::string>& include_names);

  /// \brief The number of stripes in the file
  int64_t NumberOfStripes();

  /// \brief The number of rows in the file
  int64_t NumberOfRows();

  /// \brief Get the StripeInformation for the given stripe.
  ///
  /// \param[in] stripe the stripe index
  StripeInformation GetStripeInformation(int64_t stripe);

  /// \brief Get the format version of the file.
  /// Currently known values are 0.11 and 0.12.
  ///
  /// \return The FileVersion of the ORC file.
  FileVersion GetFileVersion();

  /// \brief Get the software instance and version that wrote this file.
  ///
  /// \return a user-facing string that specifies the software version
  std::string GetSoftwareVersion();

  /// \brief Get the compression kind of the file.
  ///
  /// \return The kind of compression in the ORC file.
  Result<Compression::type> GetCompression();

  /// \brief Get the buffer size for the compression.
  ///
  /// \return Number of bytes to buffer for the compression codec.
  int64_t GetCompressionSize();

  /// \brief Get the number of rows per an entry in the row index.
  /// \return the number of rows per an entry in the row index or 0 if there
  /// is no row index.
  int64_t GetRowIndexStride();

  /// \brief Get ID of writer that generated the file.
  ///
  /// \return UNKNOWN_WRITER if the writer ID is undefined
  WriterId GetWriterId();

  /// \brief Get the writer id value when getWriterId() returns an unknown writer.
  ///
  /// \return the integer value of the writer ID.
  int32_t GetWriterIdValue();

  /// \brief Get the version of the writer.
  ///
  /// \return the version of the writer.
  WriterVersion GetWriterVersion();

  /// \brief Get the number of stripe statistics in the file.
  ///
  /// \return the number of stripe statistics
  int64_t GetNumberOfStripeStatistics();

  /// \brief Get the length of the data stripes in the file.
  ///
  /// \return return the number of bytes in stripes
  int64_t GetContentLength();

  /// \brief Get the length of the file stripe statistics.
  ///
  /// \return the number of compressed bytes in the file stripe statistics
  int64_t GetStripeStatisticsLength();

  /// \brief Get the length of the file footer.
  ///
  /// \return the number of compressed bytes in the file footer
  int64_t GetFileFooterLength();

  /// \brief Get the length of the file postscript.
  ///
  /// \return the number of bytes in the file postscript
  int64_t GetFilePostscriptLength();

  /// \brief Get the total length of the file.
  ///
  /// \return the number of bytes in the file
  int64_t GetFileLength();

  /// \brief Get the serialized file tail.
  /// Useful if another reader of the same file wants to avoid re-reading
  /// the file tail. See ReadOptions.SetSerializedFileTail().
  ///
  /// \return a string of bytes with the file tail
  std::string GetSerializedFileTail();

  /// \brief Return the metadata read from the ORC file
  ///
  /// \return A KeyValueMetadata object containing the ORC metadata
  Result<std::shared_ptr<const KeyValueMetadata>> ReadMetadata();

 private:
  // PIMPL idiom: keeps the ORC library types out of this public header.
  class Impl;
  std::unique_ptr<Impl> impl_;
  // Construction goes through the Open() factory only.
  ORCFileReader();
};
275
+
276
/// \class ORCFileWriter
/// \brief Write an Arrow Table or RecordBatch to an ORC file.
class ARROW_EXPORT ORCFileWriter {
 public:
  ~ORCFileWriter();
  /// \brief Creates a new ORC writer.
  ///
  /// \param[in] output_stream a pointer to the io::OutputStream to write into
  /// \param[in] write_options the ORC writer options for Arrow
  /// \return the returned writer object
  static Result<std::unique_ptr<ORCFileWriter>> Open(
      io::OutputStream* output_stream,
      const WriteOptions& write_options = WriteOptions());

  /// \brief Write a table. This can be called multiple times.
  ///
  /// Tables passed in subsequent calls must match the schema of the table that was
  /// written first.
  ///
  /// \param[in] table the Arrow table from which data is extracted.
  /// \return Status
  Status Write(const Table& table);

  /// \brief Write a RecordBatch. This can be called multiple times.
  ///
  /// RecordBatches passed in subsequent calls must match the schema of the
  /// RecordBatch that was written first.
  ///
  /// \param[in] record_batch the Arrow RecordBatch from which data is extracted.
  /// \return Status
  Status Write(const RecordBatch& record_batch);

  /// \brief Close an ORC writer (orc::Writer)
  ///
  /// \return Status
  Status Close();

 private:
  // PIMPL idiom: keeps the ORC library types out of this public header.
  class Impl;
  std::unique_ptr<Impl> impl_;

 private:
  // Construction goes through the Open() factory only.
  ORCFileWriter();
};
320
+
321
+ } // namespace orc
322
+ } // namespace adapters
323
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/options.h ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <vector>
21
+
22
+ #include "arrow/io/interfaces.h"
23
+ #include "arrow/status.h"
24
+ #include "arrow/util/type_fwd.h"
25
+ #include "arrow/util/visibility.h"
26
+
27
+ namespace arrow {
28
+
29
+ namespace adapters {
30
+
31
+ namespace orc {
32
+
33
/// Identifies which software implementation wrote an ORC file
/// (see ORCFileReader::GetWriterId()).
enum class WriterId : int32_t {
  kOrcJava = 0,
  kOrcCpp = 1,
  kPresto = 2,
  kScritchleyGo = 3,
  kTrino = 4,
  // Sentinel for writer ids this library does not recognize; the raw value is
  // still available via ORCFileReader::GetWriterIdValue().
  kUnknown = INT32_MAX
};

/// Version of the writer that produced an ORC file
/// (see ORCFileReader::GetWriterVersion()).
/// NOTE(review): the kHive*/kOrc* names presumably track the corresponding
/// HIVE/ORC issue numbers — confirm against the ORC specification.
enum class WriterVersion : int32_t {
  kOriginal = 0,
  kHive8732 = 1,
  kHive4243 = 2,
  kHive12055 = 3,
  kHive13083 = 4,
  kOrc101 = 5,
  kOrc135 = 6,
  kOrc517 = 7,
  kOrc203 = 8,
  kOrc14 = 9,
  kMax = INT32_MAX
};

/// Trade-off knob for the compression codec: favor speed or compression ratio.
enum class CompressionStrategy : int32_t { kSpeed = 0, kCompression };
57
+
58
/// \brief ORC file format version as a (major, minor) pair, e.g. 0.11 or 0.12.
class ARROW_EXPORT FileVersion {
 private:
  int32_t major_version_;
  int32_t minor_version_;

 public:
  /// Canonical constant for format version 0.11.
  static const FileVersion& v_0_11();
  /// Canonical constant for format version 0.12.
  static const FileVersion& v_0_12();

  FileVersion(int32_t major, int32_t minor)
      : major_version_(major), minor_version_(minor) {}

  /// Get major version
  int32_t major_version() const { return this->major_version_; }

  /// Get minor version
  int32_t minor_version() const { return this->minor_version_; }

  /// Two versions compare equal iff both components are equal.
  bool operator==(const FileVersion& right) const {
    return this->major_version() == right.major_version() &&
           this->minor_version() == right.minor_version();
  }

  bool operator!=(const FileVersion& right) const { return !(*this == right); }

  /// String representation of this version.
  std::string ToString() const;
};
89
+
90
/// Options for the ORC Writer (see ORCFileWriter::Open()).
/// All fields have sensible defaults; zero-initialize nothing by hand.
struct ARROW_EXPORT WriteOptions {
  /// Number of rows the ORC writer writes at a time, default 1024
  int64_t batch_size = 1024;
  /// Which ORC file version to use, default FileVersion(0, 12)
  FileVersion file_version = FileVersion(0, 12);
  /// Size of each ORC stripe in bytes, default 64 MiB
  int64_t stripe_size = 64 * 1024 * 1024;
  /// The compression codec of the ORC file, there is no compression by default
  Compression::type compression = Compression::UNCOMPRESSED;
  /// The size of each compression block in bytes, default 64 KiB
  int64_t compression_block_size = 64 * 1024;
  /// The compression strategy, i.e. speed vs size reduction, default
  /// CompressionStrategy::kSpeed
  CompressionStrategy compression_strategy = CompressionStrategy::kSpeed;
  /// The number of rows per an entry in the row index, default 10000
  int64_t row_index_stride = 10000;
  /// The padding tolerance, default 0.0
  double padding_tolerance = 0.0;
  /// The dictionary key size threshold: 0 to disable dictionary encoding,
  /// 1 to always enable dictionary encoding, default 0.0
  double dictionary_key_size_threshold = 0.0;
  /// The array of columns that use the bloom filter, default empty
  std::vector<int64_t> bloom_filter_columns;
  /// The upper limit of the false-positive rate of the bloom filter, default 0.05
  double bloom_filter_fpp = 0.05;
};
117
+
118
+ } // namespace orc
119
+ } // namespace adapters
120
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/tensorflow/convert.h ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+
22
+ #include "tensorflow/core/framework/op.h"
23
+
24
+ #include "arrow/type.h"
25
+
26
+ // These utilities are supposed to be included in TensorFlow operators
27
+ // that need to be compiled separately from Arrow because of ABI issues.
28
+ // They therefore need to be header-only.
29
+
30
+ namespace arrow {
31
+
32
+ namespace adapters {
33
+
34
+ namespace tensorflow {
35
+
36
+ Status GetArrowType(::tensorflow::DataType dtype, std::shared_ptr<DataType>* out) {
37
+ switch (dtype) {
38
+ case ::tensorflow::DT_BOOL:
39
+ *out = arrow::boolean();
40
+ break;
41
+ case ::tensorflow::DT_FLOAT:
42
+ *out = arrow::float32();
43
+ break;
44
+ case ::tensorflow::DT_DOUBLE:
45
+ *out = arrow::float64();
46
+ break;
47
+ case ::tensorflow::DT_HALF:
48
+ *out = arrow::float16();
49
+ break;
50
+ case ::tensorflow::DT_INT8:
51
+ *out = arrow::int8();
52
+ break;
53
+ case ::tensorflow::DT_INT16:
54
+ *out = arrow::int16();
55
+ break;
56
+ case ::tensorflow::DT_INT32:
57
+ *out = arrow::int32();
58
+ break;
59
+ case ::tensorflow::DT_INT64:
60
+ *out = arrow::int64();
61
+ break;
62
+ case ::tensorflow::DT_UINT8:
63
+ *out = arrow::uint8();
64
+ break;
65
+ case ::tensorflow::DT_UINT16:
66
+ *out = arrow::uint16();
67
+ break;
68
+ case ::tensorflow::DT_UINT32:
69
+ *out = arrow::uint32();
70
+ break;
71
+ case ::tensorflow::DT_UINT64:
72
+ *out = arrow::uint64();
73
+ break;
74
+ default:
75
+ return Status::TypeError("TensorFlow data type is not supported");
76
+ }
77
+ return Status::OK();
78
+ }
79
+
80
+ Status GetTensorFlowType(std::shared_ptr<DataType> dtype, ::tensorflow::DataType* out) {
81
+ switch (dtype->id()) {
82
+ case Type::BOOL:
83
+ *out = ::tensorflow::DT_BOOL;
84
+ break;
85
+ case Type::UINT8:
86
+ *out = ::tensorflow::DT_UINT8;
87
+ break;
88
+ case Type::INT8:
89
+ *out = ::tensorflow::DT_INT8;
90
+ break;
91
+ case Type::UINT16:
92
+ *out = ::tensorflow::DT_UINT16;
93
+ break;
94
+ case Type::INT16:
95
+ *out = ::tensorflow::DT_INT16;
96
+ break;
97
+ case Type::UINT32:
98
+ *out = ::tensorflow::DT_UINT32;
99
+ break;
100
+ case Type::INT32:
101
+ *out = ::tensorflow::DT_INT32;
102
+ break;
103
+ case Type::UINT64:
104
+ *out = ::tensorflow::DT_UINT64;
105
+ break;
106
+ case Type::INT64:
107
+ *out = ::tensorflow::DT_INT64;
108
+ break;
109
+ case Type::HALF_FLOAT:
110
+ *out = ::tensorflow::DT_HALF;
111
+ break;
112
+ case Type::FLOAT:
113
+ *out = ::tensorflow::DT_FLOAT;
114
+ break;
115
+ case Type::DOUBLE:
116
+ *out = ::tensorflow::DT_DOUBLE;
117
+ break;
118
+ default:
119
+ return Status::TypeError("Arrow data type is not supported");
120
+ }
121
+ return arrow::Status::OK();
122
+ }
123
+
124
+ } // namespace tensorflow
125
+
126
+ } // namespace adapters
127
+
128
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h ADDED
@@ -0,0 +1,887 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Array accessor classes for List, LargeList, ListView, LargeListView, FixedSizeList,
19
+ // Map, Struct, and Union
20
+
21
+ #pragma once
22
+
23
+ #include <cstdint>
24
+ #include <memory>
25
+ #include <string>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "arrow/array/array_base.h"
30
+ #include "arrow/array/data.h"
31
+ #include "arrow/result.h"
32
+ #include "arrow/status.h"
33
+ #include "arrow/type.h"
34
+ #include "arrow/type_fwd.h"
35
+ #include "arrow/util/checked_cast.h"
36
+ #include "arrow/util/macros.h"
37
+ #include "arrow/util/visibility.h"
38
+
39
+ namespace arrow {
40
+
41
+ /// \addtogroup nested-arrays
42
+ ///
43
+ /// @{
44
+
45
+ // ----------------------------------------------------------------------
46
+ // VarLengthListLikeArray
47
+
48
+ template <typename TYPE>
49
+ class VarLengthListLikeArray;
50
+
51
+ namespace internal {
52
+
53
+ // Private helper for [Large]List[View]Array::SetData.
54
+ // Unfortunately, trying to define VarLengthListLikeArray::SetData outside of this header
55
+ // doesn't play well with MSVC.
56
+ template <typename TYPE>
57
+ void SetListData(VarLengthListLikeArray<TYPE>* self,
58
+ const std::shared_ptr<ArrayData>& data,
59
+ Type::type expected_type_id = TYPE::type_id);
60
+
61
+ /// \brief A version of Flatten that keeps recursively flattening until an array of
62
+ /// non-list values is reached.
63
+ ///
64
+ /// Array types considered to be lists by this function:
65
+ /// - list
66
+ /// - large_list
67
+ /// - list_view
68
+ /// - large_list_view
69
+ /// - fixed_size_list
70
+ ///
71
+ /// \see ListArray::Flatten
72
+ ARROW_EXPORT Result<std::shared_ptr<Array>> FlattenLogicalListRecursively(
73
+ const Array& in_array, MemoryPool* memory_pool);
74
+
75
+ } // namespace internal
76
+
77
+ /// Base class for variable-sized list and list-view arrays, regardless of offset size.
78
+ template <typename TYPE>
79
+ class VarLengthListLikeArray : public Array {
80
+ public:
81
+ using TypeClass = TYPE;
82
+ using offset_type = typename TypeClass::offset_type;
83
+
84
+ const TypeClass* var_length_list_like_type() const { return this->list_type_; }
85
+
86
+ /// \brief Return array object containing the list's values
87
+ ///
88
+ /// Note that this buffer does not account for any slice offset or length.
89
+ const std::shared_ptr<Array>& values() const { return values_; }
90
+
91
+ /// Note that this buffer does not account for any slice offset or length.
92
+ const std::shared_ptr<Buffer>& value_offsets() const { return data_->buffers[1]; }
93
+
94
+ const std::shared_ptr<DataType>& value_type() const { return list_type_->value_type(); }
95
+
96
+ /// Return pointer to raw value offsets accounting for any slice offset
97
+ const offset_type* raw_value_offsets() const { return raw_value_offsets_; }
98
+
99
+ // The following functions will not perform boundschecking
100
+
101
+ offset_type value_offset(int64_t i) const { return raw_value_offsets_[i]; }
102
+
103
+ /// \brief Return the size of the value at a particular index
104
+ ///
105
+ /// Since non-empty null lists and list-views are possible, avoid calling this
106
+ /// function when the list at slot i is null.
107
+ ///
108
+ /// \pre IsValid(i)
109
+ virtual offset_type value_length(int64_t i) const = 0;
110
+
111
+ /// \pre IsValid(i)
112
+ std::shared_ptr<Array> value_slice(int64_t i) const {
113
+ return values_->Slice(value_offset(i), value_length(i));
114
+ }
115
+
116
+ /// \brief Flatten all level recursively until reach a non-list type, and return
117
+ /// a non-list type Array.
118
+ ///
119
+ /// \see internal::FlattenLogicalListRecursively
120
+ Result<std::shared_ptr<Array>> FlattenRecursively(
121
+ MemoryPool* memory_pool = default_memory_pool()) const {
122
+ return internal::FlattenLogicalListRecursively(*this, memory_pool);
123
+ }
124
+
125
+ protected:
126
+ friend void internal::SetListData<TYPE>(VarLengthListLikeArray<TYPE>* self,
127
+ const std::shared_ptr<ArrayData>& data,
128
+ Type::type expected_type_id);
129
+
130
+ const TypeClass* list_type_ = NULLPTR;
131
+ std::shared_ptr<Array> values_;
132
+ const offset_type* raw_value_offsets_ = NULLPTR;
133
+ };
134
+
135
+ // ----------------------------------------------------------------------
136
+ // ListArray / LargeListArray
137
+
138
+ template <typename TYPE>
139
+ class BaseListArray : public VarLengthListLikeArray<TYPE> {
140
+ public:
141
+ using TypeClass = TYPE;
142
+ using offset_type = typename TYPE::offset_type;
143
+
144
+ const TypeClass* list_type() const { return this->var_length_list_like_type(); }
145
+
146
+ /// \brief Return the size of the value at a particular index
147
+ ///
148
+ /// Since non-empty null lists are possible, avoid calling this
149
+ /// function when the list at slot i is null.
150
+ ///
151
+ /// \pre IsValid(i)
152
+ offset_type value_length(int64_t i) const final {
153
+ return this->raw_value_offsets_[i + 1] - this->raw_value_offsets_[i];
154
+ }
155
+ };
156
+
157
+ /// Concrete Array class for list data
158
+ class ARROW_EXPORT ListArray : public BaseListArray<ListType> {
159
+ public:
160
+ explicit ListArray(std::shared_ptr<ArrayData> data);
161
+
162
+ ListArray(std::shared_ptr<DataType> type, int64_t length,
163
+ std::shared_ptr<Buffer> value_offsets, std::shared_ptr<Array> values,
164
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
165
+ int64_t null_count = kUnknownNullCount, int64_t offset = 0);
166
+
167
+ /// \brief Construct ListArray from array of offsets and child value array
168
+ ///
169
+ /// This function does the bare minimum of validation of the offsets and
170
+ /// input types, and will allocate a new offsets array if necessary (i.e. if
171
+ /// the offsets contain any nulls). If the offsets do not have nulls, they
172
+ /// are assumed to be well-formed.
173
+ ///
174
+ /// If a null_bitmap is not provided, the nulls will be inferred from the offsets'
175
+ /// null bitmap. But if a null_bitmap is provided, the offsets array can't have nulls.
176
+ ///
177
+ /// And when a null_bitmap is provided, the offsets array cannot be a slice (i.e. an
178
+ /// array with offset() > 0).
179
+ ///
180
+ /// \param[in] offsets Array containing n + 1 offsets encoding length and
181
+ /// size. Must be of int32 type
182
+ /// \param[in] values Array containing list values
183
+ /// \param[in] pool MemoryPool in case new offsets array needs to be
184
+ /// allocated because of null values
185
+ /// \param[in] null_bitmap Optional validity bitmap
186
+ /// \param[in] null_count Optional null count in null_bitmap
187
+ static Result<std::shared_ptr<ListArray>> FromArrays(
188
+ const Array& offsets, const Array& values, MemoryPool* pool = default_memory_pool(),
189
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
190
+ int64_t null_count = kUnknownNullCount);
191
+
192
+ static Result<std::shared_ptr<ListArray>> FromArrays(
193
+ std::shared_ptr<DataType> type, const Array& offsets, const Array& values,
194
+ MemoryPool* pool = default_memory_pool(),
195
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
196
+ int64_t null_count = kUnknownNullCount);
197
+
198
+ /// \brief Build a ListArray from a ListViewArray
199
+ static Result<std::shared_ptr<ListArray>> FromListView(const ListViewArray& source,
200
+ MemoryPool* pool);
201
+
202
+ /// \brief Return an Array that is a concatenation of the lists in this array.
203
+ ///
204
+ /// Note that it's different from `values()` in that it takes into
205
+ /// consideration of this array's offsets as well as null elements backed
206
+ /// by non-empty lists (they are skipped, thus copying may be needed).
207
+ Result<std::shared_ptr<Array>> Flatten(
208
+ MemoryPool* memory_pool = default_memory_pool()) const;
209
+
210
+ /// \brief Return list offsets as an Int32Array
211
+ ///
212
+ /// The returned array will not have a validity bitmap, so you cannot expect
213
+ /// to pass it to ListArray::FromArrays() and get back the same list array
214
+ /// if the original one has nulls.
215
+ std::shared_ptr<Array> offsets() const;
216
+
217
+ protected:
218
+ // This constructor defers SetData to a derived array class
219
+ ListArray() = default;
220
+
221
+ void SetData(const std::shared_ptr<ArrayData>& data);
222
+ };
223
+
224
+ /// Concrete Array class for large list data (with 64-bit offsets)
225
+ class ARROW_EXPORT LargeListArray : public BaseListArray<LargeListType> {
226
+ public:
227
+ explicit LargeListArray(const std::shared_ptr<ArrayData>& data);
228
+
229
+ LargeListArray(const std::shared_ptr<DataType>& type, int64_t length,
230
+ const std::shared_ptr<Buffer>& value_offsets,
231
+ const std::shared_ptr<Array>& values,
232
+ const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
233
+ int64_t null_count = kUnknownNullCount, int64_t offset = 0);
234
+
235
+ /// \brief Construct LargeListArray from array of offsets and child value array
236
+ ///
237
+ /// This function does the bare minimum of validation of the offsets and
238
+ /// input types, and will allocate a new offsets array if necessary (i.e. if
239
+ /// the offsets contain any nulls). If the offsets do not have nulls, they
240
+ /// are assumed to be well-formed.
241
+ ///
242
+ /// If a null_bitmap is not provided, the nulls will be inferred from the offsets'
243
+ /// null bitmap. But if a null_bitmap is provided, the offsets array can't have nulls.
244
+ ///
245
+ /// And when a null_bitmap is provided, the offsets array cannot be a slice (i.e. an
246
+ /// array with offset() > 0).
247
+ ///
248
+ /// \param[in] offsets Array containing n + 1 offsets encoding length and
249
+ /// size. Must be of int64 type
250
+ /// \param[in] values Array containing list values
251
+ /// \param[in] pool MemoryPool in case new offsets array needs to be
252
+ /// allocated because of null values
253
+ /// \param[in] null_bitmap Optional validity bitmap
254
+ /// \param[in] null_count Optional null count in null_bitmap
255
+ static Result<std::shared_ptr<LargeListArray>> FromArrays(
256
+ const Array& offsets, const Array& values, MemoryPool* pool = default_memory_pool(),
257
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
258
+ int64_t null_count = kUnknownNullCount);
259
+
260
+ static Result<std::shared_ptr<LargeListArray>> FromArrays(
261
+ std::shared_ptr<DataType> type, const Array& offsets, const Array& values,
262
+ MemoryPool* pool = default_memory_pool(),
263
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
264
+ int64_t null_count = kUnknownNullCount);
265
+
266
+ /// \brief Build a LargeListArray from a LargeListViewArray
267
+ static Result<std::shared_ptr<LargeListArray>> FromListView(
268
+ const LargeListViewArray& source, MemoryPool* pool);
269
+
270
+ /// \brief Return an Array that is a concatenation of the lists in this array.
271
+ ///
272
+ /// Note that it's different from `values()` in that it takes into
273
+ /// consideration of this array's offsets as well as null elements backed
274
+ /// by non-empty lists (they are skipped, thus copying may be needed).
275
+ Result<std::shared_ptr<Array>> Flatten(
276
+ MemoryPool* memory_pool = default_memory_pool()) const;
277
+
278
+ /// \brief Return list offsets as an Int64Array
279
+ std::shared_ptr<Array> offsets() const;
280
+
281
+ protected:
282
+ void SetData(const std::shared_ptr<ArrayData>& data);
283
+ };
284
+
285
+ // ----------------------------------------------------------------------
286
+ // ListViewArray / LargeListViewArray
287
+
288
+ template <typename TYPE>
289
+ class BaseListViewArray : public VarLengthListLikeArray<TYPE> {
290
+ public:
291
+ using TypeClass = TYPE;
292
+ using offset_type = typename TYPE::offset_type;
293
+
294
+ const TypeClass* list_view_type() const { return this->var_length_list_like_type(); }
295
+
296
+ /// \brief Note that this buffer does not account for any slice offset or length.
297
+ const std::shared_ptr<Buffer>& value_sizes() const { return this->data_->buffers[2]; }
298
+
299
+ /// \brief Return pointer to raw value offsets accounting for any slice offset
300
+ const offset_type* raw_value_sizes() const { return raw_value_sizes_; }
301
+
302
+ /// \brief Return the size of the value at a particular index
303
+ ///
304
+ /// This should not be called if the list-view at slot i is null.
305
+ /// The returned size in those cases could be any value from 0 to the
306
+ /// length of the child values array.
307
+ ///
308
+ /// \pre IsValid(i)
309
+ offset_type value_length(int64_t i) const final { return this->raw_value_sizes_[i]; }
310
+
311
+ protected:
312
+ const offset_type* raw_value_sizes_ = NULLPTR;
313
+ };
314
+
315
+ /// \brief Concrete Array class for list-view data
316
+ class ARROW_EXPORT ListViewArray : public BaseListViewArray<ListViewType> {
317
+ public:
318
+ explicit ListViewArray(std::shared_ptr<ArrayData> data);
319
+
320
+ ListViewArray(std::shared_ptr<DataType> type, int64_t length,
321
+ std::shared_ptr<Buffer> value_offsets,
322
+ std::shared_ptr<Buffer> value_sizes, std::shared_ptr<Array> values,
323
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
324
+ int64_t null_count = kUnknownNullCount, int64_t offset = 0);
325
+
326
+ /// \brief Construct ListViewArray from array of offsets, sizes, and child
327
+ /// value array
328
+ ///
329
+ /// Construct a ListViewArray using buffers from offsets and sizes arrays
330
+ /// that project views into the child values array.
331
+ ///
332
+ /// This function does the bare minimum of validation of the offsets/sizes and
333
+ /// input types. The offset and length of the offsets and sizes arrays must
334
+ /// match and that will be checked, but their contents will be assumed to be
335
+ /// well-formed.
336
+ ///
337
+ /// If a null_bitmap is not provided, the nulls will be inferred from the
338
+ /// offsets's null bitmap. But if a null_bitmap is provided, the offsets array
339
+ /// can't have nulls.
340
+ ///
341
+ /// And when a null_bitmap is provided, neither the offsets or sizes array can be a
342
+ /// slice (i.e. an array with offset() > 0).
343
+ ///
344
+ /// \param[in] offsets An array of int32 offsets into the values array. NULL values are
345
+ /// supported if the corresponding values in sizes is NULL or 0.
346
+ /// \param[in] sizes An array containing the int32 sizes of every view. NULL values are
347
+ /// taken to represent a NULL list-view in the array being created.
348
+ /// \param[in] values Array containing list values
349
+ /// \param[in] pool MemoryPool
350
+ /// \param[in] null_bitmap Optional validity bitmap
351
+ /// \param[in] null_count Optional null count in null_bitmap
352
+ static Result<std::shared_ptr<ListViewArray>> FromArrays(
353
+ const Array& offsets, const Array& sizes, const Array& values,
354
+ MemoryPool* pool = default_memory_pool(),
355
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
356
+ int64_t null_count = kUnknownNullCount);
357
+
358
+ static Result<std::shared_ptr<ListViewArray>> FromArrays(
359
+ std::shared_ptr<DataType> type, const Array& offsets, const Array& sizes,
360
+ const Array& values, MemoryPool* pool = default_memory_pool(),
361
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
362
+ int64_t null_count = kUnknownNullCount);
363
+
364
+ /// \brief Build a ListViewArray from a ListArray
365
+ static Result<std::shared_ptr<ListViewArray>> FromList(const ListArray& list_array,
366
+ MemoryPool* pool);
367
+
368
+ /// \brief Return an Array that is a concatenation of the list-views in this array.
369
+ ///
370
+ /// Note that it's different from `values()` in that it takes into
371
+ /// consideration this array's offsets (which can be in any order)
372
+ /// and sizes. Nulls are skipped.
373
+ ///
374
+ /// This function invokes Concatenate() if list-views are non-contiguous. It
375
+ /// will try to minimize the number of array slices passed to Concatenate() by
376
+ /// maximizing the size of each slice (containing as many contiguous
377
+ /// list-views as possible).
378
+ Result<std::shared_ptr<Array>> Flatten(
379
+ MemoryPool* memory_pool = default_memory_pool()) const;
380
+
381
+ /// \brief Return list-view offsets as an Int32Array
382
+ ///
383
+ /// The returned array will not have a validity bitmap, so you cannot expect
384
+ /// to pass it to ListArray::FromArrays() and get back the same list array
385
+ /// if the original one has nulls.
386
+ std::shared_ptr<Array> offsets() const;
387
+
388
+ /// \brief Return list-view sizes as an Int32Array
389
+ ///
390
+ /// The returned array will not have a validity bitmap, so you cannot expect
391
+ /// to pass it to ListViewArray::FromArrays() and get back the same list
392
+ /// array if the original one has nulls.
393
+ std::shared_ptr<Array> sizes() const;
394
+
395
+ protected:
396
+ // This constructor defers SetData to a derived array class
397
+ ListViewArray() = default;
398
+
399
+ void SetData(const std::shared_ptr<ArrayData>& data);
400
+ };
401
+
402
+ /// \brief Concrete Array class for large list-view data (with 64-bit offsets
403
+ /// and sizes)
404
+ class ARROW_EXPORT LargeListViewArray : public BaseListViewArray<LargeListViewType> {
405
+ public:
406
+ explicit LargeListViewArray(std::shared_ptr<ArrayData> data);
407
+
408
+ LargeListViewArray(std::shared_ptr<DataType> type, int64_t length,
409
+ std::shared_ptr<Buffer> value_offsets,
410
+ std::shared_ptr<Buffer> value_sizes, std::shared_ptr<Array> values,
411
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
412
+ int64_t null_count = kUnknownNullCount, int64_t offset = 0);
413
+
414
+ /// \brief Construct LargeListViewArray from array of offsets, sizes, and child
415
+ /// value array
416
+ ///
417
+ /// Construct an LargeListViewArray using buffers from offsets and sizes arrays
418
+ /// that project views into the values array.
419
+ ///
420
+ /// This function does the bare minimum of validation of the offsets/sizes and
421
+ /// input types. The offset and length of the offsets and sizes arrays must
422
+ /// match and that will be checked, but their contents will be assumed to be
423
+ /// well-formed.
424
+ ///
425
+ /// If a null_bitmap is not provided, the nulls will be inferred from the offsets' or
426
+ /// sizes' null bitmap. Only one of these two is allowed to have a null bitmap. But if a
427
+ /// null_bitmap is provided, the offsets array and the sizes array can't have nulls.
428
+ ///
429
+ /// And when a null_bitmap is provided, neither the offsets or sizes array can be a
430
+ /// slice (i.e. an array with offset() > 0).
431
+ ///
432
+ /// \param[in] offsets An array of int64 offsets into the values array. NULL values are
433
+ /// supported if the corresponding values in sizes is NULL or 0.
434
+ /// \param[in] sizes An array containing the int64 sizes of every view. NULL values are
435
+ /// taken to represent a NULL list-view in the array being created.
436
+ /// \param[in] values Array containing list values
437
+ /// \param[in] pool MemoryPool
438
+ /// \param[in] null_bitmap Optional validity bitmap
439
+ /// \param[in] null_count Optional null count in null_bitmap
440
+ static Result<std::shared_ptr<LargeListViewArray>> FromArrays(
441
+ const Array& offsets, const Array& sizes, const Array& values,
442
+ MemoryPool* pool = default_memory_pool(),
443
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
444
+ int64_t null_count = kUnknownNullCount);
445
+
446
+ static Result<std::shared_ptr<LargeListViewArray>> FromArrays(
447
+ std::shared_ptr<DataType> type, const Array& offsets, const Array& sizes,
448
+ const Array& values, MemoryPool* pool = default_memory_pool(),
449
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
450
+ int64_t null_count = kUnknownNullCount);
451
+
452
+ /// \brief Build a LargeListViewArray from a LargeListArray
453
+ static Result<std::shared_ptr<LargeListViewArray>> FromList(
454
+ const LargeListArray& list_array, MemoryPool* pool);
455
+
456
+ /// \brief Return an Array that is a concatenation of the large list-views in this
457
+ /// array.
458
+ ///
459
+ /// Note that it's different from `values()` in that it takes into
460
+ /// consideration this array's offsets (which can be in any order)
461
+ /// and sizes. Nulls are skipped.
462
+ Result<std::shared_ptr<Array>> Flatten(
463
+ MemoryPool* memory_pool = default_memory_pool()) const;
464
+
465
+ /// \brief Return list-view offsets as an Int64Array
466
+ ///
467
+ /// The returned array will not have a validity bitmap, so you cannot expect
468
+ /// to pass it to LargeListArray::FromArrays() and get back the same list array
469
+ /// if the original one has nulls.
470
+ std::shared_ptr<Array> offsets() const;
471
+
472
+ /// \brief Return list-view sizes as an Int64Array
473
+ ///
474
+ /// The returned array will not have a validity bitmap, so you cannot expect
475
+ /// to pass it to LargeListViewArray::FromArrays() and get back the same list
476
+ /// array if the original one has nulls.
477
+ std::shared_ptr<Array> sizes() const;
478
+
479
+ protected:
480
+ // This constructor defers SetData to a derived array class
481
+ LargeListViewArray() = default;
482
+
483
+ void SetData(const std::shared_ptr<ArrayData>& data);
484
+ };
485
+
486
+ // ----------------------------------------------------------------------
487
+ // MapArray
488
+
489
+ /// Concrete Array class for map data
490
+ ///
491
+ /// NB: "value" in this context refers to a pair of a key and the corresponding item
492
+ class ARROW_EXPORT MapArray : public ListArray {
493
+ public:
494
+ using TypeClass = MapType;
495
+
496
+ explicit MapArray(const std::shared_ptr<ArrayData>& data);
497
+
498
+ MapArray(const std::shared_ptr<DataType>& type, int64_t length,
499
+ const std::shared_ptr<Buffer>& value_offsets,
500
+ const std::shared_ptr<Array>& keys, const std::shared_ptr<Array>& items,
501
+ const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
502
+ int64_t null_count = kUnknownNullCount, int64_t offset = 0);
503
+
504
+ MapArray(const std::shared_ptr<DataType>& type, int64_t length, BufferVector buffers,
505
+ const std::shared_ptr<Array>& keys, const std::shared_ptr<Array>& items,
506
+ int64_t null_count = kUnknownNullCount, int64_t offset = 0);
507
+
508
+ MapArray(const std::shared_ptr<DataType>& type, int64_t length,
509
+ const std::shared_ptr<Buffer>& value_offsets,
510
+ const std::shared_ptr<Array>& values,
511
+ const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
512
+ int64_t null_count = kUnknownNullCount, int64_t offset = 0);
513
+
514
+ /// \brief Construct MapArray from array of offsets and child key, item arrays
515
+ ///
516
+ /// This function does the bare minimum of validation of the offsets and
517
+ /// input types, and will allocate a new offsets array if necessary (i.e. if
518
+ /// the offsets contain any nulls). If the offsets do not have nulls, they
519
+ /// are assumed to be well-formed
520
+ ///
521
+ /// \param[in] offsets Array containing n + 1 offsets encoding length and
522
+ /// size. Must be of int32 type
523
+ /// \param[in] keys Array containing key values
524
+ /// \param[in] items Array containing item values
525
+ /// \param[in] pool MemoryPool in case new offsets array needs to be
526
+ /// \param[in] null_bitmap Optional validity bitmap
527
+ /// allocated because of null values
528
+ static Result<std::shared_ptr<Array>> FromArrays(
529
+ const std::shared_ptr<Array>& offsets, const std::shared_ptr<Array>& keys,
530
+ const std::shared_ptr<Array>& items, MemoryPool* pool = default_memory_pool(),
531
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR);
532
+
533
+ static Result<std::shared_ptr<Array>> FromArrays(
534
+ std::shared_ptr<DataType> type, const std::shared_ptr<Array>& offsets,
535
+ const std::shared_ptr<Array>& keys, const std::shared_ptr<Array>& items,
536
+ MemoryPool* pool = default_memory_pool(),
537
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR);
538
+
539
+ const MapType* map_type() const { return map_type_; }
540
+
541
+ /// \brief Return array object containing all map keys
542
+ const std::shared_ptr<Array>& keys() const { return keys_; }
543
+
544
+ /// \brief Return array object containing all mapped items
545
+ const std::shared_ptr<Array>& items() const { return items_; }
546
+
547
+ /// Validate child data before constructing the actual MapArray.
548
+ static Status ValidateChildData(
549
+ const std::vector<std::shared_ptr<ArrayData>>& child_data);
550
+
551
+ protected:
552
+ void SetData(const std::shared_ptr<ArrayData>& data);
553
+
554
+ static Result<std::shared_ptr<Array>> FromArraysInternal(
555
+ std::shared_ptr<DataType> type, const std::shared_ptr<Array>& offsets,
556
+ const std::shared_ptr<Array>& keys, const std::shared_ptr<Array>& items,
557
+ MemoryPool* pool, std::shared_ptr<Buffer> null_bitmap = NULLPTR);
558
+
559
+ private:
560
+ const MapType* map_type_;
561
+ std::shared_ptr<Array> keys_, items_;
562
+ };
563
+
564
+ // ----------------------------------------------------------------------
565
+ // FixedSizeListArray
566
+
567
+ /// Concrete Array class for fixed size list data
568
+ class ARROW_EXPORT FixedSizeListArray : public Array {
569
+ public:
570
+ using TypeClass = FixedSizeListType;
571
+ using offset_type = TypeClass::offset_type;
572
+
573
+ explicit FixedSizeListArray(const std::shared_ptr<ArrayData>& data);
574
+
575
+ FixedSizeListArray(const std::shared_ptr<DataType>& type, int64_t length,
576
+ const std::shared_ptr<Array>& values,
577
+ const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
578
+ int64_t null_count = kUnknownNullCount, int64_t offset = 0);
579
+
580
+ const FixedSizeListType* list_type() const;
581
+
582
+ /// \brief Return array object containing the list's values
583
+ const std::shared_ptr<Array>& values() const;
584
+
585
+ const std::shared_ptr<DataType>& value_type() const;
586
+
587
+ // The following functions will not perform boundschecking
588
+ int64_t value_offset(int64_t i) const {
589
+ i += data_->offset;
590
+ return list_size_ * i;
591
+ }
592
+ /// \brief Return the fixed-size of the values
593
+ ///
594
+ /// No matter the value of the index parameter, the result is the same.
595
+ /// So even when the value at slot i is null, this function will return a
596
+ /// non-zero size.
597
+ ///
598
+ /// \pre IsValid(i)
599
+ int32_t value_length(int64_t i = 0) const {
600
+ ARROW_UNUSED(i);
601
+ return list_size_;
602
+ }
603
+ /// \pre IsValid(i)
604
+ std::shared_ptr<Array> value_slice(int64_t i) const {
605
+ return values_->Slice(value_offset(i), value_length(i));
606
+ }
607
+
608
+ /// \brief Return an Array that is a concatenation of the lists in this array.
609
+ ///
610
+ /// Note that it's different from `values()` in that it takes into
611
+ /// consideration null elements (they are skipped, thus copying may be needed).
612
+ Result<std::shared_ptr<Array>> Flatten(
613
+ MemoryPool* memory_pool = default_memory_pool()) const;
614
+
615
+ /// \brief Flatten all level recursively until reach a non-list type, and return
616
+ /// a non-list type Array.
617
+ ///
618
+ /// \see internal::FlattenLogicalListRecursively
619
+ Result<std::shared_ptr<Array>> FlattenRecursively(
620
+ MemoryPool* memory_pool = default_memory_pool()) const {
621
+ return internal::FlattenLogicalListRecursively(*this, memory_pool);
622
+ }
623
+
624
+ /// \brief Construct FixedSizeListArray from child value array and value_length
625
+ ///
626
+ /// \param[in] values Array containing list values
627
+ /// \param[in] list_size The fixed length of each list
628
+ /// \param[in] null_bitmap Optional validity bitmap
629
+ /// \param[in] null_count Optional null count in null_bitmap
630
+ /// \return Will have length equal to values.length() / list_size
631
+ static Result<std::shared_ptr<Array>> FromArrays(
632
+ const std::shared_ptr<Array>& values, int32_t list_size,
633
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
634
+ int64_t null_count = kUnknownNullCount);
635
+
636
+ /// \brief Construct FixedSizeListArray from child value array and type
637
+ ///
638
+ /// \param[in] values Array containing list values
639
+ /// \param[in] type The fixed sized list type
640
+ /// \param[in] null_bitmap Optional validity bitmap
641
+ /// \param[in] null_count Optional null count in null_bitmap
642
+ /// \return Will have length equal to values.length() / type.list_size()
643
+ static Result<std::shared_ptr<Array>> FromArrays(
644
+ const std::shared_ptr<Array>& values, std::shared_ptr<DataType> type,
645
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
646
+ int64_t null_count = kUnknownNullCount);
647
+
648
+ protected:
649
+ void SetData(const std::shared_ptr<ArrayData>& data);
650
+ int32_t list_size_;
651
+
652
+ private:
653
+ std::shared_ptr<Array> values_;
654
+ };
655
+
656
+ // ----------------------------------------------------------------------
657
+ // Struct
658
+
659
+ /// Concrete Array class for struct data
660
+ class ARROW_EXPORT StructArray : public Array {
661
+ public:
662
+ using TypeClass = StructType;
663
+
664
+ explicit StructArray(const std::shared_ptr<ArrayData>& data);
665
+
666
+ StructArray(const std::shared_ptr<DataType>& type, int64_t length,
667
+ const std::vector<std::shared_ptr<Array>>& children,
668
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
669
+ int64_t null_count = kUnknownNullCount, int64_t offset = 0);
670
+
671
+ /// \brief Return a StructArray from child arrays and field names.
672
+ ///
673
+ /// The length and data type are automatically inferred from the arguments.
674
+ /// There should be at least one child array.
675
+ static Result<std::shared_ptr<StructArray>> Make(
676
+ const ArrayVector& children, const std::vector<std::string>& field_names,
677
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
678
+ int64_t null_count = kUnknownNullCount, int64_t offset = 0);
679
+
680
+ /// \brief Return a StructArray from child arrays and fields.
681
+ ///
682
+ /// The length is automatically inferred from the arguments.
683
+ /// There should be at least one child array. This method does not
684
+ /// check that field types and child array types are consistent.
685
+ static Result<std::shared_ptr<StructArray>> Make(
686
+ const ArrayVector& children, const FieldVector& fields,
687
+ std::shared_ptr<Buffer> null_bitmap = NULLPTR,
688
+ int64_t null_count = kUnknownNullCount, int64_t offset = 0);
689
+
690
+ const StructType* struct_type() const;
691
+
692
+ // Return a shared pointer in case the requestor desires to share ownership
693
+ // with this array. The returned array has its offset, length and null
694
+ // count adjusted.
695
+ const std::shared_ptr<Array>& field(int pos) const;
696
+
697
+ const ArrayVector& fields() const;
698
+
699
+ /// Returns null if name not found
700
+ std::shared_ptr<Array> GetFieldByName(const std::string& name) const;
701
+
702
+ /// Indicate if field named `name` can be found unambiguously in the struct.
703
+ Status CanReferenceFieldByName(const std::string& name) const;
704
+
705
+ /// Indicate if fields named `names` can be found unambiguously in the struct.
706
+ Status CanReferenceFieldsByNames(const std::vector<std::string>& names) const;
707
+
708
+ /// \brief Flatten this array as a vector of arrays, one for each field
709
+ ///
710
+ /// \param[in] pool The pool to allocate null bitmaps from, if necessary
711
+ Result<ArrayVector> Flatten(MemoryPool* pool = default_memory_pool()) const;
712
+
713
+ /// \brief Get one of the child arrays, combining its null bitmap
714
+ /// with the parent struct array's bitmap.
715
+ ///
716
+ /// \param[in] index Which child array to get
717
+ /// \param[in] pool The pool to allocate null bitmaps from, if necessary
718
+ Result<std::shared_ptr<Array>> GetFlattenedField(
719
+ int index, MemoryPool* pool = default_memory_pool()) const;
720
+
721
+ private:
722
+ // For caching boxed child data
723
+ // XXX This is not handled in a thread-safe manner.
724
+ mutable ArrayVector boxed_fields_;
725
+ };
726
+
727
+ // ----------------------------------------------------------------------
728
+ // Union
729
+
730
+ /// Base class for SparseUnionArray and DenseUnionArray
731
+ class ARROW_EXPORT UnionArray : public Array {
732
+ public:
733
+ using type_code_t = int8_t;
734
+
735
+ /// Note that this buffer does not account for any slice offset
736
+ const std::shared_ptr<Buffer>& type_codes() const { return data_->buffers[1]; }
737
+
738
+ const type_code_t* raw_type_codes() const { return raw_type_codes_; }
739
+
740
+ /// The logical type code of the value at index.
741
+ type_code_t type_code(int64_t i) const { return raw_type_codes_[i]; }
742
+
743
+ /// The physical child id containing value at index.
744
+ int child_id(int64_t i) const { return union_type_->child_ids()[raw_type_codes_[i]]; }
745
+
746
+ const UnionType* union_type() const { return union_type_; }
747
+
748
+ UnionMode::type mode() const { return union_type_->mode(); }
749
+
750
+ /// \brief Return the given field as an individual array.
751
+ ///
752
+ /// For sparse unions, the returned array has its offset, length and null
753
+ /// count adjusted.
754
+ std::shared_ptr<Array> field(int pos) const;
755
+
756
+ protected:
757
+ void SetData(std::shared_ptr<ArrayData> data);
758
+
759
+ const type_code_t* raw_type_codes_;
760
+ const UnionType* union_type_;
761
+
762
+ // For caching boxed child data
763
+ mutable std::vector<std::shared_ptr<Array>> boxed_fields_;
764
+ };
765
+
766
+ /// Concrete Array class for sparse union data
767
+ class ARROW_EXPORT SparseUnionArray : public UnionArray {
768
+ public:
769
+ using TypeClass = SparseUnionType;
770
+
771
+ explicit SparseUnionArray(std::shared_ptr<ArrayData> data);
772
+
773
+ SparseUnionArray(std::shared_ptr<DataType> type, int64_t length, ArrayVector children,
774
+ std::shared_ptr<Buffer> type_ids, int64_t offset = 0);
775
+
776
+ /// \brief Construct SparseUnionArray from type_ids and children
777
+ ///
778
+ /// This function does the bare minimum of validation of the input types.
779
+ ///
780
+ /// \param[in] type_ids An array of logical type ids for the union type
781
+ /// \param[in] children Vector of children Arrays containing the data for each type.
782
+ /// \param[in] type_codes Vector of type codes.
783
+ static Result<std::shared_ptr<Array>> Make(const Array& type_ids, ArrayVector children,
784
+ std::vector<type_code_t> type_codes) {
785
+ return Make(std::move(type_ids), std::move(children), std::vector<std::string>{},
786
+ std::move(type_codes));
787
+ }
788
+
789
+ /// \brief Construct SparseUnionArray with custom field names from type_ids and children
790
+ ///
791
+ /// This function does the bare minimum of validation of the input types.
792
+ ///
793
+ /// \param[in] type_ids An array of logical type ids for the union type
794
+ /// \param[in] children Vector of children Arrays containing the data for each type.
795
+ /// \param[in] field_names Vector of strings containing the name of each field.
796
+ /// \param[in] type_codes Vector of type codes.
797
+ static Result<std::shared_ptr<Array>> Make(const Array& type_ids, ArrayVector children,
798
+ std::vector<std::string> field_names = {},
799
+ std::vector<type_code_t> type_codes = {});
800
+
801
+ const SparseUnionType* union_type() const {
802
+ return internal::checked_cast<const SparseUnionType*>(union_type_);
803
+ }
804
+
805
+ /// \brief Get one of the child arrays, adjusting its null bitmap
806
+ /// where the union array type code does not match.
807
+ ///
808
+ /// \param[in] index Which child array to get (i.e. the physical index, not the type
809
+ /// code) \param[in] pool The pool to allocate null bitmaps from, if necessary
810
+ Result<std::shared_ptr<Array>> GetFlattenedField(
811
+ int index, MemoryPool* pool = default_memory_pool()) const;
812
+
813
+ protected:
814
+ void SetData(std::shared_ptr<ArrayData> data);
815
+ };
816
+
817
+ /// \brief Concrete Array class for dense union data
818
+ ///
819
+ /// Note that union types do not have a validity bitmap
820
+ class ARROW_EXPORT DenseUnionArray : public UnionArray {
821
+ public:
822
+ using TypeClass = DenseUnionType;
823
+
824
+ explicit DenseUnionArray(const std::shared_ptr<ArrayData>& data);
825
+
826
+ DenseUnionArray(std::shared_ptr<DataType> type, int64_t length, ArrayVector children,
827
+ std::shared_ptr<Buffer> type_ids,
828
+ std::shared_ptr<Buffer> value_offsets = NULLPTR, int64_t offset = 0);
829
+
830
+ /// \brief Construct DenseUnionArray from type_ids, value_offsets, and children
831
+ ///
832
+ /// This function does the bare minimum of validation of the offsets and
833
+ /// input types.
834
+ ///
835
+ /// \param[in] type_ids An array of logical type ids for the union type
836
+ /// \param[in] value_offsets An array of signed int32 values indicating the
837
+ /// relative offset into the respective child array for the type in a given slot.
838
+ /// The respective offsets for each child value array must be in order / increasing.
839
+ /// \param[in] children Vector of children Arrays containing the data for each type.
840
+ /// \param[in] type_codes Vector of type codes.
841
+ static Result<std::shared_ptr<Array>> Make(const Array& type_ids,
842
+ const Array& value_offsets,
843
+ ArrayVector children,
844
+ std::vector<type_code_t> type_codes) {
845
+ return Make(type_ids, value_offsets, std::move(children), std::vector<std::string>{},
846
+ std::move(type_codes));
847
+ }
848
+
849
+ /// \brief Construct DenseUnionArray with custom field names from type_ids,
850
+ /// value_offsets, and children
851
+ ///
852
+ /// This function does the bare minimum of validation of the offsets and
853
+ /// input types.
854
+ ///
855
+ /// \param[in] type_ids An array of logical type ids for the union type
856
+ /// \param[in] value_offsets An array of signed int32 values indicating the
857
+ /// relative offset into the respective child array for the type in a given slot.
858
+ /// The respective offsets for each child value array must be in order / increasing.
859
+ /// \param[in] children Vector of children Arrays containing the data for each type.
860
+ /// \param[in] field_names Vector of strings containing the name of each field.
861
+ /// \param[in] type_codes Vector of type codes.
862
+ static Result<std::shared_ptr<Array>> Make(const Array& type_ids,
863
+ const Array& value_offsets,
864
+ ArrayVector children,
865
+ std::vector<std::string> field_names = {},
866
+ std::vector<type_code_t> type_codes = {});
867
+
868
+ const DenseUnionType* union_type() const {
869
+ return internal::checked_cast<const DenseUnionType*>(union_type_);
870
+ }
871
+
872
+ /// Note that this buffer does not account for any slice offset
873
+ const std::shared_ptr<Buffer>& value_offsets() const { return data_->buffers[2]; }
874
+
875
+ int32_t value_offset(int64_t i) const { return raw_value_offsets_[i]; }
876
+
877
+ const int32_t* raw_value_offsets() const { return raw_value_offsets_; }
878
+
879
+ protected:
880
+ const int32_t* raw_value_offsets_;
881
+
882
+ void SetData(const std::shared_ptr<ArrayData>& data);
883
+ };
884
+
885
+ /// @}
886
+
887
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/array/array_base.h"
21
+ #include "arrow/c/dlpack_abi.h"
22
+
23
+ namespace arrow::dlpack {
24
+
25
+ /// \brief Export Arrow array as DLPack tensor.
26
+ ///
27
+ /// DLMangedTensor is produced as defined by the DLPack protocol,
28
+ /// see https://dmlc.github.io/dlpack/latest/.
29
+ ///
30
+ /// Data types for which the protocol is supported are
31
+ /// integer and floating-point data types.
32
+ ///
33
+ /// DLPack protocol only supports arrays with one contiguous
34
+ /// memory region which means Arrow Arrays with validity buffers
35
+ /// are not supported.
36
+ ///
37
+ /// \param[in] arr Arrow array
38
+ /// \return DLManagedTensor struct
39
+ ARROW_EXPORT
40
+ Result<DLManagedTensor*> ExportArray(const std::shared_ptr<Array>& arr);
41
+
42
+ /// \brief Get DLDevice with enumerator specifying the
43
+ /// type of the device data is stored on and index of the
44
+ /// device which is 0 by default for CPU.
45
+ ///
46
+ /// \param[in] arr Arrow array
47
+ /// \return DLDevice struct
48
+ ARROW_EXPORT
49
+ Result<DLDevice> ExportDevice(const std::shared_ptr<Array>& arr);
50
+
51
+ } // namespace arrow::dlpack
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // NOTE: API is EXPERIMENTAL and will change without going through a
19
+ // deprecation cycle
20
+
21
+ #pragma once
22
+
23
+ /// \defgroup compute-functions Abstract compute function API
24
+ /// @{
25
+ /// @}
26
+
27
+ /// \defgroup compute-concrete-options Concrete option classes for compute functions
28
+ /// @{
29
+ /// @}
30
+
31
+ #include "arrow/compute/api_aggregate.h" // IWYU pragma: export
32
+ #include "arrow/compute/api_scalar.h" // IWYU pragma: export
33
+ #include "arrow/compute/api_vector.h" // IWYU pragma: export
34
+ #include "arrow/compute/cast.h" // IWYU pragma: export
35
+ #include "arrow/compute/function.h" // IWYU pragma: export
36
+ #include "arrow/compute/function_options.h" // IWYU pragma: export
37
+ #include "arrow/compute/kernel.h" // IWYU pragma: export
38
+ #include "arrow/compute/registry.h" // IWYU pragma: export
39
+ #include "arrow/datum.h" // IWYU pragma: export
40
+
41
+ #include "arrow/compute/expression.h" // IWYU pragma: export
42
+
43
+ /// \defgroup execnode-row Utilities for working with data in a row-major format
44
+ /// @{
45
+ /// @}
46
+
47
+ #include "arrow/compute/row/grouper.h" // IWYU pragma: export
48
+
49
+ /// \defgroup acero-internals Acero internals, useful for those extending Acero
50
+ /// @{
51
+ /// @}
52
+
53
+ #include "arrow/compute/exec.h" // IWYU pragma: export
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h ADDED
@@ -0,0 +1,709 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <utility>
22
+
23
+ #include "arrow/compute/function_options.h"
24
+ #include "arrow/compute/ordering.h"
25
+ #include "arrow/result.h"
26
+ #include "arrow/type_fwd.h"
27
+
28
+ namespace arrow {
29
+ namespace compute {
30
+
31
+ class ExecContext;
32
+
33
+ /// \addtogroup compute-concrete-options
34
+ /// @{
35
+
36
+ class ARROW_EXPORT FilterOptions : public FunctionOptions {
37
+ public:
38
+ /// Configure the action taken when a slot of the selection mask is null
39
+ enum NullSelectionBehavior {
40
+ /// The corresponding filtered value will be removed in the output.
41
+ DROP,
42
+ /// The corresponding filtered value will be null in the output.
43
+ EMIT_NULL,
44
+ };
45
+
46
+ explicit FilterOptions(NullSelectionBehavior null_selection = DROP);
47
+ static constexpr char const kTypeName[] = "FilterOptions";
48
+ static FilterOptions Defaults() { return FilterOptions(); }
49
+
50
+ NullSelectionBehavior null_selection_behavior = DROP;
51
+ };
52
+
53
+ class ARROW_EXPORT TakeOptions : public FunctionOptions {
54
+ public:
55
+ explicit TakeOptions(bool boundscheck = true);
56
+ static constexpr char const kTypeName[] = "TakeOptions";
57
+ static TakeOptions BoundsCheck() { return TakeOptions(true); }
58
+ static TakeOptions NoBoundsCheck() { return TakeOptions(false); }
59
+ static TakeOptions Defaults() { return BoundsCheck(); }
60
+
61
+ bool boundscheck = true;
62
+ };
63
+
64
+ /// \brief Options for the dictionary encode function
65
+ class ARROW_EXPORT DictionaryEncodeOptions : public FunctionOptions {
66
+ public:
67
+ /// Configure how null values will be encoded
68
+ enum NullEncodingBehavior {
69
+ /// The null value will be added to the dictionary with a proper index.
70
+ ENCODE,
71
+ /// The null value will be masked in the indices array.
72
+ MASK
73
+ };
74
+
75
+ explicit DictionaryEncodeOptions(NullEncodingBehavior null_encoding = MASK);
76
+ static constexpr char const kTypeName[] = "DictionaryEncodeOptions";
77
+ static DictionaryEncodeOptions Defaults() { return DictionaryEncodeOptions(); }
78
+
79
+ NullEncodingBehavior null_encoding_behavior = MASK;
80
+ };
81
+
82
+ /// \brief Options for the run-end encode function
83
+ class ARROW_EXPORT RunEndEncodeOptions : public FunctionOptions {
84
+ public:
85
+ explicit RunEndEncodeOptions(std::shared_ptr<DataType> run_end_type = int32());
86
+ static constexpr char const kTypeName[] = "RunEndEncodeOptions";
87
+ static RunEndEncodeOptions Defaults() { return RunEndEncodeOptions(); }
88
+
89
+ std::shared_ptr<DataType> run_end_type;
90
+ };
91
+
92
+ class ARROW_EXPORT ArraySortOptions : public FunctionOptions {
93
+ public:
94
+ explicit ArraySortOptions(SortOrder order = SortOrder::Ascending,
95
+ NullPlacement null_placement = NullPlacement::AtEnd);
96
+ static constexpr char const kTypeName[] = "ArraySortOptions";
97
+ static ArraySortOptions Defaults() { return ArraySortOptions(); }
98
+
99
+ /// Sorting order
100
+ SortOrder order;
101
+ /// Whether nulls and NaNs are placed at the start or at the end
102
+ NullPlacement null_placement;
103
+ };
104
+
105
+ class ARROW_EXPORT SortOptions : public FunctionOptions {
106
+ public:
107
+ explicit SortOptions(std::vector<SortKey> sort_keys = {},
108
+ NullPlacement null_placement = NullPlacement::AtEnd);
109
+ explicit SortOptions(const Ordering& ordering);
110
+ static constexpr char const kTypeName[] = "SortOptions";
111
+ static SortOptions Defaults() { return SortOptions(); }
112
+ /// Convenience constructor to create an ordering from SortOptions
113
+ ///
114
+ /// Note: Both classes contain the exact same information. However,
115
+ /// sort_options should only be used in a "function options" context while Ordering
116
+ /// is used more generally.
117
+ Ordering AsOrdering() && { return Ordering(std::move(sort_keys), null_placement); }
118
+ Ordering AsOrdering() const& { return Ordering(sort_keys, null_placement); }
119
+
120
+ /// Column key(s) to order by and how to order by these sort keys.
121
+ std::vector<SortKey> sort_keys;
122
+ /// Whether nulls and NaNs are placed at the start or at the end
123
+ NullPlacement null_placement;
124
+ };
125
+
126
+ /// \brief SelectK options
127
+ class ARROW_EXPORT SelectKOptions : public FunctionOptions {
128
+ public:
129
+ explicit SelectKOptions(int64_t k = -1, std::vector<SortKey> sort_keys = {});
130
+ static constexpr char const kTypeName[] = "SelectKOptions";
131
+ static SelectKOptions Defaults() { return SelectKOptions(); }
132
+
133
+ static SelectKOptions TopKDefault(int64_t k, std::vector<std::string> key_names = {}) {
134
+ std::vector<SortKey> keys;
135
+ for (const auto& name : key_names) {
136
+ keys.emplace_back(SortKey(name, SortOrder::Descending));
137
+ }
138
+ if (key_names.empty()) {
139
+ keys.emplace_back(SortKey("not-used", SortOrder::Descending));
140
+ }
141
+ return SelectKOptions{k, keys};
142
+ }
143
+ static SelectKOptions BottomKDefault(int64_t k,
144
+ std::vector<std::string> key_names = {}) {
145
+ std::vector<SortKey> keys;
146
+ for (const auto& name : key_names) {
147
+ keys.emplace_back(SortKey(name, SortOrder::Ascending));
148
+ }
149
+ if (key_names.empty()) {
150
+ keys.emplace_back(SortKey("not-used", SortOrder::Ascending));
151
+ }
152
+ return SelectKOptions{k, keys};
153
+ }
154
+
155
+ /// The number of `k` elements to keep.
156
+ int64_t k;
157
+ /// Column key(s) to order by and how to order by these sort keys.
158
+ std::vector<SortKey> sort_keys;
159
+ };
160
+
161
+ /// \brief Rank options
162
+ class ARROW_EXPORT RankOptions : public FunctionOptions {
163
+ public:
164
+ /// Configure how ties between equal values are handled
165
+ enum Tiebreaker {
166
+ /// Ties get the smallest possible rank in sorted order.
167
+ Min,
168
+ /// Ties get the largest possible rank in sorted order.
169
+ Max,
170
+ /// Ranks are assigned in order of when ties appear in the input.
171
+ /// This ensures the ranks are a stable permutation of the input.
172
+ First,
173
+ /// The ranks span a dense [1, M] interval where M is the number
174
+ /// of distinct values in the input.
175
+ Dense
176
+ };
177
+
178
+ explicit RankOptions(std::vector<SortKey> sort_keys = {},
179
+ NullPlacement null_placement = NullPlacement::AtEnd,
180
+ Tiebreaker tiebreaker = RankOptions::First);
181
+ /// Convenience constructor for array inputs
182
+ explicit RankOptions(SortOrder order,
183
+ NullPlacement null_placement = NullPlacement::AtEnd,
184
+ Tiebreaker tiebreaker = RankOptions::First)
185
+ : RankOptions({SortKey("", order)}, null_placement, tiebreaker) {}
186
+
187
+ static constexpr char const kTypeName[] = "RankOptions";
188
+ static RankOptions Defaults() { return RankOptions(); }
189
+
190
+ /// Column key(s) to order by and how to order by these sort keys.
191
+ std::vector<SortKey> sort_keys;
192
+ /// Whether nulls and NaNs are placed at the start or at the end
193
+ NullPlacement null_placement;
194
+ /// Tiebreaker for dealing with equal values in ranks
195
+ Tiebreaker tiebreaker;
196
+ };
197
+
198
+ /// \brief Partitioning options for NthToIndices
199
+ class ARROW_EXPORT PartitionNthOptions : public FunctionOptions {
200
+ public:
201
+ explicit PartitionNthOptions(int64_t pivot,
202
+ NullPlacement null_placement = NullPlacement::AtEnd);
203
+ PartitionNthOptions() : PartitionNthOptions(0) {}
204
+ static constexpr char const kTypeName[] = "PartitionNthOptions";
205
+
206
+ /// The index into the equivalent sorted array of the partition pivot element.
207
+ int64_t pivot;
208
+ /// Whether nulls and NaNs are partitioned at the start or at the end
209
+ NullPlacement null_placement;
210
+ };
211
+
212
+ /// \brief Options for cumulative functions
213
+ /// \note Also aliased as CumulativeSumOptions for backward compatibility
214
+ class ARROW_EXPORT CumulativeOptions : public FunctionOptions {
215
+ public:
216
+ explicit CumulativeOptions(bool skip_nulls = false);
217
+ explicit CumulativeOptions(double start, bool skip_nulls = false);
218
+ explicit CumulativeOptions(std::shared_ptr<Scalar> start, bool skip_nulls = false);
219
+ static constexpr char const kTypeName[] = "CumulativeOptions";
220
+ static CumulativeOptions Defaults() { return CumulativeOptions(); }
221
+
222
+ /// Optional starting value for cumulative operation computation, default depends on the
223
+ /// operation and input type.
224
+ /// - sum: 0
225
+ /// - prod: 1
226
+ /// - min: maximum of the input type
227
+ /// - max: minimum of the input type
228
+ /// - mean: start is ignored because it has no meaning for mean
229
+ std::optional<std::shared_ptr<Scalar>> start;
230
+
231
+ /// If true, nulls in the input are ignored and produce a corresponding null output.
232
+ /// When false, the first null encountered is propagated through the remaining output.
233
+ bool skip_nulls = false;
234
+ };
235
+ using CumulativeSumOptions = CumulativeOptions; // For backward compatibility
236
+
237
+ /// \brief Options for pairwise functions
238
+ class ARROW_EXPORT PairwiseOptions : public FunctionOptions {
239
+ public:
240
+ explicit PairwiseOptions(int64_t periods = 1);
241
+ static constexpr char const kTypeName[] = "PairwiseOptions";
242
+ static PairwiseOptions Defaults() { return PairwiseOptions(); }
243
+
244
+ /// Periods to shift for applying the binary operation, accepts negative values.
245
+ int64_t periods = 1;
246
+ };
247
+
248
+ /// \brief Options for list_flatten function
249
+ class ARROW_EXPORT ListFlattenOptions : public FunctionOptions {
250
+ public:
251
+ explicit ListFlattenOptions(bool recursive = false);
252
+ static constexpr char const kTypeName[] = "ListFlattenOptions";
253
+ static ListFlattenOptions Defaults() { return ListFlattenOptions(); }
254
+
255
+ /// \brief If true, the list is flattened recursively until a non-list
256
+ /// array is formed.
257
+ bool recursive = false;
258
+ };
259
+
260
+ /// @}
261
+
262
+ /// \brief Filter with a boolean selection filter
263
+ ///
264
+ /// The output will be populated with values from the input at positions
265
+ /// where the selection filter is not 0. Nulls in the filter will be handled
266
+ /// based on options.null_selection_behavior.
267
+ ///
268
+ /// For example given values = ["a", "b", "c", null, "e", "f"] and
269
+ /// filter = [0, 1, 1, 0, null, 1], the output will be
270
+ /// (null_selection_behavior == DROP) = ["b", "c", "f"]
271
+ /// (null_selection_behavior == EMIT_NULL) = ["b", "c", null, "f"]
272
+ ///
273
+ /// \param[in] values array to filter
274
+ /// \param[in] filter indicates which values should be filtered out
275
+ /// \param[in] options configures null_selection_behavior
276
+ /// \param[in] ctx the function execution context, optional
277
+ /// \return the resulting datum
278
+ ARROW_EXPORT
279
+ Result<Datum> Filter(const Datum& values, const Datum& filter,
280
+ const FilterOptions& options = FilterOptions::Defaults(),
281
+ ExecContext* ctx = NULLPTR);
282
+
283
+ namespace internal {
284
+
285
+ // These internal functions are implemented in kernels/vector_selection.cc
286
+
287
+ /// \brief Return the number of selected indices in the boolean filter
288
+ ///
289
+ /// \param filter a plain or run-end encoded boolean array with or without nulls
290
+ /// \param null_selection how to handle nulls in the filter
291
+ ARROW_EXPORT
292
+ int64_t GetFilterOutputSize(const ArraySpan& filter,
293
+ FilterOptions::NullSelectionBehavior null_selection);
294
+
295
+ /// \brief Compute uint64 selection indices for use with Take given a boolean
296
+ /// filter
297
+ ///
298
+ /// \param filter a plain or run-end encoded boolean array with or without nulls
299
+ /// \param null_selection how to handle nulls in the filter
300
+ ARROW_EXPORT
301
+ Result<std::shared_ptr<ArrayData>> GetTakeIndices(
302
+ const ArraySpan& filter, FilterOptions::NullSelectionBehavior null_selection,
303
+ MemoryPool* memory_pool = default_memory_pool());
304
+
305
+ } // namespace internal
306
+
307
+ /// \brief ReplaceWithMask replaces each value in the array corresponding
308
+ /// to a true value in the mask with the next element from `replacements`.
309
+ ///
310
+ /// \param[in] values Array input to replace
311
+ /// \param[in] mask Array or Scalar of Boolean mask values
312
+ /// \param[in] replacements The replacement values to draw from. There must
313
+ /// be as many replacement values as true values in the mask.
314
+ /// \param[in] ctx the function execution context, optional
315
+ ///
316
+ /// \return the resulting datum
317
+ ///
318
+ /// \since 5.0.0
319
+ /// \note API not yet finalized
320
+ ARROW_EXPORT
321
+ Result<Datum> ReplaceWithMask(const Datum& values, const Datum& mask,
322
+ const Datum& replacements, ExecContext* ctx = NULLPTR);
323
+
324
+ /// \brief FillNullForward fill null values in forward direction
325
+ ///
326
+ /// The output array will be of the same type as the input values
327
+ /// array, with replaced null values in forward direction.
328
+ ///
329
+ /// For example given values = ["a", "b", "c", null, null, "f"],
330
+ /// the output will be = ["a", "b", "c", "c", "c", "f"]
331
+ ///
332
+ /// \param[in] values datum from which to take
333
+ /// \param[in] ctx the function execution context, optional
334
+ /// \return the resulting datum
335
+ ARROW_EXPORT
336
+ Result<Datum> FillNullForward(const Datum& values, ExecContext* ctx = NULLPTR);
337
+
338
+ /// \brief FillNullBackward fill null values in backward direction
339
+ ///
340
+ /// The output array will be of the same type as the input values
341
+ /// array, with replaced null values in backward direction.
342
+ ///
343
+ /// For example given values = ["a", "b", "c", null, null, "f"],
344
+ /// the output will be = ["a", "b", "c", "f", "f", "f"]
345
+ ///
346
+ /// \param[in] values datum from which to take
347
+ /// \param[in] ctx the function execution context, optional
348
+ /// \return the resulting datum
349
+ ARROW_EXPORT
350
+ Result<Datum> FillNullBackward(const Datum& values, ExecContext* ctx = NULLPTR);
351
+
352
+ /// \brief Take from an array of values at indices in another array
353
+ ///
354
+ /// The output array will be of the same type as the input values
355
+ /// array, with elements taken from the values array at the given
356
+ /// indices. If an index is null then the taken element will be null.
357
+ ///
358
+ /// For example given values = ["a", "b", "c", null, "e", "f"] and
359
+ /// indices = [2, 1, null, 3], the output will be
360
+ /// = [values[2], values[1], null, values[3]]
361
+ /// = ["c", "b", null, null]
362
+ ///
363
+ /// \param[in] values datum from which to take
364
+ /// \param[in] indices which values to take
365
+ /// \param[in] options options
366
+ /// \param[in] ctx the function execution context, optional
367
+ /// \return the resulting datum
368
+ ARROW_EXPORT
369
+ Result<Datum> Take(const Datum& values, const Datum& indices,
370
+ const TakeOptions& options = TakeOptions::Defaults(),
371
+ ExecContext* ctx = NULLPTR);
372
+
373
+ /// \brief Take with Array inputs and output
374
+ ARROW_EXPORT
375
+ Result<std::shared_ptr<Array>> Take(const Array& values, const Array& indices,
376
+ const TakeOptions& options = TakeOptions::Defaults(),
377
+ ExecContext* ctx = NULLPTR);
378
+
379
+ /// \brief Drop Null from an array of values
380
+ ///
381
+ /// The output array will be of the same type as the input values
382
+ /// array, with elements taken from the values array without nulls.
383
+ ///
384
+ /// For example given values = ["a", "b", "c", null, "e", "f"],
385
+ /// the output will be = ["a", "b", "c", "e", "f"]
386
+ ///
387
+ /// \param[in] values datum from which to take
388
+ /// \param[in] ctx the function execution context, optional
389
+ /// \return the resulting datum
390
+ ARROW_EXPORT
391
+ Result<Datum> DropNull(const Datum& values, ExecContext* ctx = NULLPTR);
392
+
393
+ /// \brief DropNull with Array inputs and output
394
+ ARROW_EXPORT
395
+ Result<std::shared_ptr<Array>> DropNull(const Array& values, ExecContext* ctx = NULLPTR);
396
+
397
+ /// \brief Return indices that partition an array around n-th sorted element.
398
+ ///
399
+ /// Find index of n-th(0 based) smallest value and perform indirect
400
+ /// partition of an array around that element. Output indices[0 ~ n-1]
401
+ /// holds values no greater than n-th element, and indices[n+1 ~ end]
402
+ /// holds values no less than n-th element. Elements in each partition
403
+ /// is not sorted. Nulls will be partitioned to the end of the output.
404
+ /// Output is not guaranteed to be stable.
405
+ ///
406
+ /// \param[in] values array to be partitioned
407
+ /// \param[in] n pivot array around sorted n-th element
408
+ /// \param[in] ctx the function execution context, optional
409
+ /// \return offsets indices that would partition an array
410
+ ARROW_EXPORT
411
+ Result<std::shared_ptr<Array>> NthToIndices(const Array& values, int64_t n,
412
+ ExecContext* ctx = NULLPTR);
413
+
414
+ /// \brief Return indices that partition an array around n-th sorted element.
415
+ ///
416
+ /// This overload takes a PartitionNthOptions specifying the pivot index
417
+ /// and the null handling.
418
+ ///
419
+ /// \param[in] values array to be partitioned
420
+ /// \param[in] options options including pivot index and null handling
421
+ /// \param[in] ctx the function execution context, optional
422
+ /// \return offsets indices that would partition an array
423
+ ARROW_EXPORT
424
+ Result<std::shared_ptr<Array>> NthToIndices(const Array& values,
425
+ const PartitionNthOptions& options,
426
+ ExecContext* ctx = NULLPTR);
427
+
428
+ /// \brief Return indices that would select the first `k` elements.
429
+ ///
430
+ /// Perform an indirect sort of the datum, keeping only the first `k` elements. The output
431
+ /// array will contain indices such that the item indicated by the k-th index will be in
432
+ /// the position it would be if the datum were sorted by `options.sort_keys`. However,
433
+ /// indices of null values will not be part of the output. The sort is not guaranteed to
434
+ /// be stable.
435
+ ///
436
+ /// \param[in] datum datum to be partitioned
437
+ /// \param[in] options options
438
+ /// \param[in] ctx the function execution context, optional
439
+ /// \return a datum with the same schema as the input
440
+ ARROW_EXPORT
441
+ Result<std::shared_ptr<Array>> SelectKUnstable(const Datum& datum,
442
+ const SelectKOptions& options,
443
+ ExecContext* ctx = NULLPTR);
444
+
445
+ /// \brief Return the indices that would sort an array.
446
+ ///
447
+ /// Perform an indirect sort of array. The output array will contain
448
+ /// indices that would sort an array, which would be the same length
449
+ /// as input. Nulls will be stably partitioned to the end of the output
450
+ /// regardless of order.
451
+ ///
452
+ /// For example given array = [null, 1, 3.3, null, 2, 5.3] and order
453
+ /// = SortOrder::DESCENDING, the output will be [5, 2, 4, 1, 0,
454
+ /// 3].
455
+ ///
456
+ /// \param[in] array array to sort
457
+ /// \param[in] order ascending or descending
458
+ /// \param[in] ctx the function execution context, optional
459
+ /// \return offsets indices that would sort an array
460
+ ARROW_EXPORT
461
+ Result<std::shared_ptr<Array>> SortIndices(const Array& array,
462
+ SortOrder order = SortOrder::Ascending,
463
+ ExecContext* ctx = NULLPTR);
464
+
465
+ /// \brief Return the indices that would sort an array.
466
+ ///
467
+ /// This overload takes a ArraySortOptions specifying the sort order
468
+ /// and the null handling.
469
+ ///
470
+ /// \param[in] array array to sort
471
+ /// \param[in] options options including sort order and null handling
472
+ /// \param[in] ctx the function execution context, optional
473
+ /// \return offsets indices that would sort an array
474
+ ARROW_EXPORT
475
+ Result<std::shared_ptr<Array>> SortIndices(const Array& array,
476
+ const ArraySortOptions& options,
477
+ ExecContext* ctx = NULLPTR);
478
+
479
+ /// \brief Return the indices that would sort a chunked array.
480
+ ///
481
+ /// Perform an indirect sort of chunked array. The output array will
482
+ /// contain indices that would sort a chunked array, which would be
483
+ /// the same length as input. Nulls will be stably partitioned to the
484
+ /// end of the output regardless of order.
485
+ ///
486
+ /// For example given chunked_array = [[null, 1], [3.3], [null, 2,
487
+ /// 5.3]] and order = SortOrder::DESCENDING, the output will be [5, 2,
488
+ /// 4, 1, 0, 3].
489
+ ///
490
+ /// \param[in] chunked_array chunked array to sort
491
+ /// \param[in] order ascending or descending
492
+ /// \param[in] ctx the function execution context, optional
493
+ /// \return offsets indices that would sort an array
494
+ ARROW_EXPORT
495
+ Result<std::shared_ptr<Array>> SortIndices(const ChunkedArray& chunked_array,
496
+ SortOrder order = SortOrder::Ascending,
497
+ ExecContext* ctx = NULLPTR);
498
+
499
+ /// \brief Return the indices that would sort a chunked array.
500
+ ///
501
+ /// This overload takes a ArraySortOptions specifying the sort order
502
+ /// and the null handling.
503
+ ///
504
+ /// \param[in] chunked_array chunked array to sort
505
+ /// \param[in] options options including sort order and null handling
506
+ /// \param[in] ctx the function execution context, optional
507
+ /// \return offsets indices that would sort an array
508
+ ARROW_EXPORT
509
+ Result<std::shared_ptr<Array>> SortIndices(const ChunkedArray& chunked_array,
510
+ const ArraySortOptions& options,
511
+ ExecContext* ctx = NULLPTR);
512
+
513
+ /// \brief Return the indices that would sort an input in the
514
+ /// specified order. Input is one of array, chunked array record batch
515
+ /// or table.
516
+ ///
517
+ /// Perform an indirect sort of input. The output array will contain
518
+ /// indices that would sort an input, which would be the same length
519
+ /// as input. Nulls will be stably partitioned to the start or to the end
520
+ /// of the output depending on SortOrder::null_placement.
521
+ ///
522
+ /// For example given input (table) = {
523
+ /// "column1": [[null, 1], [ 3, null, 2, 1]],
524
+ /// "column2": [[ 5], [3, null, null, 5, 5]],
525
+ /// } and options = {
526
+ /// {"column1", SortOrder::Ascending},
527
+ /// {"column2", SortOrder::Descending},
528
+ /// }, the output will be [5, 1, 4, 2, 0, 3].
529
+ ///
530
+ /// \param[in] datum array, chunked array, record batch or table to sort
531
+ /// \param[in] options options
532
+ /// \param[in] ctx the function execution context, optional
533
+ /// \return offsets indices that would sort a table
534
+ ARROW_EXPORT
535
+ Result<std::shared_ptr<Array>> SortIndices(const Datum& datum, const SortOptions& options,
536
+ ExecContext* ctx = NULLPTR);
537
+
538
+ /// \brief Compute unique elements from an array-like object
539
+ ///
540
+ /// Note if a null occurs in the input it will NOT be included in the output.
541
+ ///
542
+ /// \param[in] datum array-like input
543
+ /// \param[in] ctx the function execution context, optional
544
+ /// \return result as Array
545
+ ///
546
+ /// \since 1.0.0
547
+ /// \note API not yet finalized
548
+ ARROW_EXPORT
549
+ Result<std::shared_ptr<Array>> Unique(const Datum& datum, ExecContext* ctx = NULLPTR);
550
+
551
+ // Constants for accessing the output of ValueCounts
552
+ ARROW_EXPORT extern const char kValuesFieldName[];
553
+ ARROW_EXPORT extern const char kCountsFieldName[];
554
+ ARROW_EXPORT extern const int32_t kValuesFieldIndex;
555
+ ARROW_EXPORT extern const int32_t kCountsFieldIndex;
556
+
557
+ /// \brief Return counts of unique elements from an array-like object.
558
+ ///
559
+ /// Note that the counts do not include counts for nulls in the array. These can be
560
+ /// obtained separately from metadata.
561
+ ///
562
+ /// For floating point arrays there is no attempt to normalize -0.0, 0.0 and NaN values
563
+ /// which can lead to unexpected results if the input Array has these values.
564
+ ///
565
+ /// \param[in] value array-like input
566
+ /// \param[in] ctx the function execution context, optional
567
+ /// \return counts An array of <input type "Values", int64_t "Counts"> structs.
568
+ ///
569
+ /// \since 1.0.0
570
+ /// \note API not yet finalized
571
+ ARROW_EXPORT
572
+ Result<std::shared_ptr<StructArray>> ValueCounts(const Datum& value,
573
+ ExecContext* ctx = NULLPTR);
574
+
575
+ /// \brief Dictionary-encode values in an array-like object
576
+ ///
577
+ /// Any nulls encountered in the dictionary will be handled according to the
578
+ /// specified null encoding behavior.
579
+ ///
580
+ /// For example, given values ["a", "b", null, "a", null] the output will be
581
+ /// (null_encoding == ENCODE) Indices: [0, 1, 2, 0, 2] / Dict: ["a", "b", null]
582
+ /// (null_encoding == MASK) Indices: [0, 1, null, 0, null] / Dict: ["a", "b"]
583
+ ///
584
+ /// If the input is already dictionary encoded this function is a no-op unless
585
+ /// it needs to modify the null_encoding (TODO)
586
+ ///
587
+ /// \param[in] data array-like input
588
+ /// \param[in] ctx the function execution context, optional
589
+ /// \param[in] options configures null encoding behavior
590
+ /// \return result with same shape and type as input
591
+ ///
592
+ /// \since 1.0.0
593
+ /// \note API not yet finalized
594
+ ARROW_EXPORT
595
+ Result<Datum> DictionaryEncode(
596
+ const Datum& data,
597
+ const DictionaryEncodeOptions& options = DictionaryEncodeOptions::Defaults(),
598
+ ExecContext* ctx = NULLPTR);
599
+
600
+ /// \brief Run-end-encode values in an array-like object
601
+ ///
602
+ /// The returned run-end encoded type uses the same value type of the input and
603
+ /// run-end type defined in the options.
604
+ ///
605
+ /// \param[in] value array-like input
606
+ /// \param[in] options configures encoding behavior
607
+ /// \param[in] ctx the function execution context, optional
608
+ /// \return result with same shape but run-end encoded
609
+ ///
610
+ /// \since 12.0.0
611
+ /// \note API not yet finalized
612
+ ARROW_EXPORT
613
+ Result<Datum> RunEndEncode(
614
+ const Datum& value,
615
+ const RunEndEncodeOptions& options = RunEndEncodeOptions::Defaults(),
616
+ ExecContext* ctx = NULLPTR);
617
+
618
+ /// \brief Decode a Run-End Encoded array to a plain array
619
+ ///
620
+ /// The output data type is the same as the values array type of run-end encoded
621
+ /// input.
622
+ ///
623
+ /// \param[in] value run-end-encoded input
624
+ /// \param[in] ctx the function execution context, optional
625
+ /// \return plain array resulting from decoding the run-end encoded input
626
+ ///
627
+ /// \since 12.0.0
628
+ /// \note API not yet finalized
629
+ ARROW_EXPORT
630
+ Result<Datum> RunEndDecode(const Datum& value, ExecContext* ctx = NULLPTR);
631
+
632
+ /// \brief Compute the cumulative sum of an array-like object
633
+ ///
634
+ /// \param[in] values array-like input
635
+ /// \param[in] options configures cumulative sum behavior
636
+ /// \param[in] check_overflow whether to check for overflow, if true, return Invalid
637
+ /// status on overflow, otherwise wrap around on overflow
638
+ /// \param[in] ctx the function execution context, optional
639
+ ARROW_EXPORT
640
+ Result<Datum> CumulativeSum(
641
+ const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
642
+ bool check_overflow = false, ExecContext* ctx = NULLPTR);
643
+
644
+ /// \brief Compute the cumulative product of an array-like object
645
+ ///
646
+ /// \param[in] values array-like input
647
+ /// \param[in] options configures cumulative prod behavior
648
+ /// \param[in] check_overflow whether to check for overflow, if true, return Invalid
649
+ /// status on overflow, otherwise wrap around on overflow
650
+ /// \param[in] ctx the function execution context, optional
651
+ ARROW_EXPORT
652
+ Result<Datum> CumulativeProd(
653
+ const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
654
+ bool check_overflow = false, ExecContext* ctx = NULLPTR);
655
+
656
+ /// \brief Compute the cumulative max of an array-like object
657
+ ///
658
+ /// \param[in] values array-like input
659
+ /// \param[in] options configures cumulative max behavior
660
+ /// \param[in] ctx the function execution context, optional
661
+ ARROW_EXPORT
662
+ Result<Datum> CumulativeMax(
663
+ const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
664
+ ExecContext* ctx = NULLPTR);
665
+
666
+ /// \brief Compute the cumulative min of an array-like object
667
+ ///
668
+ /// \param[in] values array-like input
669
+ /// \param[in] options configures cumulative min behavior
670
+ /// \param[in] ctx the function execution context, optional
671
+ ARROW_EXPORT
672
+ Result<Datum> CumulativeMin(
673
+ const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
674
+ ExecContext* ctx = NULLPTR);
675
+
676
+ /// \brief Compute the cumulative mean of an array-like object
677
+ ///
678
+ /// \param[in] values array-like input
679
+ /// \param[in] options configures cumulative mean behavior, `start` is ignored
680
+ /// \param[in] ctx the function execution context, optional
681
+ ARROW_EXPORT
682
+ Result<Datum> CumulativeMean(
683
+ const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
684
+ ExecContext* ctx = NULLPTR);
685
+
686
+ /// \brief Return the first order difference of an array.
687
+ ///
688
+ /// Computes the first order difference of an array, i.e.
689
+ /// output[i] = input[i] - input[i - p] if i >= p
690
+ /// output[i] = null otherwise
691
+ /// where p is the period. For example, with p = 1,
692
+ /// Diff([1, 4, 9, 10, 15]) = [null, 3, 5, 1, 5].
693
+ /// With p = 2,
694
+ /// Diff([1, 4, 9, 10, 15]) = [null, null, 8, 6, 6]
695
+ /// p can also be negative, in which case the diff is computed in
696
+ /// the opposite direction.
697
+ /// \param[in] array array input
698
+ /// \param[in] options options, specifying overflow behavior and period
699
+ /// \param[in] check_overflow whether to return error on overflow
700
+ /// \param[in] ctx the function execution context, optional
701
+ /// \return result as array
702
+ ARROW_EXPORT
703
+ Result<std::shared_ptr<Array>> PairwiseDiff(const Array& array,
704
+ const PairwiseOptions& options,
705
+ bool check_overflow = false,
706
+ ExecContext* ctx = NULLPTR);
707
+
708
+ } // namespace compute
709
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h ADDED
@@ -0,0 +1,489 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // NOTE: API is EXPERIMENTAL and will change without going through a
19
+ // deprecation cycle
20
+
21
+ #pragma once
22
+
23
+ #include <atomic>
24
+ #include <cstdint>
25
+ #include <limits>
26
+ #include <memory>
27
+ #include <optional>
28
+ #include <string>
29
+ #include <utility>
30
+ #include <vector>
31
+
32
+ #include "arrow/array/data.h"
33
+ #include "arrow/compute/expression.h"
34
+ #include "arrow/compute/type_fwd.h"
35
+ #include "arrow/datum.h"
36
+ #include "arrow/result.h"
37
+ #include "arrow/type_fwd.h"
38
+ #include "arrow/util/macros.h"
39
+ #include "arrow/util/type_fwd.h"
40
+ #include "arrow/util/visibility.h"
41
+
42
+ namespace arrow {
43
+ namespace compute {
44
+
45
+ // It seems like 64K might be a good default chunksize to use for execution
46
+ // based on the experience of other query processing systems. The current
47
+ // default is not to chunk contiguous arrays, though, but this may change in
48
+ // the future once parallel execution is implemented
49
+ static constexpr int64_t kDefaultExecChunksize = UINT16_MAX;
50
+
51
+ /// \brief Context for expression-global variables and options used by
52
+ /// function evaluation
53
+ class ARROW_EXPORT ExecContext {
54
+ public:
55
+ // If no function registry passed, the default is used.
56
+ explicit ExecContext(MemoryPool* pool = default_memory_pool(),
57
+ ::arrow::internal::Executor* executor = NULLPTR,
58
+ FunctionRegistry* func_registry = NULLPTR);
59
+
60
+ /// \brief The MemoryPool used for allocations, default is
61
+ /// default_memory_pool().
62
+ MemoryPool* memory_pool() const { return pool_; }
63
+
64
+ const ::arrow::internal::CpuInfo* cpu_info() const;
65
+
66
+ /// \brief An Executor which may be used to parallelize execution.
67
+ ::arrow::internal::Executor* executor() const { return executor_; }
68
+
69
+ /// \brief The FunctionRegistry for looking up functions by name and
70
+ /// selecting kernels for execution. Defaults to the library-global function
71
+ /// registry provided by GetFunctionRegistry.
72
+ FunctionRegistry* func_registry() const { return func_registry_; }
73
+
74
+ // \brief Set maximum length unit of work for kernel execution. Larger
75
+ // contiguous array inputs will be split into smaller chunks, and, if
76
+ // possible and enabled, processed in parallel. The default chunksize is
77
+ // INT64_MAX, so contiguous arrays are not split.
78
+ void set_exec_chunksize(int64_t chunksize) { exec_chunksize_ = chunksize; }
79
+
80
+ // \brief Maximum length for ExecBatch data chunks processed by
81
+ // kernels. Contiguous array inputs with longer length will be split into
82
+ // smaller chunks.
83
+ int64_t exec_chunksize() const { return exec_chunksize_; }
84
+
85
+ /// \brief Set whether to use multiple threads for function execution. This
86
+ /// is not yet used.
87
+ void set_use_threads(bool use_threads = true) { use_threads_ = use_threads; }
88
+
89
+ /// \brief If true, then utilize multiple threads where relevant for function
90
+ /// execution. This is not yet used.
91
+ bool use_threads() const { return use_threads_; }
92
+
93
+ // Set the preallocation strategy for kernel execution as it relates to
94
+ // chunked execution. For chunked execution, whether via ChunkedArray inputs
95
+ // or splitting larger Array arguments into smaller pieces, contiguous
96
+ // allocation (if permitted by the kernel) will allocate one large array to
97
+ // write output into yielding it to the caller at the end. If this option is
98
+ // set to off, then preallocations will be performed independently for each
99
+ // chunk of execution
100
+ //
101
+ // TODO: At some point we might want the limit the size of contiguous
102
+ // preallocations. For example, even if the exec_chunksize is 64K or less, we
103
+ // might limit contiguous allocations to 1M records, say.
104
+ void set_preallocate_contiguous(bool preallocate) {
105
+ preallocate_contiguous_ = preallocate;
106
+ }
107
+
108
+ /// \brief If contiguous preallocations should be used when doing chunked
109
+ /// execution as specified by exec_chunksize(). See
110
+ /// set_preallocate_contiguous() for more information.
111
+ bool preallocate_contiguous() const { return preallocate_contiguous_; }
112
+
113
+ private:
114
+ MemoryPool* pool_;
115
+ ::arrow::internal::Executor* executor_;
116
+ FunctionRegistry* func_registry_;
117
+ int64_t exec_chunksize_ = std::numeric_limits<int64_t>::max();
118
+ bool preallocate_contiguous_ = true;
119
+ bool use_threads_ = true;
120
+ };
121
+
122
+ // TODO: Consider standardizing on uint16 selection vectors and only use them
123
+ // when we can ensure that each value is 64K length or smaller
124
+
125
+ /// \brief Container for an array of value selection indices that were
126
+ /// materialized from a filter.
127
+ ///
128
+ /// Columnar query engines (see e.g. [1]) have found that rather than
129
+ /// materializing filtered data, the filter can instead be converted to an
130
+ /// array of the "on" indices and then "fusing" these indices in operator
131
+ /// implementations. This is especially relevant for aggregations but also
132
+ /// applies to scalar operations.
133
+ ///
134
+ /// We are not yet using this so this is mostly a placeholder for now.
135
+ ///
136
+ /// [1]: http://cidrdb.org/cidr2005/papers/P19.pdf
137
+ class ARROW_EXPORT SelectionVector {
138
+ public:
139
+ explicit SelectionVector(std::shared_ptr<ArrayData> data);
140
+
141
+ explicit SelectionVector(const Array& arr);
142
+
143
+ /// \brief Create SelectionVector from boolean mask
144
+ static Result<std::shared_ptr<SelectionVector>> FromMask(const BooleanArray& arr);
145
+
146
+ const int32_t* indices() const { return indices_; }
147
+ int32_t length() const;
148
+
149
+ private:
150
+ std::shared_ptr<ArrayData> data_;
151
+ const int32_t* indices_;
152
+ };
153
+
154
+ /// An index to represent that a batch does not belong to an ordered stream
155
+ constexpr int64_t kUnsequencedIndex = -1;
156
+
157
+ /// \brief A unit of work for kernel execution. It contains a collection of
158
+ /// Array and Scalar values and an optional SelectionVector indicating that
159
+ /// there is an unmaterialized filter that either must be materialized, or (if
160
+ /// the kernel supports it) pushed down into the kernel implementation.
161
+ ///
162
+ /// ExecBatch is semantically similar to RecordBatch in that in a SQL context
163
+ /// it represents a collection of records, but constant "columns" are
164
+ /// represented by Scalar values rather than having to be converted into arrays
165
+ /// with repeated values.
166
+ ///
167
+ /// TODO: Datum uses arrow/util/variant.h which may be a bit heavier-weight
168
+ /// than is desirable for this class. Microbenchmarks would help determine for
169
+ /// sure. See ARROW-8928.
170
+
171
+ /// \addtogroup acero-internals
172
+ /// @{
173
+
174
+ struct ARROW_EXPORT ExecBatch {
175
+ ExecBatch() = default;
176
+ ExecBatch(std::vector<Datum> values, int64_t length)
177
+ : values(std::move(values)), length(length) {}
178
+
179
+ explicit ExecBatch(const RecordBatch& batch);
180
+
181
+ /// \brief Infer the ExecBatch length from values.
182
+ static Result<int64_t> InferLength(const std::vector<Datum>& values);
183
+
184
+ /// Creates an ExecBatch with length-validation.
185
+ ///
186
+ /// If any value is given, then all values must have a common length. If the given
187
+ /// length is negative, then the length of the ExecBatch is set to this common length,
188
+ /// or to 1 if no values are given. Otherwise, the given length must equal the common
189
+ /// length, if any value is given.
190
+ static Result<ExecBatch> Make(std::vector<Datum> values, int64_t length = -1);
191
+
192
+ Result<std::shared_ptr<RecordBatch>> ToRecordBatch(
193
+ std::shared_ptr<Schema> schema, MemoryPool* pool = default_memory_pool()) const;
194
+
195
+ /// The values representing positional arguments to be passed to a kernel's
196
+ /// exec function for processing.
197
+ std::vector<Datum> values;
198
+
199
+ /// A deferred filter represented as an array of indices into the values.
200
+ ///
201
+ /// For example, the filter [true, true, false, true] would be represented as
202
+ /// the selection vector [0, 1, 3]. When the selection vector is set,
203
+ /// ExecBatch::length is equal to the length of this array.
204
+ std::shared_ptr<SelectionVector> selection_vector;
205
+
206
+ /// A predicate Expression guaranteed to evaluate to true for all rows in this batch.
207
+ Expression guarantee = literal(true);
208
+
209
+ /// The semantic length of the ExecBatch. When the values are all scalars,
210
+ /// the length should be set to 1 for non-aggregate kernels, otherwise the
211
+ /// length is taken from the array values, except when there is a selection
212
+ /// vector. When there is a selection vector set, the length of the batch is
213
+ /// the length of the selection. Aggregate kernels can have an ExecBatch
214
+ /// formed by projecting just the partition columns from a batch in which
215
+ /// case, it would have scalar rows with length greater than 1.
216
+ ///
217
+ /// If the array values are of length 0 then the length is 0 regardless of
218
+ /// whether any values are Scalar.
219
+ int64_t length = 0;
220
+
221
+ /// \brief index of this batch in a sorted stream of batches
222
+ ///
223
+ /// This index must be strictly monotonic starting at 0 without gaps or
224
+ /// it can be set to kUnsequencedIndex if there is no meaningful order
225
+ int64_t index = kUnsequencedIndex;
226
+
227
+ /// \brief The sum of bytes in each buffer referenced by the batch
228
+ ///
229
+ /// Note: Scalars are not counted
230
+ /// Note: Some values may referenced only part of a buffer, for
231
+ /// example, an array with an offset. The actual data
232
+ /// visible to this batch will be smaller than the total
233
+ /// buffer size in this case.
234
+ int64_t TotalBufferSize() const;
235
+
236
+ /// \brief Return the value at the i-th index
237
+ template <typename index_type>
238
+ inline const Datum& operator[](index_type i) const {
239
+ return values[i];
240
+ }
241
+
242
+ bool Equals(const ExecBatch& other) const;
243
+
244
+ /// \brief A convenience for the number of values / arguments.
245
+ int num_values() const { return static_cast<int>(values.size()); }
246
+
247
+ ExecBatch Slice(int64_t offset, int64_t length) const;
248
+
249
+ Result<ExecBatch> SelectValues(const std::vector<int>& ids) const;
250
+
251
+ /// \brief A convenience for returning the types from the batch.
252
+ std::vector<TypeHolder> GetTypes() const {
253
+ std::vector<TypeHolder> result;
254
+ for (const auto& value : this->values) {
255
+ result.emplace_back(value.type());
256
+ }
257
+ return result;
258
+ }
259
+
260
+ std::string ToString() const;
261
+ };
262
+
263
+ inline bool operator==(const ExecBatch& l, const ExecBatch& r) { return l.Equals(r); }
264
+ inline bool operator!=(const ExecBatch& l, const ExecBatch& r) { return !l.Equals(r); }
265
+
266
+ ARROW_EXPORT void PrintTo(const ExecBatch&, std::ostream*);
267
+
268
+ /// @}
269
+
270
+ /// \defgroup compute-internals Utilities for calling functions, useful for those
271
+ /// extending the function registry
272
+ ///
273
+ /// @{
274
+
275
+ struct ExecValue {
276
+ ArraySpan array = {};
277
+ const Scalar* scalar = NULLPTR;
278
+
279
+ ExecValue(Scalar* scalar) // NOLINT implicit conversion
280
+ : scalar(scalar) {}
281
+
282
+ ExecValue(ArraySpan array) // NOLINT implicit conversion
283
+ : array(std::move(array)) {}
284
+
285
+ ExecValue(const ArrayData& array) { // NOLINT implicit conversion
286
+ this->array.SetMembers(array);
287
+ }
288
+
289
+ ExecValue() = default;
290
+ ExecValue(const ExecValue& other) = default;
291
+ ExecValue& operator=(const ExecValue& other) = default;
292
+ ExecValue(ExecValue&& other) = default;
293
+ ExecValue& operator=(ExecValue&& other) = default;
294
+
295
+ int64_t length() const { return this->is_array() ? this->array.length : 1; }
296
+
297
+ bool is_array() const { return this->scalar == NULLPTR; }
298
+ bool is_scalar() const { return !this->is_array(); }
299
+
300
+ void SetArray(const ArrayData& array) {
301
+ this->array.SetMembers(array);
302
+ this->scalar = NULLPTR;
303
+ }
304
+
305
+ void SetScalar(const Scalar* scalar) { this->scalar = scalar; }
306
+
307
+ template <typename ExactType>
308
+ const ExactType& scalar_as() const {
309
+ return ::arrow::internal::checked_cast<const ExactType&>(*this->scalar);
310
+ }
311
+
312
+ /// XXX: here temporarily for compatibility with datum, see
313
+ /// e.g. MakeStructExec in scalar_nested.cc
314
+ int64_t null_count() const {
315
+ if (this->is_array()) {
316
+ return this->array.GetNullCount();
317
+ } else {
318
+ return this->scalar->is_valid ? 0 : 1;
319
+ }
320
+ }
321
+
322
+ const DataType* type() const {
323
+ if (this->is_array()) {
324
+ return array.type;
325
+ } else {
326
+ return scalar->type.get();
327
+ }
328
+ }
329
+ };
330
+
331
+ struct ARROW_EXPORT ExecResult {
332
+ // The default value of the variant is ArraySpan
333
+ std::variant<ArraySpan, std::shared_ptr<ArrayData>> value;
334
+
335
+ int64_t length() const {
336
+ if (this->is_array_span()) {
337
+ return this->array_span()->length;
338
+ } else {
339
+ return this->array_data()->length;
340
+ }
341
+ }
342
+
343
+ const DataType* type() const {
344
+ if (this->is_array_span()) {
345
+ return this->array_span()->type;
346
+ } else {
347
+ return this->array_data()->type.get();
348
+ }
349
+ }
350
+
351
+ const ArraySpan* array_span() const { return &std::get<ArraySpan>(this->value); }
352
+ ArraySpan* array_span_mutable() { return &std::get<ArraySpan>(this->value); }
353
+
354
+ bool is_array_span() const { return this->value.index() == 0; }
355
+
356
+ const std::shared_ptr<ArrayData>& array_data() const {
357
+ return std::get<std::shared_ptr<ArrayData>>(this->value);
358
+ }
359
+ ArrayData* array_data_mutable() {
360
+ return std::get<std::shared_ptr<ArrayData>>(this->value).get();
361
+ }
362
+
363
+ bool is_array_data() const { return this->value.index() == 1; }
364
+ };
365
+
366
+ /// \brief A "lightweight" column batch object which contains no
367
+ /// std::shared_ptr objects and does not have any memory ownership
368
+ /// semantics. Can represent a view onto an "owning" ExecBatch.
369
+ struct ARROW_EXPORT ExecSpan {
370
+ ExecSpan() = default;
371
+ ExecSpan(const ExecSpan& other) = default;
372
+ ExecSpan& operator=(const ExecSpan& other) = default;
373
+ ExecSpan(ExecSpan&& other) = default;
374
+ ExecSpan& operator=(ExecSpan&& other) = default;
375
+
376
+ explicit ExecSpan(std::vector<ExecValue> values, int64_t length)
377
+ : length(length), values(std::move(values)) {}
378
+
379
+ explicit ExecSpan(const ExecBatch& batch) {
380
+ this->length = batch.length;
381
+ this->values.resize(batch.values.size());
382
+ for (size_t i = 0; i < batch.values.size(); ++i) {
383
+ const Datum& in_value = batch[i];
384
+ ExecValue* out_value = &this->values[i];
385
+ if (in_value.is_array()) {
386
+ out_value->SetArray(*in_value.array());
387
+ } else {
388
+ out_value->SetScalar(in_value.scalar().get());
389
+ }
390
+ }
391
+ }
392
+
393
+ /// \brief Return the value at the i-th index
394
+ template <typename index_type>
395
+ inline const ExecValue& operator[](index_type i) const {
396
+ return values[i];
397
+ }
398
+
399
+ /// \brief A convenience for the number of values / arguments.
400
+ int num_values() const { return static_cast<int>(values.size()); }
401
+
402
+ std::vector<TypeHolder> GetTypes() const {
403
+ std::vector<TypeHolder> result;
404
+ for (const auto& value : this->values) {
405
+ result.emplace_back(value.type());
406
+ }
407
+ return result;
408
+ }
409
+
410
+ ExecBatch ToExecBatch() const {
411
+ ExecBatch result;
412
+ result.length = this->length;
413
+ for (const ExecValue& value : this->values) {
414
+ if (value.is_array()) {
415
+ result.values.push_back(value.array.ToArrayData());
416
+ } else {
417
+ result.values.push_back(value.scalar->GetSharedPtr());
418
+ }
419
+ }
420
+ return result;
421
+ }
422
+
423
+ int64_t length = 0;
424
+ std::vector<ExecValue> values;
425
+ };
426
+
427
+ /// \defgroup compute-call-function One-shot calls to compute functions
428
+ ///
429
+ /// @{
430
+
431
+ /// \brief One-shot invoker for all types of functions.
432
+ ///
433
+ /// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs,
434
+ /// and wrapping of outputs.
435
+ ARROW_EXPORT
436
+ Result<Datum> CallFunction(const std::string& func_name, const std::vector<Datum>& args,
437
+ const FunctionOptions* options, ExecContext* ctx = NULLPTR);
438
+
439
+ /// \brief Variant of CallFunction which uses a function's default options.
440
+ ///
441
+ /// NB: Some functions require FunctionOptions be provided.
442
+ ARROW_EXPORT
443
+ Result<Datum> CallFunction(const std::string& func_name, const std::vector<Datum>& args,
444
+ ExecContext* ctx = NULLPTR);
445
+
446
+ /// \brief One-shot invoker for all types of functions.
447
+ ///
448
+ /// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs,
449
+ /// and wrapping of outputs.
450
+ ARROW_EXPORT
451
+ Result<Datum> CallFunction(const std::string& func_name, const ExecBatch& batch,
452
+ const FunctionOptions* options, ExecContext* ctx = NULLPTR);
453
+
454
+ /// \brief Variant of CallFunction which uses a function's default options.
455
+ ///
456
+ /// NB: Some functions require FunctionOptions be provided.
457
+ ARROW_EXPORT
458
+ Result<Datum> CallFunction(const std::string& func_name, const ExecBatch& batch,
459
+ ExecContext* ctx = NULLPTR);
460
+
461
+ /// @}
462
+
463
+ /// \defgroup compute-function-executor One-shot calls to obtain function executors
464
+ ///
465
+ /// @{
466
+
467
+ /// \brief One-shot executor provider for all types of functions.
468
+ ///
469
+ /// This function creates and initializes a `FunctionExecutor` appropriate
470
+ /// for the given function name, input types and function options.
471
+ ARROW_EXPORT
472
+ Result<std::shared_ptr<FunctionExecutor>> GetFunctionExecutor(
473
+ const std::string& func_name, std::vector<TypeHolder> in_types,
474
+ const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR);
475
+
476
+ /// \brief One-shot executor provider for all types of functions.
477
+ ///
478
+ /// This function creates and initializes a `FunctionExecutor` appropriate
479
+ /// for the given function name, input types (taken from the Datum arguments)
480
+ /// and function options.
481
+ ARROW_EXPORT
482
+ Result<std::shared_ptr<FunctionExecutor>> GetFunctionExecutor(
483
+ const std::string& func_name, const std::vector<Datum>& args,
484
+ const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR);
485
+
486
+ /// @}
487
+
488
+ } // namespace compute
489
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // NOTE: API is EXPERIMENTAL and will change without going through a
19
+ // deprecation cycle.
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/compute/type_fwd.h"
24
+ #include "arrow/result.h"
25
+ #include "arrow/status.h"
26
+ #include "arrow/type_fwd.h"
27
+ #include "arrow/util/visibility.h"
28
+
29
+ namespace arrow {
30
+ namespace compute {
31
+
32
+ /// \addtogroup compute-functions
33
+ /// @{
34
+
35
+ /// \brief Extension point for defining options outside libarrow (but
36
+ /// still within this project).
37
+ class ARROW_EXPORT FunctionOptionsType {
38
+ public:
39
+ virtual ~FunctionOptionsType() = default;
40
+
41
+ virtual const char* type_name() const = 0;
42
+ virtual std::string Stringify(const FunctionOptions&) const = 0;
43
+ virtual bool Compare(const FunctionOptions&, const FunctionOptions&) const = 0;
44
+ virtual Result<std::shared_ptr<Buffer>> Serialize(const FunctionOptions&) const;
45
+ virtual Result<std::unique_ptr<FunctionOptions>> Deserialize(
46
+ const Buffer& buffer) const;
47
+ virtual std::unique_ptr<FunctionOptions> Copy(const FunctionOptions&) const = 0;
48
+ };
49
+
50
+ /// \brief Base class for specifying options configuring a function's behavior,
51
+ /// such as error handling.
52
+ class ARROW_EXPORT FunctionOptions : public util::EqualityComparable<FunctionOptions> {
53
+ public:
54
+ virtual ~FunctionOptions() = default;
55
+
56
+ const FunctionOptionsType* options_type() const { return options_type_; }
57
+ const char* type_name() const { return options_type()->type_name(); }
58
+
59
+ bool Equals(const FunctionOptions& other) const;
60
+ std::string ToString() const;
61
+ std::unique_ptr<FunctionOptions> Copy() const;
62
+ /// \brief Serialize an options struct to a buffer.
63
+ Result<std::shared_ptr<Buffer>> Serialize() const;
64
+ /// \brief Deserialize an options struct from a buffer.
65
+ /// Note: this will only look for `type_name` in the default FunctionRegistry;
66
+ /// to use a custom FunctionRegistry, look up the FunctionOptionsType, then
67
+ /// call FunctionOptionsType::Deserialize().
68
+ static Result<std::unique_ptr<FunctionOptions>> Deserialize(
69
+ const std::string& type_name, const Buffer& buffer);
70
+
71
+ protected:
72
+ explicit FunctionOptions(const FunctionOptionsType* type) : options_type_(type) {}
73
+ const FunctionOptionsType* options_type_;
74
+ };
75
+
76
+ ARROW_EXPORT void PrintTo(const FunctionOptions&, std::ostream*);
77
+
78
+ /// @}
79
+
80
+ } // namespace compute
81
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h ADDED
@@ -0,0 +1,753 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // NOTE: API is EXPERIMENTAL and will change without going through a
19
+ // deprecation cycle
20
+
21
+ #pragma once
22
+
23
+ #include <cstddef>
24
+ #include <cstdint>
25
+ #include <functional>
26
+ #include <memory>
27
+ #include <string>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ #include "arrow/buffer.h"
32
+ #include "arrow/compute/exec.h"
33
+ #include "arrow/datum.h"
34
+ #include "arrow/device_allocation_type_set.h"
35
+ #include "arrow/memory_pool.h"
36
+ #include "arrow/result.h"
37
+ #include "arrow/status.h"
38
+ #include "arrow/type.h"
39
+ #include "arrow/util/macros.h"
40
+ #include "arrow/util/visibility.h"
41
+
42
+ // macOS defines PREALLOCATE as a preprocessor macro in the header sys/vnode.h.
43
+ // No other BSD seems to do so. The name is used as an identifier in MemAllocation enum.
44
+ #if defined(__APPLE__) && defined(PREALLOCATE)
45
+ # undef PREALLOCATE
46
+ #endif
47
+
48
+ namespace arrow {
49
+ namespace compute {
50
+
51
+ class FunctionOptions;
52
+
53
+ /// \brief Base class for opaque kernel-specific state. For example, if there
54
+ /// is some kind of initialization required.
55
+ struct ARROW_EXPORT KernelState {
56
+ virtual ~KernelState() = default;
57
+ };
58
+
59
+ /// \brief Context/state for the execution of a particular kernel.
60
+ class ARROW_EXPORT KernelContext {
61
+ public:
62
+ // Can pass optional backreference; not used consistently for the
63
+ // moment but will be made so in the future
64
+ explicit KernelContext(ExecContext* exec_ctx, const Kernel* kernel = NULLPTR)
65
+ : exec_ctx_(exec_ctx), kernel_(kernel) {}
66
+
67
+ /// \brief Allocate buffer from the context's memory pool. The contents are
68
+ /// not initialized.
69
+ Result<std::shared_ptr<ResizableBuffer>> Allocate(int64_t nbytes);
70
+
71
+ /// \brief Allocate buffer for bitmap from the context's memory pool. Like
72
+ /// Allocate, the contents of the buffer are not initialized but the last
73
+ /// byte is preemptively zeroed to help avoid ASAN or valgrind issues.
74
+ Result<std::shared_ptr<ResizableBuffer>> AllocateBitmap(int64_t num_bits);
75
+
76
+ /// \brief Assign the active KernelState to be utilized for each stage of
77
+ /// kernel execution. Ownership and memory lifetime of the KernelState must
78
+ /// be minded separately.
79
+ void SetState(KernelState* state) { state_ = state; }
80
+
81
+ // Set kernel that is being invoked since some kernel
82
+ // implementations will examine the kernel state.
83
+ void SetKernel(const Kernel* kernel) { kernel_ = kernel; }
84
+
85
+ KernelState* state() { return state_; }
86
+
87
+ /// \brief Configuration related to function execution that is to be shared
88
+ /// across multiple kernels.
89
+ ExecContext* exec_context() { return exec_ctx_; }
90
+
91
+ /// \brief The memory pool to use for allocations. For now, it uses the
92
+ /// MemoryPool contained in the ExecContext used to create the KernelContext.
93
+ MemoryPool* memory_pool() { return exec_ctx_->memory_pool(); }
94
+
95
+ const Kernel* kernel() const { return kernel_; }
96
+
97
+ private:
98
+ ExecContext* exec_ctx_;
99
+ KernelState* state_ = NULLPTR;
100
+ const Kernel* kernel_ = NULLPTR;
101
+ };
102
+
103
+ /// \brief An type-checking interface to permit customizable validation rules
104
+ /// for use with InputType and KernelSignature. This is for scenarios where the
105
+ /// acceptance is not an exact type instance, such as a TIMESTAMP type for a
106
+ /// specific TimeUnit, but permitting any time zone.
107
+ struct ARROW_EXPORT TypeMatcher {
108
+ virtual ~TypeMatcher() = default;
109
+
110
+ /// \brief Return true if this matcher accepts the data type.
111
+ virtual bool Matches(const DataType& type) const = 0;
112
+
113
+ /// \brief A human-interpretable string representation of what the type
114
+ /// matcher checks for, usable when printing KernelSignature or formatting
115
+ /// error messages.
116
+ virtual std::string ToString() const = 0;
117
+
118
+ /// \brief Return true if this TypeMatcher contains the same matching rule as
119
+ /// the other. Currently depends on RTTI.
120
+ virtual bool Equals(const TypeMatcher& other) const = 0;
121
+ };
122
+
123
+ namespace match {
124
+
125
+ /// \brief Match any DataType instance having the same DataType::id.
126
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> SameTypeId(Type::type type_id);
127
+
128
+ /// \brief Match any TimestampType instance having the same unit, but the time
129
+ /// zones can be different.
130
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> TimestampTypeUnit(TimeUnit::type unit);
131
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> Time32TypeUnit(TimeUnit::type unit);
132
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> Time64TypeUnit(TimeUnit::type unit);
133
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> DurationTypeUnit(TimeUnit::type unit);
134
+
135
+ // \brief Match any integer type
136
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> Integer();
137
+
138
+ // Match types using 32-bit varbinary representation
139
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> BinaryLike();
140
+
141
+ // Match types using 64-bit varbinary representation
142
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> LargeBinaryLike();
143
+
144
+ // Match any fixed binary type
145
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> FixedSizeBinaryLike();
146
+
147
+ // \brief Match any primitive type (boolean or any type representable as a C
148
+ // Type)
149
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> Primitive();
150
+
151
+ // \brief Match any integer type that can be used as run-end in run-end encoded
152
+ // arrays
153
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndInteger();
154
+
155
+ /// \brief Match run-end encoded types that use any valid run-end type and
156
+ /// encode specific value types
157
+ ///
158
+ /// @param[in] value_type_matcher a matcher that is applied to the values field
159
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndEncoded(
160
+ std::shared_ptr<TypeMatcher> value_type_matcher);
161
+
162
+ /// \brief Match run-end encoded types that use any valid run-end type and
163
+ /// encode specific value types
164
+ ///
165
+ /// @param[in] value_type_id a type id that the type of the values field should match
166
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndEncoded(Type::type value_type_id);
167
+
168
+ /// \brief Match run-end encoded types that encode specific run-end and value types
169
+ ///
170
+ /// @param[in] run_end_type_matcher a matcher that is applied to the run_ends field
171
+ /// @param[in] value_type_matcher a matcher that is applied to the values field
172
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndEncoded(
173
+ std::shared_ptr<TypeMatcher> run_end_type_matcher,
174
+ std::shared_ptr<TypeMatcher> value_type_matcher);
175
+
176
+ } // namespace match
177
+
178
+ /// \brief An object used for type-checking arguments to be passed to a kernel
179
+ /// and stored in a KernelSignature. The type-checking rule can be supplied
180
+ /// either with an exact DataType instance or a custom TypeMatcher.
181
+ class ARROW_EXPORT InputType {
182
+ public:
183
+ /// \brief The kind of type-checking rule that the InputType contains.
184
+ enum Kind {
185
+ /// \brief Accept any value type.
186
+ ANY_TYPE,
187
+
188
+ /// \brief A fixed arrow::DataType and will only exact match having this
189
+ /// exact type (e.g. same TimestampType unit, same decimal scale and
190
+ /// precision, or same nested child types).
191
+ EXACT_TYPE,
192
+
193
+ /// \brief Uses a TypeMatcher implementation to check the type.
194
+ USE_TYPE_MATCHER
195
+ };
196
+
197
+ /// \brief Accept any value type
198
+ InputType() : kind_(ANY_TYPE) {}
199
+
200
+ /// \brief Accept an exact value type.
201
+ InputType(std::shared_ptr<DataType> type) // NOLINT implicit construction
202
+ : kind_(EXACT_TYPE), type_(std::move(type)) {}
203
+
204
+ /// \brief Use the passed TypeMatcher to type check.
205
+ InputType(std::shared_ptr<TypeMatcher> type_matcher) // NOLINT implicit construction
206
+ : kind_(USE_TYPE_MATCHER), type_matcher_(std::move(type_matcher)) {}
207
+
208
+ /// \brief Match any type with the given Type::type. Uses a TypeMatcher for
209
+ /// its implementation.
210
+ InputType(Type::type type_id) // NOLINT implicit construction
211
+ : InputType(match::SameTypeId(type_id)) {}
212
+
213
+ InputType(const InputType& other) { CopyInto(other); }
214
+
215
+ void operator=(const InputType& other) { CopyInto(other); }
216
+
217
+ InputType(InputType&& other) { MoveInto(std::forward<InputType>(other)); }
218
+
219
+ void operator=(InputType&& other) { MoveInto(std::forward<InputType>(other)); }
220
+
221
+ // \brief Match any input (array, scalar of any type)
222
+ static InputType Any() { return InputType(); }
223
+
224
+ /// \brief Return true if this input type matches the same type cases as the
225
+ /// other.
226
+ bool Equals(const InputType& other) const;
227
+
228
+ bool operator==(const InputType& other) const { return this->Equals(other); }
229
+
230
+ bool operator!=(const InputType& other) const { return !(*this == other); }
231
+
232
+ /// \brief Return hash code.
233
+ size_t Hash() const;
234
+
235
+ /// \brief Render a human-readable string representation.
236
+ std::string ToString() const;
237
+
238
+ /// \brief Return true if the Datum matches this argument kind in
239
+ /// type (and only allows scalar or array-like Datums).
240
+ bool Matches(const Datum& value) const;
241
+
242
+ /// \brief Return true if the type matches this InputType
243
+ bool Matches(const DataType& type) const;
244
+
245
+ /// \brief The type matching rule that this InputType uses.
246
+ Kind kind() const { return kind_; }
247
+
248
+ /// \brief For InputType::EXACT_TYPE kind, the exact type that this InputType
249
+ /// must match. Otherwise this function should not be used and will assert in
250
+ /// debug builds.
251
+ const std::shared_ptr<DataType>& type() const;
252
+
253
+ /// \brief For InputType::USE_TYPE_MATCHER, the TypeMatcher to be used for
254
+ /// checking the type of a value. Otherwise this function should not be used
255
+ /// and will assert in debug builds.
256
+ const TypeMatcher& type_matcher() const;
257
+
258
+ private:
259
+ void CopyInto(const InputType& other) {
260
+ this->kind_ = other.kind_;
261
+ this->type_ = other.type_;
262
+ this->type_matcher_ = other.type_matcher_;
263
+ }
264
+
265
+ void MoveInto(InputType&& other) {
266
+ this->kind_ = other.kind_;
267
+ this->type_ = std::move(other.type_);
268
+ this->type_matcher_ = std::move(other.type_matcher_);
269
+ }
270
+
271
+ Kind kind_;
272
+
273
+ // For EXACT_TYPE Kind
274
+ std::shared_ptr<DataType> type_;
275
+
276
+ // For USE_TYPE_MATCHER Kind
277
+ std::shared_ptr<TypeMatcher> type_matcher_;
278
+ };
279
+
280
+ /// \brief Container to capture both exact and input-dependent output types.
281
+ class ARROW_EXPORT OutputType {
282
+ public:
283
+ /// \brief An enum indicating whether the value type is an invariant fixed
284
+ /// value or one that's computed by a kernel-defined resolver function.
285
+ enum ResolveKind { FIXED, COMPUTED };
286
+
287
+ /// Type resolution function. Given input types, return output type. This
288
+ /// function MAY may use the kernel state to decide the output type based on
289
+ /// the FunctionOptions.
290
+ ///
291
+ /// This function SHOULD _not_ be used to check for arity, that is to be
292
+ /// performed one or more layers above.
293
+ using Resolver =
294
+ std::function<Result<TypeHolder>(KernelContext*, const std::vector<TypeHolder>&)>;
295
+
296
+ /// \brief Output an exact type
297
+ OutputType(std::shared_ptr<DataType> type) // NOLINT implicit construction
298
+ : kind_(FIXED), type_(std::move(type)) {}
299
+
300
+ /// \brief Output a computed type depending on actual input types
301
+ template <typename Fn>
302
+ OutputType(Fn resolver) // NOLINT implicit construction
303
+ : kind_(COMPUTED), resolver_(std::move(resolver)) {}
304
+
305
+ OutputType(const OutputType& other) {
306
+ this->kind_ = other.kind_;
307
+ this->type_ = other.type_;
308
+ this->resolver_ = other.resolver_;
309
+ }
310
+
311
+ OutputType(OutputType&& other) {
312
+ this->kind_ = other.kind_;
313
+ this->type_ = std::move(other.type_);
314
+ this->resolver_ = other.resolver_;
315
+ }
316
+
317
+ OutputType& operator=(const OutputType&) = default;
318
+ OutputType& operator=(OutputType&&) = default;
319
+
320
+ /// \brief Return the type of the expected output value of the kernel given
321
+ /// the input argument types. The resolver may make use of state information
322
+ /// kept in the KernelContext.
323
+ Result<TypeHolder> Resolve(KernelContext* ctx,
324
+ const std::vector<TypeHolder>& args) const;
325
+
326
+ /// \brief The exact output value type for the FIXED kind.
327
+ const std::shared_ptr<DataType>& type() const;
328
+
329
+ /// \brief For use with COMPUTED resolution strategy. It may be more
330
+ /// convenient to invoke this with OutputType::Resolve returned from this
331
+ /// method.
332
+ const Resolver& resolver() const;
333
+
334
+ /// \brief Render a human-readable string representation.
335
+ std::string ToString() const;
336
+
337
+ /// \brief Return the kind of type resolution of this output type, whether
338
+ /// fixed/invariant or computed by a resolver.
339
+ ResolveKind kind() const { return kind_; }
340
+
341
+ private:
342
+ ResolveKind kind_;
343
+
344
+ // For FIXED resolution
345
+ std::shared_ptr<DataType> type_;
346
+
347
+ // For COMPUTED resolution
348
+ Resolver resolver_ = NULLPTR;
349
+ };
350
+
351
+ /// \brief Holds the input types and output type of the kernel.
352
+ ///
353
+ /// VarArgs functions with minimum N arguments should pass up to N input types to be
354
+ /// used to validate the input types of a function invocation. The first N-1 types
355
+ /// will be matched against the first N-1 arguments, and the last type will be
356
+ /// matched against the remaining arguments.
357
+ class ARROW_EXPORT KernelSignature {
358
+ public:
359
+ KernelSignature(std::vector<InputType> in_types, OutputType out_type,
360
+ bool is_varargs = false);
361
+
362
+ /// \brief Convenience ctor since make_shared can be awkward
363
+ static std::shared_ptr<KernelSignature> Make(std::vector<InputType> in_types,
364
+ OutputType out_type,
365
+ bool is_varargs = false);
366
+
367
+ /// \brief Return true if the signature if compatible with the list of input
368
+ /// value descriptors.
369
+ bool MatchesInputs(const std::vector<TypeHolder>& types) const;
370
+
371
+ /// \brief Returns true if the input types of each signature are
372
+ /// equal. Well-formed functions should have a deterministic output type
373
+ /// given input types, but currently it is the responsibility of the
374
+ /// developer to ensure this.
375
+ bool Equals(const KernelSignature& other) const;
376
+
377
+ bool operator==(const KernelSignature& other) const { return this->Equals(other); }
378
+
379
+ bool operator!=(const KernelSignature& other) const { return !(*this == other); }
380
+
381
+ /// \brief Compute a hash code for the signature
382
+ size_t Hash() const;
383
+
384
+ /// \brief The input types for the kernel. For VarArgs functions, this should
385
+ /// generally contain a single validator to use for validating all of the
386
+ /// function arguments.
387
+ const std::vector<InputType>& in_types() const { return in_types_; }
388
+
389
+ /// \brief The output type for the kernel. Use Resolve to return the
390
+ /// exact output given input argument types, since many kernels'
391
+ /// output types depend on their input types (or their type
392
+ /// metadata).
393
+ const OutputType& out_type() const { return out_type_; }
394
+
395
+ /// \brief Render a human-readable string representation
396
+ std::string ToString() const;
397
+
398
+ bool is_varargs() const { return is_varargs_; }
399
+
400
+ private:
401
+ std::vector<InputType> in_types_;
402
+ OutputType out_type_;
403
+ bool is_varargs_;
404
+
405
+ // For caching the hash code after it's computed the first time
406
+ mutable uint64_t hash_code_;
407
+ };
408
+
409
/// \brief A function may contain multiple variants of a kernel for a given
/// type combination for different SIMD levels. Based on the active system's
/// CPU info or the user's preferences, we can elect to use one over the other.
struct SimdLevel {
  // Scoped enum-like namespace; values are ordered from least to most capable.
  enum type { NONE = 0, SSE4_2, AVX, AVX2, AVX512, NEON, MAX };
};
415
+
416
/// \brief The strategy to use for propagating or otherwise populating the
/// validity bitmap of a kernel output.
struct NullHandling {
  enum type {
    /// Compute the output validity bitmap by intersecting the validity
    /// bitmaps of the arguments using bitwise-and operations. Output values
    /// are valid/non-null only if the corresponding values in all input
    /// arguments were valid/non-null. Kernels generally need not touch the
    /// bitmap afterwards, but an exec function is permitted to alter it
    /// after the null intersection is computed if it needs to.
    INTERSECTION,

    /// Kernel expects a pre-allocated buffer to write the result bitmap
    /// into. The preallocated memory is not zeroed (except for the last
    /// byte), so the kernel should ensure to completely populate the bitmap.
    COMPUTED_PREALLOCATE,

    /// Kernel allocates and sets the validity bitmap of the output.
    COMPUTED_NO_PREALLOCATE,

    /// Kernel output is never null and a validity bitmap does not need to
    /// be allocated.
    OUTPUT_NOT_NULL
  };
};
442
+
443
+ /// \brief The preference for memory preallocation of fixed-width type outputs
444
+ /// in kernel execution.
445
+ struct MemAllocation {
446
+ // Defaults elsewhere in this header: ScalarKernel uses PREALLOCATE and
+ // VectorKernel uses NO_PREALLOCATE.
+ enum type {
447
+ // For data types that support pre-allocation (i.e. fixed-width), the
448
+ // kernel expects to be provided a pre-allocated data buffer to write
449
+ // into. Non-fixed-width types must always allocate their own data
450
+ // buffers. The allocation is made for the same length as the execution batch,
451
+ // so vector kernels yielding differently sized output should not use this.
452
+ //
453
+ // It is valid for the data to not be preallocated but the validity bitmap
454
+ // is (or is computed using the intersection/bitwise-and method).
455
+ //
456
+ // For variable-size output types like BinaryType or StringType, or for
457
+ // nested types, this option has no effect.
458
+ PREALLOCATE,
459
+
460
+ // The kernel is responsible for allocating its own data buffer for
461
+ // fixed-width type outputs.
462
+ NO_PREALLOCATE
463
+ };
464
+ };
465
+
466
+ struct Kernel;
467
+
468
+ /// \brief Arguments to pass to a KernelInit function. A struct is used to help
469
+ /// avoid API breakage should the arguments passed need to be expanded.
470
+ struct KernelInitArgs {
471
+ /// \brief A pointer to the kernel being initialized. The init function may
472
+ /// depend on the kernel's KernelSignature or other data contained there.
473
+ const Kernel* kernel;
474
+
475
+ /// \brief The types of the input arguments that the kernel is
476
+ /// about to be executed against.
477
+ /// NOTE: this is a reference member — a KernelInitArgs must not outlive the
+ /// vector it refers to.
+ const std::vector<TypeHolder>& inputs;
478
+
479
+ /// \brief Opaque options specific to this kernel. May be nullptr for functions
480
+ /// that do not require options.
481
+ const FunctionOptions* options;
482
+ };
483
+
484
+ /// \brief Common initializer function for all kernel types.
485
+ using KernelInit = std::function<Result<std::unique_ptr<KernelState>>(
486
+ KernelContext*, const KernelInitArgs&)>;
487
+
488
+ /// \brief Base type for kernels. Contains the function signature and
489
+ /// optionally the state initialization function, along with some common
490
+ /// attributes.
491
+ struct ARROW_EXPORT Kernel {
492
+ Kernel() = default;
493
+
494
+ Kernel(std::shared_ptr<KernelSignature> sig, KernelInit init)
495
+ : signature(std::move(sig)), init(std::move(init)) {}
496
+
497
+ Kernel(std::vector<InputType> in_types, OutputType out_type, KernelInit init)
498
+ : Kernel(KernelSignature::Make(std::move(in_types), std::move(out_type)),
499
+ std::move(init)) {}
500
+
501
+ /// \brief The "signature" of the kernel containing the InputType input
502
+ /// argument validators and OutputType output type resolver.
503
+ std::shared_ptr<KernelSignature> signature;
504
+
505
+ /// \brief Create a new KernelState for invocations of this kernel, e.g. to
506
+ /// set up any options or state relevant for execution.
507
+ KernelInit init;
508
+
509
+ /// \brief Create a vector of new KernelState for invocations of this kernel.
510
+ static Status InitAll(KernelContext*, const KernelInitArgs&,
511
+ std::vector<std::unique_ptr<KernelState>>*);
512
+
513
+ /// \brief Indicates whether execution can benefit from parallelization
514
+ /// (splitting large chunks into smaller chunks and using multiple
515
+ /// threads). Some kernels may not support parallel execution at
516
+ /// all. Synchronization and concurrency-related issues are currently the
517
+ /// responsibility of the Kernel's implementation.
518
+ bool parallelizable = true;
519
+
520
+ /// \brief Indicates the level of SIMD instruction support in the host CPU is
521
+ /// required to use the function. The intention is for functions to be able to
522
+ /// contain multiple kernels with the same signature but different levels of SIMD,
523
+ /// so that the most optimized kernel supported on a host's processor can be chosen.
524
+ SimdLevel::type simd_level = SimdLevel::NONE;
525
+
526
+ /// \brief Additional kernel-specific data
527
+ std::shared_ptr<KernelState> data;
528
+ };
529
+
530
+ /// \brief The scalar kernel execution API that must be implemented for SCALAR
531
+ /// kernel types. This includes both stateless and stateful kernels. Kernels
532
+ /// depending on some execution state access that state via subclasses of
533
+ /// KernelState set on the KernelContext object. Implementations should
534
+ /// endeavor to write into pre-allocated memory if they are able, though for
535
+ /// some kernels (e.g. in cases when a builder like StringBuilder must be
536
+ /// employed) this may not be possible.
537
+ using ArrayKernelExec = Status (*)(KernelContext*, const ExecSpan&, ExecResult*);
538
+
539
+ /// \brief Kernel data structure for implementations of ScalarFunction. In
540
+ /// addition to the members found in Kernel, contains the null handling
541
+ /// and memory pre-allocation preferences.
542
+ struct ARROW_EXPORT ScalarKernel : public Kernel {
543
+ ScalarKernel() = default;
544
+
545
+ // NOTE(review): this overload passes `init` to the base by copy, unlike the
+ // overload below which std::move's it — confirm this is intentional.
+ ScalarKernel(std::shared_ptr<KernelSignature> sig, ArrayKernelExec exec,
546
+ KernelInit init = NULLPTR)
547
+ : Kernel(std::move(sig), init), exec(exec) {}
548
+
549
+ ScalarKernel(std::vector<InputType> in_types, OutputType out_type, ArrayKernelExec exec,
550
+ KernelInit init = NULLPTR)
551
+ : Kernel(std::move(in_types), std::move(out_type), std::move(init)), exec(exec) {}
552
+
553
+ /// \brief Perform a single invocation of this kernel. Depending on the
554
+ /// implementation, it may only write into preallocated memory, while in some
555
+ /// cases it will allocate its own memory. Any required state is managed
556
+ /// through the KernelContext.
557
+ /// NOTE(review): default construction leaves `exec` uninitialized; it
+ /// appears callers must assign it before execution — confirm at call sites.
+ ArrayKernelExec exec;
558
+
559
+ /// \brief Writing execution results into larger contiguous allocations
560
+ /// requires that the kernel be able to write into sliced output ArrayData*,
561
+ /// including sliced output validity bitmaps. Some kernel implementations may
562
+ /// not be able to do this, so setting this to false disables this
563
+ /// functionality.
564
+ bool can_write_into_slices = true;
565
+
566
+ // For scalar functions preallocated data and intersecting arg validity
567
+ // bitmaps is a reasonable default
568
+ NullHandling::type null_handling = NullHandling::INTERSECTION;
569
+ MemAllocation::type mem_allocation = MemAllocation::PREALLOCATE;
570
+ };
571
+
572
+ // ----------------------------------------------------------------------
573
+ // VectorKernel (for VectorFunction)
574
+
575
+ /// \brief Kernel data structure for implementations of VectorFunction. It
576
+ /// contains an optional finalizer function, the null handling and memory
577
+ /// pre-allocation preferences (which have different defaults from
578
+ /// ScalarKernel), and some other execution-related options.
579
+ struct ARROW_EXPORT VectorKernel : public Kernel {
580
+ /// \brief See VectorKernel::finalize member for usage
581
+ using FinalizeFunc = std::function<Status(KernelContext*, std::vector<Datum>*)>;
582
+
583
+ /// \brief Function for executing a stateful VectorKernel against a
584
+ /// ChunkedArray input. Does not need to be defined for all VectorKernels
585
+ using ChunkedExec = Status (*)(KernelContext*, const ExecBatch&, Datum* out);
586
+
587
+ VectorKernel() = default;
588
+
589
+ VectorKernel(std::vector<InputType> in_types, OutputType out_type, ArrayKernelExec exec,
590
+ KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR)
591
+ : Kernel(std::move(in_types), std::move(out_type), std::move(init)),
592
+ exec(exec),
593
+ finalize(std::move(finalize)) {}
594
+
595
+ VectorKernel(std::shared_ptr<KernelSignature> sig, ArrayKernelExec exec,
596
+ KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR)
597
+ : Kernel(std::move(sig), std::move(init)),
598
+ exec(exec),
599
+ finalize(std::move(finalize)) {}
600
+
601
+ /// \brief Perform a single invocation of this kernel. Any required state is
602
+ /// managed through the KernelContext.
603
+ /// NOTE(review): left uninitialized by the default constructor — appears it
+ /// must be assigned before execution; confirm at call sites.
+ ArrayKernelExec exec;
604
+
605
+ /// \brief Execute the kernel on a ChunkedArray. Does not need to be defined
606
+ ChunkedExec exec_chunked = NULLPTR;
607
+
608
+ /// \brief For VectorKernel, convert intermediate results into finalized
609
+ /// results. Mutates input argument. Some kernels may accumulate state
610
+ /// (example: hashing-related functions) through processing chunked inputs, and
611
+ /// then need to attach some accumulated state to each of the outputs of
612
+ /// processing each chunk of data.
613
+ FinalizeFunc finalize;
614
+
615
+ /// Since vector kernels generally are implemented rather differently from
616
+ /// scalar/elementwise kernels (and they may not even yield arrays of the same
617
+ /// size), we make the developer opt-in to any memory preallocation rather
618
+ /// than having to turn it off.
619
+ NullHandling::type null_handling = NullHandling::COMPUTED_NO_PREALLOCATE;
620
+ MemAllocation::type mem_allocation = MemAllocation::NO_PREALLOCATE;
621
+
622
+ /// \brief Writing execution results into larger contiguous allocations
623
+ /// requires that the kernel be able to write into sliced output ArrayData*,
624
+ /// including sliced output validity bitmaps. Some kernel implementations may
625
+ /// not be able to do this, so setting this to false disables this
626
+ /// functionality.
627
+ bool can_write_into_slices = true;
628
+
629
+ /// Some vector kernels can do chunkwise execution using ExecSpanIterator,
630
+ /// in some cases accumulating some state. Other kernels (like Take) need to
631
+ /// be passed whole arrays and don't work on ChunkedArray inputs
632
+ bool can_execute_chunkwise = true;
633
+
634
+ /// Some kernels (like unique and value_counts) yield non-chunked output from
635
+ /// chunked-array inputs. This option controls how the results are boxed when
636
+ /// returned from ExecVectorFunction
637
+ ///
638
+ /// true -> ChunkedArray
639
+ /// false -> Array
640
+ bool output_chunked = true;
641
+ };
642
+
643
+ // ----------------------------------------------------------------------
644
+ // ScalarAggregateKernel (for ScalarAggregateFunction)
645
+
646
+ using ScalarAggregateConsume = Status (*)(KernelContext*, const ExecSpan&);
647
+ using ScalarAggregateMerge = Status (*)(KernelContext*, KernelState&&, KernelState*);
648
+ // Finalize returns Datum to permit multiple return values
649
+ using ScalarAggregateFinalize = Status (*)(KernelContext*, Datum*);
650
+
651
+ /// \brief Kernel data structure for implementations of
652
+ /// ScalarAggregateFunction. The four necessary components of an aggregation
653
+ /// kernel are the init, consume, merge, and finalize functions.
654
+ ///
655
+ /// * init: creates a new KernelState for a kernel.
656
+ /// * consume: processes an ExecSpan and updates the KernelState found in the
657
+ /// KernelContext.
658
+ /// * merge: combines one KernelState with another.
659
+ /// * finalize: produces the end result of the aggregation using the
660
+ /// KernelState in the KernelContext.
661
+ struct ARROW_EXPORT ScalarAggregateKernel : public Kernel {
662
+ ScalarAggregateKernel(std::shared_ptr<KernelSignature> sig, KernelInit init,
663
+ ScalarAggregateConsume consume, ScalarAggregateMerge merge,
664
+ ScalarAggregateFinalize finalize, const bool ordered)
665
+ : Kernel(std::move(sig), std::move(init)),
666
+ consume(consume),
667
+ merge(merge),
668
+ finalize(finalize),
669
+ ordered(ordered) {}
670
+
671
+ ScalarAggregateKernel(std::vector<InputType> in_types, OutputType out_type,
672
+ KernelInit init, ScalarAggregateConsume consume,
673
+ ScalarAggregateMerge merge, ScalarAggregateFinalize finalize,
674
+ const bool ordered)
675
+ : ScalarAggregateKernel(
676
+ KernelSignature::Make(std::move(in_types), std::move(out_type)),
677
+ std::move(init), consume, merge, finalize, ordered) {}
678
+
679
+ /// \brief Merge a vector of KernelStates into a single KernelState.
680
+ /// The merged state will be returned and will be set on the KernelContext.
681
+ static Result<std::unique_ptr<KernelState>> MergeAll(
682
+ const ScalarAggregateKernel* kernel, KernelContext* ctx,
683
+ std::vector<std::unique_ptr<KernelState>> states);
684
+
685
+ ScalarAggregateConsume consume;
686
+ ScalarAggregateMerge merge;
687
+ ScalarAggregateFinalize finalize;
688
+ /// \brief Whether this kernel requires ordering
689
+ /// Some aggregations, such as "first", require some kind of input order. The
690
+ /// order can be implicit, e.g., the order of the input data, or explicit, e.g.
691
+ /// the ordering specified with a window aggregation.
692
+ /// The caller of the aggregate kernel is responsible for passing data in some
693
+ /// defined order to the kernel. The flag here is a way for the kernel to tell
694
+ /// the caller that data passed to the kernel must be defined in some order.
695
+ bool ordered = false;
696
+ };
697
+
698
+ // ----------------------------------------------------------------------
699
+ // HashAggregateKernel (for HashAggregateFunction)
700
+
701
+ using HashAggregateResize = Status (*)(KernelContext*, int64_t);
702
+ using HashAggregateConsume = Status (*)(KernelContext*, const ExecSpan&);
703
+ using HashAggregateMerge = Status (*)(KernelContext*, KernelState&&, const ArrayData&);
704
+
705
+ // Finalize returns Datum to permit multiple return values
706
+ using HashAggregateFinalize = Status (*)(KernelContext*, Datum*);
707
+
708
+ /// \brief Kernel data structure for implementations of
709
+ /// HashAggregateFunction. The four necessary components of an aggregation
710
+ /// kernel are the init, consume, merge, and finalize functions.
711
+ ///
712
+ /// * init: creates a new KernelState for a kernel.
713
+ /// * resize: ensure that the KernelState can accommodate the specified number of groups.
714
+ /// * consume: processes an ExecSpan (which includes the argument as well
715
+ /// as an array of group identifiers) and updates the KernelState found in the
716
+ /// KernelContext.
717
+ /// * merge: combines one KernelState with another.
718
+ /// * finalize: produces the end result of the aggregation using the
719
+ /// KernelState in the KernelContext.
720
+ struct ARROW_EXPORT HashAggregateKernel : public Kernel {
721
+ HashAggregateKernel() = default;
722
+
723
+ HashAggregateKernel(std::shared_ptr<KernelSignature> sig, KernelInit init,
724
+ HashAggregateResize resize, HashAggregateConsume consume,
725
+ HashAggregateMerge merge, HashAggregateFinalize finalize,
726
+ const bool ordered)
727
+ : Kernel(std::move(sig), std::move(init)),
728
+ resize(resize),
729
+ consume(consume),
730
+ merge(merge),
731
+ finalize(finalize),
732
+ ordered(ordered) {}
733
+
734
+ // NOTE(review): this overload takes `consume` before `resize`, the reverse
+ // of the primary constructor above — the forwarding below is consistent,
+ // but the inconsistent parameter order is an easy source of caller bugs.
+ HashAggregateKernel(std::vector<InputType> in_types, OutputType out_type,
735
+ KernelInit init, HashAggregateConsume consume,
736
+ HashAggregateResize resize, HashAggregateMerge merge,
737
+ HashAggregateFinalize finalize, const bool ordered)
738
+ : HashAggregateKernel(
739
+ KernelSignature::Make(std::move(in_types), std::move(out_type)),
740
+ std::move(init), resize, consume, merge, finalize, ordered) {}
741
+
742
+ HashAggregateResize resize;
743
+ HashAggregateConsume consume;
744
+ HashAggregateMerge merge;
745
+ HashAggregateFinalize finalize;
746
+ /// \brief Whether this kernel requires ordering
747
+ /// This is similar to ScalarAggregateKernel. See ScalarAggregateKernel
748
+ /// for detailed doc of this variable.
749
+ bool ordered = false;
750
+ };
751
+
752
+ } // namespace compute
753
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+ #include <vector>
22
+
23
+ #include "arrow/type.h"
24
+ #include "arrow/util/compare.h"
25
+ #include "arrow/util/visibility.h"
26
+
27
+ namespace arrow {
28
+ namespace compute {
29
+
30
+ enum class SortOrder {
31
+ /// Arrange values in increasing order
32
+ Ascending,
33
+ /// Arrange values in decreasing order
34
+ Descending,
35
+ };
36
+
37
+ enum class NullPlacement {
38
+ /// Place nulls and NaNs before any non-null values.
39
+ /// NaNs will come after nulls.
40
+ AtStart,
41
+ /// Place nulls and NaNs after any non-null values.
42
+ /// NaNs will come before nulls.
43
+ AtEnd,
44
+ };
45
+
46
+ /// \brief One sort key for PartitionNthIndices (TODO) and SortIndices
47
+ class ARROW_EXPORT SortKey : public util::EqualityComparable<SortKey> {
48
+ public:
49
+ explicit SortKey(FieldRef target, SortOrder order = SortOrder::Ascending)
50
+ : target(std::move(target)), order(order) {}
51
+
52
+ bool Equals(const SortKey& other) const;
53
+ std::string ToString() const;
54
+
55
+ /// A FieldRef targeting the sort column.
56
+ FieldRef target;
57
+ /// How to order by this sort key.
58
+ SortOrder order;
59
+ };
60
+
61
+ class ARROW_EXPORT Ordering : public util::EqualityComparable<Ordering> {
62
+ public:
63
+ Ordering(std::vector<SortKey> sort_keys,
64
+ NullPlacement null_placement = NullPlacement::AtStart)
65
+ : sort_keys_(std::move(sort_keys)), null_placement_(null_placement) {}
66
+ /// true if data ordered by other is also ordered by this
67
+ ///
68
+ /// For example, if data is ordered by [a, b, c] then it is also ordered
69
+ /// by [a, b] but not by [b, c] or [a, b, c, d].
70
+ ///
71
+ /// [a, b].IsSuborderOf([a, b, c]) - true
72
+ /// [a, b, c].IsSuborderOf([a, b, c]) - true
73
+ /// [b, c].IsSuborderOf([a, b, c]) - false
74
+ /// [a, b, c, d].IsSuborderOf([a, b, c]) - false
75
+ ///
76
+ /// The implicit ordering is not a suborder of any other ordering and
77
+ /// no other ordering is a suborder of it. The implicit ordering is not a
78
+ /// suborder of itself.
79
+ ///
80
+ /// The unordered ordering is a suborder of all other orderings but no
81
+ /// other ordering is a suborder of it. The unordered ordering is a suborder
82
+ /// of itself.
83
+ ///
84
+ /// The unordered ordering is a suborder of the implicit ordering.
85
+ bool IsSuborderOf(const Ordering& other) const;
86
+
87
+ bool Equals(const Ordering& other) const;
88
+ std::string ToString() const;
89
+
90
+ bool is_implicit() const { return is_implicit_; }
91
+ bool is_unordered() const { return !is_implicit_ && sort_keys_.empty(); }
92
+
93
+ const std::vector<SortKey>& sort_keys() const { return sort_keys_; }
94
+ NullPlacement null_placement() const { return null_placement_; }
95
+
96
+ static const Ordering& Implicit() {
97
+ static const Ordering kImplicit(true);
98
+ return kImplicit;
99
+ }
100
+
101
+ static const Ordering& Unordered() {
102
+ static const Ordering kUnordered(false);
103
+ // It is also possible to get an unordered ordering by passing in an empty vector
104
+ // using the normal constructor. This is ok and useful when ordering comes from user
105
+ // input.
106
+ return kUnordered;
107
+ }
108
+
109
+ private:
110
+ // true selects the "implicit" ordering, false the "unordered" ordering;
+ // both have empty sort keys. Only used by Implicit() and Unordered() above.
+ explicit Ordering(bool is_implicit)
111
+ : null_placement_(NullPlacement::AtStart), is_implicit_(is_implicit) {}
112
+ /// Column key(s) to order by and how to order by these sort keys.
113
+ std::vector<SortKey> sort_keys_;
114
+ /// Whether nulls and NaNs are placed at the start or at the end
115
+ NullPlacement null_placement_;
116
+ bool is_implicit_ = false;
117
+ };
118
+
119
+ } // namespace compute
120
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // NOTE: API is EXPERIMENTAL and will change without going through a
19
+ // deprecation cycle
20
+
21
+ #pragma once
22
+
23
+ #include <memory>
24
+ #include <string>
25
+ #include <vector>
26
+
27
+ #include "arrow/result.h"
28
+ #include "arrow/status.h"
29
+ #include "arrow/util/visibility.h"
30
+
31
+ namespace arrow {
32
+ namespace compute {
33
+
34
+ class Function;
35
+ class FunctionOptionsType;
36
+
37
+ /// \brief A mutable central function registry for built-in functions as well
38
+ /// as user-defined functions. Functions are implementations of
39
+ /// arrow::compute::Function.
40
+ ///
41
+ /// Generally, each function contains kernels which are implementations of a
42
+ /// function for a specific argument signature. After looking up a function in
43
+ /// the registry, one can either execute it eagerly with Function::Execute or
44
+ /// use one of the function's dispatch methods to pick a suitable kernel for
45
+ /// lower-level function execution.
46
+ class ARROW_EXPORT FunctionRegistry {
47
+ public:
48
+ ~FunctionRegistry();
49
+
50
+ /// \brief Construct a new registry.
51
+ ///
52
+ /// Most users only need to use the global registry.
53
+ static std::unique_ptr<FunctionRegistry> Make();
54
+
55
+ /// \brief Construct a new nested registry with the given parent.
56
+ ///
57
+ /// Most users only need to use the global registry. The returned registry never changes
58
+ /// its parent, even when an operation allows overwriting.
59
+ static std::unique_ptr<FunctionRegistry> Make(FunctionRegistry* parent);
60
+
61
+ /// \brief Check whether a new function can be added to the registry.
62
+ ///
63
+ /// \returns Status::KeyError if a function with the same name is already registered.
64
+ Status CanAddFunction(std::shared_ptr<Function> function, bool allow_overwrite = false);
65
+
66
+ /// \brief Add a new function to the registry.
67
+ ///
68
+ /// \returns Status::KeyError if a function with the same name is already registered.
69
+ Status AddFunction(std::shared_ptr<Function> function, bool allow_overwrite = false);
70
+
71
+ /// \brief Check whether an alias can be added for the given function name.
72
+ ///
73
+ /// \returns Status::KeyError if the function with the given name is not registered.
74
+ Status CanAddAlias(const std::string& target_name, const std::string& source_name);
75
+
76
+ /// \brief Add alias for the given function name.
77
+ ///
78
+ /// \returns Status::KeyError if the function with the given name is not registered.
79
+ Status AddAlias(const std::string& target_name, const std::string& source_name);
80
+
81
+ /// \brief Check whether a new function options type can be added to the registry.
82
+ ///
83
+ /// \returns Status::KeyError if a function options type with the same name is already
84
+ /// registered.
85
+ Status CanAddFunctionOptionsType(const FunctionOptionsType* options_type,
86
+ bool allow_overwrite = false);
87
+
88
+ /// \brief Add a new function options type to the registry.
89
+ ///
90
+ /// \returns Status::KeyError if a function options type with the same name is already
91
+ /// registered.
92
+ Status AddFunctionOptionsType(const FunctionOptionsType* options_type,
93
+ bool allow_overwrite = false);
94
+
95
+ /// \brief Retrieve a function by name from the registry.
96
+ Result<std::shared_ptr<Function>> GetFunction(const std::string& name) const;
97
+
98
+ /// \brief Return vector of all entry names in the registry.
99
+ ///
100
+ /// Helpful for displaying a manifest of available functions.
101
+ std::vector<std::string> GetFunctionNames() const;
102
+
103
+ /// \brief Retrieve a function options type by name from the registry.
104
+ Result<const FunctionOptionsType*> GetFunctionOptionsType(
105
+ const std::string& name) const;
106
+
107
+ /// \brief The number of currently registered functions.
108
+ int num_functions() const;
109
+
110
+ /// \brief The cast function object registered in AddFunction.
111
+ ///
112
+ /// Helpful for getting the cast function as needed.
113
+ const Function* cast_function() const;
114
+
115
+ private:
116
+ FunctionRegistry();
117
+
118
+ // Use PIMPL pattern to not have std::unordered_map here
119
+ class FunctionRegistryImpl;
120
+ std::unique_ptr<FunctionRegistryImpl> impl_;
121
+
122
+ explicit FunctionRegistry(FunctionRegistryImpl* impl);
123
+ };
124
+
125
+ } // namespace compute
126
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/util/visibility.h"
21
+
22
+ namespace arrow {
23
+
24
+ struct Datum;
25
+ struct TypeHolder;
26
+
27
+ namespace compute {
28
+
29
+ class Function;
30
+ class ScalarAggregateFunction;
31
+ class FunctionExecutor;
32
+ class FunctionOptions;
33
+ class FunctionRegistry;
34
+
35
+ /// \brief Return the process-global function registry.
36
+ // Defined in registry.cc
37
+ ARROW_EXPORT FunctionRegistry* GetFunctionRegistry();
38
+
39
+ class CastOptions;
40
+
41
+ struct ExecBatch;
42
+ class ExecContext;
43
+ class KernelContext;
44
+
45
+ struct Kernel;
46
+ struct ScalarKernel;
47
+ struct ScalarAggregateKernel;
48
+ struct VectorKernel;
49
+
50
+ struct KernelState;
51
+
52
+ class Expression;
53
+
54
+ ARROW_EXPORT ExecContext* default_exec_context();
55
+ ARROW_EXPORT ExecContext* threaded_exec_context();
56
+
57
+ } // namespace compute
58
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cstdint>
22
+ #include <optional>
23
+ #include <thread>
24
+ #include <unordered_map>
25
+ #include <vector>
26
+
27
+ #include "arrow/compute/expression.h"
28
+ #include "arrow/compute/type_fwd.h"
29
+ #include "arrow/result.h"
30
+ #include "arrow/util/cpu_info.h"
31
+ #include "arrow/util/simd.h"
32
+
33
// Portable 64-bit byteswap and 32/64-bit rotate-left helpers.
//
// The right-shift count is the left count negated modulo the operand width
// ((-(n)) & 31 / 63), which also keeps a rotation by 0 from producing an
// undefined full-width shift.
#if defined(__clang__) || defined(__GNUC__)
#  define BYTESWAP(x) __builtin_bswap64(x)
// NOTE: `n` is parenthesized everywhere it expands so that an expression
// argument such as `ROTL(x, a + b)` negates the whole expression rather than
// only its first operand (the previous `(-n)` expanded to `-a + b`).
#  define ROTL(x, n) (((x) << (n)) | ((x) >> ((-(n)) & 31)))
#  define ROTL64(x, n) (((x) << (n)) | ((x) >> ((-(n)) & 63)))
#elif defined(_MSC_VER)
#  include <intrin.h>
#  define BYTESWAP(x) _byteswap_uint64(x)
#  define ROTL(x, n) _rotl((x), (n))
#  define ROTL64(x, n) _rotl64((x), (n))
#endif
43
+
44
+ namespace arrow {
45
+ namespace util {
46
+
47
+ // Some platforms typedef int64_t as long int instead of long long int,
48
+ // which breaks the _mm256_i64gather_epi64 and _mm256_i32gather_epi64 intrinsics
49
+ // which need long long.
50
+ // We use the cast to the type below in these intrinsics to make the code
51
+ // compile in all cases.
52
+ //
53
+ using int64_for_gather_t = const long long int; // NOLINT runtime-int
54
+
55
+ // All MiniBatch... classes use TempVectorStack for vector allocations and can
56
+ // only work with vectors up to 1024 elements.
57
+ //
58
+ // They should only be allocated on the stack to guarantee the right sequence
59
+ // of allocation and deallocation of vectors from TempVectorStack.
60
+ //
61
+ class MiniBatch {
62
+ public:
63
+ static constexpr int kLogMiniBatchLength = 10;
64
+ static constexpr int kMiniBatchLength = 1 << kLogMiniBatchLength;
65
+ };
66
+
67
+ namespace bit_util {
68
+
69
+ // NOTE(review): `hardware_flags` presumably carries CpuInfo feature bits used
+ // to dispatch to the avx2 variants declared below — confirm in the .cc files.
+ ARROW_EXPORT void bits_to_indexes(int bit_to_search, int64_t hardware_flags,
70
+ const int num_bits, const uint8_t* bits,
71
+ int* num_indexes, uint16_t* indexes,
72
+ int bit_offset = 0);
73
+
74
+ ARROW_EXPORT void bits_filter_indexes(int bit_to_search, int64_t hardware_flags,
75
+ const int num_bits, const uint8_t* bits,
76
+ const uint16_t* input_indexes, int* num_indexes,
77
+ uint16_t* indexes, int bit_offset = 0);
78
+
79
+ // Input and output indexes may be pointing to the same data (in-place filtering).
80
+ ARROW_EXPORT void bits_split_indexes(int64_t hardware_flags, const int num_bits,
81
+ const uint8_t* bits, int* num_indexes_bit0,
82
+ uint16_t* indexes_bit0, uint16_t* indexes_bit1,
83
+ int bit_offset = 0);
84
+
85
+ // Bit 1 is replaced with byte 0xFF.
86
+ ARROW_EXPORT void bits_to_bytes(int64_t hardware_flags, const int num_bits,
87
+ const uint8_t* bits, uint8_t* bytes, int bit_offset = 0);
88
+
89
+ // Return highest bit of each byte.
90
+ ARROW_EXPORT void bytes_to_bits(int64_t hardware_flags, const int num_bits,
91
+ const uint8_t* bytes, uint8_t* bits, int bit_offset = 0);
92
+
93
+ ARROW_EXPORT bool are_all_bytes_zero(int64_t hardware_flags, const uint8_t* bytes,
94
+ uint32_t num_bytes);
95
+
96
+ #if defined(ARROW_HAVE_RUNTIME_AVX2) && defined(ARROW_HAVE_RUNTIME_BMI2)
97
+ // The functions below use BMI2 instructions, be careful before calling!
98
+
99
+ namespace avx2 {
100
+ ARROW_EXPORT void bits_filter_indexes_avx2(int bit_to_search, const int num_bits,
101
+ const uint8_t* bits,
102
+ const uint16_t* input_indexes,
103
+ int* num_indexes, uint16_t* indexes);
104
+ ARROW_EXPORT void bits_to_indexes_avx2(int bit_to_search, const int num_bits,
105
+ const uint8_t* bits, int* num_indexes,
106
+ uint16_t* indexes, uint16_t base_index = 0);
107
+ ARROW_EXPORT void bits_to_bytes_avx2(const int num_bits, const uint8_t* bits,
108
+ uint8_t* bytes);
109
+ ARROW_EXPORT void bytes_to_bits_avx2(const int num_bits, const uint8_t* bytes,
110
+ uint8_t* bits);
111
+ ARROW_EXPORT bool are_all_bytes_zero_avx2(const uint8_t* bytes, uint32_t num_bytes);
112
+ } // namespace avx2
113
+
114
+ #endif
115
+
116
+ } // namespace bit_util
117
+ } // namespace util
118
+
119
+ namespace compute {
120
+
121
+ /// Modify an Expression with pre-order and post-order visitation.
122
+ /// `pre` will be invoked on each Expression. `pre` will visit Calls before their
123
+ /// arguments, `post_call` will visit Calls (and no other Expressions) after their
124
+ /// arguments. Visitors should return the Identical expression to indicate no change; this
125
+ /// will prevent unnecessary construction in the common case where a modification is not
126
+ /// possible/necessary/...
127
+ ///
128
+ /// If an argument was modified, `post_call` visits a reconstructed Call with the modified
129
+ /// arguments but also receives a pointer to the unmodified Expression as a second
130
+ /// argument. If no arguments were modified the unmodified Expression* will be nullptr.
131
+ template <typename PreVisit, typename PostVisitCall>
132
+ Result<Expression> ModifyExpression(Expression expr, const PreVisit& pre,
133
+ const PostVisitCall& post_call) {
134
+ ARROW_ASSIGN_OR_RAISE(expr, Result<Expression>(pre(std::move(expr))));
135
+
136
+ auto call = expr.call();
137
+ if (!call) return expr;
138
+
139
+ bool at_least_one_modified = false;
140
+ std::vector<Expression> modified_arguments;
141
+
142
+ for (size_t i = 0; i < call->arguments.size(); ++i) {
143
+ ARROW_ASSIGN_OR_RAISE(auto modified_argument,
144
+ ModifyExpression(call->arguments[i], pre, post_call));
145
+
146
+ if (Identical(modified_argument, call->arguments[i])) {
147
+ continue;
148
+ }
149
+
150
+ if (!at_least_one_modified) {
151
+ modified_arguments = call->arguments;
152
+ at_least_one_modified = true;
153
+ }
154
+
155
+ modified_arguments[i] = std::move(modified_argument);
156
+ }
157
+
158
+ if (at_least_one_modified) {
159
+ // reconstruct the call expression with the modified arguments
160
+ auto modified_call = *call;
161
+ modified_call.arguments = std::move(modified_arguments);
162
+ return post_call(Expression(std::move(modified_call)), &expr);
163
+ }
164
+
165
+ return post_call(std::move(expr), NULLPTR);
166
+ }
167
+
168
+ // Helper class to calculate the modified number of rows to process using SIMD.
169
+ //
170
+ // Some array elements at the end will be skipped in order to avoid buffer
171
+ // overrun, when doing memory loads and stores using larger word size than a
172
+ // single array element.
173
+ //
174
+ class TailSkipForSIMD {
175
+ public:
176
+ static int64_t FixBitAccess(int num_bytes_accessed_together, int64_t num_rows,
177
+ int bit_offset) {
178
+ int64_t num_bytes = bit_util::BytesForBits(num_rows + bit_offset);
179
+ int64_t num_bytes_safe =
180
+ std::max(static_cast<int64_t>(0LL), num_bytes - num_bytes_accessed_together + 1);
181
+ int64_t num_rows_safe =
182
+ std::max(static_cast<int64_t>(0LL), 8 * num_bytes_safe - bit_offset);
183
+ return std::min(num_rows_safe, num_rows);
184
+ }
185
+ static int64_t FixBinaryAccess(int num_bytes_accessed_together, int64_t num_rows,
186
+ int64_t length) {
187
+ int64_t num_rows_to_skip = bit_util::CeilDiv(length, num_bytes_accessed_together);
188
+ int64_t num_rows_safe =
189
+ std::max(static_cast<int64_t>(0LL), num_rows - num_rows_to_skip);
190
+ return num_rows_safe;
191
+ }
192
+ static int64_t FixVarBinaryAccess(int num_bytes_accessed_together, int64_t num_rows,
193
+ const uint32_t* offsets) {
194
+ // Do not process rows that could read past the end of the buffer using N
195
+ // byte loads/stores.
196
+ //
197
+ int64_t num_rows_safe = num_rows;
198
+ while (num_rows_safe > 0 &&
199
+ offsets[num_rows_safe] + num_bytes_accessed_together > offsets[num_rows]) {
200
+ --num_rows_safe;
201
+ }
202
+ return num_rows_safe;
203
+ }
204
+ static int FixSelection(int64_t num_rows_safe, int num_selected,
205
+ const uint16_t* selection) {
206
+ int num_selected_safe = num_selected;
207
+ while (num_selected_safe > 0 && selection[num_selected_safe - 1] >= num_rows_safe) {
208
+ --num_selected_safe;
209
+ }
210
+ return num_selected_safe;
211
+ }
212
+ };
213
+
214
+ } // namespace compute
215
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/api.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include "arrow/compute/expression.h"
23
+ #include "arrow/dataset/dataset.h"
24
+ #include "arrow/dataset/discovery.h"
25
+ #include "arrow/dataset/file_base.h"
26
+ #ifdef ARROW_CSV
27
+ # include "arrow/dataset/file_csv.h"
28
+ #endif
29
+ #ifdef ARROW_JSON
30
+ # include "arrow/dataset/file_json.h"
31
+ #endif
32
+ #include "arrow/dataset/file_ipc.h"
33
+ #ifdef ARROW_ORC
34
+ # include "arrow/dataset/file_orc.h"
35
+ #endif
36
+ #ifdef ARROW_PARQUET
37
+ # include "arrow/dataset/file_parquet.h"
38
+ #endif
39
+ #include "arrow/dataset/scanner.h"
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset.h ADDED
@@ -0,0 +1,481 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <optional>
25
+ #include <string>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "arrow/compute/expression.h"
30
+ #include "arrow/dataset/type_fwd.h"
31
+ #include "arrow/dataset/visibility.h"
32
+ #include "arrow/util/async_generator_fwd.h"
33
+ #include "arrow/util/future.h"
34
+ #include "arrow/util/macros.h"
35
+ #include "arrow/util/mutex.h"
36
+
37
+ namespace arrow {
38
+
39
+ namespace internal {
40
+ class Executor;
41
+ } // namespace internal
42
+
43
+ namespace dataset {
44
+
45
+ using RecordBatchGenerator = std::function<Future<std::shared_ptr<RecordBatch>>()>;
46
+
47
+ /// \brief Description of a column to scan
48
+ struct ARROW_DS_EXPORT FragmentSelectionColumn {
49
+ /// \brief The path to the column to load
50
+ FieldPath path;
51
+ /// \brief The type of the column in the dataset schema
52
+ ///
53
+ /// A format may choose to ignore this field completely. For example, when
54
+ /// reading from IPC the reader can just return the column in the data type
55
+ /// that is stored on disk. There is no point in doing anything special.
56
+ ///
57
+ /// However, some formats may be capable of casting on the fly. For example,
58
+ /// when reading from CSV, if we know the target type of the column, we can
59
+ /// convert from string to the target type as we read.
60
+ DataType* requested_type;
61
+ };
62
+
63
+ /// \brief A list of columns that should be loaded from a fragment
64
+ ///
65
+ /// The paths in this selection should be referring to the fragment schema. This class
66
+ /// contains a virtual destructor as it is expected evolution strategies will need to
67
+ /// extend this to add any information needed to later evolve the batches.
68
+ ///
69
+ /// For example, in the basic evolution strategy, we keep track of which columns
70
+ /// were missing from the file so that we can fill those in with null when evolving.
71
+ class ARROW_DS_EXPORT FragmentSelection {
72
+ public:
73
+ explicit FragmentSelection(std::vector<FragmentSelectionColumn> columns)
74
+ : columns_(std::move(columns)) {}
75
+ virtual ~FragmentSelection() = default;
76
+ /// The columns that should be loaded from the fragment
77
+ const std::vector<FragmentSelectionColumn>& columns() const { return columns_; }
78
+
79
+ private:
80
+ std::vector<FragmentSelectionColumn> columns_;
81
+ };
82
+
83
+ /// \brief Instructions for scanning a particular fragment
84
+ ///
85
+ /// The fragment scan request is derived from ScanV2Options. The main
86
+ /// difference is that the scan options are based on the dataset schema
87
+ /// while the fragment request is based on the fragment schema.
88
+ struct ARROW_DS_EXPORT FragmentScanRequest {
89
+ /// \brief A row filter
90
+ ///
91
+ /// The filter expression should be written against the fragment schema.
92
+ ///
93
+ /// \see ScanV2Options for details on how this filter should be applied
94
+ compute::Expression filter = compute::literal(true);
95
+
96
+ /// \brief The columns to scan
97
+ ///
98
+ /// These indices refer to the fragment schema
99
+ ///
100
+ /// Note: This is NOT a simple list of top-level column indices.
101
+ /// For more details \see ScanV2Options
102
+ ///
103
+ /// If possible a fragment should only read from disk the data needed
104
+ /// to satisfy these columns. If a format cannot partially read a nested
105
+ /// column (e.g. JSON) then it must apply the column selection (in memory)
106
+ /// before returning the scanned batch.
107
+ std::shared_ptr<FragmentSelection> fragment_selection;
108
+ /// \brief Options specific to the format being scanned
109
+ const FragmentScanOptions* format_scan_options;
110
+ };
111
+
112
+ /// \brief An iterator-like object that can yield batches created from a fragment
113
+ class ARROW_DS_EXPORT FragmentScanner {
114
+ public:
115
+ /// This instance will only be destroyed after all ongoing scan futures
116
+ /// have been completed.
117
+ ///
118
+ /// This means any callbacks created as part of the scan can safely
119
+ /// capture `this`
120
+ virtual ~FragmentScanner() = default;
121
+ /// \brief Scan a batch of data from the file
122
+ /// \param batch_number The index of the batch to read
123
+ virtual Future<std::shared_ptr<RecordBatch>> ScanBatch(int batch_number) = 0;
124
+ /// \brief Calculate an estimate of how many data bytes the given batch will represent
125
+ ///
126
+ /// "Data bytes" should be the total size of all the buffers once the data has been
127
+ /// decoded into the Arrow format.
128
+ virtual int64_t EstimatedDataBytes(int batch_number) = 0;
129
+ /// \brief The number of batches in the fragment to scan
130
+ virtual int NumBatches() = 0;
131
+ };
132
+
133
+ /// \brief Information learned about a fragment through inspection
134
+ ///
135
+ /// This information can be used to figure out which fields need
136
+ /// to be read from a file and how the data read in should be evolved
137
+ /// to match the dataset schema.
138
+ ///
139
+ /// For example, from a CSV file we can inspect and learn the column
140
+ /// names and use those column names to determine which columns to load
141
+ /// from the CSV file.
142
+ struct ARROW_DS_EXPORT InspectedFragment {
143
+ explicit InspectedFragment(std::vector<std::string> column_names)
144
+ : column_names(std::move(column_names)) {}
145
+ std::vector<std::string> column_names;
146
+ };
147
+
148
+ /// \brief A granular piece of a Dataset, such as an individual file.
149
+ ///
150
+ /// A Fragment can be read/scanned separately from other fragments. It yields a
151
+ /// collection of RecordBatches when scanned
152
+ ///
153
+ /// Note that Fragments have well defined physical schemas which are reconciled by
154
+ /// the Datasets which contain them; these physical schemas may differ from a parent
155
+ /// Dataset's schema and the physical schemas of sibling Fragments.
156
+ class ARROW_DS_EXPORT Fragment : public std::enable_shared_from_this<Fragment> {
157
+ public:
158
+ /// \brief An expression that represents no known partition information
159
+ static const compute::Expression kNoPartitionInformation;
160
+
161
+ /// \brief Return the physical schema of the Fragment.
162
+ ///
163
+ /// The physical schema is also called the writer schema.
164
+ /// This method is blocking and may suffer from high latency filesystem.
165
+ /// The schema is cached after being read once, or may be specified at construction.
166
+ Result<std::shared_ptr<Schema>> ReadPhysicalSchema();
167
+
168
+ /// An asynchronous version of Scan
169
+ virtual Result<RecordBatchGenerator> ScanBatchesAsync(
170
+ const std::shared_ptr<ScanOptions>& options) = 0;
171
+
172
+ /// \brief Inspect a fragment to learn basic information
173
+ ///
174
+ /// This will be called before a scan and a fragment should attach whatever
175
+ /// information will be needed to figure out an evolution strategy. This information
176
+ /// will then be passed to the call to BeginScan
177
+ virtual Future<std::shared_ptr<InspectedFragment>> InspectFragment(
178
+ const FragmentScanOptions* format_options, compute::ExecContext* exec_context);
179
+
180
+ /// \brief Start a scan operation
181
+ virtual Future<std::shared_ptr<FragmentScanner>> BeginScan(
182
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
183
+ const FragmentScanOptions* format_options, compute::ExecContext* exec_context);
184
+
185
+ /// \brief Count the number of rows in this fragment matching the filter using metadata
186
+ /// only. That is, this method may perform I/O, but will not load data.
187
+ ///
188
+ /// If this is not possible, resolve with an empty optional. The fragment can perform
189
+ /// I/O (e.g. to read metadata) before it deciding whether it can satisfy the request.
190
+ virtual Future<std::optional<int64_t>> CountRows(
191
+ compute::Expression predicate, const std::shared_ptr<ScanOptions>& options);
192
+
193
+ virtual std::string type_name() const = 0;
194
+ virtual std::string ToString() const { return type_name(); }
195
+
196
+ /// \brief An expression which evaluates to true for all data viewed by this
197
+ /// Fragment.
198
+ const compute::Expression& partition_expression() const {
199
+ return partition_expression_;
200
+ }
201
+
202
+ virtual ~Fragment() = default;
203
+
204
+ protected:
205
+ Fragment() = default;
206
+ explicit Fragment(compute::Expression partition_expression,
207
+ std::shared_ptr<Schema> physical_schema);
208
+
209
+ virtual Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() = 0;
210
+
211
+ util::Mutex physical_schema_mutex_;
212
+ compute::Expression partition_expression_ = compute::literal(true);
213
+ std::shared_ptr<Schema> physical_schema_;
214
+ };
215
+
216
+ /// \brief Per-scan options for fragment(s) in a dataset.
217
+ ///
218
+ /// These options are not intrinsic to the format or fragment itself, but do affect
219
+ /// the results of a scan. These are options which make sense to change between
220
+ /// repeated reads of the same dataset, such as format-specific conversion options
221
+ /// (that do not affect the schema).
222
+ ///
223
+ /// \ingroup dataset-scanning
224
+ class ARROW_DS_EXPORT FragmentScanOptions {
225
+ public:
226
+ virtual std::string type_name() const = 0;
227
+ virtual std::string ToString() const { return type_name(); }
228
+ virtual ~FragmentScanOptions() = default;
229
+ };
230
+
231
+ /// \defgroup dataset-implementations Concrete implementations
232
+ ///
233
+ /// @{
234
+
235
+ /// \brief A trivial Fragment that yields ScanTask out of a fixed set of
236
+ /// RecordBatch.
237
+ class ARROW_DS_EXPORT InMemoryFragment : public Fragment {
238
+ public:
239
+ class Scanner;
240
+ InMemoryFragment(std::shared_ptr<Schema> schema, RecordBatchVector record_batches,
241
+ compute::Expression = compute::literal(true));
242
+ explicit InMemoryFragment(RecordBatchVector record_batches,
243
+ compute::Expression = compute::literal(true));
244
+
245
+ Result<RecordBatchGenerator> ScanBatchesAsync(
246
+ const std::shared_ptr<ScanOptions>& options) override;
247
+ Future<std::optional<int64_t>> CountRows(
248
+ compute::Expression predicate,
249
+ const std::shared_ptr<ScanOptions>& options) override;
250
+
251
+ Future<std::shared_ptr<InspectedFragment>> InspectFragment(
252
+ const FragmentScanOptions* format_options,
253
+ compute::ExecContext* exec_context) override;
254
+ Future<std::shared_ptr<FragmentScanner>> BeginScan(
255
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
256
+ const FragmentScanOptions* format_options,
257
+ compute::ExecContext* exec_context) override;
258
+
259
+ std::string type_name() const override { return "in-memory"; }
260
+
261
+ protected:
262
+ Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() override;
263
+
264
+ RecordBatchVector record_batches_;
265
+ };
266
+
267
+ /// @}
268
+
269
+ using FragmentGenerator = AsyncGenerator<std::shared_ptr<Fragment>>;
270
+
271
+ /// \brief Rules for converting the dataset schema to and from fragment schemas
272
+ class ARROW_DS_EXPORT FragmentEvolutionStrategy {
273
+ public:
274
+ /// This instance will only be destroyed when all scan operations for the
275
+ /// fragment have completed.
276
+ virtual ~FragmentEvolutionStrategy() = default;
277
+ /// \brief A guarantee that applies to all batches of this fragment
278
+ ///
279
+ /// For example, if a fragment is missing one of the fields in the dataset
280
+ /// schema then a typical evolution strategy is to set that field to null.
281
+ ///
282
+ /// So if the column at index 3 is missing then the guarantee is
283
+ /// FieldRef(3) == null
284
+ ///
285
+ /// Individual field guarantees should be AND'd together and returned
286
+ /// as a single expression.
287
+ virtual Result<compute::Expression> GetGuarantee(
288
+ const std::vector<FieldPath>& dataset_schema_selection) const = 0;
289
+
290
+ /// \brief Return a fragment schema selection given a dataset schema selection
291
+ ///
292
+ /// For example, if the user wants fields 2 & 4 of the dataset schema and
293
+ /// in this fragment the field 2 is missing and the field 4 is at index 1 then
294
+ /// this should return {1}
295
+ virtual Result<std::unique_ptr<FragmentSelection>> DevolveSelection(
296
+ const std::vector<FieldPath>& dataset_schema_selection) const = 0;
297
+
298
+ /// \brief Return a filter expression bound to the fragment schema given
299
+ /// a filter expression bound to the dataset schema
300
+ ///
301
+ /// The dataset scan filter will first be simplified by the guarantee returned
302
+ /// by GetGuarantee. This means an evolution that only handles dropping or casting
303
+ /// fields doesn't need to do anything here except return the given filter.
304
+ ///
305
+ /// On the other hand, an evolution that is doing some kind of aliasing will likely
306
+ /// need to convert field references in the filter to the aliased field references
307
+ /// where appropriate.
308
+ virtual Result<compute::Expression> DevolveFilter(
309
+ const compute::Expression& filter) const = 0;
310
+
311
+ /// \brief Convert a batch from the fragment schema to the dataset schema
312
+ ///
313
+ /// Typically this involves casting columns from the data type stored on disk
314
+ /// to the data type of the dataset schema. For example, this fragment might
315
+ /// have columns stored as int32 and the dataset schema might have int64 for
316
+ /// the column. In this case we should cast the column from int32 to int64.
317
+ ///
318
+ /// Note: A fragment may perform this cast as the data is read from disk. In
319
+ /// that case a cast might not be needed.
320
+ virtual Result<compute::ExecBatch> EvolveBatch(
321
+ const std::shared_ptr<RecordBatch>& batch,
322
+ const std::vector<FieldPath>& dataset_selection,
323
+ const FragmentSelection& selection) const = 0;
324
+
325
+ /// \brief Return a string description of this strategy
326
+ virtual std::string ToString() const = 0;
327
+ };
328
+
329
+ /// \brief Lookup to create a FragmentEvolutionStrategy for a given fragment
330
+ class ARROW_DS_EXPORT DatasetEvolutionStrategy {
331
+ public:
332
+ virtual ~DatasetEvolutionStrategy() = default;
333
+ /// \brief Create a strategy for evolving from the given fragment
334
+ /// to the schema of the given dataset
335
+ virtual std::unique_ptr<FragmentEvolutionStrategy> GetStrategy(
336
+ const Dataset& dataset, const Fragment& fragment,
337
+ const InspectedFragment& inspected_fragment) = 0;
338
+
339
+ /// \brief Return a string description of this strategy
340
+ virtual std::string ToString() const = 0;
341
+ };
342
+
343
+ ARROW_DS_EXPORT std::unique_ptr<DatasetEvolutionStrategy>
344
+ MakeBasicDatasetEvolutionStrategy();
345
+
346
+ /// \brief A container of zero or more Fragments.
347
+ ///
348
+ /// A Dataset acts as a union of Fragments, e.g. files deeply nested in a
349
+ /// directory. A Dataset has a schema to which Fragments must align during a
350
+ /// scan operation. This is analogous to Avro's reader and writer schema.
351
+ class ARROW_DS_EXPORT Dataset : public std::enable_shared_from_this<Dataset> {
352
+ public:
353
+ /// \brief Begin to build a new Scan operation against this Dataset
354
+ Result<std::shared_ptr<ScannerBuilder>> NewScan();
355
+
356
+ /// \brief GetFragments returns an iterator of Fragments given a predicate.
357
+ Result<FragmentIterator> GetFragments(compute::Expression predicate);
358
+ Result<FragmentIterator> GetFragments();
359
+
360
+ /// \brief Async versions of `GetFragments`.
361
+ Result<FragmentGenerator> GetFragmentsAsync(compute::Expression predicate);
362
+ Result<FragmentGenerator> GetFragmentsAsync();
363
+
364
+ const std::shared_ptr<Schema>& schema() const { return schema_; }
365
+
366
+ /// \brief An expression which evaluates to true for all data viewed by this Dataset.
367
+ /// May be null, which indicates no information is available.
368
+ const compute::Expression& partition_expression() const {
369
+ return partition_expression_;
370
+ }
371
+
372
+ /// \brief The name identifying the kind of Dataset
373
+ virtual std::string type_name() const = 0;
374
+
375
+ /// \brief Return a copy of this Dataset with a different schema.
376
+ ///
377
+ /// The copy will view the same Fragments. If the new schema is not compatible with the
378
+ /// original dataset's schema then an error will be raised.
379
+ virtual Result<std::shared_ptr<Dataset>> ReplaceSchema(
380
+ std::shared_ptr<Schema> schema) const = 0;
381
+
382
+ /// \brief Rules used by this dataset to handle schema evolution
383
+ DatasetEvolutionStrategy* evolution_strategy() { return evolution_strategy_.get(); }
384
+
385
+ virtual ~Dataset() = default;
386
+
387
+ protected:
388
+ explicit Dataset(std::shared_ptr<Schema> schema) : schema_(std::move(schema)) {}
389
+
390
+ Dataset(std::shared_ptr<Schema> schema, compute::Expression partition_expression);
391
+
392
+ virtual Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) = 0;
393
+ /// \brief Default non-virtual implementation method for the base
394
+ /// `GetFragmentsAsyncImpl` method, which creates a fragment generator for
395
+ /// the dataset, possibly filtering results with a predicate (forwarding to
396
+ /// the synchronous `GetFragmentsImpl` method and moving the computations
397
+ /// to the background, using the IO thread pool).
398
+ ///
399
+ /// Currently, `executor` is always the same as `internal::GetCPUThreadPool()`,
400
+ /// which means the results from the underlying fragment generator will be
401
+ /// transferred to the default CPU thread pool. The generator itself is
402
+ /// offloaded to run on the default IO thread pool.
403
+ virtual Result<FragmentGenerator> GetFragmentsAsyncImpl(
404
+ compute::Expression predicate, arrow::internal::Executor* executor);
405
+
406
+ std::shared_ptr<Schema> schema_;
407
+ compute::Expression partition_expression_ = compute::literal(true);
408
+ std::unique_ptr<DatasetEvolutionStrategy> evolution_strategy_ =
409
+ MakeBasicDatasetEvolutionStrategy();
410
+ };
411
+
412
+ /// \addtogroup dataset-implementations
413
+ ///
414
+ /// @{
415
+
416
+ /// \brief A Source which yields fragments wrapping a stream of record batches.
417
+ ///
418
+ /// The record batches must match the schema provided to the source at construction.
419
+ class ARROW_DS_EXPORT InMemoryDataset : public Dataset {
420
+ public:
421
+ class RecordBatchGenerator {
422
+ public:
423
+ virtual ~RecordBatchGenerator() = default;
424
+ virtual RecordBatchIterator Get() const = 0;
425
+ };
426
+
427
+ /// Construct a dataset from a schema and a factory of record batch iterators.
428
+ InMemoryDataset(std::shared_ptr<Schema> schema,
429
+ std::shared_ptr<RecordBatchGenerator> get_batches)
430
+ : Dataset(std::move(schema)), get_batches_(std::move(get_batches)) {}
431
+
432
+ /// Convenience constructor taking a fixed list of batches
433
+ InMemoryDataset(std::shared_ptr<Schema> schema, RecordBatchVector batches);
434
+
435
+ /// Convenience constructor taking a Table
436
+ explicit InMemoryDataset(std::shared_ptr<Table> table);
437
+
438
+ std::string type_name() const override { return "in-memory"; }
439
+
440
+ Result<std::shared_ptr<Dataset>> ReplaceSchema(
441
+ std::shared_ptr<Schema> schema) const override;
442
+
443
+ protected:
444
+ Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) override;
445
+
446
+ std::shared_ptr<RecordBatchGenerator> get_batches_;
447
+ };
448
+
449
+ /// \brief A Dataset wrapping child Datasets.
450
+ class ARROW_DS_EXPORT UnionDataset : public Dataset {
451
+ public:
452
+ /// \brief Construct a UnionDataset wrapping child Datasets.
453
+ ///
454
+ /// \param[in] schema the schema of the resulting dataset.
455
+ /// \param[in] children one or more child Datasets. Their schemas must be identical to
456
+ /// schema.
457
+ static Result<std::shared_ptr<UnionDataset>> Make(std::shared_ptr<Schema> schema,
458
+ DatasetVector children);
459
+
460
+ const DatasetVector& children() const { return children_; }
461
+
462
+ std::string type_name() const override { return "union"; }
463
+
464
+ Result<std::shared_ptr<Dataset>> ReplaceSchema(
465
+ std::shared_ptr<Schema> schema) const override;
466
+
467
+ protected:
468
+ Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) override;
469
+
470
+ explicit UnionDataset(std::shared_ptr<Schema> schema, DatasetVector children)
471
+ : Dataset(std::move(schema)), children_(std::move(children)) {}
472
+
473
+ DatasetVector children_;
474
+
475
+ friend class UnionDatasetFactory;
476
+ };
477
+
478
+ /// @}
479
+
480
+ } // namespace dataset
481
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset_writer.h ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+
22
+ #include "arrow/dataset/file_base.h"
23
+ #include "arrow/record_batch.h"
24
+ #include "arrow/status.h"
25
+ #include "arrow/util/async_util.h"
26
+ #include "arrow/util/future.h"
27
+
28
+ namespace arrow {
29
+ namespace dataset {
30
+ namespace internal {
31
+
32
+ // This lines up with our other defaults in the scanner and execution plan
33
+ constexpr uint64_t kDefaultDatasetWriterMaxRowsQueued = 8 * 1024 * 1024;
34
+
35
+ /// \brief Utility class that manages a set of writers to different paths
36
+ ///
37
+ /// Writers may be closed and reopened (and a new file created) based on the dataset
38
+ /// write options (for example, max_rows_per_file or max_open_files)
39
+ ///
40
+ /// The dataset writer enforces its own back pressure based on the # of rows (as opposed
41
+ /// to # of batches which is how it is typically enforced elsewhere) and # of files.
42
+ class ARROW_DS_EXPORT DatasetWriter {
43
+ public:
44
+ /// \brief Create a dataset writer
45
+ ///
46
+ /// Will fail if basename_template is invalid or if there is existing data and
47
+ /// existing_data_behavior is kError
48
+ ///
49
+ /// \param write_options options to control how the data should be written
50
+ /// \param max_rows_queued max # of rows allowed to be queued before the dataset_writer
51
+ /// will ask for backpressure
52
+ static Result<std::unique_ptr<DatasetWriter>> Make(
53
+ FileSystemDatasetWriteOptions write_options, util::AsyncTaskScheduler* scheduler,
54
+ std::function<void()> pause_callback, std::function<void()> resume_callback,
55
+ std::function<void()> finish_callback,
56
+ uint64_t max_rows_queued = kDefaultDatasetWriterMaxRowsQueued);
57
+
58
+ ~DatasetWriter();
59
+
60
+ /// \brief Write a batch to the dataset
61
+ /// \param[in] batch The batch to write
62
+ /// \param[in] directory The directory to write to
63
+ ///
64
+ /// Note: The written filename will be {directory}/{filename_factory(i)} where i is a
65
+ /// counter controlled by `max_open_files` and `max_rows_per_file`
66
+ ///
67
+ /// If multiple WriteRecordBatch calls arrive with the same `directory` then the batches
68
+ /// may be written to the same file.
69
+ ///
70
+ /// The returned future will be marked finished when the record batch has been queued
71
+ /// to be written. If the returned future is unfinished then this indicates the dataset
72
+ /// writer's queue is full and the data provider should pause.
73
+ ///
74
+ /// This method is NOT async reentrant. The returned future will only be unfinished
75
+ /// if back pressure needs to be applied. Async reentrancy is not necessary for
76
+ /// concurrent writes to happen. Calling this method again before the previous future
77
+ /// completes will not just violate max_rows_queued but likely lead to race conditions.
78
+ ///
79
+ /// One thing to note is that the ordering of your data can affect your maximum
80
+ /// potential parallelism. If this seems odd then consider a dataset where the first
81
+ /// 1000 batches go to the same directory and then the 1001st batch goes to a different
82
+ /// directory. The only way to get two parallel writes immediately would be to queue
83
+ /// all 1000 pending writes to the first directory.
84
+ void WriteRecordBatch(std::shared_ptr<RecordBatch> batch, const std::string& directory,
85
+ const std::string& prefix = "");
86
+
87
+ /// Finish all pending writes and close any open files
88
+ void Finish();
89
+
90
+ protected:
91
+ DatasetWriter(FileSystemDatasetWriteOptions write_options,
92
+ util::AsyncTaskScheduler* scheduler, std::function<void()> pause_callback,
93
+ std::function<void()> resume_callback,
94
+ std::function<void()> finish_callback,
95
+ uint64_t max_rows_queued = kDefaultDatasetWriterMaxRowsQueued);
96
+
97
+ class DatasetWriterImpl;
98
+ std::unique_ptr<DatasetWriterImpl> impl_;
99
+ };
100
+
101
+ } // namespace internal
102
+ } // namespace dataset
103
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ /// Logic for automatically determining the structure of multi-file
19
+ /// dataset with possible partitioning according to available
20
+ /// partitioning
21
+
22
+ // This API is EXPERIMENTAL.
23
+
24
+ #pragma once
25
+
26
+ #include <memory>
27
+ #include <string>
28
+ #include <variant>
29
+ #include <vector>
30
+
31
+ #include "arrow/dataset/partition.h"
32
+ #include "arrow/dataset/type_fwd.h"
33
+ #include "arrow/dataset/visibility.h"
34
+ #include "arrow/filesystem/type_fwd.h"
35
+ #include "arrow/result.h"
36
+ #include "arrow/util/macros.h"
37
+
38
+ namespace arrow {
39
+ namespace dataset {
40
+
41
+ /// \defgroup dataset-discovery Discovery API
42
+ ///
43
+ /// @{
44
+
45
+ struct InspectOptions {
46
+ /// See `fragments` property.
47
+ static constexpr int kInspectAllFragments = -1;
48
+
49
+ /// Indicate how many fragments should be inspected to infer the unified dataset
50
+ /// schema. Limiting the number of fragments accessed improves the latency of
51
+ /// the discovery process when dealing with a high number of fragments and/or
52
+ /// high latency file systems.
53
+ ///
54
+ /// The default value of `1` inspects the schema of the first (in no particular
55
+ /// order) fragment only. If the dataset has a uniform schema for all fragments,
56
+ /// this default is the optimal value. In order to inspect all fragments and
57
+ /// robustly unify their potentially varying schemas, set this option to
58
+ /// `kInspectAllFragments`. A value of `0` disables inspection of fragments
59
+ /// altogether so only the partitioning schema will be inspected.
60
+ int fragments = 1;
61
+
62
+ /// Control how to unify types. By default, types are merged strictly (the
63
+ /// type must match exactly, except nulls can be merged with other types).
64
+ Field::MergeOptions field_merge_options = Field::MergeOptions::Defaults();
65
+ };
66
+
67
+ struct FinishOptions {
68
+ /// Finalize the dataset with this given schema. If the schema is not
69
+ /// provided, infer the schema via the Inspect, see the `inspect_options`
70
+ /// property.
71
+ std::shared_ptr<Schema> schema = NULLPTR;
72
+
73
+ /// If the schema is not provided, it will be discovered by passing the
74
+ /// following options to `DatasetDiscovery::Inspect`.
75
+ InspectOptions inspect_options{};
76
+
77
+ /// Indicate if the given Schema (when specified), should be validated against
78
+ /// the fragments' schemas. `inspect_options` will control how many fragments
79
+ /// are checked.
80
+ bool validate_fragments = false;
81
+ };
82
+
83
+ /// \brief DatasetFactory provides a way to inspect/discover a Dataset's expected
84
+ /// schema before materializing said Dataset.
85
+ class ARROW_DS_EXPORT DatasetFactory {
86
+ public:
87
+ /// \brief Get the schemas of the Fragments and Partitioning.
88
+ virtual Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
89
+ InspectOptions options) = 0;
90
+
91
+ /// \brief Get unified schema for the resulting Dataset.
92
+ Result<std::shared_ptr<Schema>> Inspect(InspectOptions options = {});
93
+
94
+ /// \brief Create a Dataset
95
+ Result<std::shared_ptr<Dataset>> Finish();
96
+ /// \brief Create a Dataset with the given schema (see \a InspectOptions::schema)
97
+ Result<std::shared_ptr<Dataset>> Finish(std::shared_ptr<Schema> schema);
98
+ /// \brief Create a Dataset with the given options
99
+ virtual Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) = 0;
100
+
101
+ /// \brief Optional root partition for the resulting Dataset.
102
+ const compute::Expression& root_partition() const { return root_partition_; }
103
+ /// \brief Set the root partition for the resulting Dataset.
104
+ Status SetRootPartition(compute::Expression partition) {
105
+ root_partition_ = std::move(partition);
106
+ return Status::OK();
107
+ }
108
+
109
+ virtual ~DatasetFactory() = default;
110
+
111
+ protected:
112
+ DatasetFactory();
113
+
114
+ compute::Expression root_partition_;
115
+ };
116
+
117
+ /// @}
118
+
119
+ /// \brief DatasetFactory provides a way to inspect/discover a Dataset's
120
+ /// expected schema before materialization.
121
+ /// \ingroup dataset-implementations
122
+ class ARROW_DS_EXPORT UnionDatasetFactory : public DatasetFactory {
123
+ public:
124
+ static Result<std::shared_ptr<DatasetFactory>> Make(
125
+ std::vector<std::shared_ptr<DatasetFactory>> factories);
126
+
127
+ /// \brief Return the list of child DatasetFactory
128
+ const std::vector<std::shared_ptr<DatasetFactory>>& factories() const {
129
+ return factories_;
130
+ }
131
+
132
+ /// \brief Get the schemas of the Datasets.
133
+ ///
134
+ /// Instead of applying options globally, it applies at each child factory.
135
+ /// This will not respect `options.fragments` exactly, but will respect the
136
+ /// spirit of peeking the first fragments or all of them.
137
+ Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
138
+ InspectOptions options) override;
139
+
140
+ /// \brief Create a Dataset.
141
+ Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) override;
142
+
143
+ protected:
144
+ explicit UnionDatasetFactory(std::vector<std::shared_ptr<DatasetFactory>> factories);
145
+
146
+ std::vector<std::shared_ptr<DatasetFactory>> factories_;
147
+ };
148
+
149
+ /// \ingroup dataset-filesystem
150
+ struct FileSystemFactoryOptions {
151
+ /// Either an explicit Partitioning or a PartitioningFactory to discover one.
152
+ ///
153
+ /// If a factory is provided, it will be used to infer a schema for partition fields
154
+ /// based on file and directory paths then construct a Partitioning. The default
155
+ /// is a Partitioning which will yield no partition information.
156
+ ///
157
+ /// The (explicit or discovered) partitioning will be applied to discovered files
158
+ /// and the resulting partition information embedded in the Dataset.
159
+ PartitioningOrFactory partitioning{Partitioning::Default()};
160
+
161
+ /// For the purposes of applying the partitioning, paths will be stripped
162
+ /// of the partition_base_dir. Files not matching the partition_base_dir
163
+ /// prefix will be skipped for partition discovery. The ignored files will still
164
+ /// be part of the Dataset, but will not have partition information.
165
+ ///
166
+ /// Example:
167
+ /// partition_base_dir = "/dataset";
168
+ ///
169
+ /// - "/dataset/US/sales.csv" -> "US/sales.csv" will be given to the partitioning
170
+ ///
171
+ /// - "/home/john/late_sales.csv" -> Will be ignored for partition discovery.
172
+ ///
173
+ /// This is useful for partitioning which parses directory when ordering
174
+ /// is important, e.g. DirectoryPartitioning.
175
+ std::string partition_base_dir;
176
+
177
+ /// Invalid files (via selector or explicitly) will be excluded by checking
178
+ /// with the FileFormat::IsSupported method. This will incur IO for each files
179
+ /// in a serial and single threaded fashion. Disabling this feature will skip the
180
+ /// IO, but unsupported files may be present in the Dataset
181
+ /// (resulting in an error at scan time).
182
+ bool exclude_invalid_files = false;
183
+
184
+ /// When discovering from a Selector (and not from an explicit file list), ignore
185
+ /// files and directories matching any of these prefixes.
186
+ ///
187
+ /// Example (with selector = "/dataset/**"):
188
+ /// selector_ignore_prefixes = {"_", ".DS_STORE" };
189
+ ///
190
+ /// - "/dataset/data.csv" -> not ignored
191
+ /// - "/dataset/_metadata" -> ignored
192
+ /// - "/dataset/.DS_STORE" -> ignored
193
+ /// - "/dataset/_hidden/dat" -> ignored
194
+ /// - "/dataset/nested/.DS_STORE" -> ignored
195
+ std::vector<std::string> selector_ignore_prefixes = {
196
+ ".",
197
+ "_",
198
+ };
199
+ };
200
+
201
+ /// \brief FileSystemDatasetFactory creates a Dataset from a vector of
202
+ /// fs::FileInfo or a fs::FileSelector.
203
+ /// \ingroup dataset-filesystem
204
+ class ARROW_DS_EXPORT FileSystemDatasetFactory : public DatasetFactory {
205
+ public:
206
+ /// \brief Build a FileSystemDatasetFactory from an explicit list of
207
+ /// paths.
208
+ ///
209
+ /// \param[in] filesystem passed to FileSystemDataset
210
+ /// \param[in] paths passed to FileSystemDataset
211
+ /// \param[in] format passed to FileSystemDataset
212
+ /// \param[in] options see FileSystemFactoryOptions for more information.
213
+ static Result<std::shared_ptr<DatasetFactory>> Make(
214
+ std::shared_ptr<fs::FileSystem> filesystem, const std::vector<std::string>& paths,
215
+ std::shared_ptr<FileFormat> format, FileSystemFactoryOptions options);
216
+
217
+ /// \brief Build a FileSystemDatasetFactory from a fs::FileSelector.
218
+ ///
219
+ /// The selector will expand to a vector of FileInfo. The expansion/crawling
220
+ /// is performed in this function call. Thus, the finalized Dataset is
221
+ /// working with a snapshot of the filesystem.
222
+ //
223
+ /// If options.partition_base_dir is not provided, it will be overwritten
224
+ /// with selector.base_dir.
225
+ ///
226
+ /// \param[in] filesystem passed to FileSystemDataset
227
+ /// \param[in] selector used to crawl and search files
228
+ /// \param[in] format passed to FileSystemDataset
229
+ /// \param[in] options see FileSystemFactoryOptions for more information.
230
+ static Result<std::shared_ptr<DatasetFactory>> Make(
231
+ std::shared_ptr<fs::FileSystem> filesystem, fs::FileSelector selector,
232
+ std::shared_ptr<FileFormat> format, FileSystemFactoryOptions options);
233
+
234
+ /// \brief Build a FileSystemDatasetFactory from an uri including filesystem
235
+ /// information.
236
+ ///
237
+ /// \param[in] uri passed to FileSystemDataset
238
+ /// \param[in] format passed to FileSystemDataset
239
+ /// \param[in] options see FileSystemFactoryOptions for more information.
240
+ static Result<std::shared_ptr<DatasetFactory>> Make(std::string uri,
241
+ std::shared_ptr<FileFormat> format,
242
+ FileSystemFactoryOptions options);
243
+
244
+ /// \brief Build a FileSystemDatasetFactory from an explicit list of
245
+ /// file information.
246
+ ///
247
+ /// \param[in] filesystem passed to FileSystemDataset
248
+ /// \param[in] files passed to FileSystemDataset
249
+ /// \param[in] format passed to FileSystemDataset
250
+ /// \param[in] options see FileSystemFactoryOptions for more information.
251
+ static Result<std::shared_ptr<DatasetFactory>> Make(
252
+ std::shared_ptr<fs::FileSystem> filesystem, const std::vector<fs::FileInfo>& files,
253
+ std::shared_ptr<FileFormat> format, FileSystemFactoryOptions options);
254
+
255
+ Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
256
+ InspectOptions options) override;
257
+
258
+ Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) override;
259
+
260
+ protected:
261
+ FileSystemDatasetFactory(std::vector<fs::FileInfo> files,
262
+ std::shared_ptr<fs::FileSystem> filesystem,
263
+ std::shared_ptr<FileFormat> format,
264
+ FileSystemFactoryOptions options);
265
+
266
+ Result<std::shared_ptr<Schema>> PartitionSchema();
267
+
268
+ std::vector<fs::FileInfo> files_;
269
+ std::shared_ptr<fs::FileSystem> fs_;
270
+ std::shared_ptr<FileFormat> format_;
271
+ FileSystemFactoryOptions options_;
272
+ };
273
+
274
+ } // namespace dataset
275
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_base.h ADDED
@@ -0,0 +1,495 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <string>
25
+ #include <utility>
26
+ #include <vector>
27
+
28
+ #include "arrow/buffer.h"
29
+ #include "arrow/dataset/dataset.h"
30
+ #include "arrow/dataset/partition.h"
31
+ #include "arrow/dataset/scanner.h"
32
+ #include "arrow/dataset/type_fwd.h"
33
+ #include "arrow/dataset/visibility.h"
34
+ #include "arrow/filesystem/filesystem.h"
35
+ #include "arrow/io/file.h"
36
+ #include "arrow/type_fwd.h"
37
+ #include "arrow/util/compression.h"
38
+
39
+ namespace arrow {
40
+
41
+ namespace dataset {
42
+
43
+ /// \defgroup dataset-file-formats File formats for reading and writing datasets
44
+ /// \defgroup dataset-filesystem File system datasets
45
+ ///
46
+ /// @{
47
+
48
+ /// \brief The path and filesystem where an actual file is located or a buffer which can
49
+ /// be read like a file
50
+ class ARROW_DS_EXPORT FileSource : public util::EqualityComparable<FileSource> {
51
+ public:
52
+ FileSource(std::string path, std::shared_ptr<fs::FileSystem> filesystem,
53
+ Compression::type compression = Compression::UNCOMPRESSED)
54
+ : file_info_(std::move(path)),
55
+ filesystem_(std::move(filesystem)),
56
+ compression_(compression) {}
57
+
58
+ FileSource(fs::FileInfo info, std::shared_ptr<fs::FileSystem> filesystem,
59
+ Compression::type compression = Compression::UNCOMPRESSED)
60
+ : file_info_(std::move(info)),
61
+ filesystem_(std::move(filesystem)),
62
+ compression_(compression) {}
63
+
64
+ explicit FileSource(std::shared_ptr<Buffer> buffer,
65
+ Compression::type compression = Compression::UNCOMPRESSED)
66
+ : buffer_(std::move(buffer)), compression_(compression) {}
67
+
68
+ using CustomOpen = std::function<Result<std::shared_ptr<io::RandomAccessFile>>()>;
69
+ FileSource(CustomOpen open, int64_t size)
70
+ : custom_open_(std::move(open)), custom_size_(size) {}
71
+
72
+ using CustomOpenWithCompression =
73
+ std::function<Result<std::shared_ptr<io::RandomAccessFile>>(Compression::type)>;
74
+ FileSource(CustomOpenWithCompression open_with_compression, int64_t size,
75
+ Compression::type compression = Compression::UNCOMPRESSED)
76
+ : custom_open_(std::bind(std::move(open_with_compression), compression)),
77
+ custom_size_(size),
78
+ compression_(compression) {}
79
+
80
+ FileSource(std::shared_ptr<io::RandomAccessFile> file, int64_t size,
81
+ Compression::type compression = Compression::UNCOMPRESSED)
82
+ : custom_open_([=] { return ToResult(file); }),
83
+ custom_size_(size),
84
+ compression_(compression) {}
85
+
86
+ explicit FileSource(std::shared_ptr<io::RandomAccessFile> file,
87
+ Compression::type compression = Compression::UNCOMPRESSED);
88
+
89
+ FileSource() : custom_open_(CustomOpen{&InvalidOpen}) {}
90
+
91
+ static std::vector<FileSource> FromPaths(const std::shared_ptr<fs::FileSystem>& fs,
92
+ std::vector<std::string> paths) {
93
+ std::vector<FileSource> sources;
94
+ for (auto&& path : paths) {
95
+ sources.emplace_back(std::move(path), fs);
96
+ }
97
+ return sources;
98
+ }
99
+
100
+ /// \brief Return the type of raw compression on the file, if any.
101
+ Compression::type compression() const { return compression_; }
102
+
103
+ /// \brief Return the file path, if any. Only valid when file source wraps a path.
104
+ const std::string& path() const {
105
+ static std::string buffer_path = "<Buffer>";
106
+ static std::string custom_open_path = "<Buffer>";
107
+ return filesystem_ ? file_info_.path() : buffer_ ? buffer_path : custom_open_path;
108
+ }
109
+
110
+ /// \brief Return the filesystem, if any. Otherwise returns nullptr
111
+ const std::shared_ptr<fs::FileSystem>& filesystem() const { return filesystem_; }
112
+
113
+ /// \brief Return the buffer containing the file, if any. Otherwise returns nullptr
114
+ const std::shared_ptr<Buffer>& buffer() const { return buffer_; }
115
+
116
+ /// \brief Get a RandomAccessFile which views this file source
117
+ Result<std::shared_ptr<io::RandomAccessFile>> Open() const;
118
+ Future<std::shared_ptr<io::RandomAccessFile>> OpenAsync() const;
119
+
120
+ /// \brief Get the size (in bytes) of the file or buffer
121
+ /// If the file is compressed this should be the compressed (on-disk) size.
122
+ int64_t Size() const;
123
+
124
+ /// \brief Get an InputStream which views this file source (and decompresses if needed)
125
+ /// \param[in] compression If nullopt, guess the compression scheme from the
126
+ /// filename, else decompress with the given codec
127
+ Result<std::shared_ptr<io::InputStream>> OpenCompressed(
128
+ std::optional<Compression::type> compression = std::nullopt) const;
129
+
130
+ /// \brief equality comparison with another FileSource
131
+ bool Equals(const FileSource& other) const;
132
+
133
+ private:
134
+ static Result<std::shared_ptr<io::RandomAccessFile>> InvalidOpen() {
135
+ return Status::Invalid("Called Open() on an uninitialized FileSource");
136
+ }
137
+
138
+ fs::FileInfo file_info_;
139
+ std::shared_ptr<fs::FileSystem> filesystem_;
140
+ std::shared_ptr<Buffer> buffer_;
141
+ CustomOpen custom_open_;
142
+ int64_t custom_size_ = 0;
143
+ Compression::type compression_ = Compression::UNCOMPRESSED;
144
+ };
145
+
146
+ /// \brief Base class for file format implementation
147
+ class ARROW_DS_EXPORT FileFormat : public std::enable_shared_from_this<FileFormat> {
148
+ public:
149
+ /// Options affecting how this format is scanned.
150
+ ///
151
+ /// The options here can be overridden at scan time.
152
+ std::shared_ptr<FragmentScanOptions> default_fragment_scan_options;
153
+
154
+ virtual ~FileFormat() = default;
155
+
156
+ /// \brief The name identifying the kind of file format
157
+ virtual std::string type_name() const = 0;
158
+
159
+ virtual bool Equals(const FileFormat& other) const = 0;
160
+
161
+ /// \brief Indicate if the FileSource is supported/readable by this format.
162
+ virtual Result<bool> IsSupported(const FileSource& source) const = 0;
163
+
164
+ /// \brief Return the schema of the file if possible.
165
+ virtual Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const = 0;
166
+
167
+ /// \brief Learn what we need about the file before we start scanning it
168
+ virtual Future<std::shared_ptr<InspectedFragment>> InspectFragment(
169
+ const FileSource& source, const FragmentScanOptions* format_options,
170
+ compute::ExecContext* exec_context) const;
171
+
172
+ virtual Result<RecordBatchGenerator> ScanBatchesAsync(
173
+ const std::shared_ptr<ScanOptions>& options,
174
+ const std::shared_ptr<FileFragment>& file) const = 0;
175
+
176
+ virtual Future<std::optional<int64_t>> CountRows(
177
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
178
+ const std::shared_ptr<ScanOptions>& options);
179
+
180
+ virtual Future<std::shared_ptr<FragmentScanner>> BeginScan(
181
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
182
+ const FragmentScanOptions* format_options,
183
+ compute::ExecContext* exec_context) const;
184
+
185
+ /// \brief Open a fragment
186
+ virtual Result<std::shared_ptr<FileFragment>> MakeFragment(
187
+ FileSource source, compute::Expression partition_expression,
188
+ std::shared_ptr<Schema> physical_schema);
189
+
190
+ /// \brief Create a FileFragment for a FileSource.
191
+ Result<std::shared_ptr<FileFragment>> MakeFragment(
192
+ FileSource source, compute::Expression partition_expression);
193
+
194
+ /// \brief Create a FileFragment for a FileSource.
195
+ Result<std::shared_ptr<FileFragment>> MakeFragment(
196
+ FileSource source, std::shared_ptr<Schema> physical_schema = NULLPTR);
197
+
198
+ /// \brief Create a writer for this format.
199
+ virtual Result<std::shared_ptr<FileWriter>> MakeWriter(
200
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
201
+ std::shared_ptr<FileWriteOptions> options,
202
+ fs::FileLocator destination_locator) const = 0;
203
+
204
+ /// \brief Get default write options for this format.
205
+ ///
206
+ /// May return null shared_ptr if this file format does not yet support
207
+ /// writing datasets.
208
+ virtual std::shared_ptr<FileWriteOptions> DefaultWriteOptions() = 0;
209
+
210
+ protected:
211
+ explicit FileFormat(std::shared_ptr<FragmentScanOptions> default_fragment_scan_options)
212
+ : default_fragment_scan_options(std::move(default_fragment_scan_options)) {}
213
+ };
214
+
215
+ /// \brief A Fragment that is stored in a file with a known format
216
+ class ARROW_DS_EXPORT FileFragment : public Fragment,
217
+ public util::EqualityComparable<FileFragment> {
218
+ public:
219
+ Result<RecordBatchGenerator> ScanBatchesAsync(
220
+ const std::shared_ptr<ScanOptions>& options) override;
221
+ Future<std::optional<int64_t>> CountRows(
222
+ compute::Expression predicate,
223
+ const std::shared_ptr<ScanOptions>& options) override;
224
+ Future<std::shared_ptr<FragmentScanner>> BeginScan(
225
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
226
+ const FragmentScanOptions* format_options,
227
+ compute::ExecContext* exec_context) override;
228
+ Future<std::shared_ptr<InspectedFragment>> InspectFragment(
229
+ const FragmentScanOptions* format_options,
230
+ compute::ExecContext* exec_context) override;
231
+
232
+ std::string type_name() const override { return format_->type_name(); }
233
+ std::string ToString() const override { return source_.path(); };
234
+
235
+ const FileSource& source() const { return source_; }
236
+ const std::shared_ptr<FileFormat>& format() const { return format_; }
237
+
238
+ bool Equals(const FileFragment& other) const;
239
+
240
+ protected:
241
+ FileFragment(FileSource source, std::shared_ptr<FileFormat> format,
242
+ compute::Expression partition_expression,
243
+ std::shared_ptr<Schema> physical_schema)
244
+ : Fragment(std::move(partition_expression), std::move(physical_schema)),
245
+ source_(std::move(source)),
246
+ format_(std::move(format)) {}
247
+
248
+ Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() override;
249
+
250
+ FileSource source_;
251
+ std::shared_ptr<FileFormat> format_;
252
+
253
+ friend class FileFormat;
254
+ };
255
+
256
+ /// \brief A Dataset of FileFragments.
257
+ ///
258
+ /// A FileSystemDataset is composed of one or more FileFragment. The fragments
259
+ /// are independent and don't need to share the same format and/or filesystem.
260
+ class ARROW_DS_EXPORT FileSystemDataset : public Dataset {
261
+ public:
262
+ /// \brief Create a FileSystemDataset.
263
+ ///
264
+ /// \param[in] schema the schema of the dataset
265
+ /// \param[in] root_partition the partition expression of the dataset
266
+ /// \param[in] format the format of each FileFragment.
267
+ /// \param[in] filesystem the filesystem of each FileFragment, or nullptr if the
268
+ /// fragments wrap buffers.
269
+ /// \param[in] fragments list of fragments to create the dataset from.
270
+ /// \param[in] partitioning the Partitioning object in case the dataset is created
271
+ /// with a known partitioning (e.g. from a discovered partitioning
272
+ /// through a DatasetFactory), or nullptr if not known.
273
+ ///
274
+ /// Note that fragments wrapping files resident in differing filesystems are not
275
+ /// permitted; to work with multiple filesystems use a UnionDataset.
276
+ ///
277
+ /// \return A constructed dataset.
278
+ static Result<std::shared_ptr<FileSystemDataset>> Make(
279
+ std::shared_ptr<Schema> schema, compute::Expression root_partition,
280
+ std::shared_ptr<FileFormat> format, std::shared_ptr<fs::FileSystem> filesystem,
281
+ std::vector<std::shared_ptr<FileFragment>> fragments,
282
+ std::shared_ptr<Partitioning> partitioning = NULLPTR);
283
+
284
+ /// \brief Write a dataset.
285
+ static Status Write(const FileSystemDatasetWriteOptions& write_options,
286
+ std::shared_ptr<Scanner> scanner);
287
+
288
+ /// \brief Return the type name of the dataset.
289
+ std::string type_name() const override { return "filesystem"; }
290
+
291
+ /// \brief Replace the schema of the dataset.
292
+ Result<std::shared_ptr<Dataset>> ReplaceSchema(
293
+ std::shared_ptr<Schema> schema) const override;
294
+
295
+ /// \brief Return the path of files.
296
+ std::vector<std::string> files() const;
297
+
298
+ /// \brief Return the format.
299
+ const std::shared_ptr<FileFormat>& format() const { return format_; }
300
+
301
+ /// \brief Return the filesystem. May be nullptr if the fragments wrap buffers.
302
+ const std::shared_ptr<fs::FileSystem>& filesystem() const { return filesystem_; }
303
+
304
+ /// \brief Return the partitioning. May be nullptr if the dataset was not constructed
305
+ /// with a partitioning.
306
+ const std::shared_ptr<Partitioning>& partitioning() const { return partitioning_; }
307
+
308
+ std::string ToString() const;
309
+
310
+ protected:
311
+ struct FragmentSubtrees;
312
+
313
+ explicit FileSystemDataset(std::shared_ptr<Schema> schema)
314
+ : Dataset(std::move(schema)) {}
315
+
316
+ FileSystemDataset(std::shared_ptr<Schema> schema,
317
+ compute::Expression partition_expression)
318
+ : Dataset(std::move(schema), partition_expression) {}
319
+
320
+ Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) override;
321
+
322
+ void SetupSubtreePruning();
323
+
324
+ std::shared_ptr<FileFormat> format_;
325
+ std::shared_ptr<fs::FileSystem> filesystem_;
326
+ std::vector<std::shared_ptr<FileFragment>> fragments_;
327
+ std::shared_ptr<Partitioning> partitioning_;
328
+
329
+ std::shared_ptr<FragmentSubtrees> subtrees_;
330
+ };
331
+
332
+ /// \brief Options for writing a file of this format.
333
+ class ARROW_DS_EXPORT FileWriteOptions {
334
+ public:
335
+ virtual ~FileWriteOptions() = default;
336
+
337
+ const std::shared_ptr<FileFormat>& format() const { return format_; }
338
+
339
+ std::string type_name() const { return format_->type_name(); }
340
+
341
+ protected:
342
+ explicit FileWriteOptions(std::shared_ptr<FileFormat> format)
343
+ : format_(std::move(format)) {}
344
+
345
+ std::shared_ptr<FileFormat> format_;
346
+ };
347
+
348
+ /// \brief A writer for this format.
349
+ class ARROW_DS_EXPORT FileWriter {
350
+ public:
351
+ virtual ~FileWriter() = default;
352
+
353
+ /// \brief Write the given batch.
354
+ virtual Status Write(const std::shared_ptr<RecordBatch>& batch) = 0;
355
+
356
+ /// \brief Write all batches from the reader.
357
+ Status Write(RecordBatchReader* batches);
358
+
359
+ /// \brief Indicate that writing is done.
360
+ virtual Future<> Finish();
361
+
362
+ const std::shared_ptr<FileFormat>& format() const { return options_->format(); }
363
+ const std::shared_ptr<Schema>& schema() const { return schema_; }
364
+ const std::shared_ptr<FileWriteOptions>& options() const { return options_; }
365
+ const fs::FileLocator& destination() const { return destination_locator_; }
366
+
367
+ /// \brief After Finish() is called, provides number of bytes written to file.
368
+ Result<int64_t> GetBytesWritten() const;
369
+
370
+ protected:
371
+ FileWriter(std::shared_ptr<Schema> schema, std::shared_ptr<FileWriteOptions> options,
372
+ std::shared_ptr<io::OutputStream> destination,
373
+ fs::FileLocator destination_locator)
374
+ : schema_(std::move(schema)),
375
+ options_(std::move(options)),
376
+ destination_(std::move(destination)),
377
+ destination_locator_(std::move(destination_locator)) {}
378
+
379
+ virtual Future<> FinishInternal() = 0;
380
+
381
+ std::shared_ptr<Schema> schema_;
382
+ std::shared_ptr<FileWriteOptions> options_;
383
+ std::shared_ptr<io::OutputStream> destination_;
384
+ fs::FileLocator destination_locator_;
385
+ std::optional<int64_t> bytes_written_;
386
+ };
387
+
388
+ /// \brief Options for writing a dataset.
389
+ struct ARROW_DS_EXPORT FileSystemDatasetWriteOptions {
390
+ /// Options for individual fragment writing.
391
+ std::shared_ptr<FileWriteOptions> file_write_options;
392
+
393
+ /// FileSystem into which a dataset will be written.
394
+ std::shared_ptr<fs::FileSystem> filesystem;
395
+
396
+ /// Root directory into which the dataset will be written.
397
+ std::string base_dir;
398
+
399
+ /// Partitioning used to generate fragment paths.
400
+ std::shared_ptr<Partitioning> partitioning;
401
+
402
+ /// Maximum number of partitions any batch may be written into, default is 1K.
403
+ int max_partitions = 1024;
404
+
405
+ /// Template string used to generate fragment basenames.
406
+ /// {i} will be replaced by an auto incremented integer.
407
+ std::string basename_template;
408
+
409
+ /// A functor which will be applied on an incremented counter. The result will be
410
+ /// inserted into the basename_template in place of {i}.
411
+ ///
412
+ /// This can be used, for example, to left-pad the file counter.
413
+ std::function<std::string(int)> basename_template_functor;
414
+
415
+ /// If greater than 0 then this will limit the maximum number of files that can be left
416
+ /// open. If an attempt is made to open too many files then the least recently used file
417
+ /// will be closed. If this setting is set too low you may end up fragmenting your data
418
+ /// into many small files.
419
+ ///
420
+ /// The default is 900 which also allows some # of files to be open by the scanner
421
+ /// before hitting the default Linux limit of 1024
422
+ uint32_t max_open_files = 900;
423
+
424
+ /// If greater than 0 then this will limit how many rows are placed in any single file.
425
+ /// Otherwise there will be no limit and one file will be created in each output
426
+ /// directory unless files need to be closed to respect max_open_files
427
+ uint64_t max_rows_per_file = 0;
428
+
429
+ /// If greater than 0 then this will cause the dataset writer to batch incoming data
430
+ /// and only write the row groups to the disk when sufficient rows have accumulated.
431
+ /// The final row group size may be less than this value and other options such as
432
+ /// `max_open_files` or `max_rows_per_file` lead to smaller row group sizes.
433
+ uint64_t min_rows_per_group = 0;
434
+
435
+ /// If greater than 0 then the dataset writer may split up large incoming batches into
436
+ /// multiple row groups. If this value is set then min_rows_per_group should also be
437
+ /// set or else you may end up with very small row groups (e.g. if the incoming row
438
+ /// group size is just barely larger than this value).
439
+ uint64_t max_rows_per_group = 1 << 20;
440
+
441
+ /// Controls what happens if an output directory already exists.
442
+ ExistingDataBehavior existing_data_behavior = ExistingDataBehavior::kError;
443
+
444
+ /// \brief If false the dataset writer will not create directories
445
+ /// This is mainly intended for filesystems that do not require directories such as S3.
446
+ bool create_dir = true;
447
+
448
+ /// Callback to be invoked against all FileWriters before
449
+ /// they are finalized with FileWriter::Finish().
450
+ std::function<Status(FileWriter*)> writer_pre_finish = [](FileWriter*) {
451
+ return Status::OK();
452
+ };
453
+
454
+ /// Callback to be invoked against all FileWriters after they have
455
+ /// called FileWriter::Finish().
456
+ std::function<Status(FileWriter*)> writer_post_finish = [](FileWriter*) {
457
+ return Status::OK();
458
+ };
459
+
460
+ const std::shared_ptr<FileFormat>& format() const {
461
+ return file_write_options->format();
462
+ }
463
+ };
464
+
465
+ /// \brief Wraps FileSystemDatasetWriteOptions for consumption as compute::ExecNodeOptions
466
+ class ARROW_DS_EXPORT WriteNodeOptions : public acero::ExecNodeOptions {
467
+ public:
468
+ explicit WriteNodeOptions(
469
+ FileSystemDatasetWriteOptions options,
470
+ std::shared_ptr<const KeyValueMetadata> custom_metadata = NULLPTR)
471
+ : write_options(std::move(options)), custom_metadata(std::move(custom_metadata)) {}
472
+
473
+ /// \brief Options to control how to write the dataset
474
+ FileSystemDatasetWriteOptions write_options;
475
+ /// \brief Optional schema to attach to all written batches
476
+ ///
477
+ /// By default, we will use the output schema of the input.
478
+ ///
479
+ /// This can be used to alter schema metadata, field nullability, or field metadata.
480
+ /// However, this cannot be used to change the type of data. If the custom schema does
481
+ /// not have the same number of fields and the same data types as the input then the
482
+ /// plan will fail.
483
+ std::shared_ptr<Schema> custom_schema;
484
+ /// \brief Optional metadata to attach to written batches
485
+ std::shared_ptr<const KeyValueMetadata> custom_metadata;
486
+ };
487
+
488
+ /// @}
489
+
490
+ namespace internal {
491
+ ARROW_DS_EXPORT void InitializeDatasetWriter(arrow::acero::ExecFactoryRegistry* registry);
492
+ }
493
+
494
+ } // namespace dataset
495
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_csv.h ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string>
22
+
23
+ #include "arrow/csv/options.h"
24
+ #include "arrow/dataset/dataset.h"
25
+ #include "arrow/dataset/file_base.h"
26
+ #include "arrow/dataset/type_fwd.h"
27
+ #include "arrow/dataset/visibility.h"
28
+ #include "arrow/ipc/type_fwd.h"
29
+ #include "arrow/status.h"
30
+ #include "arrow/util/compression.h"
31
+
32
+ namespace arrow {
33
+ namespace dataset {
34
+
35
+ constexpr char kCsvTypeName[] = "csv";
36
+
37
+ /// \addtogroup dataset-file-formats
38
+ ///
39
+ /// @{
40
+
41
/// \brief A FileFormat implementation that reads from and writes to Csv files
class ARROW_DS_EXPORT CsvFileFormat : public FileFormat {
 public:
  // TODO(ARROW-18328) Remove this, moved to CsvFragmentScanOptions
  /// Options affecting the parsing of CSV files
  csv::ParseOptions parse_options = csv::ParseOptions::Defaults();

  CsvFileFormat();

  /// \brief The format's type name ("csv")
  std::string type_name() const override { return kCsvTypeName; }

  bool Equals(const FileFormat& other) const override;

  /// \brief Check whether `source` can be read by this format
  Result<bool> IsSupported(const FileSource& source) const override;

  /// \brief Return the schema of the file if possible.
  Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;

  /// \brief Begin scanning a fragment previously examined with InspectFragment
  Future<std::shared_ptr<FragmentScanner>> BeginScan(
      const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
      const FragmentScanOptions* format_options,
      compute::ExecContext* exec_context) const override;

  /// \brief Asynchronously produce the record batches of `file` according to
  /// `scan_options`
  Result<RecordBatchGenerator> ScanBatchesAsync(
      const std::shared_ptr<ScanOptions>& scan_options,
      const std::shared_ptr<FileFragment>& file) const override;

  /// \brief Open `source` and gather the information needed to begin a scan
  Future<std::shared_ptr<InspectedFragment>> InspectFragment(
      const FileSource& source, const FragmentScanOptions* format_options,
      compute::ExecContext* exec_context) const override;

  /// \brief Count rows in `file` matching `predicate`
  ///
  /// The optional result is empty when no count is produced (see
  /// FileFormat::CountRows for the exact contract).
  Future<std::optional<int64_t>> CountRows(
      const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
      const std::shared_ptr<ScanOptions>& options) override;

  /// \brief Create a writer emitting this format to `destination`
  Result<std::shared_ptr<FileWriter>> MakeWriter(
      std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
      std::shared_ptr<FileWriteOptions> options,
      fs::FileLocator destination_locator) const override;

  /// \brief Write options used when the caller supplies none
  std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
};
83
+
84
/// \brief Per-scan options for CSV fragments
struct ARROW_DS_EXPORT CsvFragmentScanOptions : public FragmentScanOptions {
  /// \brief The format these options apply to ("csv")
  std::string type_name() const override { return kCsvTypeName; }

  /// Signature of an optional transformation applied to each opened input stream.
  using StreamWrapFunc = std::function<Result<std::shared_ptr<io::InputStream>>(
      std::shared_ptr<io::InputStream>)>;

  /// CSV conversion options
  csv::ConvertOptions convert_options = csv::ConvertOptions::Defaults();

  /// CSV reading options
  ///
  /// Note that use_threads is always ignored.
  csv::ReadOptions read_options = csv::ReadOptions::Defaults();

  /// CSV parse options
  csv::ParseOptions parse_options = csv::ParseOptions::Defaults();

  /// Optional stream wrapping function
  ///
  /// If defined, all open dataset file fragments will be passed
  /// through this function. One possible use case is to transparently
  /// transcode all input files from a given character set to utf8.
  StreamWrapFunc stream_transform_func{};
};
109
+
110
/// \brief Write options specific to CSV files
class ARROW_DS_EXPORT CsvFileWriteOptions : public FileWriteOptions {
 public:
  /// Options passed to csv::MakeCSVWriter.
  std::shared_ptr<csv::WriteOptions> write_options;

 protected:
  // Construction is reserved for CsvFileFormat (see friend declaration below).
  explicit CsvFileWriteOptions(std::shared_ptr<FileFormat> format)
      : FileWriteOptions(std::move(format)) {}

  friend class CsvFileFormat;
};
121
+
122
/// \brief A FileWriter that writes record batches to a CSV file
class ARROW_DS_EXPORT CsvFileWriter : public FileWriter {
 public:
  /// \brief Write `batch` to the output file
  Status Write(const std::shared_ptr<RecordBatch>& batch) override;

 private:
  // Construction is reserved for CsvFileFormat (see friend declaration below).
  CsvFileWriter(std::shared_ptr<io::OutputStream> destination,
                std::shared_ptr<ipc::RecordBatchWriter> writer,
                std::shared_ptr<Schema> schema,
                std::shared_ptr<CsvFileWriteOptions> options,
                fs::FileLocator destination_locator);

  // Format-specific finalization step invoked by the FileWriter machinery.
  Future<> FinishInternal() override;

  // Stream receiving the written bytes.
  std::shared_ptr<io::OutputStream> destination_;
  // Underlying batch writer used to serialize record batches.
  std::shared_ptr<ipc::RecordBatchWriter> batch_writer_;

  friend class CsvFileFormat;
};
140
+
141
+ /// @}
142
+
143
+ } // namespace dataset
144
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_ipc.h ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <string>
24
+
25
+ #include "arrow/dataset/file_base.h"
26
+ #include "arrow/dataset/type_fwd.h"
27
+ #include "arrow/dataset/visibility.h"
28
+ #include "arrow/io/type_fwd.h"
29
+ #include "arrow/ipc/type_fwd.h"
30
+ #include "arrow/result.h"
31
+
32
+ namespace arrow {
33
+ namespace dataset {
34
+
35
+ /// \addtogroup dataset-file-formats
36
+ ///
37
+ /// @{
38
+
39
+ constexpr char kIpcTypeName[] = "ipc";
40
+
41
/// \brief A FileFormat implementation that reads from and writes to Ipc files
class ARROW_DS_EXPORT IpcFileFormat : public FileFormat {
 public:
  /// \brief The format's type name ("ipc")
  std::string type_name() const override { return kIpcTypeName; }

  IpcFileFormat();

  // Two IPC formats are equal whenever their type names match; there is no
  // format-level state to compare.
  bool Equals(const FileFormat& other) const override {
    return type_name() == other.type_name();
  }

  /// \brief Check whether `source` can be read by this format
  Result<bool> IsSupported(const FileSource& source) const override;

  /// \brief Return the schema of the file if possible.
  Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;

  /// \brief Asynchronously produce the record batches of `file` according to `options`
  Result<RecordBatchGenerator> ScanBatchesAsync(
      const std::shared_ptr<ScanOptions>& options,
      const std::shared_ptr<FileFragment>& file) const override;

  /// \brief Count rows in `file` matching `predicate`; the optional result is
  /// empty when no count is produced (see FileFormat::CountRows)
  Future<std::optional<int64_t>> CountRows(
      const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
      const std::shared_ptr<ScanOptions>& options) override;

  /// \brief Create a writer emitting this format to `destination`
  Result<std::shared_ptr<FileWriter>> MakeWriter(
      std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
      std::shared_ptr<FileWriteOptions> options,
      fs::FileLocator destination_locator) const override;

  /// \brief Write options used when the caller supplies none
  std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
};
72
+
73
/// \brief Per-scan options for IPC fragments
class ARROW_DS_EXPORT IpcFragmentScanOptions : public FragmentScanOptions {
 public:
  /// \brief The format these options apply to ("ipc")
  std::string type_name() const override { return kIpcTypeName; }

  /// Options passed to the IPC file reader.
  /// included_fields, memory_pool, and use_threads are ignored.
  std::shared_ptr<ipc::IpcReadOptions> options;
  /// If present, the async scanner will enable I/O coalescing.
  /// This is ignored by the sync scanner.
  std::shared_ptr<io::CacheOptions> cache_options;
};
85
+
86
/// \brief Write options specific to IPC files
class ARROW_DS_EXPORT IpcFileWriteOptions : public FileWriteOptions {
 public:
  /// Options passed to ipc::MakeFileWriter. use_threads is ignored
  std::shared_ptr<ipc::IpcWriteOptions> options;

  /// custom_metadata written to the file's footer
  std::shared_ptr<const KeyValueMetadata> metadata;

 protected:
  // Construction is reserved for IpcFileFormat (see friend declaration below).
  explicit IpcFileWriteOptions(std::shared_ptr<FileFormat> format)
      : FileWriteOptions(std::move(format)) {}

  friend class IpcFileFormat;
};
100
+
101
/// \brief A FileWriter that writes record batches to an IPC file
class ARROW_DS_EXPORT IpcFileWriter : public FileWriter {
 public:
  /// \brief Write `batch` to the output file
  Status Write(const std::shared_ptr<RecordBatch>& batch) override;

 private:
  // Construction is reserved for IpcFileFormat (see friend declaration below).
  IpcFileWriter(std::shared_ptr<io::OutputStream> destination,
                std::shared_ptr<ipc::RecordBatchWriter> writer,
                std::shared_ptr<Schema> schema,
                std::shared_ptr<IpcFileWriteOptions> options,
                fs::FileLocator destination_locator);

  // Format-specific finalization step invoked by the FileWriter machinery.
  Future<> FinishInternal() override;

  // Stream receiving the written bytes.
  std::shared_ptr<io::OutputStream> destination_;
  // Underlying batch writer used to serialize record batches.
  std::shared_ptr<ipc::RecordBatchWriter> batch_writer_;

  friend class IpcFileFormat;
};
119
+
120
+ /// @}
121
+
122
+ } // namespace dataset
123
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_json.h ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <optional>
22
+ #include <string>
23
+
24
+ #include "arrow/dataset/dataset.h"
25
+ #include "arrow/dataset/file_base.h"
26
+ #include "arrow/dataset/type_fwd.h"
27
+ #include "arrow/dataset/visibility.h"
28
+ #include "arrow/ipc/type_fwd.h"
29
+ #include "arrow/json/options.h"
30
+ #include "arrow/result.h"
31
+ #include "arrow/status.h"
32
+ #include "arrow/util/future.h"
33
+ #include "arrow/util/macros.h"
34
+
35
+ namespace arrow::dataset {
36
+
37
+ /// \addtogroup dataset-file-formats
38
+ ///
39
+ /// @{
40
+
41
+ constexpr char kJsonTypeName[] = "json";
42
+
43
/// \brief A FileFormat implementation that reads from JSON files
class ARROW_DS_EXPORT JsonFileFormat : public FileFormat {
 public:
  JsonFileFormat();

  /// \brief The format's type name ("json")
  std::string type_name() const override { return kJsonTypeName; }

  bool Equals(const FileFormat& other) const override;

  /// \brief Check whether `source` can be read by this format
  Result<bool> IsSupported(const FileSource& source) const override;

  /// \brief Return the schema of the file if possible
  Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;

  /// \brief Open `source` and gather the information needed to begin a scan
  Future<std::shared_ptr<InspectedFragment>> InspectFragment(
      const FileSource& source, const FragmentScanOptions* format_options,
      compute::ExecContext* exec_context) const override;

  /// \brief Begin scanning a fragment previously examined with InspectFragment
  Future<std::shared_ptr<FragmentScanner>> BeginScan(
      const FragmentScanRequest& scan_request, const InspectedFragment& inspected,
      const FragmentScanOptions* format_options,
      compute::ExecContext* exec_context) const override;

  /// \brief Asynchronously produce the record batches of `file` according to
  /// `scan_options`
  Result<RecordBatchGenerator> ScanBatchesAsync(
      const std::shared_ptr<ScanOptions>& scan_options,
      const std::shared_ptr<FileFragment>& file) const override;

  /// \brief Count rows in `file` matching `predicate`; the optional result is
  /// empty when no count is produced (see FileFormat::CountRows)
  Future<std::optional<int64_t>> CountRows(
      const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
      const std::shared_ptr<ScanOptions>& scan_options) override;

  // Writing JSON is not supported: this always fails with NotImplemented.
  Result<std::shared_ptr<FileWriter>> MakeWriter(
      std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
      std::shared_ptr<FileWriteOptions> options,
      fs::FileLocator destination_locator) const override {
    return Status::NotImplemented("Writing JSON files is not currently supported");
  }

  // No write options exist for a read-only format.
  std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override { return NULLPTR; }
};
82
+
83
/// \brief Per-scan options for JSON fragments
struct ARROW_DS_EXPORT JsonFragmentScanOptions : public FragmentScanOptions {
  /// \brief The format these options apply to ("json")
  std::string type_name() const override { return kJsonTypeName; }

  /// @brief Options that affect JSON parsing
  ///
  /// Note: `explicit_schema` and `unexpected_field_behavior` are ignored.
  json::ParseOptions parse_options = json::ParseOptions::Defaults();

  /// @brief Options that affect JSON reading
  json::ReadOptions read_options = json::ReadOptions::Defaults();
};
95
+
96
+ /// @}
97
+
98
+ } // namespace arrow::dataset
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_orc.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <string>
24
+
25
+ #include "arrow/dataset/file_base.h"
26
+ #include "arrow/dataset/type_fwd.h"
27
+ #include "arrow/dataset/visibility.h"
28
+ #include "arrow/io/type_fwd.h"
29
+ #include "arrow/result.h"
30
+
31
+ namespace arrow {
32
+ namespace dataset {
33
+
34
+ /// \addtogroup dataset-file-formats
35
+ ///
36
+ /// @{
37
+
38
+ constexpr char kOrcTypeName[] = "orc";
39
+
40
/// \brief A FileFormat implementation that reads from and writes to ORC files
class ARROW_DS_EXPORT OrcFileFormat : public FileFormat {
 public:
  OrcFileFormat();

  /// \brief The format's type name ("orc")
  std::string type_name() const override { return kOrcTypeName; }

  // Two ORC formats are equal whenever their type names match; there is no
  // format-level state to compare.
  bool Equals(const FileFormat& other) const override {
    return type_name() == other.type_name();
  }

  /// \brief Check whether `source` can be read by this format
  Result<bool> IsSupported(const FileSource& source) const override;

  /// \brief Return the schema of the file if possible.
  Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;

  /// \brief Asynchronously produce the record batches of `file` according to `options`
  Result<RecordBatchGenerator> ScanBatchesAsync(
      const std::shared_ptr<ScanOptions>& options,
      const std::shared_ptr<FileFragment>& file) const override;

  /// \brief Count rows in `file` matching `predicate`; the optional result is
  /// empty when no count is produced (see FileFormat::CountRows)
  Future<std::optional<int64_t>> CountRows(
      const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
      const std::shared_ptr<ScanOptions>& options) override;

  /// \brief Create a writer emitting this format to `destination`
  Result<std::shared_ptr<FileWriter>> MakeWriter(
      std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
      std::shared_ptr<FileWriteOptions> options,
      fs::FileLocator destination_locator) const override;

  /// \brief Write options used when the caller supplies none
  std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
};
71
+
72
+ /// @}
73
+
74
+ } // namespace dataset
75
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/parquet_encryption_config.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/dataset/type_fwd.h"
21
+
22
+ namespace parquet::encryption {
23
+ class CryptoFactory;
24
+ struct KmsConnectionConfig;
25
+ struct EncryptionConfiguration;
26
+ struct DecryptionConfiguration;
27
+ } // namespace parquet::encryption
28
+
29
+ namespace arrow {
30
+ namespace dataset {
31
+
32
/// \brief Core configuration class encapsulating parameters for high-level encryption
/// within Parquet framework.
///
/// ParquetEncryptionConfig serves as a bridge, passing encryption-related
/// parameters to appropriate components within the Parquet library. It holds references
/// to objects defining encryption strategy, Key Management Service (KMS) configuration,
/// and specific encryption configurations for Parquet data.
struct ARROW_DS_EXPORT ParquetEncryptionConfig {
  /// Shared pointer to CryptoFactory object, responsible for creating cryptographic
  /// components like encryptors and decryptors.
  std::shared_ptr<parquet::encryption::CryptoFactory> crypto_factory;

  /// Shared pointer to KmsConnectionConfig object, holding configuration parameters for
  /// connecting to a Key Management Service (KMS).
  std::shared_ptr<parquet::encryption::KmsConnectionConfig> kms_connection_config;

  /// Shared pointer to EncryptionConfiguration object, defining specific encryption
  /// settings for Parquet data, like keys for different columns.
  std::shared_ptr<parquet::encryption::EncryptionConfiguration> encryption_config;
};
52
+
53
/// \brief Core configuration class encapsulating parameters for high-level decryption
/// within Parquet framework.
///
/// ParquetDecryptionConfig is designed to pass decryption-related parameters to
/// appropriate decryption components within Parquet library. It holds references to
/// objects defining decryption strategy, Key Management Service (KMS) configuration,
/// and specific decryption configurations for reading encrypted Parquet data.
struct ARROW_DS_EXPORT ParquetDecryptionConfig {
  /// Shared pointer to CryptoFactory object, pivotal in creating cryptographic
  /// components for decryption process.
  std::shared_ptr<parquet::encryption::CryptoFactory> crypto_factory;

  /// Shared pointer to KmsConnectionConfig object, containing parameters for connecting
  /// to a Key Management Service (KMS) during decryption.
  std::shared_ptr<parquet::encryption::KmsConnectionConfig> kms_connection_config;

  /// Shared pointer to DecryptionConfiguration object, specifying decryption settings
  /// for reading encrypted Parquet data.
  std::shared_ptr<parquet::encryption::DecryptionConfiguration> decryption_config;
};
73
+
74
+ } // namespace dataset
75
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/partition.h ADDED
@@ -0,0 +1,432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <functional>
23
+ #include <iosfwd>
24
+ #include <memory>
25
+ #include <optional>
26
+ #include <string>
27
+ #include <unordered_map>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ #include "arrow/compute/expression.h"
32
+ #include "arrow/dataset/type_fwd.h"
33
+ #include "arrow/dataset/visibility.h"
34
+ #include "arrow/util/compare.h"
35
+
36
+ namespace arrow {
37
+
38
+ namespace dataset {
39
+
40
+ constexpr char kFilenamePartitionSep = '_';
41
+
42
/// \brief A formatted partition path: a directory component plus a filename component.
struct ARROW_DS_EXPORT PartitionPathFormat {
  std::string directory, filename;
};
45
+
46
+ // ----------------------------------------------------------------------
47
+ // Partitioning
48
+
49
+ /// \defgroup dataset-partitioning Partitioning API
50
+ ///
51
+ /// @{
52
+
53
/// \brief Interface for parsing partition expressions from string partition
/// identifiers.
///
/// For example, the identifier "foo=5" might be parsed to an equality expression
/// between the "foo" field and the value 5.
///
/// Some partitionings may store the field names in a metadata
/// store instead of in file paths, for example
/// dataset_root/2009/11/... could be used when the partition fields
/// are "year" and "month"
///
/// Paths are consumed from left to right. Paths must be relative to
/// the root of a partition; path prefixes must be removed before passing
/// the path to a partitioning for parsing.
class ARROW_DS_EXPORT Partitioning : public util::EqualityComparable<Partitioning> {
 public:
  virtual ~Partitioning() = default;

  /// \brief The name identifying the kind of partitioning
  virtual std::string type_name() const = 0;

  /// \brief Return whether the partitionings are equal
  ///
  /// The base implementation compares only the partition schemas
  /// (ignoring schema metadata); subclasses may refine this.
  virtual bool Equals(const Partitioning& other) const {
    return schema_->Equals(other.schema_, /*check_metadata=*/false);
  }

  /// \brief If the input batch shares any fields with this partitioning,
  /// produce sub-batches which satisfy mutually exclusive Expressions.
  struct PartitionedBatches {
    // batches[i] is described by expressions[i]; the two vectors run in parallel.
    RecordBatchVector batches;
    std::vector<compute::Expression> expressions;
  };
  virtual Result<PartitionedBatches> Partition(
      const std::shared_ptr<RecordBatch>& batch) const = 0;

  /// \brief Parse a path into a partition expression
  virtual Result<compute::Expression> Parse(const std::string& path) const = 0;

  /// \brief Format a partition expression into a (directory, filename) pair
  virtual Result<PartitionPathFormat> Format(const compute::Expression& expr) const = 0;

  /// \brief A default Partitioning which is a DirectoryPartitioning
  /// with an empty schema.
  static std::shared_ptr<Partitioning> Default();

  /// \brief The partition schema.
  const std::shared_ptr<Schema>& schema() const { return schema_; }

 protected:
  explicit Partitioning(std::shared_ptr<Schema> schema) : schema_(std::move(schema)) {}

  // Schema describing the partition fields.
  std::shared_ptr<Schema> schema_;
};
105
+
106
/// \brief The encoding of partition segments.
enum class SegmentEncoding : int8_t {
  /// No encoding.
  None = 0,
  /// Segment values are URL-encoded.
  Uri = 1,
};
113
+
114
+ ARROW_DS_EXPORT
115
+ std::ostream& operator<<(std::ostream& os, SegmentEncoding segment_encoding);
116
+
117
/// \brief Options for key-value based partitioning (hive/directory).
struct ARROW_DS_EXPORT KeyValuePartitioningOptions {
  /// After splitting a path into components, decode the path components
  /// before parsing according to this scheme.
  SegmentEncoding segment_encoding = SegmentEncoding::Uri;
};
123
+
124
/// \brief Options for inferring a partitioning.
struct ARROW_DS_EXPORT PartitioningFactoryOptions {
  /// When inferring a schema for partition fields, yield dictionary encoded types
  /// instead of plain. This can be more efficient when materializing virtual
  /// columns, and Expressions parsed by the finished Partitioning will include
  /// dictionaries of all unique inspected values for each field.
  bool infer_dictionary = false;
  /// Optionally, an expected schema can be provided, in which case inference
  /// will only check discovered fields against the schema and update internal
  /// state (such as dictionaries).
  std::shared_ptr<Schema> schema;
  /// After splitting a path into components, decode the path components
  /// before parsing according to this scheme.
  SegmentEncoding segment_encoding = SegmentEncoding::Uri;

  /// \brief Project these factory options onto the subset used by a finished
  /// KeyValuePartitioning.
  KeyValuePartitioningOptions AsPartitioningOptions() const;
};
141
+
142
/// \brief Options for inferring a hive-style partitioning.
struct ARROW_DS_EXPORT HivePartitioningFactoryOptions : PartitioningFactoryOptions {
  /// The hive partitioning scheme maps null to a hard coded fallback string.
  std::string null_fallback;

  /// \brief Project these factory options onto the subset used by a finished
  /// HivePartitioning.
  HivePartitioningOptions AsHivePartitioningOptions() const;
};
149
+
150
/// \brief PartitioningFactory provides creation of a partitioning when the
/// specific schema must be inferred from available paths (no explicit schema is known).
class ARROW_DS_EXPORT PartitioningFactory {
 public:
  virtual ~PartitioningFactory() = default;

  /// \brief The name identifying the kind of partitioning
  virtual std::string type_name() const = 0;

  /// Get the schema for the resulting Partitioning.
  /// This may reset internal state, for example dictionaries of unique representations.
  virtual Result<std::shared_ptr<Schema>> Inspect(
      const std::vector<std::string>& paths) = 0;

  /// Create a partitioning using the provided schema
  /// (fields may be dropped).
  virtual Result<std::shared_ptr<Partitioning>> Finish(
      const std::shared_ptr<Schema>& schema) const = 0;
};
169
+
170
/// \brief Subclass for the common case of a partitioning which yields an equality
/// expression for each segment
class ARROW_DS_EXPORT KeyValuePartitioning : public Partitioning {
 public:
  /// An unconverted equality expression consisting of a field name and the representation
  /// of a scalar value
  struct Key {
    std::string name;
    // Absent (std::nullopt) when the segment carries no value representation.
    std::optional<std::string> value;
  };

  Result<PartitionedBatches> Partition(
      const std::shared_ptr<RecordBatch>& batch) const override;

  Result<compute::Expression> Parse(const std::string& path) const override;

  Result<PartitionPathFormat> Format(const compute::Expression& expr) const override;

  /// \brief Dictionaries for dictionary-typed partition fields; sized to one
  /// (possibly null) entry per schema field.
  const ArrayVector& dictionaries() const { return dictionaries_; }

  /// \brief How path segments are decoded before parsing.
  SegmentEncoding segment_encoding() const { return options_.segment_encoding; }

  bool Equals(const Partitioning& other) const override;

 protected:
  KeyValuePartitioning(std::shared_ptr<Schema> schema, ArrayVector dictionaries,
                       KeyValuePartitioningOptions options)
      : Partitioning(std::move(schema)),
        dictionaries_(std::move(dictionaries)),
        options_(options) {
    // Guarantee one dictionary slot per schema field (entries may remain null).
    if (dictionaries_.empty()) {
      dictionaries_.resize(schema_->num_fields());
    }
  }

  /// Parse a path into (field name, value representation) pairs.
  virtual Result<std::vector<Key>> ParseKeys(const std::string& path) const = 0;

  /// Format scalar values into a (directory, filename) pair.
  virtual Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const = 0;

  /// Convert a Key to a full expression.
  Result<compute::Expression> ConvertKey(const Key& key) const;

  /// Format one path segment string per scalar value.
  Result<std::vector<std::string>> FormatPartitionSegments(
      const ScalarVector& values) const;
  /// Parse already-split path segments into Keys.
  Result<std::vector<Key>> ParsePartitionSegments(
      const std::vector<std::string>& segments) const;

  ArrayVector dictionaries_;
  KeyValuePartitioningOptions options_;
};
220
+
221
/// \brief DirectoryPartitioning parses one segment of a path for each field in its
/// schema. All fields are required, so paths passed to DirectoryPartitioning::Parse
/// must contain segments for each field.
///
/// For example given schema<year:int16, month:int8> the path "/2009/11" would be
/// parsed to ("year"_ == 2009 and "month"_ == 11)
class ARROW_DS_EXPORT DirectoryPartitioning : public KeyValuePartitioning {
 public:
  /// If a field in schema is of dictionary type, the corresponding element of
  /// dictionaries must contain the dictionary of values for that field.
  explicit DirectoryPartitioning(std::shared_ptr<Schema> schema,
                                 ArrayVector dictionaries = {},
                                 KeyValuePartitioningOptions options = {});

  /// \brief The partitioning's type name ("directory")
  std::string type_name() const override { return "directory"; }

  bool Equals(const Partitioning& other) const override;

  /// \brief Create a factory for a directory partitioning.
  ///
  /// \param[in] field_names The names for the partition fields. Types will be
  /// inferred.
  static std::shared_ptr<PartitioningFactory> MakeFactory(
      std::vector<std::string> field_names, PartitioningFactoryOptions = {});

 private:
  Result<std::vector<Key>> ParseKeys(const std::string& path) const override;

  Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const override;
};
251
+
252
/// \brief The default fallback used for null values in a Hive-style partitioning.
static constexpr char kDefaultHiveNullFallback[] = "__HIVE_DEFAULT_PARTITION__";

/// \brief Options for a HivePartitioning.
struct ARROW_DS_EXPORT HivePartitioningOptions : public KeyValuePartitioningOptions {
  /// The path segment substituted when a partition value is null.
  std::string null_fallback = kDefaultHiveNullFallback;

  /// \brief Convenience factory: default options with a custom null fallback.
  static HivePartitioningOptions DefaultsWithNullFallback(std::string fallback) {
    HivePartitioningOptions options;
    options.null_fallback = std::move(fallback);
    return options;
  }
};
264
+
265
/// \brief Multi-level, directory based partitioning
/// originating from Apache Hive with all data files stored in the
/// leaf directories. Data is partitioned by static values of a
/// particular column in the schema. Partition keys are represented in
/// the form $key=$value in directory names.
/// Field order is ignored, as are missing or unrecognized field names.
///
/// For example given schema<year:int16, month:int8, day:int8> the path
/// "/day=321/ignored=3.4/year=2009" parses to ("year"_ == 2009 and "day"_ == 321)
class ARROW_DS_EXPORT HivePartitioning : public KeyValuePartitioning {
 public:
  /// If a field in schema is of dictionary type, the corresponding element of
  /// dictionaries must contain the dictionary of values for that field.
  explicit HivePartitioning(std::shared_ptr<Schema> schema, ArrayVector dictionaries = {},
                            std::string null_fallback = kDefaultHiveNullFallback)
      : KeyValuePartitioning(std::move(schema), std::move(dictionaries),
                             KeyValuePartitioningOptions()),
        hive_options_(
            HivePartitioningOptions::DefaultsWithNullFallback(std::move(null_fallback))) {
  }

  explicit HivePartitioning(std::shared_ptr<Schema> schema, ArrayVector dictionaries,
                            HivePartitioningOptions options)
      : KeyValuePartitioning(std::move(schema), std::move(dictionaries), options),
        hive_options_(options) {}

  std::string type_name() const override { return "hive"; }
  /// \brief The path segment used to represent a null partition value.
  std::string null_fallback() const { return hive_options_.null_fallback; }
  const HivePartitioningOptions& options() const { return hive_options_; }

  /// \brief Parse a single "$key=$value" path segment.
  ///
  /// The optional result allows a segment to yield no key; per the class
  /// documentation, segments with unrecognized names are ignored.
  static Result<std::optional<Key>> ParseKey(const std::string& segment,
                                             const HivePartitioningOptions& options);

  bool Equals(const Partitioning& other) const override;

  /// \brief Create a factory for a hive partitioning.
  static std::shared_ptr<PartitioningFactory> MakeFactory(
      HivePartitioningFactoryOptions = {});

 private:
  const HivePartitioningOptions hive_options_;
  Result<std::vector<Key>> ParseKeys(const std::string& path) const override;

  Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const override;
};
310
+
311
/// \brief Implementation provided by lambda or other callable
///
/// Note: Equals() always returns false for this class, since arbitrary
/// callables cannot be meaningfully compared for equality.
class ARROW_DS_EXPORT FunctionPartitioning : public Partitioning {
 public:
  /// Callback used by Parse() to convert a path into a filter expression.
  using ParseImpl = std::function<Result<compute::Expression>(const std::string&)>;

  /// Callback used by Format() to convert an expression into a partition path.
  using FormatImpl =
      std::function<Result<PartitionPathFormat>(const compute::Expression&)>;

  /// \brief Construct from a parse callback and an optional format callback.
  ///
  /// \param[in] schema the partition schema
  /// \param[in] parse_impl invoked by Parse()
  /// \param[in] format_impl invoked by Format(); if null, Format() returns
  ///            Status::NotImplemented
  /// \param[in] name the value reported by type_name()
  FunctionPartitioning(std::shared_ptr<Schema> schema, ParseImpl parse_impl,
                       FormatImpl format_impl = NULLPTR, std::string name = "function")
      : Partitioning(std::move(schema)),
        parse_impl_(std::move(parse_impl)),
        format_impl_(std::move(format_impl)),
        name_(std::move(name)) {}

  std::string type_name() const override { return name_; }

  // Function-backed partitionings are never considered equal, even to themselves.
  bool Equals(const Partitioning& other) const override { return false; }

  Result<compute::Expression> Parse(const std::string& path) const override {
    return parse_impl_(path);
  }

  Result<PartitionPathFormat> Format(const compute::Expression& expr) const override {
    if (format_impl_) {
      return format_impl_(expr);
    }
    return Status::NotImplemented("formatting paths from ", type_name(), " Partitioning");
  }

  // Partitioning record batches is not supported by this implementation.
  Result<PartitionedBatches> Partition(
      const std::shared_ptr<RecordBatch>& batch) const override {
    return Status::NotImplemented("partitioning batches from ", type_name(),
                                  " Partitioning");
  }

 private:
  ParseImpl parse_impl_;
  FormatImpl format_impl_;
  std::string name_;
};
352
+
353
/// \brief Partitioning whose keys are encoded in the file name itself rather
/// than in directory names.
// NOTE(review): description inferred from the class name; confirm against the
// ParseKeys/FormatValues implementations.
class ARROW_DS_EXPORT FilenamePartitioning : public KeyValuePartitioning {
 public:
  /// \brief Construct a FilenamePartitioning from its components.
  ///
  /// If a field in schema is of dictionary type, the corresponding element of
  /// dictionaries must contain the dictionary of values for that field.
  explicit FilenamePartitioning(std::shared_ptr<Schema> schema,
                                ArrayVector dictionaries = {},
                                KeyValuePartitioningOptions options = {});

  std::string type_name() const override { return "filename"; }

  /// \brief Create a factory for a filename partitioning.
  ///
  /// \param[in] field_names The names for the partition fields. Types will be
  /// inferred.
  static std::shared_ptr<PartitioningFactory> MakeFactory(
      std::vector<std::string> field_names, PartitioningFactoryOptions = {});

  bool Equals(const Partitioning& other) const override;

 private:
  Result<std::vector<Key>> ParseKeys(const std::string& path) const override;

  Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const override;
};
379
+
380
/// \brief Remove `prefix` from the front of `path`, returning the remainder.
ARROW_DS_EXPORT std::string StripPrefix(const std::string& path,
                                        const std::string& prefix);

/// \brief Removes the prefix and the filename of a path, returning the
/// intermediate directory portion as a single string.
///
/// e.g., `StripPrefixAndFilename("/data/year=2019/c.txt", "/data") ->
/// "year=2019"`
// NOTE(review): the previous example showed `{"year=2019","c.txt"}`, which does
// not match the std::string return type of this overload.
ARROW_DS_EXPORT std::string StripPrefixAndFilename(const std::string& path,
                                                   const std::string& prefix);

/// \brief Vector version of StripPrefixAndFilename.
ARROW_DS_EXPORT std::vector<std::string> StripPrefixAndFilename(
    const std::vector<std::string>& paths, const std::string& prefix);

/// \brief Vector version of StripPrefixAndFilename, operating on the paths of
/// a vector of FileInfo.
ARROW_DS_EXPORT std::vector<std::string> StripPrefixAndFilename(
    const std::vector<fs::FileInfo>& files, const std::string& prefix);
397
+
398
+ /// \brief Either a Partitioning or a PartitioningFactory
399
+ class ARROW_DS_EXPORT PartitioningOrFactory {
400
+ public:
401
+ explicit PartitioningOrFactory(std::shared_ptr<Partitioning> partitioning)
402
+ : partitioning_(std::move(partitioning)) {}
403
+
404
+ explicit PartitioningOrFactory(std::shared_ptr<PartitioningFactory> factory)
405
+ : factory_(std::move(factory)) {}
406
+
407
+ PartitioningOrFactory& operator=(std::shared_ptr<Partitioning> partitioning) {
408
+ return *this = PartitioningOrFactory(std::move(partitioning));
409
+ }
410
+
411
+ PartitioningOrFactory& operator=(std::shared_ptr<PartitioningFactory> factory) {
412
+ return *this = PartitioningOrFactory(std::move(factory));
413
+ }
414
+
415
+ /// \brief The partitioning (if given).
416
+ const std::shared_ptr<Partitioning>& partitioning() const { return partitioning_; }
417
+
418
+ /// \brief The partition factory (if given).
419
+ const std::shared_ptr<PartitioningFactory>& factory() const { return factory_; }
420
+
421
+ /// \brief Get the partition schema, inferring it with the given factory if needed.
422
+ Result<std::shared_ptr<Schema>> GetOrInferSchema(const std::vector<std::string>& paths);
423
+
424
+ private:
425
+ std::shared_ptr<PartitioningFactory> factory_;
426
+ std::shared_ptr<Partitioning> partitioning_;
427
+ };
428
+
429
+ /// @}
430
+
431
+ } // namespace dataset
432
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/pch.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Often-used headers, for precompiling.
19
+ // If updating this header, please make sure you check compilation speed
20
+ // before checking in. Adding headers which are not used extremely often
21
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
22
+
23
+ // This API is EXPERIMENTAL.
24
+
25
+ #include "arrow/dataset/dataset.h"
26
+ #include "arrow/dataset/scanner.h"
27
+ #include "arrow/pch.h"
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/plan.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #include "arrow/dataset/visibility.h"
21
+
22
+ namespace arrow {
23
+ namespace dataset {
24
+ namespace internal {
25
+
26
+ /// Register dataset-based exec nodes with the exec node registry
27
+ ///
28
+ /// This function must be called before using dataset ExecNode factories
29
+ ARROW_DS_EXPORT void Initialize();
30
+
31
+ } // namespace internal
32
+ } // namespace dataset
33
+ } // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/projector.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
// This API is EXPERIMENTAL.

#pragma once

#include "arrow/dataset/visibility.h"
#include "arrow/type_fwd.h"

namespace arrow {
namespace dataset {

/// \brief Validate that data with schema `from` can be projected to schema `to`,
/// returning an error Status otherwise.
// FIXME this is superseded by compute::Expression::Bind
ARROW_DS_EXPORT Status CheckProjectable(const Schema& from, const Schema& to);

}  // namespace dataset
}  // namespace arrow
vllm/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/scanner.h ADDED
@@ -0,0 +1,583 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <string>
25
+ #include <utility>
26
+ #include <vector>
27
+
28
+ #include "arrow/acero/options.h"
29
+ #include "arrow/compute/expression.h"
30
+ #include "arrow/compute/type_fwd.h"
31
+ #include "arrow/dataset/dataset.h"
32
+ #include "arrow/dataset/projector.h"
33
+ #include "arrow/dataset/type_fwd.h"
34
+ #include "arrow/dataset/visibility.h"
35
+ #include "arrow/io/interfaces.h"
36
+ #include "arrow/memory_pool.h"
37
+ #include "arrow/type_fwd.h"
38
+ #include "arrow/util/async_generator.h"
39
+ #include "arrow/util/iterator.h"
40
+ #include "arrow/util/thread_pool.h"
41
+ #include "arrow/util/type_fwd.h"
42
+
43
+ namespace arrow {
44
+
45
+ using RecordBatchGenerator = std::function<Future<std::shared_ptr<RecordBatch>>()>;
46
+
47
+ namespace dataset {
48
+
49
+ /// \defgroup dataset-scanning Scanning API
50
+ ///
51
+ /// @{
52
+
53
constexpr int64_t kDefaultBatchSize = 1 << 17;  // 128Ki rows
// With kDefaultBatchSize this allows 16 batches (~2Mi rows) in flight within a
// fragment. (The previous comment claimed "64 batches ~ 8Mi rows", which does
// not match the current constants and appears stale.)
constexpr int32_t kDefaultBatchReadahead = 16;
constexpr int32_t kDefaultFragmentReadahead = 4;
constexpr int32_t kDefaultBytesReadahead = 1 << 25;  // 32MiB
58
+
59
/// Scan-specific options, which can be changed between scans of the same dataset.
struct ARROW_DS_EXPORT ScanOptions {
  /// A row filter (which will be pushed down to partitioning/reading if supported).
  compute::Expression filter = compute::literal(true);
  /// A projection expression (which can add/remove/rename columns).
  ///
  /// Use SetProjection() (declared later in this header) to set this together
  /// with the matching projected_schema.
  compute::Expression projection;

  /// Schema with which batches will be read from fragments. This is also known as the
  /// "reader schema" it will be used (for example) in constructing CSV file readers to
  /// identify column types for parsing. Usually only a subset of its fields (see
  /// MaterializedFields) will be materialized during a scan.
  std::shared_ptr<Schema> dataset_schema;

  /// Schema of projected record batches. This is independent of dataset_schema as its
  /// fields are derived from the projection. For example, let
  ///
  /// dataset_schema = {"a": int32, "b": int32, "id": utf8}
  /// projection = project({equal(field_ref("a"), field_ref("b"))}, {"a_plus_b"})
  ///
  /// (no filter specified). In this case, the projected_schema would be
  ///
  /// {"a_plus_b": int32}
  std::shared_ptr<Schema> projected_schema;

  /// Maximum row count for scanned batches.
  int64_t batch_size = kDefaultBatchSize;

  /// How many batches to read ahead within a fragment.
  ///
  /// Set to 0 to disable batch readahead
  ///
  /// Note: May not be supported by all formats
  /// Note: Will be ignored if use_threads is set to false
  int32_t batch_readahead = kDefaultBatchReadahead;

  /// How many files to read ahead
  ///
  /// Set to 0 to disable fragment readahead
  ///
  /// Note: May not be enforced by all scanners
  /// Note: Will be ignored if use_threads is set to false
  int32_t fragment_readahead = kDefaultFragmentReadahead;

  /// A pool from which materialized and scanned arrays will be allocated.
  MemoryPool* pool = arrow::default_memory_pool();

  /// IOContext for any IO tasks
  ///
  /// Note: The IOContext executor will be ignored if use_threads is set to false
  io::IOContext io_context;

  /// If true the scanner will scan in parallel
  ///
  /// Note: If true, this will use threads from both the cpu_executor and the
  /// io_context.executor
  /// Note: This must be true in order for any readahead to happen
  bool use_threads = false;

  /// If true the scanner will add augmented fields to the output schema.
  bool add_augmented_fields = true;

  /// Fragment-specific scan options.
  std::shared_ptr<FragmentScanOptions> fragment_scan_options;

  /// Return a vector of FieldRefs that require materialization.
  ///
  /// This is usually the union of the fields referenced in the projection and the
  /// filter expression. Examples:
  ///
  /// - `SELECT a, b WHERE a < 2 && c > 1` => ["a", "b", "a", "c"]
  /// - `SELECT a + b < 3 WHERE a > 1` => ["a", "b", "a"]
  ///
  /// This is needed for expressions where a field may not be directly
  /// used in the final projection but is still required to evaluate the
  /// expression.
  ///
  /// This is used by Fragment implementations to apply the column
  /// sub-selection optimization.
  std::vector<FieldRef> MaterializedFields() const;

  /// Parameters which control when the plan should pause for a slow consumer
  acero::BackpressureOptions backpressure =
      acero::BackpressureOptions::DefaultBackpressure();
};
143
+
144
/// Scan-specific options, which can be changed between scans of the same dataset.
///
/// A dataset consists of one or more individual fragments. A fragment is anything
/// that is independently scannable, often a file.
///
/// Batches from all fragments will be converted to a single schema. This unified
/// schema is referred to as the "dataset schema" and is the output schema for
/// this node.
///
/// Individual fragments may have schemas that are different from the dataset
/// schema. This is sometimes referred to as the physical or fragment schema.
/// Conversion from the fragment schema to the dataset schema is a process
/// known as evolution.
struct ARROW_DS_EXPORT ScanV2Options : public acero::ExecNodeOptions {
  explicit ScanV2Options(std::shared_ptr<Dataset> dataset)
      : dataset(std::move(dataset)) {}

  /// \brief The dataset to scan
  std::shared_ptr<Dataset> dataset;
  /// \brief A row filter
  ///
  /// The filter expression should be written against the dataset schema.
  /// The filter must be unbound.
  ///
  /// This is an opportunistic pushdown filter. Filtering capabilities will
  /// vary between formats. If a format is not capable of applying the filter
  /// then it will ignore it.
  ///
  /// Each fragment will do its best to filter the data based on the information
  /// (partitioning guarantees, statistics) available to it. If it is able to
  /// apply some filtering then it will indicate what filtering it was able to
  /// apply by attaching a guarantee to the batch.
  ///
  /// For example, if a filter is x < 50 && y > 40 then a batch may be able to
  /// apply a guarantee x < 50. Post-scan filtering would then only need to
  /// consider y > 40 (for this specific batch). The next batch may not be able
  /// to attach any guarantee and both clauses would need to be applied to that batch.
  ///
  /// A single guarantee-aware filtering operation should generally be applied to all
  /// resulting batches. The scan node is not responsible for this.
  ///
  /// Fields that are referenced by the filter should be included in the `columns` vector.
  /// The scan node will not automatically fetch fields referenced by the filter
  /// expression. \see AddFieldsNeededForFilter
  ///
  /// If the filter references fields that are not included in `columns` this may or may
  /// not be an error, depending on the format.
  compute::Expression filter = compute::literal(true);

  /// \brief The columns to scan
  ///
  /// This is not a simple list of top-level column indices but instead a set of paths
  /// allowing for partial selection of columns
  ///
  /// These paths refer to the dataset schema
  ///
  /// For example, consider the following dataset schema:
  ///   schema({
  ///     field("score", int32()),
  ///     field("marker", struct_({
  ///       field("color", utf8()),
  ///       field("location", struct_({
  ///         field("x", float64()),
  ///         field("y", float64())
  ///       })
  ///     })
  ///   })
  ///
  /// If `columns` is {{0}, {1,1,0}} then the output schema is:
  ///   schema({field("score", int32()), field("x", float64())})
  ///
  /// If `columns` is {{1,1,1}, {1,1}} then the output schema is:
  ///   schema({
  ///     field("y", float64()),
  ///     field("location", struct_({
  ///       field("x", float64()),
  ///       field("y", float64())
  ///     })
  ///   })
  std::vector<FieldPath> columns;

  /// \brief Target number of bytes to read ahead in a fragment
  ///
  /// This limit involves some amount of estimation. Formats typically only know
  /// batch boundaries in terms of rows (not decoded bytes) and so an estimation
  /// must be done to guess the average row size. Other formats like CSV and JSON
  /// must make even more generalized guesses.
  ///
  /// This is a best-effort guide. Some formats may need to read ahead further,
  /// for example, if scanning a parquet file that has batches with 100MiB of data
  /// then the actual readahead will be at least 100MiB
  ///
  /// Set to 0 to disable readahead. When disabled, the scanner will read the
  /// dataset one batch at a time
  ///
  /// This limit applies across all fragments. If the limit is 32MiB and the
  /// fragment readahead allows for 20 fragments to be read at once then the
  /// total readahead will still be 32MiB and NOT 20 * 32MiB.
  int32_t target_bytes_readahead = kDefaultBytesReadahead;

  /// \brief Number of fragments to read ahead
  ///
  /// Higher readahead will potentially lead to more efficient I/O but will lead
  /// to the scan operation using more RAM. The default is fairly conservative
  /// and designed for fast local disks (or slow local spinning disks which cannot
  /// handle much parallelism anyways). When using a highly parallel remote filesystem
  /// you will likely want to increase these values.
  ///
  /// Set to 0 to disable fragment readahead. When disabled the dataset will be scanned
  /// one fragment at a time.
  int32_t fragment_readahead = kDefaultFragmentReadahead;
  /// \brief Options specific to the file format
  ///
  /// NOTE(review): raw non-owning pointer -- presumably the caller must keep
  /// these options alive for the duration of the scan; confirm against the
  /// scan node implementation.
  const FragmentScanOptions* format_options = NULLPTR;

  /// \brief Utility method to get a selection representing all columns in a dataset
  static std::vector<FieldPath> AllColumns(const Schema& dataset_schema);

  /// \brief Utility method to add fields needed for the current filter
  ///
  /// This method adds any fields that are needed by `filter` which are not already
  /// included in the list of columns. Any new fields added will be added to the end
  /// in no particular order.
  static Status AddFieldsNeededForFilter(ScanV2Options* options);
};
268
+
269
/// \brief Describes a projection
struct ARROW_DS_EXPORT ProjectionDescr {
  /// \brief The projection expression itself
  /// This expression must be a call to make_struct
  compute::Expression expression;
  /// \brief The output schema of the projection.
  ///
  /// This can be calculated from the input schema and the expression but it
  /// is cached here for convenience.
  std::shared_ptr<Schema> schema;

  /// \brief Create a ProjectionDescr by binding an expression to the dataset schema
  ///
  /// expression must return a struct type
  static Result<ProjectionDescr> FromStructExpression(
      const compute::Expression& expression, const Schema& dataset_schema);

  /// \brief Create a ProjectionDescr from expressions/names for each field
  static Result<ProjectionDescr> FromExpressions(std::vector<compute::Expression> exprs,
                                                 std::vector<std::string> names,
                                                 const Schema& dataset_schema);

  /// \brief Create a default projection referencing fields in the dataset schema
  static Result<ProjectionDescr> FromNames(std::vector<std::string> names,
                                           const Schema& dataset_schema,
                                           bool add_augmented_fields = true);

  /// \brief Make a projection that projects every field in the dataset schema
  static Result<ProjectionDescr> Default(const Schema& dataset_schema,
                                         bool add_augmented_fields = true);
};
300
+
301
/// \brief Utility method to set the projection expression and schema
ARROW_DS_EXPORT void SetProjection(ScanOptions* options, ProjectionDescr projection);

/// \brief Combines a record batch with the fragment that the record batch originated
/// from
///
/// Knowing the source fragment can be useful for debugging & understanding loaded
/// data
struct TaggedRecordBatch {
  std::shared_ptr<RecordBatch> record_batch;
  std::shared_ptr<Fragment> fragment;
};
/// Asynchronous stream of tagged batches.
using TaggedRecordBatchGenerator = std::function<Future<TaggedRecordBatch>()>;
/// Synchronous stream of tagged batches.
using TaggedRecordBatchIterator = Iterator<TaggedRecordBatch>;

/// \brief Combines a tagged batch with positional information
///
/// This is returned when scanning batches in an unordered fashion. This information is
/// needed if you ever want to reassemble the batches in order
struct EnumeratedRecordBatch {
  Enumerated<std::shared_ptr<RecordBatch>> record_batch;
  Enumerated<std::shared_ptr<Fragment>> fragment;
};
/// Asynchronous stream of enumerated batches.
using EnumeratedRecordBatchGenerator = std::function<Future<EnumeratedRecordBatch>()>;
/// Synchronous stream of enumerated batches.
using EnumeratedRecordBatchIterator = Iterator<EnumeratedRecordBatch>;
326
+
327
+ /// @}
328
+
329
+ } // namespace dataset
330
+
331
/// IterationTraits specialization allowing TaggedRecordBatch to be used with
/// Iterator<>: a null record_batch marks the end of iteration.
template <>
struct IterationTraits<dataset::TaggedRecordBatch> {
  static dataset::TaggedRecordBatch End() {
    return dataset::TaggedRecordBatch{NULLPTR, NULLPTR};
  }
  static bool IsEnd(const dataset::TaggedRecordBatch& val) {
    return val.record_batch == NULLPTR;
  }
};

/// IterationTraits specialization for EnumeratedRecordBatch: the end sentinel
/// is detected via the enumerated fragment member.
template <>
struct IterationTraits<dataset::EnumeratedRecordBatch> {
  static dataset::EnumeratedRecordBatch End() {
    return dataset::EnumeratedRecordBatch{
        IterationEnd<Enumerated<std::shared_ptr<RecordBatch>>>(),
        IterationEnd<Enumerated<std::shared_ptr<dataset::Fragment>>>()};
  }
  static bool IsEnd(const dataset::EnumeratedRecordBatch& val) {
    return IsIterationEnd(val.fragment);
  }
};
352
+
353
+ namespace dataset {
354
+
355
+ /// \defgroup dataset-scanning Scanning API
356
+ ///
357
+ /// @{
358
+
359
+ /// \brief A scanner glues together several dataset classes to load in data.
360
+ /// The dataset contains a collection of fragments and partitioning rules.
361
+ ///
362
+ /// The fragments identify independently loadable units of data (i.e. each fragment has
363
+ /// a potentially unique schema and possibly even format. It should be possible to read
364
+ /// fragments in parallel if desired).
365
+ ///
366
+ /// The fragment's format contains the logic necessary to actually create a task to load
367
+ /// the fragment into memory. That task may or may not support parallel execution of
368
+ /// its own.
369
+ ///
370
+ /// The scanner is then responsible for creating scan tasks from every fragment in the
371
+ /// dataset and (potentially) sequencing the loaded record batches together.
372
+ ///
373
+ /// The scanner should not buffer the entire dataset in memory (unless asked) instead
374
+ /// yielding record batches as soon as they are ready to scan. Various readahead
375
+ /// properties control how much data is allowed to be scanned before pausing to let a
376
+ /// slow consumer catch up.
377
+ ///
378
+ /// Today the scanner also handles projection & filtering although that may change in
379
+ /// the future.
380
+ class ARROW_DS_EXPORT Scanner {
381
+ public:
382
+ virtual ~Scanner() = default;
383
+
384
+ /// \brief Apply a visitor to each RecordBatch as it is scanned. If multiple threads
385
+ /// are used (via use_threads), the visitor will be invoked from those threads and is
386
+ /// responsible for any synchronization.
387
+ virtual Status Scan(std::function<Status(TaggedRecordBatch)> visitor) = 0;
388
+ /// \brief Convert a Scanner into a Table.
389
+ ///
390
+ /// Use this convenience utility with care. This will serially materialize the
391
+ /// Scan result in memory before creating the Table.
392
+ virtual Result<std::shared_ptr<Table>> ToTable() = 0;
393
+ /// \brief Scan the dataset into a stream of record batches. Each batch is tagged
394
+ /// with the fragment it originated from. The batches will arrive in order. The
395
+ /// order of fragments is determined by the dataset.
396
+ ///
397
+ /// Note: The scanner will perform some readahead but will avoid materializing too
398
+ /// much in memory (this is governed by the readahead options and use_threads option).
399
+ /// If the readahead queue fills up then I/O will pause until the calling thread catches
400
+ /// up.
401
+ virtual Result<TaggedRecordBatchIterator> ScanBatches() = 0;
402
+ virtual Result<TaggedRecordBatchGenerator> ScanBatchesAsync() = 0;
403
+ virtual Result<TaggedRecordBatchGenerator> ScanBatchesAsync(
404
+ ::arrow::internal::Executor* cpu_thread_pool) = 0;
405
+ /// \brief Scan the dataset into a stream of record batches. Unlike ScanBatches this
406
+ /// method may allow record batches to be returned out of order. This allows for more
407
+ /// efficient scanning: some fragments may be accessed more quickly than others (e.g.
408
+ /// may be cached in RAM or just happen to get scheduled earlier by the I/O).
409
+ ///
410
+ /// To make up for the out-of-order iteration each batch is further tagged with
411
+ /// positional information.
412
+ virtual Result<EnumeratedRecordBatchIterator> ScanBatchesUnordered() = 0;
413
+ virtual Result<EnumeratedRecordBatchGenerator> ScanBatchesUnorderedAsync() = 0;
414
+ virtual Result<EnumeratedRecordBatchGenerator> ScanBatchesUnorderedAsync(
415
+ ::arrow::internal::Executor* cpu_thread_pool) = 0;
416
+ /// \brief A convenience to synchronously load the given rows by index.
417
+ ///
418
+ /// Will only consume as many batches as needed from ScanBatches().
419
+ virtual Result<std::shared_ptr<Table>> TakeRows(const Array& indices) = 0;
420
+ /// \brief Get the first N rows.
421
+ virtual Result<std::shared_ptr<Table>> Head(int64_t num_rows) = 0;
422
+ /// \brief Count rows matching a predicate.
423
+ ///
424
+ /// This method will push down the predicate and compute the result based on fragment
425
+ /// metadata if possible.
426
+ virtual Result<int64_t> CountRows() = 0;
427
+ virtual Future<int64_t> CountRowsAsync() = 0;
428
+ /// \brief Convert the Scanner to a RecordBatchReader so it can be
429
+ /// easily used with APIs that expect a reader.
430
+ virtual Result<std::shared_ptr<RecordBatchReader>> ToRecordBatchReader() = 0;
431
+
432
+ /// \brief Get the options for this scan.
433
+ const std::shared_ptr<ScanOptions>& options() const { return scan_options_; }
434
+ /// \brief Get the dataset that this scanner will scan.
435
+ virtual const std::shared_ptr<Dataset>& dataset() const = 0;
436
+
437
+ protected:
438
+ explicit Scanner(std::shared_ptr<ScanOptions> scan_options)
439
+ : scan_options_(std::move(scan_options)) {}
440
+
441
+ Result<EnumeratedRecordBatchIterator> AddPositioningToInOrderScan(
442
+ TaggedRecordBatchIterator scan);
443
+
444
+ const std::shared_ptr<ScanOptions> scan_options_;
445
+ };
446
+
447
+ /// \brief ScannerBuilder is a factory class to construct a Scanner. It is used
448
+ /// to pass information, notably a potential filter expression and a subset of
449
+ /// columns to materialize.
450
+ class ARROW_DS_EXPORT ScannerBuilder {
451
+ public:
452
+ explicit ScannerBuilder(std::shared_ptr<Dataset> dataset);
453
+
454
+ ScannerBuilder(std::shared_ptr<Dataset> dataset,
455
+ std::shared_ptr<ScanOptions> scan_options);
456
+
457
+ ScannerBuilder(std::shared_ptr<Schema> schema, std::shared_ptr<Fragment> fragment,
458
+ std::shared_ptr<ScanOptions> scan_options);
459
+
460
+ /// \brief Make a scanner from a record batch reader.
461
+ ///
462
+ /// The resulting scanner can be scanned only once. This is intended
463
+ /// to support writing data from streaming sources or other sources
464
+ /// that can be iterated only once.
465
+ static std::shared_ptr<ScannerBuilder> FromRecordBatchReader(
466
+ std::shared_ptr<RecordBatchReader> reader);
467
+
468
+ /// \brief Set the subset of columns to materialize.
469
+ ///
470
+ /// Columns which are not referenced may not be read from fragments.
471
+ ///
472
+ /// \param[in] columns list of columns to project. Order and duplicates will
473
+ /// be preserved.
474
+ ///
475
+ /// \return Failure if any column name does not exist in the dataset's
476
+ /// Schema.
477
+ Status Project(std::vector<std::string> columns);
478
+
479
+ /// \brief Set expressions which will be evaluated to produce the materialized
480
+ /// columns.
481
+ ///
482
+ /// Columns which are not referenced may not be read from fragments.
483
+ ///
484
+ /// \param[in] exprs expressions to evaluate to produce columns.
485
+ /// \param[in] names list of names for the resulting columns.
486
+ ///
487
+ /// \return Failure if any referenced column does not exist in the dataset's
488
+ /// Schema.
489
+ Status Project(std::vector<compute::Expression> exprs, std::vector<std::string> names);
490
+
491
+ /// \brief Set the filter expression to return only rows matching the filter.
492
+ ///
493
+ /// The predicate will be passed down to Sources and corresponding
494
+ /// Fragments to exploit predicate pushdown if possible using
495
+ /// partition information or Fragment internal metadata, e.g. Parquet statistics.
496
+ /// Columns which are not referenced may not be read from fragments.
497
+ ///
498
+ /// \param[in] filter expression to filter rows with.
499
+ ///
500
+ /// \return Failure if any referenced column does not exist in the dataset's
501
+ /// Schema.
502
+ Status Filter(const compute::Expression& filter);
503
+
504
+ /// \brief Indicate if the Scanner should make use of the available
505
+ /// ThreadPool found in ScanOptions.
506
+ Status UseThreads(bool use_threads = true);
507
+
508
+ /// \brief Set the maximum number of rows per RecordBatch.
509
+ ///
510
+ /// \param[in] batch_size the maximum number of rows.
511
+ /// \returns An error if the number for batch is not greater than 0.
512
+ ///
513
+ /// This option provides a control limiting the memory owned by any RecordBatch.
514
+ Status BatchSize(int64_t batch_size);
515
+
516
+ /// \brief Set the number of batches to read ahead within a fragment.
517
+ ///
518
+ /// \param[in] batch_readahead How many batches to read ahead within a fragment
519
+ /// \returns an error if this number is less than 0.
520
+ ///
521
+ /// This option provides a control on the RAM vs I/O tradeoff.
522
+ /// It might not be supported by all file formats, in which case it will
523
+ /// simply be ignored.
524
+ Status BatchReadahead(int32_t batch_readahead);
525
+
526
+ /// \brief Set the number of fragments to read ahead.
527
+ ///
528
+ /// \param[in] fragment_readahead How many fragments to read ahead
529
+ /// \returns an error if this number is less than 0.
530
+ ///
531
+ /// This option provides a control on the RAM vs I/O tradeoff.
532
+ Status FragmentReadahead(int32_t fragment_readahead);
533
+
534
+ /// \brief Set the pool from which materialized and scanned arrays will be allocated.
535
+ Status Pool(MemoryPool* pool);
536
+
537
+ /// \brief Set fragment-specific scan options.
538
+ Status FragmentScanOptions(std::shared_ptr<FragmentScanOptions> fragment_scan_options);
539
+
540
+ /// \brief Override the default backpressure configuration.
541
+ Status Backpressure(acero::BackpressureOptions backpressure);
542
+
543
+ /// \brief Return the current scan options for the builder.
544
+ Result<std::shared_ptr<ScanOptions>> GetScanOptions();
545
+
546
+ /// \brief Return the constructed now-immutable Scanner object.
547
+ Result<std::shared_ptr<Scanner>> Finish();
548
+
549
+ const std::shared_ptr<Schema>& schema() const;
550
+ const std::shared_ptr<Schema>& projected_schema() const;
551
+
552
+ private:
553
+ std::shared_ptr<Dataset> dataset_;
554
+ std::shared_ptr<ScanOptions> scan_options_ = std::make_shared<ScanOptions>();
555
+ };
556
+
557
+ /// \brief Construct a source ExecNode which yields batches from a dataset scan.
558
+ ///
559
+ /// Does not construct associated filter or project nodes.
560
+ /// Yielded batches will be augmented with fragment/batch indices to enable stable
561
+ /// ordering for simple ExecPlans.
562
+ class ARROW_DS_EXPORT ScanNodeOptions : public acero::ExecNodeOptions {
563
+ public:
564
+ explicit ScanNodeOptions(std::shared_ptr<Dataset> dataset,
565
+ std::shared_ptr<ScanOptions> scan_options,
566
+ bool require_sequenced_output = false)
567
+ : dataset(std::move(dataset)),
568
+ scan_options(std::move(scan_options)),
569
+ require_sequenced_output(require_sequenced_output) {}
570
+
571
+ /// \brief The dataset to scan
+ std::shared_ptr<Dataset> dataset;
572
+ /// \brief The scan options (filter, projection, readahead, ...) to apply
+ std::shared_ptr<ScanOptions> scan_options;
573
+ /// \brief If true, batches must be emitted in order; defaults to false
+ bool require_sequenced_output;
574
+ };
575
+
576
+ /// @}
577
+
578
+ namespace internal {
579
+ // NOTE(review): presumably registers the dataset scan ExecNode factory/factories
+ // with `registry` (V2 being the newer scan implementation) — confirm against the
+ // scanner implementation files.
+ ARROW_DS_EXPORT void InitializeScanner(arrow::acero::ExecFactoryRegistry* registry);
580
+ ARROW_DS_EXPORT void InitializeScannerV2(arrow::acero::ExecFactoryRegistry* registry);
581
+ } // namespace internal
582
+ } // namespace dataset
583
+ } // namespace arrow