diff --git a/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/INSTALLER b/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/METADATA b/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..8138907181f75f89dfd4f3dd43fb18983a1d51d0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/METADATA @@ -0,0 +1,15 @@ +Metadata-Version: 2.2 +Name: antlr4-python3-runtime +Version: 4.9.3 +Summary: ANTLR 4.9.3 runtime for Python 3.7 +Home-page: http://www.antlr.org +Author: Eric Vergnaud, Terence Parr, Sam Harwell +Author-email: eric.vergnaud@wanadoo.fr +License: BSD +Requires-Dist: typing; python_version < "3.5" +Dynamic: author +Dynamic: author-email +Dynamic: home-page +Dynamic: license +Dynamic: requires-dist +Dynamic: summary diff --git a/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/WHEEL b/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..505164bc02d63fe6b0b3299f849a77c5f1beeb41 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.8.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/parrot/lib/python3.10/site-packages/pyarrow/__init__.py b/parrot/lib/python3.10/site-packages/pyarrow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e52e0d242bee567da89cfa4688036fd115ff0a05 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/__init__.py @@ -0,0 +1,432 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# flake8: noqa + +""" +PyArrow is the python implementation of Apache Arrow. + +Apache Arrow is a cross-language development platform for in-memory data. +It specifies a standardized language-independent columnar memory format for +flat and hierarchical data, organized for efficient analytic operations on +modern hardware. It also provides computational libraries and zero-copy +streaming messaging and interprocess communication. 
+ +For more information see the official page at https://arrow.apache.org +""" + +import gc as _gc +import importlib as _importlib +import os as _os +import platform as _platform +import sys as _sys +import warnings as _warnings + +try: + from ._generated_version import version as __version__ +except ImportError: + # Package is not installed, parse git tag at runtime + try: + import setuptools_scm + # Code duplicated from setup.py to avoid a dependency on each other + + def parse_git(root, **kwargs): + """ + Parse function for setuptools_scm that ignores tags for non-C++ + subprojects, e.g. apache-arrow-js-XXX tags. + """ + from setuptools_scm.git import parse + kwargs['describe_command'] = \ + "git describe --dirty --tags --long --match 'apache-arrow-[0-9]*.*'" + return parse(root, **kwargs) + __version__ = setuptools_scm.get_version('../', + parse=parse_git) + except ImportError: + __version__ = None + +# ARROW-8684: Disable GC while initializing Cython extension module, +# to workaround Cython bug in https://github.com/cython/cython/issues/3603 +_gc_enabled = _gc.isenabled() +_gc.disable() +import pyarrow.lib as _lib +if _gc_enabled: + _gc.enable() + +from pyarrow.lib import (BuildInfo, RuntimeInfo, set_timezone_db_path, + MonthDayNano, VersionInfo, cpp_build_info, + cpp_version, cpp_version_info, runtime_info, + cpu_count, set_cpu_count, enable_signal_handlers, + io_thread_count, set_io_thread_count) + + +def show_versions(): + """ + Print various version information, to help with error reporting. + """ + def print_entry(label, value): + print(f"{label: <26}: {value: <8}") + + print("pyarrow version info\n--------------------") + print_entry("Package kind", cpp_build_info.package_kind + if len(cpp_build_info.package_kind) > 0 + else "not indicated") + print_entry("Arrow C++ library version", cpp_build_info.version) + print_entry("Arrow C++ compiler", + f"{cpp_build_info.compiler_id} {cpp_build_info.compiler_version}") + print_entry("Arrow C++ compiler flags", cpp_build_info.compiler_flags) + print_entry("Arrow C++ git revision", cpp_build_info.git_id) + print_entry("Arrow C++ git description", cpp_build_info.git_description) + print_entry("Arrow C++ build type", cpp_build_info.build_type) + + +def _module_is_available(module): + try: + _importlib.import_module(f'pyarrow.{module}') + except ImportError: + return False + else: + return True + + +def _filesystem_is_available(fs): + try: + import pyarrow.fs + except ImportError: + return False + + try: + getattr(pyarrow.fs, fs) + except (ImportError, AttributeError): + return False + else: + return True + + +def show_info(): + """ + Print detailed version and platform information, for error reporting + """ + show_versions() + + def print_entry(label, value): + print(f" {label: <20}: {value: <8}") + + print("\nPlatform:") + print_entry("OS / Arch", f"{_platform.system()} {_platform.machine()}") + print_entry("SIMD Level", runtime_info().simd_level) + print_entry("Detected SIMD Level", runtime_info().detected_simd_level) + + pool = default_memory_pool() + print("\nMemory:") + print_entry("Default backend", pool.backend_name) + print_entry("Bytes allocated", f"{pool.bytes_allocated()} bytes") + print_entry("Max memory", f"{pool.max_memory()} bytes") + print_entry("Supported Backends", ', '.join(supported_memory_backends())) + + print("\nOptional modules:") + modules = ["csv", "cuda", "dataset", "feather", "flight", "fs", "gandiva", "json", + "orc", "parquet"] + for module in modules: + status = "Enabled" if _module_is_available(module) else 
"-" + print(f" {module: <20}: {status: <8}") + + print("\nFilesystems:") + filesystems = ["AzureFileSystem", "GcsFileSystem", + "HadoopFileSystem", "S3FileSystem"] + for fs in filesystems: + status = "Enabled" if _filesystem_is_available(fs) else "-" + print(f" {fs: <20}: {status: <8}") + + print("\nCompression Codecs:") + codecs = ["brotli", "bz2", "gzip", "lz4_frame", "lz4", "snappy", "zstd"] + for codec in codecs: + status = "Enabled" if Codec.is_available(codec) else "-" + print(f" {codec: <20}: {status: <8}") + + +from pyarrow.lib import (null, bool_, + int8, int16, int32, int64, + uint8, uint16, uint32, uint64, + time32, time64, timestamp, date32, date64, duration, + month_day_nano_interval, + float16, float32, float64, + binary, string, utf8, binary_view, string_view, + large_binary, large_string, large_utf8, + decimal128, decimal256, + list_, large_list, list_view, large_list_view, + map_, struct, + union, sparse_union, dense_union, + dictionary, + run_end_encoded, + fixed_shape_tensor, + field, + type_for_alias, + DataType, DictionaryType, StructType, + ListType, LargeListType, FixedSizeListType, + ListViewType, LargeListViewType, + MapType, UnionType, SparseUnionType, DenseUnionType, + TimestampType, Time32Type, Time64Type, DurationType, + FixedSizeBinaryType, Decimal128Type, Decimal256Type, + BaseExtensionType, ExtensionType, + RunEndEncodedType, FixedShapeTensorType, + PyExtensionType, UnknownExtensionType, + register_extension_type, unregister_extension_type, + DictionaryMemo, + KeyValueMetadata, + Field, + Schema, + schema, + unify_schemas, + Array, Tensor, + array, chunked_array, record_batch, nulls, repeat, + SparseCOOTensor, SparseCSRMatrix, SparseCSCMatrix, + SparseCSFTensor, + infer_type, from_numpy_dtype, + NullArray, + NumericArray, IntegerArray, FloatingPointArray, + BooleanArray, + Int8Array, UInt8Array, + Int16Array, UInt16Array, + Int32Array, UInt32Array, + Int64Array, UInt64Array, + HalfFloatArray, FloatArray, DoubleArray, + ListArray, LargeListArray, FixedSizeListArray, + ListViewArray, LargeListViewArray, + MapArray, UnionArray, + BinaryArray, StringArray, + LargeBinaryArray, LargeStringArray, + BinaryViewArray, StringViewArray, + FixedSizeBinaryArray, + DictionaryArray, + Date32Array, Date64Array, TimestampArray, + Time32Array, Time64Array, DurationArray, + MonthDayNanoIntervalArray, + Decimal128Array, Decimal256Array, StructArray, ExtensionArray, + RunEndEncodedArray, FixedShapeTensorArray, + scalar, NA, _NULL as NULL, Scalar, + NullScalar, BooleanScalar, + Int8Scalar, Int16Scalar, Int32Scalar, Int64Scalar, + UInt8Scalar, UInt16Scalar, UInt32Scalar, UInt64Scalar, + HalfFloatScalar, FloatScalar, DoubleScalar, + Decimal128Scalar, Decimal256Scalar, + ListScalar, LargeListScalar, FixedSizeListScalar, + ListViewScalar, LargeListViewScalar, + Date32Scalar, Date64Scalar, + Time32Scalar, Time64Scalar, + TimestampScalar, DurationScalar, + MonthDayNanoIntervalScalar, + BinaryScalar, LargeBinaryScalar, BinaryViewScalar, + StringScalar, LargeStringScalar, StringViewScalar, + FixedSizeBinaryScalar, DictionaryScalar, + MapScalar, StructScalar, UnionScalar, + RunEndEncodedScalar, ExtensionScalar) + +# Buffers, allocation +from pyarrow.lib import (DeviceAllocationType, Device, MemoryManager, + default_cpu_memory_manager) + +from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer, + Codec, compress, decompress, allocate_buffer) + +from pyarrow.lib import (MemoryPool, LoggingMemoryPool, ProxyMemoryPool, + total_allocated_bytes, set_memory_pool, + 
default_memory_pool, system_memory_pool, + jemalloc_memory_pool, mimalloc_memory_pool, + logging_memory_pool, proxy_memory_pool, + log_memory_allocations, jemalloc_set_decay_ms, + supported_memory_backends) + +# I/O +from pyarrow.lib import (NativeFile, PythonFile, + BufferedInputStream, BufferedOutputStream, CacheOptions, + CompressedInputStream, CompressedOutputStream, + TransformInputStream, transcoding_input_stream, + FixedSizeBufferWriter, + BufferReader, BufferOutputStream, + OSFile, MemoryMappedFile, memory_map, + create_memory_map, MockOutputStream, + input_stream, output_stream, + have_libhdfs) + +from pyarrow.lib import (ChunkedArray, RecordBatch, Table, table, + concat_arrays, concat_tables, TableGroupBy, + RecordBatchReader) + +# Exceptions +from pyarrow.lib import (ArrowCancelled, + ArrowCapacityError, + ArrowException, + ArrowKeyError, + ArrowIndexError, + ArrowInvalid, + ArrowIOError, + ArrowMemoryError, + ArrowNotImplementedError, + ArrowTypeError, + ArrowSerializationError) + +from pyarrow.ipc import serialize_pandas, deserialize_pandas +import pyarrow.ipc as ipc + +import pyarrow.types as types + + +# ---------------------------------------------------------------------- +# Deprecations + +from pyarrow.util import _deprecate_api, _deprecate_class + + +# TODO: Deprecate these somehow in the pyarrow namespace +from pyarrow.ipc import (Message, MessageReader, MetadataVersion, + RecordBatchFileReader, RecordBatchFileWriter, + RecordBatchStreamReader, RecordBatchStreamWriter) + +# ---------------------------------------------------------------------- +# Returning absolute path to the pyarrow include directory (if bundled, e.g. in +# wheels) + + +def get_include(): + """ + Return absolute path to directory containing Arrow C++ include + headers. Similar to numpy.get_include + """ + return _os.path.join(_os.path.dirname(__file__), 'include') + + +def _get_pkg_config_executable(): + return _os.environ.get('PKG_CONFIG', 'pkg-config') + + +def _has_pkg_config(pkgname): + import subprocess + try: + return subprocess.call([_get_pkg_config_executable(), + '--exists', pkgname]) == 0 + except FileNotFoundError: + return False + + +def _read_pkg_config_variable(pkgname, cli_args): + import subprocess + cmd = [_get_pkg_config_executable(), pkgname] + cli_args + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, err = proc.communicate() + if proc.returncode != 0: + raise RuntimeError("pkg-config failed: " + err.decode('utf8')) + return out.rstrip().decode('utf8') + + +def get_libraries(): + """ + Return list of library names to include in the `libraries` argument for C + or Cython extensions using pyarrow + """ + return ['arrow_python', 'arrow'] + + +def create_library_symlinks(): + """ + With Linux and macOS wheels, the bundled shared libraries have an embedded + ABI version like libarrow.so.17 or libarrow.17.dylib and so linking to them + with -larrow won't work unless we create symlinks at locations like + site-packages/pyarrow/libarrow.so. This unfortunate workaround addresses + prior problems we had with shipping two copies of the shared libraries to + permit third party projects like turbodbc to build their C++ extensions + against the pyarrow wheels. + + This function must only be invoked once and only when the shared libraries + are bundled with the Python package, which should only apply to wheel-based + installs. It requires write access to the site-packages/pyarrow directory + and so depending on your system may need to be run with root. 
+ """ + import glob + if _sys.platform == 'win32': + return + package_cwd = _os.path.dirname(__file__) + + if _sys.platform == 'linux': + bundled_libs = glob.glob(_os.path.join(package_cwd, '*.so.*')) + + def get_symlink_path(hard_path): + return hard_path.rsplit('.', 1)[0] + else: + bundled_libs = glob.glob(_os.path.join(package_cwd, '*.*.dylib')) + + def get_symlink_path(hard_path): + return '.'.join((hard_path.rsplit('.', 2)[0], 'dylib')) + + for lib_hard_path in bundled_libs: + symlink_path = get_symlink_path(lib_hard_path) + if _os.path.exists(symlink_path): + continue + try: + _os.symlink(lib_hard_path, symlink_path) + except PermissionError: + print("Tried creating symlink {}. If you need to link to " + "bundled shared libraries, run " + "pyarrow.create_library_symlinks() as root") + + +def get_library_dirs(): + """ + Return lists of directories likely to contain Arrow C++ libraries for + linking C or Cython extensions using pyarrow + """ + package_cwd = _os.path.dirname(__file__) + library_dirs = [package_cwd] + + def append_library_dir(library_dir): + if library_dir not in library_dirs: + library_dirs.append(library_dir) + + # Search library paths via pkg-config. This is necessary if the user + # installed libarrow and the other shared libraries manually and they + # are not shipped inside the pyarrow package (see also ARROW-2976). + pkg_config_executable = _os.environ.get('PKG_CONFIG') or 'pkg-config' + for pkgname in ["arrow", "arrow_python"]: + if _has_pkg_config(pkgname): + library_dir = _read_pkg_config_variable(pkgname, + ["--libs-only-L"]) + # pkg-config output could be empty if Arrow is installed + # as a system package. + if library_dir: + if not library_dir.startswith("-L"): + raise ValueError( + "pkg-config --libs-only-L returned unexpected " + "value {!r}".format(library_dir)) + append_library_dir(library_dir[2:]) + + if _sys.platform == 'win32': + # TODO(wesm): Is this necessary, or does setuptools within a conda + # installation add Library\lib to the linker path for MSVC? + python_base_install = _os.path.dirname(_sys.executable) + library_dir = _os.path.join(python_base_install, 'Library', 'lib') + + if _os.path.exists(_os.path.join(library_dir, 'arrow.lib')): + append_library_dir(library_dir) + + # ARROW-4074: Allow for ARROW_HOME to be set to some other directory + if _os.environ.get('ARROW_HOME'): + append_library_dir(_os.path.join(_os.environ['ARROW_HOME'], 'lib')) + else: + # Python wheels bundle the Arrow libraries in the pyarrow directory. + append_library_dir(_os.path.dirname(_os.path.abspath(__file__))) + + return library_dirs diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_acero.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_acero.pyx new file mode 100644 index 0000000000000000000000000000000000000000..9e8cbd65be224bb255448b580b44f0575942fc1e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_acero.pyx @@ -0,0 +1,608 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# --------------------------------------------------------------------- +# Low-level Acero bindings + +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_acero cimport * +from pyarrow.lib cimport (Table, pyarrow_unwrap_table, pyarrow_wrap_table, + RecordBatchReader) +from pyarrow.lib import frombytes, tobytes +from pyarrow._compute cimport ( + Expression, FunctionOptions, _ensure_field_ref, _true, + unwrap_null_placement, unwrap_sort_order +) + + +cdef class ExecNodeOptions(_Weakrefable): + """ + Base class for the node options. + + Use one of the subclasses to construct an options object. + """ + __slots__ = () # avoid mistakingly creating attributes + + cdef void init(self, const shared_ptr[CExecNodeOptions]& sp): + self.wrapped = sp + + cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil: + return self.wrapped + + +cdef class _TableSourceNodeOptions(ExecNodeOptions): + + def _set_options(self, Table table): + cdef: + shared_ptr[CTable] c_table + + c_table = pyarrow_unwrap_table(table) + self.wrapped.reset( + new CTableSourceNodeOptions(c_table) + ) + + +class TableSourceNodeOptions(_TableSourceNodeOptions): + """ + A Source node which accepts a table. + + This is the option class for the "table_source" node factory. + + Parameters + ---------- + table : pyarrow.Table + The table which acts as the data source. + """ + + def __init__(self, Table table): + self._set_options(table) + + +cdef class _FilterNodeOptions(ExecNodeOptions): + + def _set_options(self, Expression filter_expression not None): + self.wrapped.reset( + new CFilterNodeOptions(filter_expression.unwrap()) + ) + + +class FilterNodeOptions(_FilterNodeOptions): + """ + Make a node which excludes some rows from batches passed through it. + + This is the option class for the "filter" node factory. + + The "filter" operation provides an option to define data filtering + criteria. It selects rows where the given expression evaluates to true. + Filters can be written using pyarrow.compute.Expression, and the + expression must have a return type of boolean. 
+ + Parameters + ---------- + filter_expression : pyarrow.compute.Expression + """ + + def __init__(self, Expression filter_expression): + self._set_options(filter_expression) + + +cdef class _ProjectNodeOptions(ExecNodeOptions): + + def _set_options(self, expressions, names=None): + cdef: + Expression expr + vector[CExpression] c_expressions + vector[c_string] c_names + + for expr in expressions: + c_expressions.push_back(expr.unwrap()) + + if names is not None: + if len(names) != len(expressions): + raise ValueError( + "The number of names should be equal to the number of expressions" + ) + + for name in names: + c_names.push_back(tobytes(name)) + + self.wrapped.reset( + new CProjectNodeOptions(c_expressions, c_names) + ) + else: + self.wrapped.reset( + new CProjectNodeOptions(c_expressions) + ) + + +class ProjectNodeOptions(_ProjectNodeOptions): + """ + Make a node which executes expressions on input batches, + producing batches of the same length with new columns. + + This is the option class for the "project" node factory. + + The "project" operation rearranges, deletes, transforms, and + creates columns. Each output column is computed by evaluating + an expression against the source record batch. These must be + scalar expressions (expressions consisting of scalar literals, + field references and scalar functions, i.e. elementwise functions + that return one value for each input row independent of the value + of all other rows). + + Parameters + ---------- + expressions : list of pyarrow.compute.Expression + List of expressions to evaluate against the source batch. This must + be scalar expressions. + names : list of str, optional + List of names for each of the output columns (same length as + `expressions`). If `names` is not provided, the string + representations of exprs will be used. + """ + + def __init__(self, expressions, names=None): + self._set_options(expressions, names) + + +cdef class _AggregateNodeOptions(ExecNodeOptions): + + def _set_options(self, aggregates, keys=None): + cdef: + CAggregate c_aggr + vector[CAggregate] c_aggregations + vector[CFieldRef] c_keys + + for arg_names, func_name, opts, name in aggregates: + c_aggr.function = tobytes(func_name) + if opts is not None: + c_aggr.options = (opts).wrapped + else: + c_aggr.options = nullptr + if not isinstance(arg_names, (list, tuple)): + arg_names = [arg_names] + for arg in arg_names: + c_aggr.target.push_back(_ensure_field_ref(arg)) + c_aggr.name = tobytes(name) + + c_aggregations.push_back(move(c_aggr)) + + if keys is None: + keys = [] + for name in keys: + c_keys.push_back(_ensure_field_ref(name)) + + self.wrapped.reset( + new CAggregateNodeOptions(c_aggregations, c_keys) + ) + + +class AggregateNodeOptions(_AggregateNodeOptions): + """ + Make a node which aggregates input batches, optionally grouped by keys. + + This is the option class for the "aggregate" node factory. + + Acero supports two types of aggregates: "scalar" aggregates, + and "hash" aggregates. Scalar aggregates reduce an array or scalar + input to a single scalar output (e.g. computing the mean of a column). + Hash aggregates act like GROUP BY in SQL and first partition data + based on one or more key columns, then reduce the data in each partition. + The aggregate node supports both types of computation, and can compute + any number of aggregations at once. + + Parameters + ---------- + aggregates : list of tuples + Aggregations which will be applied to the targeted fields. 
+ Specified as a list of tuples, where each tuple is one aggregation + specification and consists of: aggregation target column(s) followed + by function name, aggregation function options object and the + output field name. + The target column(s) specification can be a single field reference, + an empty list or a list of fields unary, nullary and n-ary aggregation + functions respectively. Each field reference can be a string + column name or expression. + keys : list of field references, optional + Keys by which aggregations will be grouped. Each key can reference + a field using a string name or expression. + """ + + def __init__(self, aggregates, keys=None): + self._set_options(aggregates, keys) + + +cdef class _OrderByNodeOptions(ExecNodeOptions): + + def _set_options(self, sort_keys, null_placement): + cdef: + vector[CSortKey] c_sort_keys + + for name, order in sort_keys: + c_sort_keys.push_back( + CSortKey(_ensure_field_ref(name), unwrap_sort_order(order)) + ) + + self.wrapped.reset( + new COrderByNodeOptions( + COrdering(c_sort_keys, unwrap_null_placement(null_placement)) + ) + ) + + +class OrderByNodeOptions(_OrderByNodeOptions): + """ + Make a node which applies a new ordering to the data. + + Currently this node works by accumulating all data, sorting, and then + emitting the new data with an updated batch index. + Larger-than-memory sort is not currently supported. + + This is the option class for the "order_by" node factory. + + Parameters + ---------- + sort_keys : sequence of (name, order) tuples + Names of field/column keys to sort the input on, + along with the order each field/column is sorted in. + Accepted values for `order` are "ascending", "descending". + Each field reference can be a string column name or expression. + null_placement : str, default "at_end" + Where nulls in input should be sorted, only applying to + columns/fields mentioned in `sort_keys`. + Accepted values are "at_start", "at_end". 
+ """ + + def __init__(self, sort_keys=(), *, null_placement="at_end"): + self._set_options(sort_keys, null_placement) + + +cdef class _HashJoinNodeOptions(ExecNodeOptions): + + def _set_options( + self, join_type, left_keys, right_keys, left_output=None, right_output=None, + output_suffix_for_left="", output_suffix_for_right="", + ): + cdef: + CJoinType c_join_type + vector[CFieldRef] c_left_keys + vector[CFieldRef] c_right_keys + vector[CFieldRef] c_left_output + vector[CFieldRef] c_right_output + + # join type + if join_type == "left semi": + c_join_type = CJoinType_LEFT_SEMI + elif join_type == "right semi": + c_join_type = CJoinType_RIGHT_SEMI + elif join_type == "left anti": + c_join_type = CJoinType_LEFT_ANTI + elif join_type == "right anti": + c_join_type = CJoinType_RIGHT_ANTI + elif join_type == "inner": + c_join_type = CJoinType_INNER + elif join_type == "left outer": + c_join_type = CJoinType_LEFT_OUTER + elif join_type == "right outer": + c_join_type = CJoinType_RIGHT_OUTER + elif join_type == "full outer": + c_join_type = CJoinType_FULL_OUTER + else: + raise ValueError("Unsupported join type") + + # left/right keys + if not isinstance(left_keys, (list, tuple)): + left_keys = [left_keys] + for key in left_keys: + c_left_keys.push_back(_ensure_field_ref(key)) + if not isinstance(right_keys, (list, tuple)): + right_keys = [right_keys] + for key in right_keys: + c_right_keys.push_back(_ensure_field_ref(key)) + + # left/right output fields + if left_output is not None and right_output is not None: + for colname in left_output: + c_left_output.push_back(_ensure_field_ref(colname)) + for colname in right_output: + c_right_output.push_back(_ensure_field_ref(colname)) + + self.wrapped.reset( + new CHashJoinNodeOptions( + c_join_type, c_left_keys, c_right_keys, + c_left_output, c_right_output, + _true, + tobytes(output_suffix_for_left), + tobytes(output_suffix_for_right) + ) + ) + else: + self.wrapped.reset( + new CHashJoinNodeOptions( + c_join_type, c_left_keys, c_right_keys, + _true, + tobytes(output_suffix_for_left), + tobytes(output_suffix_for_right) + ) + ) + + +class HashJoinNodeOptions(_HashJoinNodeOptions): + """ + Make a node which implements join operation using hash join strategy. + + This is the option class for the "hashjoin" node factory. + + Parameters + ---------- + join_type : str + Type of join. One of "left semi", "right semi", "left anti", + "right anti", "inner", "left outer", "right outer", "full outer". + left_keys : str, Expression or list + Key fields from left input. Each key can be a string column name + or a field expression, or a list of such field references. + right_keys : str, Expression or list + Key fields from right input. See `left_keys` for details. + left_output : list, optional + List of output fields passed from left input. If left and right + output fields are not specified, all valid fields from both left and + right input will be output. Each field can be a string column name + or a field expression. + right_output : list, optional + List of output fields passed from right input. If left and right + output fields are not specified, all valid fields from both left and + right input will be output. Each field can be a string column name + or a field expression. + output_suffix_for_left : str + Suffix added to names of output fields coming from left input + (used to distinguish, if necessary, between fields of the same + name in left and right input and can be left empty if there are + no name collisions). 
+ output_suffix_for_right : str + Suffix added to names of output fields coming from right input, + see `output_suffix_for_left` for details. + """ + + def __init__( + self, join_type, left_keys, right_keys, left_output=None, right_output=None, + output_suffix_for_left="", output_suffix_for_right="" + ): + self._set_options( + join_type, left_keys, right_keys, left_output, right_output, + output_suffix_for_left, output_suffix_for_right + ) + + +cdef class _AsofJoinNodeOptions(ExecNodeOptions): + + def _set_options(self, left_on, left_by, right_on, right_by, tolerance): + cdef: + vector[CFieldRef] c_left_by + vector[CFieldRef] c_right_by + CAsofJoinKeys c_left_keys + CAsofJoinKeys c_right_keys + vector[CAsofJoinKeys] c_input_keys + + # Prepare left AsofJoinNodeOption::Keys + if not isinstance(left_by, (list, tuple)): + left_by = [left_by] + for key in left_by: + c_left_by.push_back(_ensure_field_ref(key)) + + c_left_keys.on_key = _ensure_field_ref(left_on) + c_left_keys.by_key = c_left_by + + c_input_keys.push_back(c_left_keys) + + # Prepare right AsofJoinNodeOption::Keys + if not isinstance(right_by, (list, tuple)): + right_by = [right_by] + for key in right_by: + c_right_by.push_back(_ensure_field_ref(key)) + + c_right_keys.on_key = _ensure_field_ref(right_on) + c_right_keys.by_key = c_right_by + + c_input_keys.push_back(c_right_keys) + + self.wrapped.reset( + new CAsofJoinNodeOptions( + c_input_keys, + tolerance, + ) + ) + + +class AsofJoinNodeOptions(_AsofJoinNodeOptions): + """ + Make a node which implements 'as of join' operation. + + This is the option class for the "asofjoin" node factory. + + Parameters + ---------- + left_on : str, Expression + The left key on which the join operation should be performed. + Can be a string column name or a field expression. + + An inexact match is used on the "on" key, i.e. a row is considered a + match if and only if left_on - tolerance <= right_on <= left_on. + + The input dataset must be sorted by the "on" key. Must be a single + field of a common type. + + Currently, the "on" key must be an integer, date, or timestamp type. + left_by: str, Expression or list + The left keys on which the join operation should be performed. + Exact equality is used for each field of the "by" keys. + Each key can be a string column name or a field expression, + or a list of such field references. + right_on : str, Expression + The right key on which the join operation should be performed. + See `left_on` for details. + right_by: str, Expression or list + The right keys on which the join operation should be performed. + See `left_by` for details. + tolerance : int + The tolerance to use for the asof join. The tolerance is interpreted in + the same units as the "on" key. + """ + + def __init__(self, left_on, left_by, right_on, right_by, tolerance): + self._set_options(left_on, left_by, right_on, right_by, tolerance) + + +cdef class Declaration(_Weakrefable): + """ + Helper class for declaring the nodes of an ExecPlan. + + A Declaration represents an unconstructed ExecNode, and potentially + more since its inputs may also be Declarations or when constructed + with ``from_sequence``. + + The possible ExecNodes to use are registered with a name, + the "factory name", and need to be specified using this name, together + with its corresponding ExecNodeOptions subclass. + + Parameters + ---------- + factory_name : str + The ExecNode factory name, such as "table_source", "filter", + "project" etc. See the ExecNodeOptions subclasses for the exact + factory names to use. 
+ options : ExecNodeOptions + Corresponding ExecNodeOptions subclass (matching the factory name). + inputs : list of Declaration, optional + Input nodes for this declaration. Optional if the node is a source + node, or when the declaration gets combined later with + ``from_sequence``. + + Returns + ------- + Declaration + """ + cdef void init(self, const CDeclaration& c_decl): + self.decl = c_decl + + @staticmethod + cdef wrap(const CDeclaration& c_decl): + cdef Declaration self = Declaration.__new__(Declaration) + self.init(c_decl) + return self + + cdef inline CDeclaration unwrap(self) nogil: + return self.decl + + def __init__(self, factory_name, ExecNodeOptions options, inputs=None): + cdef: + c_string c_factory_name + CDeclaration c_decl + vector[CDeclaration.Input] c_inputs + + c_factory_name = tobytes(factory_name) + + if inputs is not None: + for ipt in inputs: + c_inputs.push_back( + CDeclaration.Input((ipt).unwrap()) + ) + + c_decl = CDeclaration(c_factory_name, c_inputs, options.unwrap()) + self.init(c_decl) + + @staticmethod + def from_sequence(decls): + """ + Convenience factory for the common case of a simple sequence of nodes. + + Each of the declarations will be appended to the inputs of the + subsequent declaration, and the final modified declaration will + be returned. + + Parameters + ---------- + decls : list of Declaration + + Returns + ------- + Declaration + """ + cdef: + vector[CDeclaration] c_decls + CDeclaration c_decl + + for decl in decls: + c_decls.push_back(( decl).unwrap()) + + c_decl = CDeclaration.Sequence(c_decls) + return Declaration.wrap(c_decl) + + def __str__(self): + return frombytes(GetResultValue(DeclarationToString(self.decl))) + + def __repr__(self): + return "\n{0}".format(str(self)) + + def to_table(self, bint use_threads=True): + """ + Run the declaration and collect the results into a table. + + This method will implicitly add a sink node to the declaration + to collect results into a table. It will then create an ExecPlan + from the declaration, start the exec plan, block until the plan + has finished, and return the created table. + + Parameters + ---------- + use_threads : bool, default True + If set to False, then all CPU work will be done on the calling + thread. I/O tasks will still happen on the I/O executor + and may be multi-threaded (but should not use significant CPU + resources). + + Returns + ------- + pyarrow.Table + """ + cdef: + shared_ptr[CTable] c_table + + with nogil: + c_table = GetResultValue(DeclarationToTable(self.unwrap(), use_threads)) + return pyarrow_wrap_table(c_table) + + def to_reader(self, bint use_threads=True): + """Run the declaration and return results as a RecordBatchReader. + + For details about the parameters, see `to_table`. + + Returns + ------- + pyarrow.RecordBatchReader + """ + cdef: + RecordBatchReader reader + reader = RecordBatchReader.__new__(RecordBatchReader) + reader.reader.reset( + GetResultValue(DeclarationToReader(self.unwrap(), use_threads)).release() + ) + return reader diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_azurefs.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_azurefs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..5cd6300c18c6a83e7036d84724666ba85396b530 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_azurefs.pyx @@ -0,0 +1,134 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from cython cimport binding + + +from pyarrow.lib import frombytes, tobytes +from pyarrow.includes.libarrow_fs cimport * +from pyarrow._fs cimport FileSystem + + +cdef class AzureFileSystem(FileSystem): + """ + Azure Blob Storage backed FileSystem implementation + + This implementation supports flat namespace and hierarchical namespace (HNS) a.k.a. + Data Lake Gen2 storage accounts. HNS will be automatically detected and HNS specific + features will be used when they provide a performance advantage. Azurite emulator is + also supported. Note: `/` is the only supported delimiter. + + The storage account is considered the root of the filesystem. When enabled, containers + will be created or deleted during relevant directory operations. Obviously, this also + requires authentication with the additional permissions. + + By default `DefaultAzureCredential `__ + is used for authentication. This means it will try several types of authentication + and go with the first one that works. If any authentication parameters are provided when + initialising the FileSystem, they will be used instead of the default credential. + + Parameters + ---------- + account_name : str + Azure Blob Storage account name. This is the globally unique identifier for the + storage account. + account_key : str, default None + Account key of the storage account. Pass None to use default credential. + blob_storage_authority : str, default None + hostname[:port] of the Blob Service. Defaults to `.blob.core.windows.net`. Useful + for connecting to a local emulator, like Azurite. + dfs_storage_authority : str, default None + hostname[:port] of the Data Lake Gen 2 Service. Defaults to + `.dfs.core.windows.net`. Useful for connecting to a local emulator, like Azurite. + blob_storage_scheme : str, default None + Either `http` or `https`. Defaults to `https`. Useful for connecting to a local + emulator, like Azurite. + dfs_storage_scheme : str, default None + Either `http` or `https`. Defaults to `https`. Useful for connecting to a local + emulator, like Azurite. + + Examples + -------- + >>> from pyarrow import fs + >>> azure_fs = fs.AzureFileSystem(account_name='myaccount') + >>> azurite_fs = fs.AzureFileSystem( + ... account_name='devstoreaccount1', + ... account_key='Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', + ... blob_storage_authority='127.0.0.1:10000', + ... dfs_storage_authority='127.0.0.1:10000', + ... blob_storage_scheme='http', + ... dfs_storage_scheme='http', + ... ) + + For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`. 
+    """
+    cdef:
+        CAzureFileSystem* azurefs
+        c_string account_key
+
+    def __init__(self, account_name, *, account_key=None, blob_storage_authority=None,
+                 dfs_storage_authority=None, blob_storage_scheme=None,
+                 dfs_storage_scheme=None):
+        cdef:
+            CAzureOptions options
+            shared_ptr[CAzureFileSystem] wrapped
+
+        options.account_name = tobytes(account_name)
+        if blob_storage_authority:
+            options.blob_storage_authority = tobytes(blob_storage_authority)
+        if dfs_storage_authority:
+            options.dfs_storage_authority = tobytes(dfs_storage_authority)
+        if blob_storage_scheme:
+            options.blob_storage_scheme = tobytes(blob_storage_scheme)
+        if dfs_storage_scheme:
+            options.dfs_storage_scheme = tobytes(dfs_storage_scheme)
+
+        if account_key:
+            options.ConfigureAccountKeyCredential(tobytes(account_key))
+            self.account_key = tobytes(account_key)
+        else:
+            options.ConfigureDefaultCredential()
+
+        with nogil:
+            wrapped = GetResultValue(CAzureFileSystem.Make(options))
+
+        self.init(<shared_ptr[CFileSystem]> wrapped)
+
+    cdef init(self, const shared_ptr[CFileSystem]& wrapped):
+        FileSystem.init(self, wrapped)
+        self.azurefs = <CAzureFileSystem*> wrapped.get()
+
+    @staticmethod
+    @binding(True)  # Required for cython < 3
+    def _reconstruct(kwargs):
+        # __reduce__ doesn't allow passing named arguments directly to the
+        # reconstructor, hence this wrapper.
+        return AzureFileSystem(**kwargs)
+
+    def __reduce__(self):
+        cdef CAzureOptions opts = self.azurefs.options()
+        return (
+            AzureFileSystem._reconstruct, (dict(
+                account_name=frombytes(opts.account_name),
+                account_key=frombytes(self.account_key),
+                blob_storage_authority=frombytes(opts.blob_storage_authority),
+                dfs_storage_authority=frombytes(opts.dfs_storage_authority),
+                blob_storage_scheme=frombytes(opts.blob_storage_scheme),
+                dfs_storage_scheme=frombytes(opts.dfs_storage_scheme)
+            ),))
diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_compute.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_compute.pyx
new file mode 100644
index 0000000000000000000000000000000000000000..0e860eaf4c6b864b5159adceae07c6db2ff01607
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/pyarrow/_compute.pyx
@@ -0,0 +1,3266 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+ +# cython: language_level = 3 + +import sys + +from cpython.object cimport Py_LT, Py_EQ, Py_GT, Py_LE, Py_NE, Py_GE +from cython.operator cimport dereference as deref + +from collections import namedtuple + +from pyarrow.lib import frombytes, tobytes, ArrowInvalid +from pyarrow.lib cimport * +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +import pyarrow.lib as lib +from pyarrow.util import _DEPR_MSG +from libcpp cimport bool as c_bool + +import inspect +import numpy as np +import warnings + + +__pas = None +_substrait_msg = ( + "The pyarrow installation is not built with support for Substrait." +) + + +def _pas(): + global __pas + if __pas is None: + try: + import pyarrow.substrait as pas + __pas = pas + except ImportError: + raise ImportError(_substrait_msg) + return __pas + + +def _forbid_instantiation(klass, subclasses_instead=True): + msg = '{} is an abstract class thus cannot be initialized.'.format( + klass.__name__ + ) + if subclasses_instead: + subclasses = [cls.__name__ for cls in klass.__subclasses__] + msg += ' Use one of the subclasses instead: {}'.format( + ', '.join(subclasses) + ) + raise TypeError(msg) + + +cdef wrap_scalar_function(const shared_ptr[CFunction]& sp_func): + """ + Wrap a C++ scalar Function in a ScalarFunction object. + """ + cdef ScalarFunction func = ScalarFunction.__new__(ScalarFunction) + func.init(sp_func) + return func + + +cdef wrap_vector_function(const shared_ptr[CFunction]& sp_func): + """ + Wrap a C++ vector Function in a VectorFunction object. + """ + cdef VectorFunction func = VectorFunction.__new__(VectorFunction) + func.init(sp_func) + return func + + +cdef wrap_scalar_aggregate_function(const shared_ptr[CFunction]& sp_func): + """ + Wrap a C++ aggregate Function in a ScalarAggregateFunction object. + """ + cdef ScalarAggregateFunction func = \ + ScalarAggregateFunction.__new__(ScalarAggregateFunction) + func.init(sp_func) + return func + + +cdef wrap_hash_aggregate_function(const shared_ptr[CFunction]& sp_func): + """ + Wrap a C++ aggregate Function in a HashAggregateFunction object. + """ + cdef HashAggregateFunction func = \ + HashAggregateFunction.__new__(HashAggregateFunction) + func.init(sp_func) + return func + + +cdef wrap_meta_function(const shared_ptr[CFunction]& sp_func): + """ + Wrap a C++ meta Function in a MetaFunction object. + """ + cdef MetaFunction func = MetaFunction.__new__(MetaFunction) + func.init(sp_func) + return func + + +cdef wrap_function(const shared_ptr[CFunction]& sp_func): + """ + Wrap a C++ Function in a Function object. + + This dispatches to specialized wrappers depending on the function kind. 
+ """ + if sp_func.get() == NULL: + raise ValueError("Function was NULL") + + cdef FunctionKind c_kind = sp_func.get().kind() + if c_kind == FunctionKind_SCALAR: + return wrap_scalar_function(sp_func) + elif c_kind == FunctionKind_VECTOR: + return wrap_vector_function(sp_func) + elif c_kind == FunctionKind_SCALAR_AGGREGATE: + return wrap_scalar_aggregate_function(sp_func) + elif c_kind == FunctionKind_HASH_AGGREGATE: + return wrap_hash_aggregate_function(sp_func) + elif c_kind == FunctionKind_META: + return wrap_meta_function(sp_func) + else: + raise NotImplementedError("Unknown Function::Kind") + + +cdef wrap_scalar_kernel(const CScalarKernel* c_kernel): + if c_kernel == NULL: + raise ValueError("Kernel was NULL") + cdef ScalarKernel kernel = ScalarKernel.__new__(ScalarKernel) + kernel.init(c_kernel) + return kernel + + +cdef wrap_vector_kernel(const CVectorKernel* c_kernel): + if c_kernel == NULL: + raise ValueError("Kernel was NULL") + cdef VectorKernel kernel = VectorKernel.__new__(VectorKernel) + kernel.init(c_kernel) + return kernel + + +cdef wrap_scalar_aggregate_kernel(const CScalarAggregateKernel* c_kernel): + if c_kernel == NULL: + raise ValueError("Kernel was NULL") + cdef ScalarAggregateKernel kernel = \ + ScalarAggregateKernel.__new__(ScalarAggregateKernel) + kernel.init(c_kernel) + return kernel + + +cdef wrap_hash_aggregate_kernel(const CHashAggregateKernel* c_kernel): + if c_kernel == NULL: + raise ValueError("Kernel was NULL") + cdef HashAggregateKernel kernel = \ + HashAggregateKernel.__new__(HashAggregateKernel) + kernel.init(c_kernel) + return kernel + + +cdef class Kernel(_Weakrefable): + """ + A kernel object. + + Kernels handle the execution of a Function for a certain signature. + """ + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly" + .format(self.__class__.__name__)) + + +cdef class ScalarKernel(Kernel): + cdef const CScalarKernel* kernel + + cdef void init(self, const CScalarKernel* kernel) except *: + self.kernel = kernel + + def __repr__(self): + return ("ScalarKernel<{}>" + .format(frombytes(self.kernel.signature.get().ToString()))) + + +cdef class VectorKernel(Kernel): + cdef const CVectorKernel* kernel + + cdef void init(self, const CVectorKernel* kernel) except *: + self.kernel = kernel + + def __repr__(self): + return ("VectorKernel<{}>" + .format(frombytes(self.kernel.signature.get().ToString()))) + + +cdef class ScalarAggregateKernel(Kernel): + cdef const CScalarAggregateKernel* kernel + + cdef void init(self, const CScalarAggregateKernel* kernel) except *: + self.kernel = kernel + + def __repr__(self): + return ("ScalarAggregateKernel<{}>" + .format(frombytes(self.kernel.signature.get().ToString()))) + + +cdef class HashAggregateKernel(Kernel): + cdef const CHashAggregateKernel* kernel + + cdef void init(self, const CHashAggregateKernel* kernel) except *: + self.kernel = kernel + + def __repr__(self): + return ("HashAggregateKernel<{}>" + .format(frombytes(self.kernel.signature.get().ToString()))) + + +FunctionDoc = namedtuple( + "FunctionDoc", + ("summary", "description", "arg_names", "options_class", + "options_required")) + + +cdef class Function(_Weakrefable): + """ + A compute function. + + A function implements a certain logical computation over a range of + possible input signatures. Each signature accepts a range of input + types and is implemented by a given Kernel. + + Functions can be of different kinds: + + * "scalar" functions apply an item-wise computation over all items + of their inputs. 
Each item in the output only depends on the values + of the inputs at the same position. Examples: addition, comparisons, + string predicates... + + * "vector" functions apply a collection-wise computation, such that + each item in the output may depend on the values of several items + in each input. Examples: dictionary encoding, sorting, extracting + unique values... + + * "scalar_aggregate" functions reduce the dimensionality of the inputs by + applying a reduction function. Examples: sum, min_max, mode... + + * "hash_aggregate" functions apply a reduction function to an input + subdivided by grouping criteria. They may not be directly called. + Examples: hash_sum, hash_min_max... + + * "meta" functions dispatch to other functions. + """ + + cdef: + shared_ptr[CFunction] sp_func + CFunction* base_func + + _kind_map = { + FunctionKind_SCALAR: "scalar", + FunctionKind_VECTOR: "vector", + FunctionKind_SCALAR_AGGREGATE: "scalar_aggregate", + FunctionKind_HASH_AGGREGATE: "hash_aggregate", + FunctionKind_META: "meta", + } + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly" + .format(self.__class__.__name__)) + + cdef void init(self, const shared_ptr[CFunction]& sp_func) except *: + self.sp_func = sp_func + self.base_func = sp_func.get() + + def __repr__(self): + return ("arrow.compute.Function" + .format(self.name, self.kind, self.arity, self.num_kernels)) + + def __reduce__(self): + # Reduction uses the global registry + return get_function, (self.name,) + + @property + def name(self): + """ + The function name. + """ + return frombytes(self.base_func.name()) + + @property + def arity(self): + """ + The function arity. + + If Ellipsis (i.e. `...`) is returned, the function takes a variable + number of arguments. + """ + cdef CArity arity = self.base_func.arity() + if arity.is_varargs: + return ... + else: + return arity.num_args + + @property + def kind(self): + """ + The function kind. + """ + cdef FunctionKind c_kind = self.base_func.kind() + try: + return self._kind_map[c_kind] + except KeyError: + raise NotImplementedError("Unknown Function::Kind") + + @property + def _doc(self): + """ + The C++-like function documentation (for internal use). + """ + cdef CFunctionDoc c_doc = self.base_func.doc() + return FunctionDoc(frombytes(c_doc.summary), + frombytes(c_doc.description), + [frombytes(s) for s in c_doc.arg_names], + frombytes(c_doc.options_class), + c_doc.options_required) + + @property + def num_kernels(self): + """ + The number of kernels implementing this function. + """ + return self.base_func.num_kernels() + + def call(self, args, FunctionOptions options=None, + MemoryPool memory_pool=None, length=None): + """ + Call the function on the given arguments. + + Parameters + ---------- + args : iterable + The arguments to pass to the function. Accepted types depend + on the specific function. + options : FunctionOptions, optional + Options instance for executing this function. This should have + the right concrete options type. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + length : int, optional + Batch size for execution, for nullary (no argument) functions. If + not passed, will be inferred from passed data. 
+        """
+        cdef:
+            const CFunctionOptions* c_options = NULL
+            CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool)
+            CExecContext c_exec_ctx = CExecContext(pool)
+            CExecBatch c_batch
+            CDatum result
+
+        _pack_compute_args(args, &c_batch.values)
+
+        if options is not None:
+            c_options = options.get_options()
+
+        if length is not None:
+            c_batch.length = length
+            with nogil:
+                result = GetResultValue(
+                    self.base_func.Execute(c_batch, c_options, &c_exec_ctx)
+                )
+        else:
+            with nogil:
+                result = GetResultValue(
+                    self.base_func.Execute(c_batch.values, c_options,
+                                           &c_exec_ctx)
+                )
+
+        return wrap_datum(result)
+
+
+cdef class ScalarFunction(Function):
+    cdef const CScalarFunction* func
+
+    cdef void init(self, const shared_ptr[CFunction]& sp_func) except *:
+        Function.init(self, sp_func)
+        self.func = <const CScalarFunction*> sp_func.get()
+
+    @property
+    def kernels(self):
+        """
+        The kernels implementing this function.
+        """
+        cdef vector[const CScalarKernel*] kernels = self.func.kernels()
+        return [wrap_scalar_kernel(k) for k in kernels]
+
+
+cdef class VectorFunction(Function):
+    cdef const CVectorFunction* func
+
+    cdef void init(self, const shared_ptr[CFunction]& sp_func) except *:
+        Function.init(self, sp_func)
+        self.func = <const CVectorFunction*> sp_func.get()
+
+    @property
+    def kernels(self):
+        """
+        The kernels implementing this function.
+        """
+        cdef vector[const CVectorKernel*] kernels = self.func.kernels()
+        return [wrap_vector_kernel(k) for k in kernels]
+
+
+cdef class ScalarAggregateFunction(Function):
+    cdef const CScalarAggregateFunction* func
+
+    cdef void init(self, const shared_ptr[CFunction]& sp_func) except *:
+        Function.init(self, sp_func)
+        self.func = <const CScalarAggregateFunction*> sp_func.get()
+
+    @property
+    def kernels(self):
+        """
+        The kernels implementing this function.
+        """
+        cdef vector[const CScalarAggregateKernel*] kernels = \
+            self.func.kernels()
+        return [wrap_scalar_aggregate_kernel(k) for k in kernels]
+
+
+cdef class HashAggregateFunction(Function):
+    cdef const CHashAggregateFunction* func
+
+    cdef void init(self, const shared_ptr[CFunction]& sp_func) except *:
+        Function.init(self, sp_func)
+        self.func = <const CHashAggregateFunction*> sp_func.get()
+
+    @property
+    def kernels(self):
+        """
+        The kernels implementing this function.
+        """
+        cdef vector[const CHashAggregateKernel*] kernels = self.func.kernels()
+        return [wrap_hash_aggregate_kernel(k) for k in kernels]
+
+
+cdef class MetaFunction(Function):
+    cdef const CMetaFunction* func
+
+    cdef void init(self, const shared_ptr[CFunction]& sp_func) except *:
+        Function.init(self, sp_func)
+        self.func = <const CMetaFunction*> sp_func.get()
+
+    # Since num_kernels is exposed, also expose a kernels property
+    @property
+    def kernels(self):
+        """
+        The kernels implementing this function.
+        """
+        return []
+
+
+cdef _pack_compute_args(object values, vector[CDatum]* out):
+    for val in values:
+        if isinstance(val, (list, np.ndarray)):
+            val = lib.asarray(val)
+
+        if isinstance(val, Array):
+            out.push_back(CDatum((<Array> val).sp_array))
+            continue
+        elif isinstance(val, ChunkedArray):
+            out.push_back(CDatum((<ChunkedArray> val).sp_chunked_array))
+            continue
+        elif isinstance(val, Scalar):
+            out.push_back(CDatum((<Scalar> val).unwrap()))
+            continue
+        elif isinstance(val, RecordBatch):
+            out.push_back(CDatum((<RecordBatch> val).sp_batch))
+            continue
+        elif isinstance(val, Table):
+            out.push_back(CDatum((<Table> val).sp_table))
+            continue
+        else:
+            # Is it a Python scalar?
+ try: + scal = lib.scalar(val) + except Exception: + # Raise dedicated error below + pass + else: + out.push_back(CDatum(( scal).unwrap())) + continue + + raise TypeError(f"Got unexpected argument type {type(val)} " + "for compute function") + + +cdef class FunctionRegistry(_Weakrefable): + cdef CFunctionRegistry* registry + + def __init__(self): + self.registry = GetFunctionRegistry() + + def list_functions(self): + """ + Return all function names in the registry. + """ + cdef vector[c_string] names = self.registry.GetFunctionNames() + return [frombytes(name) for name in names] + + def get_function(self, name): + """ + Look up a function by name in the registry. + + Parameters + ---------- + name : str + The name of the function to lookup + """ + cdef: + c_string c_name = tobytes(name) + shared_ptr[CFunction] func + with nogil: + func = GetResultValue(self.registry.GetFunction(c_name)) + return wrap_function(func) + + +cdef FunctionRegistry _global_func_registry = FunctionRegistry() + + +def function_registry(): + return _global_func_registry + + +def get_function(name): + """ + Get a function by name. + + The function is looked up in the global registry + (as returned by `function_registry()`). + + Parameters + ---------- + name : str + The name of the function to lookup + """ + return _global_func_registry.get_function(name) + + +def list_functions(): + """ + Return all function names in the global registry. + """ + return _global_func_registry.list_functions() + + +def call_function(name, args, options=None, memory_pool=None, length=None): + """ + Call a named function. + + The function is looked up in the global registry + (as returned by `function_registry()`). + + Parameters + ---------- + name : str + The name of the function to call. + args : list + The arguments to the function. + options : optional + options provided to the function. + memory_pool : MemoryPool, optional + memory pool to use for allocations during function execution. + length : int, optional + Batch size for execution, for nullary (no argument) functions. If not + passed, inferred from data. + """ + func = _global_func_registry.get_function(name) + return func.call(args, options=options, memory_pool=memory_pool, + length=length) + + +cdef class FunctionOptions(_Weakrefable): + __slots__ = () # avoid mistakingly creating attributes + + cdef const CFunctionOptions* get_options(self) except NULL: + return self.wrapped.get() + + cdef void init(self, const shared_ptr[CFunctionOptions]& sp): + self.wrapped = sp + + cdef inline shared_ptr[CFunctionOptions] unwrap(self): + return self.wrapped + + def serialize(self): + cdef: + CResult[shared_ptr[CBuffer]] res = self.get_options().Serialize() + shared_ptr[CBuffer] c_buf = GetResultValue(res) + return pyarrow_wrap_buffer(c_buf) + + @staticmethod + def deserialize(buf): + """ + Deserialize options for a function. + + Parameters + ---------- + buf : Buffer + The buffer containing the data to deserialize. 
+ """ + cdef: + shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf) + CResult[unique_ptr[CFunctionOptions]] maybe_options = \ + DeserializeFunctionOptions(deref(c_buf)) + shared_ptr[CFunctionOptions] c_options + c_options = to_shared(GetResultValue(move(maybe_options))) + type_name = frombytes(c_options.get().options_type().type_name()) + module = globals() + if type_name not in module: + raise ValueError(f'Cannot deserialize "{type_name}"') + klass = module[type_name] + options = klass.__new__(klass) + ( options).init(c_options) + return options + + def __repr__(self): + type_name = self.__class__.__name__ + # Remove {} so we can use our own braces + string_repr = frombytes(self.get_options().ToString())[1:-1] + return f"{type_name}({string_repr})" + + def __eq__(self, FunctionOptions other): + return self.get_options().Equals(deref(other.get_options())) + + +def _raise_invalid_function_option(value, description, *, + exception_class=ValueError): + raise exception_class(f"\"{value}\" is not a valid {description}") + + +# NOTE: +# To properly expose the constructor signature of FunctionOptions +# subclasses, we use a two-level inheritance: +# 1. a C extension class that implements option validation and setting +# (won't expose function signatures because of +# https://github.com/cython/cython/issues/3873) +# 2. a Python derived class that implements the constructor + +cdef class _CastOptions(FunctionOptions): + cdef CCastOptions* options + + cdef void init(self, const shared_ptr[CFunctionOptions]& sp): + FunctionOptions.init(self, sp) + self.options = self.wrapped.get() + + def _set_options(self, DataType target_type, allow_int_overflow, + allow_time_truncate, allow_time_overflow, + allow_decimal_truncate, allow_float_truncate, + allow_invalid_utf8): + cdef: + shared_ptr[CCastOptions] wrapped = make_shared[CCastOptions]() + self.init( wrapped) + self._set_type(target_type) + if allow_int_overflow is not None: + self.allow_int_overflow = allow_int_overflow + if allow_time_truncate is not None: + self.allow_time_truncate = allow_time_truncate + if allow_time_overflow is not None: + self.allow_time_overflow = allow_time_overflow + if allow_decimal_truncate is not None: + self.allow_decimal_truncate = allow_decimal_truncate + if allow_float_truncate is not None: + self.allow_float_truncate = allow_float_truncate + if allow_invalid_utf8 is not None: + self.allow_invalid_utf8 = allow_invalid_utf8 + + def _set_type(self, target_type=None): + if target_type is not None: + deref(self.options).to_type = \ + ( ensure_type(target_type)).sp_type + + def _set_safe(self): + self.init(shared_ptr[CFunctionOptions]( + new CCastOptions(CCastOptions.Safe()))) + + def _set_unsafe(self): + self.init(shared_ptr[CFunctionOptions]( + new CCastOptions(CCastOptions.Unsafe()))) + + def is_safe(self): + return not (deref(self.options).allow_int_overflow or + deref(self.options).allow_time_truncate or + deref(self.options).allow_time_overflow or + deref(self.options).allow_decimal_truncate or + deref(self.options).allow_float_truncate or + deref(self.options).allow_invalid_utf8) + + @property + def allow_int_overflow(self): + return deref(self.options).allow_int_overflow + + @allow_int_overflow.setter + def allow_int_overflow(self, c_bool flag): + deref(self.options).allow_int_overflow = flag + + @property + def allow_time_truncate(self): + return deref(self.options).allow_time_truncate + + @allow_time_truncate.setter + def allow_time_truncate(self, c_bool flag): + deref(self.options).allow_time_truncate = flag + + 
@property + def allow_time_overflow(self): + return deref(self.options).allow_time_overflow + + @allow_time_overflow.setter + def allow_time_overflow(self, c_bool flag): + deref(self.options).allow_time_overflow = flag + + @property + def allow_decimal_truncate(self): + return deref(self.options).allow_decimal_truncate + + @allow_decimal_truncate.setter + def allow_decimal_truncate(self, c_bool flag): + deref(self.options).allow_decimal_truncate = flag + + @property + def allow_float_truncate(self): + return deref(self.options).allow_float_truncate + + @allow_float_truncate.setter + def allow_float_truncate(self, c_bool flag): + deref(self.options).allow_float_truncate = flag + + @property + def allow_invalid_utf8(self): + return deref(self.options).allow_invalid_utf8 + + @allow_invalid_utf8.setter + def allow_invalid_utf8(self, c_bool flag): + deref(self.options).allow_invalid_utf8 = flag + + +class CastOptions(_CastOptions): + """ + Options for the `cast` function. + + Parameters + ---------- + target_type : DataType, optional + The PyArrow type to cast to. + allow_int_overflow : bool, default False + Whether integer overflow is allowed when casting. + allow_time_truncate : bool, default False + Whether time precision truncation is allowed when casting. + allow_time_overflow : bool, default False + Whether date/time range overflow is allowed when casting. + allow_decimal_truncate : bool, default False + Whether decimal precision truncation is allowed when casting. + allow_float_truncate : bool, default False + Whether floating-point precision truncation is allowed when casting. + allow_invalid_utf8 : bool, default False + Whether producing invalid utf8 data is allowed when casting. + """ + + def __init__(self, target_type=None, *, allow_int_overflow=None, + allow_time_truncate=None, allow_time_overflow=None, + allow_decimal_truncate=None, allow_float_truncate=None, + allow_invalid_utf8=None): + self._set_options(target_type, allow_int_overflow, allow_time_truncate, + allow_time_overflow, allow_decimal_truncate, + allow_float_truncate, allow_invalid_utf8) + + @staticmethod + def safe(target_type=None): + """" + Create a CastOptions for a safe cast. + + Parameters + ---------- + target_type : optional + Target cast type for the safe cast. + """ + self = CastOptions() + self._set_safe() + self._set_type(target_type) + return self + + @staticmethod + def unsafe(target_type=None): + """" + Create a CastOptions for an unsafe cast. + + Parameters + ---------- + target_type : optional + Target cast type for the unsafe cast. + """ + self = CastOptions() + self._set_unsafe() + self._set_type(target_type) + return self + + +def _skip_nulls_doc(): + # (note the weird indent because of how the string is inserted + # by callers) + return """skip_nulls : bool, default True + Whether to skip (ignore) nulls in the input. + If False, any null in the input forces the output to null. +""" + + +def _min_count_doc(*, default): + return f"""min_count : int, default {default} + Minimum number of non-null values in the input. If the number + of non-null values is below `min_count`, the output is null. +""" + + +cdef class _ElementWiseAggregateOptions(FunctionOptions): + def _set_options(self, skip_nulls): + self.wrapped.reset(new CElementWiseAggregateOptions(skip_nulls)) + + +class ElementWiseAggregateOptions(_ElementWiseAggregateOptions): + __doc__ = f""" + Options for element-wise aggregate functions. 
+ + Parameters + ---------- + {_skip_nulls_doc()} + """ + + def __init__(self, *, skip_nulls=True): + self._set_options(skip_nulls) + + +cdef CRoundMode unwrap_round_mode(round_mode) except *: + if round_mode == "down": + return CRoundMode_DOWN + elif round_mode == "up": + return CRoundMode_UP + elif round_mode == "towards_zero": + return CRoundMode_TOWARDS_ZERO + elif round_mode == "towards_infinity": + return CRoundMode_TOWARDS_INFINITY + elif round_mode == "half_down": + return CRoundMode_HALF_DOWN + elif round_mode == "half_up": + return CRoundMode_HALF_UP + elif round_mode == "half_towards_zero": + return CRoundMode_HALF_TOWARDS_ZERO + elif round_mode == "half_towards_infinity": + return CRoundMode_HALF_TOWARDS_INFINITY + elif round_mode == "half_to_even": + return CRoundMode_HALF_TO_EVEN + elif round_mode == "half_to_odd": + return CRoundMode_HALF_TO_ODD + _raise_invalid_function_option(round_mode, "round mode") + + +cdef class _RoundOptions(FunctionOptions): + def _set_options(self, ndigits, round_mode): + self.wrapped.reset( + new CRoundOptions(ndigits, unwrap_round_mode(round_mode)) + ) + + +class RoundOptions(_RoundOptions): + """ + Options for rounding numbers. + + Parameters + ---------- + ndigits : int, default 0 + Number of fractional digits to round to. + round_mode : str, default "half_to_even" + Rounding and tie-breaking mode. + Accepted values are "down", "up", "towards_zero", "towards_infinity", + "half_down", "half_up", "half_towards_zero", "half_towards_infinity", + "half_to_even", "half_to_odd". + """ + + def __init__(self, ndigits=0, round_mode="half_to_even"): + self._set_options(ndigits, round_mode) + + +cdef class _RoundBinaryOptions(FunctionOptions): + def _set_options(self, round_mode): + self.wrapped.reset( + new CRoundBinaryOptions(unwrap_round_mode(round_mode)) + ) + + +class RoundBinaryOptions(_RoundBinaryOptions): + """ + Options for rounding numbers when ndigits is provided by a second array + + Parameters + ---------- + round_mode : str, default "half_to_even" + Rounding and tie-breaking mode. + Accepted values are "down", "up", "towards_zero", "towards_infinity", + "half_down", "half_up", "half_towards_zero", "half_towards_infinity", + "half_to_even", "half_to_odd". + """ + + def __init__(self, round_mode="half_to_even"): + self._set_options(round_mode) + + +cdef CCalendarUnit unwrap_round_temporal_unit(unit) except *: + if unit == "nanosecond": + return CCalendarUnit_NANOSECOND + elif unit == "microsecond": + return CCalendarUnit_MICROSECOND + elif unit == "millisecond": + return CCalendarUnit_MILLISECOND + elif unit == "second": + return CCalendarUnit_SECOND + elif unit == "minute": + return CCalendarUnit_MINUTE + elif unit == "hour": + return CCalendarUnit_HOUR + elif unit == "day": + return CCalendarUnit_DAY + elif unit == "week": + return CCalendarUnit_WEEK + elif unit == "month": + return CCalendarUnit_MONTH + elif unit == "quarter": + return CCalendarUnit_QUARTER + elif unit == "year": + return CCalendarUnit_YEAR + _raise_invalid_function_option(unit, "Calendar unit") + + +cdef class _RoundTemporalOptions(FunctionOptions): + def _set_options(self, multiple, unit, week_starts_monday, + ceil_is_strictly_greater, calendar_based_origin): + self.wrapped.reset( + new CRoundTemporalOptions( + multiple, unwrap_round_temporal_unit(unit), + week_starts_monday, ceil_is_strictly_greater, + calendar_based_origin) + ) + + +class RoundTemporalOptions(_RoundTemporalOptions): + """ + Options for rounding temporal values. 
+ + Parameters + ---------- + multiple : int, default 1 + Number of units to round to. + unit : str, default "day" + The unit in which `multiple` is expressed. + Accepted values are "year", "quarter", "month", "week", "day", + "hour", "minute", "second", "millisecond", "microsecond", + "nanosecond". + week_starts_monday : bool, default True + If True, weeks start on Monday; if False, on Sunday. + ceil_is_strictly_greater : bool, default False + If True, ceil returns a rounded value that is strictly greater than the + input. For example: ceiling 1970-01-01T00:00:00 to 3 hours would + yield 1970-01-01T03:00:00 if set to True and 1970-01-01T00:00:00 + if set to False. + This applies to the ceil_temporal function only. + calendar_based_origin : bool, default False + By default, the origin is 1970-01-01T00:00:00. By setting this to True, + rounding origin will be beginning of one less precise calendar unit. + E.g.: rounding to hours will use beginning of day as origin. + + By default time is rounded to a multiple of units since + 1970-01-01T00:00:00. By setting calendar_based_origin to true, + time will be rounded to number of units since the last greater + calendar unit. + For example: rounding to multiple of days since the beginning of the + month or to hours since the beginning of the day. + Exceptions: week and quarter are not used as greater units, + therefore days will be rounded to the beginning of the month not + week. Greater unit of week is a year. + Note that ceiling and rounding might change sorting order of an array + near greater unit change. For example rounding YYYY-mm-dd 23:00:00 to + 5 hours will ceil and round to YYYY-mm-dd+1 01:00:00 and floor to + YYYY-mm-dd 20:00:00. On the other hand YYYY-mm-dd+1 00:00:00 will + ceil, round and floor to YYYY-mm-dd+1 00:00:00. This can break the + order of an already ordered array. + + """ + + def __init__(self, multiple=1, unit="day", *, week_starts_monday=True, + ceil_is_strictly_greater=False, + calendar_based_origin=False): + self._set_options(multiple, unit, week_starts_monday, + ceil_is_strictly_greater, + calendar_based_origin) + + +cdef class _RoundToMultipleOptions(FunctionOptions): + def _set_options(self, multiple, round_mode): + if not isinstance(multiple, Scalar): + try: + multiple = lib.scalar(multiple) + except Exception: + _raise_invalid_function_option( + multiple, "multiple type for RoundToMultipleOptions", + exception_class=TypeError) + + self.wrapped.reset( + new CRoundToMultipleOptions( + pyarrow_unwrap_scalar(multiple), unwrap_round_mode(round_mode)) + ) + + +class RoundToMultipleOptions(_RoundToMultipleOptions): + """ + Options for rounding numbers to a multiple. + + Parameters + ---------- + multiple : numeric scalar, default 1.0 + Multiple to round to. Should be a scalar of a type compatible + with the argument to be rounded. + round_mode : str, default "half_to_even" + Rounding and tie-breaking mode. + Accepted values are "down", "up", "towards_zero", "towards_infinity", + "half_down", "half_up", "half_towards_zero", "half_towards_infinity", + "half_to_even", "half_to_odd". 
+ """ + + def __init__(self, multiple=1.0, round_mode="half_to_even"): + self._set_options(multiple, round_mode) + + +cdef class _JoinOptions(FunctionOptions): + _null_handling_map = { + "emit_null": CJoinNullHandlingBehavior_EMIT_NULL, + "skip": CJoinNullHandlingBehavior_SKIP, + "replace": CJoinNullHandlingBehavior_REPLACE, + } + + def _set_options(self, null_handling, null_replacement): + try: + self.wrapped.reset( + new CJoinOptions(self._null_handling_map[null_handling], + tobytes(null_replacement)) + ) + except KeyError: + _raise_invalid_function_option(null_handling, "null handling") + + +class JoinOptions(_JoinOptions): + """ + Options for the `binary_join_element_wise` function. + + Parameters + ---------- + null_handling : str, default "emit_null" + How to handle null values in the inputs. + Accepted values are "emit_null", "skip", "replace". + null_replacement : str, default "" + Replacement string to emit for null inputs if `null_handling` + is "replace". + """ + + def __init__(self, null_handling="emit_null", null_replacement=""): + self._set_options(null_handling, null_replacement) + + +cdef class _MatchSubstringOptions(FunctionOptions): + def _set_options(self, pattern, ignore_case): + self.wrapped.reset( + new CMatchSubstringOptions(tobytes(pattern), ignore_case) + ) + + +class MatchSubstringOptions(_MatchSubstringOptions): + """ + Options for looking for a substring. + + Parameters + ---------- + pattern : str + Substring pattern to look for inside input values. + ignore_case : bool, default False + Whether to perform a case-insensitive match. + """ + + def __init__(self, pattern, *, ignore_case=False): + self._set_options(pattern, ignore_case) + + +cdef class _PadOptions(FunctionOptions): + def _set_options(self, width, padding, lean_left_on_odd_padding): + self.wrapped.reset(new CPadOptions(width, tobytes(padding), lean_left_on_odd_padding)) + + +class PadOptions(_PadOptions): + """ + Options for padding strings. + + Parameters + ---------- + width : int + Desired string length. + padding : str, default " " + What to pad the string with. Should be one byte or codepoint. + lean_left_on_odd_padding : bool, default True + What to do if there is an odd number of padding characters (in case + of centered padding). Defaults to aligning on the left (i.e. adding + the extra padding character on the right). + """ + + def __init__(self, width, padding=' ', lean_left_on_odd_padding=True): + self._set_options(width, padding, lean_left_on_odd_padding) + + +cdef class _TrimOptions(FunctionOptions): + def _set_options(self, characters): + self.wrapped.reset(new CTrimOptions(tobytes(characters))) + + +class TrimOptions(_TrimOptions): + """ + Options for trimming characters from strings. + + Parameters + ---------- + characters : str + Individual characters to be trimmed from the string. + """ + + def __init__(self, characters): + self._set_options(tobytes(characters)) + + +cdef class _ReplaceSubstringOptions(FunctionOptions): + def _set_options(self, pattern, replacement, max_replacements): + self.wrapped.reset( + new CReplaceSubstringOptions(tobytes(pattern), + tobytes(replacement), + max_replacements) + ) + + +class ReplaceSubstringOptions(_ReplaceSubstringOptions): + """ + Options for replacing matched substrings. + + Parameters + ---------- + pattern : str + Substring pattern to look for inside input values. + replacement : str + What to replace the pattern with. 
+    max_replacements : int or None, default None
+        The maximum number of strings to replace in each
+        input value (unlimited if None).
+    """
+
+    def __init__(self, pattern, replacement, *, max_replacements=None):
+        if max_replacements is None:
+            max_replacements = -1
+        self._set_options(pattern, replacement, max_replacements)
+
+
+cdef class _ExtractRegexOptions(FunctionOptions):
+    def _set_options(self, pattern):
+        self.wrapped.reset(new CExtractRegexOptions(tobytes(pattern)))
+
+
+class ExtractRegexOptions(_ExtractRegexOptions):
+    """
+    Options for the `extract_regex` function.
+
+    Parameters
+    ----------
+    pattern : str
+        Regular expression with named capture fields.
+    """
+
+    def __init__(self, pattern):
+        self._set_options(pattern)
+
+
+cdef class _SliceOptions(FunctionOptions):
+    def _set_options(self, start, stop, step):
+        self.wrapped.reset(new CSliceOptions(start, stop, step))
+
+
+class SliceOptions(_SliceOptions):
+    """
+    Options for slicing.
+
+    Parameters
+    ----------
+    start : int
+        Index to start slicing at (inclusive).
+    stop : int or None, default None
+        If given, index to stop slicing at (exclusive).
+        If not given, slicing will stop at the end.
+    step : int, default 1
+        Slice step.
+    """
+
+    def __init__(self, start, stop=None, step=1):
+        if stop is None:
+            stop = sys.maxsize
+            if step < 0:
+                stop = -stop
+        self._set_options(start, stop, step)
+
+
+cdef class _ListSliceOptions(FunctionOptions):
+    cpdef _set_options(self, start, stop=None, step=1, return_fixed_size_list=None):
+        cdef:
+            CListSliceOptions* opts
+        opts = new CListSliceOptions(
+            start,
+            nullopt if stop is None
+            else <optional[int64_t]>(<int64_t> stop),
+            step,
+            nullopt if return_fixed_size_list is None
+            else <optional[c_bool]>(<c_bool> return_fixed_size_list)
+        )
+        self.wrapped.reset(opts)
+
+
+class ListSliceOptions(_ListSliceOptions):
+    """
+    Options for list array slicing.
+
+    Parameters
+    ----------
+    start : int
+        Index to start slicing inner list elements (inclusive).
+    stop : Optional[int], default None
+        If given, index to stop slicing at (exclusive).
+        If not given, slicing will stop at the end. (NotImplemented)
+    step : int, default 1
+        Slice step.
+    return_fixed_size_list : Optional[bool], default None
+        Whether to return a FixedSizeListArray. If true _and_ stop is after
+        a list element's length, nulls will be appended to create the
+        requested slice size. The default of `None` will return the same
+        type which was passed in.
+    """
+
+    def __init__(self, start, stop=None, step=1, return_fixed_size_list=None):
+        self._set_options(start, stop, step, return_fixed_size_list)
+
+
+cdef class _ReplaceSliceOptions(FunctionOptions):
+    def _set_options(self, start, stop, replacement):
+        self.wrapped.reset(
+            new CReplaceSliceOptions(start, stop, tobytes(replacement))
+        )
+
+
+class ReplaceSliceOptions(_ReplaceSliceOptions):
+    """
+    Options for replacing slices.
+
+    Parameters
+    ----------
+    start : int
+        Index to start slicing at (inclusive).
+    stop : int
+        Index to stop slicing at (exclusive).
+    replacement : str
+        What to replace the slice with.
+ """ + + def __init__(self, start, stop, replacement): + self._set_options(start, stop, replacement) + + +cdef class _FilterOptions(FunctionOptions): + _null_selection_map = { + "drop": CFilterNullSelectionBehavior_DROP, + "emit_null": CFilterNullSelectionBehavior_EMIT_NULL, + } + + def _set_options(self, null_selection_behavior): + try: + self.wrapped.reset( + new CFilterOptions( + self._null_selection_map[null_selection_behavior] + ) + ) + except KeyError: + _raise_invalid_function_option(null_selection_behavior, + "null selection behavior") + + +class FilterOptions(_FilterOptions): + """ + Options for selecting with a boolean filter. + + Parameters + ---------- + null_selection_behavior : str, default "drop" + How to handle nulls in the selection filter. + Accepted values are "drop", "emit_null". + """ + + def __init__(self, null_selection_behavior="drop"): + self._set_options(null_selection_behavior) + + +cdef class _DictionaryEncodeOptions(FunctionOptions): + _null_encoding_map = { + "encode": CDictionaryEncodeNullEncodingBehavior_ENCODE, + "mask": CDictionaryEncodeNullEncodingBehavior_MASK, + } + + def _set_options(self, null_encoding): + try: + self.wrapped.reset( + new CDictionaryEncodeOptions( + self._null_encoding_map[null_encoding] + ) + ) + except KeyError: + _raise_invalid_function_option(null_encoding, "null encoding") + + +class DictionaryEncodeOptions(_DictionaryEncodeOptions): + """ + Options for dictionary encoding. + + Parameters + ---------- + null_encoding : str, default "mask" + How to encode nulls in the input. + Accepted values are "mask" (null inputs emit a null in the indices + array), "encode" (null inputs emit a non-null index pointing to + a null value in the dictionary array). + """ + + def __init__(self, null_encoding="mask"): + self._set_options(null_encoding) + + +cdef class _RunEndEncodeOptions(FunctionOptions): + def _set_options(self, run_end_type): + run_end_ty = ensure_type(run_end_type) + self.wrapped.reset(new CRunEndEncodeOptions(pyarrow_unwrap_data_type(run_end_ty))) + + +class RunEndEncodeOptions(_RunEndEncodeOptions): + """ + Options for run-end encoding. + + Parameters + ---------- + run_end_type : DataType, default pyarrow.int32() + The data type of the run_ends array. + + Accepted values are pyarrow.{int16(), int32(), int64()}. + """ + + def __init__(self, run_end_type=lib.int32()): + self._set_options(run_end_type) + + +cdef class _TakeOptions(FunctionOptions): + def _set_options(self, boundscheck): + self.wrapped.reset(new CTakeOptions(boundscheck)) + + +class TakeOptions(_TakeOptions): + """ + Options for the `take` and `array_take` functions. + + Parameters + ---------- + boundscheck : boolean, default True + Whether to check indices are within bounds. If False and an + index is out of bounds, behavior is undefined (the process + may crash). + """ + + def __init__(self, *, boundscheck=True): + self._set_options(boundscheck) + + +cdef class _MakeStructOptions(FunctionOptions): + def _set_options(self, field_names, field_nullability, field_metadata): + cdef: + vector[c_string] c_field_names + vector[shared_ptr[const CKeyValueMetadata]] c_field_metadata + for name in field_names: + c_field_names.push_back(tobytes(name)) + for metadata in field_metadata: + c_field_metadata.push_back(pyarrow_unwrap_metadata(metadata)) + self.wrapped.reset( + new CMakeStructOptions(c_field_names, field_nullability, + c_field_metadata) + ) + + +class MakeStructOptions(_MakeStructOptions): + """ + Options for the `make_struct` function. 
+
+    Parameters
+    ----------
+    field_names : sequence of str
+        Names of the struct fields to create.
+    field_nullability : sequence of bool, optional
+        Nullability information for each struct field.
+        If omitted, all fields are nullable.
+    field_metadata : sequence of KeyValueMetadata, optional
+        Metadata for each struct field.
+    """
+
+    def __init__(self, field_names=(), *, field_nullability=None,
+                 field_metadata=None):
+        if field_nullability is None:
+            field_nullability = [True] * len(field_names)
+        if field_metadata is None:
+            field_metadata = [None] * len(field_names)
+        self._set_options(field_names, field_nullability, field_metadata)
+
+
+cdef CFieldRef _ensure_field_ref(value) except *:
+    cdef:
+        CFieldRef field_ref
+        const CFieldRef* field_ref_ptr
+
+    if isinstance(value, (list, tuple)):
+        value = Expression._nested_field(tuple(value))
+
+    if isinstance(value, Expression):
+        field_ref_ptr = (<Expression> value).unwrap().field_ref()
+        if field_ref_ptr is NULL:
+            raise ValueError("Unable to get FieldRef from Expression")
+        field_ref = deref(field_ref_ptr)
+    elif isinstance(value, (bytes, str)):
+        if value.startswith(b'.' if isinstance(value, bytes) else '.'):
+            field_ref = GetResultValue(
+                CFieldRef.FromDotPath(tobytes(value)))
+        else:
+            field_ref = CFieldRef(tobytes(value))
+    elif isinstance(value, int):
+        field_ref = CFieldRef(<int> value)
+    else:
+        raise TypeError("Expected a field reference as a str or int, list of "
+                        f"str or int, or Expression. Got {type(value)} instead.")
+    return field_ref
+
+
+cdef class _StructFieldOptions(FunctionOptions):
+    def _set_options(self, indices):
+
+        if isinstance(indices, (list, tuple)) and not len(indices):
+            # Allow empty indices; effectively return same array
+            self.wrapped.reset(
+                new CStructFieldOptions(indices))
+            return
+
+        cdef CFieldRef field_ref = _ensure_field_ref(indices)
+        self.wrapped.reset(new CStructFieldOptions(field_ref))
+
+
+class StructFieldOptions(_StructFieldOptions):
+    """
+    Options for the `struct_field` function.
+
+    Parameters
+    ----------
+    indices : List[str], List[bytes], List[int], Expression, bytes, str, or int
+        List of indices for chained field lookup, for example `[4, 1]`
+        will look up the second nested field in the fifth outer field.
+    """
+
+    def __init__(self, indices):
+        self._set_options(indices)
+
+
+cdef class _ScalarAggregateOptions(FunctionOptions):
+    def _set_options(self, skip_nulls, min_count):
+        self.wrapped.reset(new CScalarAggregateOptions(skip_nulls, min_count))
+
+
+class ScalarAggregateOptions(_ScalarAggregateOptions):
+    __doc__ = f"""
+    Options for scalar aggregations.
+
+    Parameters
+    ----------
+    {_skip_nulls_doc()}
+    {_min_count_doc(default=1)}
+    """
+
+    def __init__(self, *, skip_nulls=True, min_count=1):
+        self._set_options(skip_nulls, min_count)
+
+
+cdef class _CountOptions(FunctionOptions):
+    _mode_map = {
+        "only_valid": CCountMode_ONLY_VALID,
+        "only_null": CCountMode_ONLY_NULL,
+        "all": CCountMode_ALL,
+    }
+
+    def _set_options(self, mode):
+        try:
+            self.wrapped.reset(new CCountOptions(self._mode_map[mode]))
+        except KeyError:
+            _raise_invalid_function_option(mode, "count mode")
+
+
+class CountOptions(_CountOptions):
+    """
+    Options for the `count` function.
+
+    Parameters
+    ----------
+    mode : str, default "only_valid"
+        Which values to count in the input.
+        Accepted values are "only_valid", "only_null", "all".
+ """ + + def __init__(self, mode="only_valid"): + self._set_options(mode) + + +cdef class _IndexOptions(FunctionOptions): + def _set_options(self, scalar): + self.wrapped.reset(new CIndexOptions(pyarrow_unwrap_scalar(scalar))) + + +class IndexOptions(_IndexOptions): + """ + Options for the `index` function. + + Parameters + ---------- + value : Scalar + The value to search for. + """ + + def __init__(self, value): + self._set_options(value) + + +cdef class _MapLookupOptions(FunctionOptions): + _occurrence_map = { + "all": CMapLookupOccurrence_ALL, + "first": CMapLookupOccurrence_FIRST, + "last": CMapLookupOccurrence_LAST, + } + + def _set_options(self, query_key, occurrence): + try: + self.wrapped.reset( + new CMapLookupOptions( + pyarrow_unwrap_scalar(query_key), + self._occurrence_map[occurrence] + ) + ) + except KeyError: + _raise_invalid_function_option(occurrence, + "Should either be first, last, or all") + + +class MapLookupOptions(_MapLookupOptions): + """ + Options for the `map_lookup` function. + + Parameters + ---------- + query_key : Scalar or Object can be converted to Scalar + The key to search for. + occurrence : str + The occurrence(s) to return from the Map + Accepted values are "first", "last", or "all". + """ + + def __init__(self, query_key, occurrence): + if not isinstance(query_key, lib.Scalar): + query_key = lib.scalar(query_key) + + self._set_options(query_key, occurrence) + + +cdef class _ModeOptions(FunctionOptions): + def _set_options(self, n, skip_nulls, min_count): + self.wrapped.reset(new CModeOptions(n, skip_nulls, min_count)) + + +class ModeOptions(_ModeOptions): + __doc__ = f""" + Options for the `mode` function. + + Parameters + ---------- + n : int, default 1 + Number of distinct most-common values to return. + {_skip_nulls_doc()} + {_min_count_doc(default=0)} + """ + + def __init__(self, n=1, *, skip_nulls=True, min_count=0): + self._set_options(n, skip_nulls, min_count) + + +cdef class _SetLookupOptions(FunctionOptions): + def _set_options(self, value_set, c_bool skip_nulls): + cdef unique_ptr[CDatum] valset + if isinstance(value_set, Array): + valset.reset(new CDatum(( value_set).sp_array)) + elif isinstance(value_set, ChunkedArray): + valset.reset( + new CDatum(( value_set).sp_chunked_array) + ) + elif isinstance(value_set, Scalar): + valset.reset(new CDatum(( value_set).unwrap())) + else: + _raise_invalid_function_option(value_set, "value set", + exception_class=TypeError) + + self.wrapped.reset(new CSetLookupOptions(deref(valset), skip_nulls)) + + +class SetLookupOptions(_SetLookupOptions): + """ + Options for the `is_in` and `index_in` functions. + + Parameters + ---------- + value_set : Array + Set of values to look for in the input. + skip_nulls : bool, default False + If False, nulls in the input are matched in the value_set just + like regular values. + If True, nulls in the input always fail matching. + """ + + def __init__(self, value_set, *, skip_nulls=False): + self._set_options(value_set, skip_nulls) + + +cdef class _StrptimeOptions(FunctionOptions): + _unit_map = { + "s": TimeUnit_SECOND, + "ms": TimeUnit_MILLI, + "us": TimeUnit_MICRO, + "ns": TimeUnit_NANO, + } + + def _set_options(self, format, unit, error_is_null): + try: + self.wrapped.reset( + new CStrptimeOptions(tobytes(format), self._unit_map[unit], + error_is_null) + ) + except KeyError: + _raise_invalid_function_option(unit, "time unit") + + +class StrptimeOptions(_StrptimeOptions): + """ + Options for the `strptime` function. 
+ + Parameters + ---------- + format : str + Pattern for parsing input strings as timestamps, such as "%Y/%m/%d". + Note that the semantics of the format follow the C/C++ strptime, not the Python one. + There are differences in behavior, for example how the "%y" placeholder + handles years with less than four digits. + unit : str + Timestamp unit of the output. + Accepted values are "s", "ms", "us", "ns". + error_is_null : boolean, default False + Return null on parsing errors if true or raise if false. + """ + + def __init__(self, format, unit, error_is_null=False): + self._set_options(format, unit, error_is_null) + + +cdef class _StrftimeOptions(FunctionOptions): + def _set_options(self, format, locale): + self.wrapped.reset( + new CStrftimeOptions(tobytes(format), tobytes(locale)) + ) + + +class StrftimeOptions(_StrftimeOptions): + """ + Options for the `strftime` function. + + Parameters + ---------- + format : str, default "%Y-%m-%dT%H:%M:%S" + Pattern for formatting input values. + locale : str, default "C" + Locale to use for locale-specific format specifiers. + """ + + def __init__(self, format="%Y-%m-%dT%H:%M:%S", locale="C"): + self._set_options(format, locale) + + +cdef class _DayOfWeekOptions(FunctionOptions): + def _set_options(self, count_from_zero, week_start): + self.wrapped.reset( + new CDayOfWeekOptions(count_from_zero, week_start) + ) + + +class DayOfWeekOptions(_DayOfWeekOptions): + """ + Options for the `day_of_week` function. + + Parameters + ---------- + count_from_zero : bool, default True + If True, number days from 0, otherwise from 1. + week_start : int, default 1 + Which day does the week start with (Monday=1, Sunday=7). + How this value is numbered is unaffected by `count_from_zero`. + """ + + def __init__(self, *, count_from_zero=True, week_start=1): + self._set_options(count_from_zero, week_start) + + +cdef class _WeekOptions(FunctionOptions): + def _set_options(self, week_starts_monday, count_from_zero, + first_week_is_fully_in_year): + self.wrapped.reset( + new CWeekOptions(week_starts_monday, count_from_zero, + first_week_is_fully_in_year) + ) + + +class WeekOptions(_WeekOptions): + """ + Options for the `week` function. + + Parameters + ---------- + week_starts_monday : bool, default True + If True, weeks start on Monday; if False, on Sunday. + count_from_zero : bool, default False + If True, dates at the start of a year that fall into the last week + of the previous year emit 0. + If False, they emit 52 or 53 (the week number of the last week + of the previous year). + first_week_is_fully_in_year : bool, default False + If True, week number 0 is fully in January. + If False, a week that begins on December 29, 30 or 31 is considered + to be week number 0 of the following year. 
+ """ + + def __init__(self, *, week_starts_monday=True, count_from_zero=False, + first_week_is_fully_in_year=False): + self._set_options(week_starts_monday, + count_from_zero, first_week_is_fully_in_year) + + +cdef class _AssumeTimezoneOptions(FunctionOptions): + _ambiguous_map = { + "raise": CAssumeTimezoneAmbiguous_AMBIGUOUS_RAISE, + "earliest": CAssumeTimezoneAmbiguous_AMBIGUOUS_EARLIEST, + "latest": CAssumeTimezoneAmbiguous_AMBIGUOUS_LATEST, + } + _nonexistent_map = { + "raise": CAssumeTimezoneNonexistent_NONEXISTENT_RAISE, + "earliest": CAssumeTimezoneNonexistent_NONEXISTENT_EARLIEST, + "latest": CAssumeTimezoneNonexistent_NONEXISTENT_LATEST, + } + + def _set_options(self, timezone, ambiguous, nonexistent): + if ambiguous not in self._ambiguous_map: + _raise_invalid_function_option(ambiguous, + "'ambiguous' timestamp handling") + if nonexistent not in self._nonexistent_map: + _raise_invalid_function_option(nonexistent, + "'nonexistent' timestamp handling") + self.wrapped.reset( + new CAssumeTimezoneOptions(tobytes(timezone), + self._ambiguous_map[ambiguous], + self._nonexistent_map[nonexistent]) + ) + + +class AssumeTimezoneOptions(_AssumeTimezoneOptions): + """ + Options for the `assume_timezone` function. + + Parameters + ---------- + timezone : str + Timezone to assume for the input. + ambiguous : str, default "raise" + How to handle timestamps that are ambiguous in the assumed timezone. + Accepted values are "raise", "earliest", "latest". + nonexistent : str, default "raise" + How to handle timestamps that don't exist in the assumed timezone. + Accepted values are "raise", "earliest", "latest". + """ + + def __init__(self, timezone, *, ambiguous="raise", nonexistent="raise"): + self._set_options(timezone, ambiguous, nonexistent) + + +cdef class _NullOptions(FunctionOptions): + def _set_options(self, nan_is_null): + self.wrapped.reset(new CNullOptions(nan_is_null)) + + +class NullOptions(_NullOptions): + """ + Options for the `is_null` function. + + Parameters + ---------- + nan_is_null : bool, default False + Whether floating-point NaN values are considered null. + """ + + def __init__(self, *, nan_is_null=False): + self._set_options(nan_is_null) + + +cdef class _VarianceOptions(FunctionOptions): + def _set_options(self, ddof, skip_nulls, min_count): + self.wrapped.reset(new CVarianceOptions(ddof, skip_nulls, min_count)) + + +class VarianceOptions(_VarianceOptions): + __doc__ = f""" + Options for the `variance` and `stddev` functions. + + Parameters + ---------- + ddof : int, default 0 + Number of degrees of freedom. + {_skip_nulls_doc()} + {_min_count_doc(default=0)} + """ + + def __init__(self, *, ddof=0, skip_nulls=True, min_count=0): + self._set_options(ddof, skip_nulls, min_count) + + +cdef class _SplitOptions(FunctionOptions): + def _set_options(self, max_splits, reverse): + self.wrapped.reset(new CSplitOptions(max_splits, reverse)) + + +class SplitOptions(_SplitOptions): + """ + Options for splitting on whitespace. + + Parameters + ---------- + max_splits : int or None, default None + Maximum number of splits for each input value (unlimited if None). + reverse : bool, default False + Whether to start splitting from the end of each input value. + This only has an effect if `max_splits` is not None. 
+ """ + + def __init__(self, *, max_splits=None, reverse=False): + if max_splits is None: + max_splits = -1 + self._set_options(max_splits, reverse) + + +cdef class _SplitPatternOptions(FunctionOptions): + def _set_options(self, pattern, max_splits, reverse): + self.wrapped.reset( + new CSplitPatternOptions(tobytes(pattern), max_splits, reverse) + ) + + +class SplitPatternOptions(_SplitPatternOptions): + """ + Options for splitting on a string pattern. + + Parameters + ---------- + pattern : str + String pattern to split on. + max_splits : int or None, default None + Maximum number of splits for each input value (unlimited if None). + reverse : bool, default False + Whether to start splitting from the end of each input value. + This only has an effect if `max_splits` is not None. + """ + + def __init__(self, pattern, *, max_splits=None, reverse=False): + if max_splits is None: + max_splits = -1 + self._set_options(pattern, max_splits, reverse) + + +cdef CSortOrder unwrap_sort_order(order) except *: + if order == "ascending": + return CSortOrder_Ascending + elif order == "descending": + return CSortOrder_Descending + _raise_invalid_function_option(order, "sort order") + + +cdef CNullPlacement unwrap_null_placement(null_placement) except *: + if null_placement == "at_start": + return CNullPlacement_AtStart + elif null_placement == "at_end": + return CNullPlacement_AtEnd + _raise_invalid_function_option(null_placement, "null placement") + + +cdef class _PartitionNthOptions(FunctionOptions): + def _set_options(self, pivot, null_placement): + self.wrapped.reset(new CPartitionNthOptions( + pivot, unwrap_null_placement(null_placement))) + + +class PartitionNthOptions(_PartitionNthOptions): + """ + Options for the `partition_nth_indices` function. + + Parameters + ---------- + pivot : int + Index into the equivalent sorted array of the pivot element. + null_placement : str, default "at_end" + Where nulls in the input should be partitioned. + Accepted values are "at_start", "at_end". + """ + + def __init__(self, pivot, *, null_placement="at_end"): + self._set_options(pivot, null_placement) + + +cdef class _CumulativeOptions(FunctionOptions): + def _set_options(self, start, skip_nulls): + if start is None: + self.wrapped.reset(new CCumulativeOptions(skip_nulls)) + elif isinstance(start, Scalar): + self.wrapped.reset(new CCumulativeOptions( + pyarrow_unwrap_scalar(start), skip_nulls)) + else: + try: + start = lib.scalar(start) + self.wrapped.reset(new CCumulativeOptions( + pyarrow_unwrap_scalar(start), skip_nulls)) + except Exception: + _raise_invalid_function_option( + start, "`start` type for CumulativeOptions", TypeError) + + +class CumulativeOptions(_CumulativeOptions): + """ + Options for `cumulative_*` functions. + + - cumulative_sum + - cumulative_sum_checked + - cumulative_prod + - cumulative_prod_checked + - cumulative_max + - cumulative_min + + Parameters + ---------- + start : Scalar, default None + Starting value for the cumulative operation. If none is given, + a default value depending on the operation and input type is used. + skip_nulls : bool, default False + When false, the first encountered null is propagated. + """ + + def __init__(self, start=None, *, skip_nulls=False): + self._set_options(start, skip_nulls) + + +class CumulativeSumOptions(_CumulativeOptions): + """ + Options for `cumulative_sum` function. 
+ + Parameters + ---------- + start : Scalar, default None + Starting value for sum computation + skip_nulls : bool, default False + When false, the first encountered null is propagated. + """ + + def __init__(self, start=None, *, skip_nulls=False): + warnings.warn( + _DEPR_MSG.format("CumulativeSumOptions", "14.0", "CumulativeOptions"), + FutureWarning, + stacklevel=2 + ) + self._set_options(start, skip_nulls) + + +cdef class _PairwiseOptions(FunctionOptions): + def _set_options(self, period): + self.wrapped.reset(new CPairwiseOptions(period)) + + +class PairwiseOptions(_PairwiseOptions): + """ + Options for `pairwise` functions. + + Parameters + ---------- + period : int, default 1 + Period for applying the period function. + """ + + def __init__(self, period=1): + self._set_options(period) + + +cdef class _ListFlattenOptions(FunctionOptions): + def _set_options(self, recursive): + self.wrapped.reset(new CListFlattenOptions(recursive)) + + +class ListFlattenOptions(_ListFlattenOptions): + """ + Options for `list_flatten` function + + Parameters + ---------- + recursive : bool, default False + When True, the list array is flattened recursively until an array + of non-list values is formed. + """ + + def __init__(self, recursive=False): + self._set_options(recursive) + + +cdef class _ArraySortOptions(FunctionOptions): + def _set_options(self, order, null_placement): + self.wrapped.reset(new CArraySortOptions( + unwrap_sort_order(order), unwrap_null_placement(null_placement))) + + +class ArraySortOptions(_ArraySortOptions): + """ + Options for the `array_sort_indices` function. + + Parameters + ---------- + order : str, default "ascending" + Which order to sort values in. + Accepted values are "ascending", "descending". + null_placement : str, default "at_end" + Where nulls in the input should be sorted. + Accepted values are "at_start", "at_end". + """ + + def __init__(self, order="ascending", *, null_placement="at_end"): + self._set_options(order, null_placement) + + +cdef class _SortOptions(FunctionOptions): + def _set_options(self, sort_keys, null_placement): + cdef vector[CSortKey] c_sort_keys + for name, order in sort_keys: + c_sort_keys.push_back( + CSortKey(_ensure_field_ref(name), unwrap_sort_order(order)) + ) + self.wrapped.reset(new CSortOptions( + c_sort_keys, unwrap_null_placement(null_placement))) + + +class SortOptions(_SortOptions): + """ + Options for the `sort_indices` function. + + Parameters + ---------- + sort_keys : sequence of (name, order) tuples + Names of field/column keys to sort the input on, + along with the order each field/column is sorted in. + Accepted values for `order` are "ascending", "descending". + The field name can be a string column name or expression. + null_placement : str, default "at_end" + Where nulls in input should be sorted, only applying to + columns/fields mentioned in `sort_keys`. + Accepted values are "at_start", "at_end". + """ + + def __init__(self, sort_keys=(), *, null_placement="at_end"): + self._set_options(sort_keys, null_placement) + + +cdef class _SelectKOptions(FunctionOptions): + def _set_options(self, k, sort_keys): + cdef vector[CSortKey] c_sort_keys + for name, order in sort_keys: + c_sort_keys.push_back( + CSortKey(_ensure_field_ref(name), unwrap_sort_order(order)) + ) + self.wrapped.reset(new CSelectKOptions(k, c_sort_keys)) + + +class SelectKOptions(_SelectKOptions): + """ + Options for top/bottom k-selection. + + Parameters + ---------- + k : int + Number of leading values to select in sorted order + (i.e. 
the largest values if sort order is "descending", + the smallest otherwise). + sort_keys : sequence of (name, order) tuples + Names of field/column keys to sort the input on, + along with the order each field/column is sorted in. + Accepted values for `order` are "ascending", "descending". + The field name can be a string column name or expression. + """ + + def __init__(self, k, sort_keys): + self._set_options(k, sort_keys) + + +cdef class _QuantileOptions(FunctionOptions): + _interp_map = { + "linear": CQuantileInterp_LINEAR, + "lower": CQuantileInterp_LOWER, + "higher": CQuantileInterp_HIGHER, + "nearest": CQuantileInterp_NEAREST, + "midpoint": CQuantileInterp_MIDPOINT, + } + + def _set_options(self, quantiles, interp, skip_nulls, min_count): + try: + self.wrapped.reset( + new CQuantileOptions(quantiles, self._interp_map[interp], + skip_nulls, min_count) + ) + except KeyError: + _raise_invalid_function_option(interp, "quantile interpolation") + + +class QuantileOptions(_QuantileOptions): + __doc__ = f""" + Options for the `quantile` function. + + Parameters + ---------- + q : double or sequence of double, default 0.5 + Probability levels of the quantiles to compute. All values must be in + [0, 1]. + interpolation : str, default "linear" + How to break ties between competing data points for a given quantile. + Accepted values are: + + - "linear": compute an interpolation + - "lower": always use the smallest of the two data points + - "higher": always use the largest of the two data points + - "nearest": select the data point that is closest to the quantile + - "midpoint": compute the (unweighted) mean of the two data points + {_skip_nulls_doc()} + {_min_count_doc(default=0)} + """ + + def __init__(self, q=0.5, *, interpolation="linear", skip_nulls=True, + min_count=0): + if not isinstance(q, (list, tuple, np.ndarray)): + q = [q] + self._set_options(q, interpolation, skip_nulls, min_count) + + +cdef class _TDigestOptions(FunctionOptions): + def _set_options(self, quantiles, delta, buffer_size, skip_nulls, + min_count): + self.wrapped.reset( + new CTDigestOptions(quantiles, delta, buffer_size, skip_nulls, + min_count) + ) + + +class TDigestOptions(_TDigestOptions): + __doc__ = f""" + Options for the `tdigest` function. + + Parameters + ---------- + q : double or sequence of double, default 0.5 + Probability levels of the quantiles to approximate. All values must be + in [0, 1]. + delta : int, default 100 + Compression parameter for the T-digest algorithm. + buffer_size : int, default 500 + Buffer size for the T-digest algorithm. + {_skip_nulls_doc()} + {_min_count_doc(default=0)} + """ + + def __init__(self, q=0.5, *, delta=100, buffer_size=500, skip_nulls=True, + min_count=0): + if not isinstance(q, (list, tuple, np.ndarray)): + q = [q] + self._set_options(q, delta, buffer_size, skip_nulls, min_count) + + +cdef class _Utf8NormalizeOptions(FunctionOptions): + _form_map = { + "NFC": CUtf8NormalizeForm_NFC, + "NFKC": CUtf8NormalizeForm_NFKC, + "NFD": CUtf8NormalizeForm_NFD, + "NFKD": CUtf8NormalizeForm_NFKD, + } + + def _set_options(self, form): + try: + self.wrapped.reset( + new CUtf8NormalizeOptions(self._form_map[form]) + ) + except KeyError: + _raise_invalid_function_option(form, + "Unicode normalization form") + + +class Utf8NormalizeOptions(_Utf8NormalizeOptions): + """ + Options for the `utf8_normalize` function. + + Parameters + ---------- + form : str + Unicode normalization form. + Accepted values are "NFC", "NFKC", "NFD", NFKD". 
+ """ + + def __init__(self, form): + self._set_options(form) + + +cdef class _RandomOptions(FunctionOptions): + def _set_options(self, initializer): + if initializer == 'system': + self.wrapped.reset(new CRandomOptions( + CRandomOptions.FromSystemRandom())) + return + + if not isinstance(initializer, int): + try: + initializer = hash(initializer) + except TypeError: + raise TypeError( + f"initializer should be 'system', an integer, " + f"or a hashable object; got {initializer!r}") + + if initializer < 0: + initializer += 2**64 + self.wrapped.reset(new CRandomOptions( + CRandomOptions.FromSeed(initializer))) + + +class RandomOptions(_RandomOptions): + """ + Options for random generation. + + Parameters + ---------- + initializer : int or str + How to initialize the underlying random generator. + If an integer is given, it is used as a seed. + If "system" is given, the random generator is initialized with + a system-specific source of (hopefully true) randomness. + Other values are invalid. + """ + + def __init__(self, *, initializer='system'): + self._set_options(initializer) + + +cdef class _RankOptions(FunctionOptions): + + _tiebreaker_map = { + "min": CRankOptionsTiebreaker_Min, + "max": CRankOptionsTiebreaker_Max, + "first": CRankOptionsTiebreaker_First, + "dense": CRankOptionsTiebreaker_Dense, + } + + def _set_options(self, sort_keys, null_placement, tiebreaker): + cdef vector[CSortKey] c_sort_keys + if isinstance(sort_keys, str): + c_sort_keys.push_back( + CSortKey(_ensure_field_ref(""), unwrap_sort_order(sort_keys)) + ) + else: + for name, order in sort_keys: + c_sort_keys.push_back( + CSortKey(_ensure_field_ref(name), unwrap_sort_order(order)) + ) + try: + self.wrapped.reset( + new CRankOptions(c_sort_keys, + unwrap_null_placement(null_placement), + self._tiebreaker_map[tiebreaker]) + ) + except KeyError: + _raise_invalid_function_option(tiebreaker, "tiebreaker") + + +class RankOptions(_RankOptions): + """ + Options for the `rank` function. + + Parameters + ---------- + sort_keys : sequence of (name, order) tuples or str, default "ascending" + Names of field/column keys to sort the input on, + along with the order each field/column is sorted in. + Accepted values for `order` are "ascending", "descending". + The field name can be a string column name or expression. + Alternatively, one can simply pass "ascending" or "descending" as a string + if the input is array-like. + null_placement : str, default "at_end" + Where nulls in input should be sorted. + Accepted values are "at_start", "at_end". + tiebreaker : str, default "first" + Configure how ties between equal values are handled. + Accepted values are: + + - "min": Ties get the smallest possible rank in sorted order. + - "max": Ties get the largest possible rank in sorted order. + - "first": Ranks are assigned in order of when ties appear in the + input. This ensures the ranks are a stable permutation + of the input. + - "dense": The ranks span a dense [1, M] interval where M is the + number of distinct values in the input. + """ + + def __init__(self, sort_keys="ascending", *, null_placement="at_end", tiebreaker="first"): + self._set_options(sort_keys, null_placement, tiebreaker) + + +cdef class Expression(_Weakrefable): + """ + A logical expression to be evaluated against some input. + + To create an expression: + + - Use the factory function ``pyarrow.compute.scalar()`` to create a + scalar (not necessary when combined, see example below). 
+    - Use the factory function ``pyarrow.compute.field()`` to reference
+      a field (column in table).
+    - Compare fields and scalars with ``<``, ``<=``, ``==``, ``>=``, ``>``.
+    - Combine expressions using python operators ``&`` (logical and),
+      ``|`` (logical or) and ``~`` (logical not).
+      Note: python keywords ``and``, ``or`` and ``not`` cannot be used
+      to combine expressions.
+    - Create expression predicates using Expression methods such as
+      ``pyarrow.compute.Expression.isin()``.
+
+    Examples
+    --------
+
+    >>> import pyarrow.compute as pc
+    >>> (pc.field("a") < pc.scalar(3)) | (pc.field("b") > 7)
+    <pyarrow.compute.Expression ((a < 3) or (b > 7))>
+    >>> pc.field('a') != 3
+    <pyarrow.compute.Expression (a != 3)>
+    >>> pc.field('a').isin([1, 2, 3])
+
+    """
+
+    def __init__(self):
+        msg = 'Expression is an abstract class thus cannot be initialized.'
+        raise TypeError(msg)
+
+    cdef void init(self, const CExpression& sp):
+        self.expr = sp
+
+    @staticmethod
+    cdef wrap(const CExpression& sp):
+        cdef Expression self = Expression.__new__(Expression)
+        self.init(sp)
+        return self
+
+    cdef inline CExpression unwrap(self):
+        return self.expr
+
+    def equals(self, Expression other):
+        """
+        Parameters
+        ----------
+        other : pyarrow.dataset.Expression
+
+        Returns
+        -------
+        bool
+        """
+        return self.expr.Equals(other.unwrap())
+
+    def __str__(self):
+        return frombytes(self.expr.ToString())
+
+    def __repr__(self):
+        return "<pyarrow.compute.{0} {1}>".format(
+            self.__class__.__name__, str(self)
+        )
+
+    @staticmethod
+    def from_substrait(object buffer not None):
+        """
+        Deserialize an expression from Substrait
+
+        The serialized message must be an ExtendedExpression message that has
+        only a single expression. The name of the expression and the schema
+        the expression was bound to will be ignored. Use
+        pyarrow.substrait.deserialize_expressions if this information is needed
+        or if the message might contain multiple expressions.
+
+        Parameters
+        ----------
+        buffer : bytes or Buffer
+            The Substrait message to deserialize
+
+        Returns
+        -------
+        Expression
+            The deserialized expression
+        """
+        expressions = _pas().deserialize_expressions(buffer).expressions
+        if len(expressions) == 0:
+            raise ValueError("Substrait message did not contain any expressions")
+        if len(expressions) > 1:
+            raise ValueError(
+                "Substrait message contained multiple expressions. Use pyarrow.substrait.deserialize_expressions instead")
+        return next(iter(expressions.values()))
+
+    def to_substrait(self, Schema schema not None, c_bool allow_arrow_extensions=False):
+        """
+        Serialize the expression using Substrait
+
+        The expression will be serialized as an ExtendedExpression message that has a
+        single expression named "expression"
+
+        Parameters
+        ----------
+        schema : Schema
+            The input schema the expression will be bound to
+        allow_arrow_extensions : bool, default False
+            If False then only functions that are part of the core Substrait function
+            definitions will be allowed. Set this to True to allow pyarrow-specific functions
+            but the result may not be accepted by other compute libraries.
+
+        Returns
+        -------
+        Buffer
+            A buffer containing the serialized Protobuf plan.
+ """ + return _pas().serialize_expressions([self], ["expression"], schema, allow_arrow_extensions=allow_arrow_extensions) + + @staticmethod + def _deserialize(Buffer buffer not None): + return Expression.wrap(GetResultValue(CDeserializeExpression( + pyarrow_unwrap_buffer(buffer)))) + + def __reduce__(self): + buffer = pyarrow_wrap_buffer(GetResultValue( + CSerializeExpression(self.expr))) + return Expression._deserialize, (buffer,) + + @staticmethod + cdef Expression _expr_or_scalar(object expr): + if isinstance(expr, Expression): + return ( expr) + return ( Expression._scalar(expr)) + + @staticmethod + def _call(str function_name, list arguments, FunctionOptions options=None): + cdef: + vector[CExpression] c_arguments + shared_ptr[CFunctionOptions] c_options + + for argument in arguments: + if not isinstance(argument, Expression): + # Attempt to help convert this to an expression + try: + argument = Expression._scalar(argument) + except ArrowInvalid: + raise TypeError( + "only other expressions allowed as arguments") + c_arguments.push_back(( argument).expr) + + if options is not None: + c_options = options.unwrap() + + return Expression.wrap(CMakeCallExpression( + tobytes(function_name), move(c_arguments), c_options)) + + def __richcmp__(self, other, int op): + other = Expression._expr_or_scalar(other) + return Expression._call({ + Py_EQ: "equal", + Py_NE: "not_equal", + Py_GT: "greater", + Py_GE: "greater_equal", + Py_LT: "less", + Py_LE: "less_equal", + }[op], [self, other]) + + def __bool__(self): + raise ValueError( + "An Expression cannot be evaluated to python True or False. " + "If you are using the 'and', 'or' or 'not' operators, use '&', " + "'|' or '~' instead." + ) + + def __invert__(self): + return Expression._call("invert", [self]) + + def __and__(Expression self, other): + other = Expression._expr_or_scalar(other) + return Expression._call("and_kleene", [self, other]) + + def __or__(Expression self, other): + other = Expression._expr_or_scalar(other) + return Expression._call("or_kleene", [self, other]) + + def __add__(Expression self, other): + other = Expression._expr_or_scalar(other) + return Expression._call("add_checked", [self, other]) + + def __mul__(Expression self, other): + other = Expression._expr_or_scalar(other) + return Expression._call("multiply_checked", [self, other]) + + def __sub__(Expression self, other): + other = Expression._expr_or_scalar(other) + return Expression._call("subtract_checked", [self, other]) + + def __truediv__(Expression self, other): + other = Expression._expr_or_scalar(other) + return Expression._call("divide_checked", [self, other]) + + def is_valid(self): + """ + Check whether the expression is not-null (valid). + + This creates a new expression equivalent to calling the + `is_valid` compute function on this expression. + + Returns + ------- + is_valid : Expression + """ + return Expression._call("is_valid", [self]) + + def is_null(self, bint nan_is_null=False): + """ + Check whether the expression is null. + + This creates a new expression equivalent to calling the + `is_null` compute function on this expression. + + Parameters + ---------- + nan_is_null : boolean, default False + Whether floating-point NaNs are considered null. + + Returns + ------- + is_null : Expression + """ + options = NullOptions(nan_is_null=nan_is_null) + return Expression._call("is_null", [self], options) + + def is_nan(self): + """ + Check whether the expression is NaN. 
+ + This creates a new expression equivalent to calling the + `is_nan` compute function on this expression. + + Returns + ------- + is_nan : Expression + """ + return Expression._call("is_nan", [self]) + + def cast(self, type=None, safe=None, options=None): + """ + Explicitly set or change the expression's data type. + + This creates a new expression equivalent to calling the + `cast` compute function on this expression. + + Parameters + ---------- + type : DataType, default None + Type to cast array to. + safe : boolean, default True + Whether to check for conversion errors such as overflow. + options : CastOptions, default None + Additional checks pass by CastOptions + + Returns + ------- + cast : Expression + """ + safe_vars_passed = (safe is not None) or (type is not None) + + if safe_vars_passed and (options is not None): + raise ValueError("Must either pass values for 'type' and 'safe' or pass a " + "value for 'options'") + + if options is None: + type = ensure_type(type, allow_none=False) + if safe is False: + options = CastOptions.unsafe(type) + else: + options = CastOptions.safe(type) + return Expression._call("cast", [self], options) + + def isin(self, values): + """ + Check whether the expression is contained in values. + + This creates a new expression equivalent to calling the + `is_in` compute function on this expression. + + Parameters + ---------- + values : Array or iterable + The values to check for. + + Returns + ------- + isin : Expression + A new expression that, when evaluated, checks whether + this expression's value is contained in `values`. + """ + if not isinstance(values, Array): + values = lib.array(values) + + options = SetLookupOptions(values) + return Expression._call("is_in", [self], options) + + @staticmethod + def _field(name_or_idx not None): + cdef: + CFieldRef c_field + + if isinstance(name_or_idx, int): + return Expression.wrap(CMakeFieldExpressionByIndex(name_or_idx)) + else: + c_field = CFieldRef( tobytes(name_or_idx)) + return Expression.wrap(CMakeFieldExpression(c_field)) + + @staticmethod + def _nested_field(tuple names not None): + cdef: + vector[CFieldRef] nested + + if len(names) == 0: + raise ValueError("nested field reference should be non-empty") + nested.reserve(len(names)) + for name in names: + if isinstance(name, int): + nested.push_back(CFieldRef(name)) + else: + nested.push_back(CFieldRef( tobytes(name))) + return Expression.wrap(CMakeFieldExpression(CFieldRef(move(nested)))) + + @staticmethod + def _scalar(value): + cdef: + Scalar scalar + + if isinstance(value, Scalar): + scalar = value + else: + scalar = lib.scalar(value) + + return Expression.wrap(CMakeScalarExpression(scalar.unwrap())) + + +_deserialize = Expression._deserialize +cdef CExpression _true = CMakeScalarExpression( + make_shared[CBooleanScalar](True) +) + + +cdef CExpression _bind(Expression filter, Schema schema) except *: + assert schema is not None + + if filter is None: + return _true + + return GetResultValue(filter.unwrap().Bind( + deref(pyarrow_unwrap_schema(schema).get()))) + + +cdef class UdfContext: + """ + Per-invocation function context/state. + + This object will always be the first argument to a user-defined + function. It should not be used outside of a call to the function. 
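The expression helpers shown earlier (field references, cast, isin, the overloaded operators) compose into larger predicates; a small illustrative sketch with made-up column names:

    import pyarrow as pa
    import pyarrow.compute as pc

    # Cast before comparing; arithmetic uses the *_checked kernels, so
    # overflow raises instead of silently wrapping.
    price_ok = pc.field("price").cast(pa.float64()) * pc.field("qty") < 1000.0
    known = pc.field("animals").isin(["Flamingo", "Horse"])
    predicate = price_ok & ~known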
+ """ + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly" + .format(self.__class__.__name__)) + + cdef void init(self, const CUdfContext &c_context): + self.c_context = c_context + + @property + def batch_length(self): + """ + The common length of all input arguments (int). + + In the case that all arguments are scalars, this value + is used to pass the "actual length" of the arguments, + e.g. because the scalar values are encoding a column + with a constant value. + """ + return self.c_context.batch_length + + @property + def memory_pool(self): + """ + A memory pool for allocations (:class:`MemoryPool`). + + This is the memory pool supplied by the user when they invoked + the function and it should be used in any calls to arrow that the + UDF makes if that call accepts a memory_pool. + """ + return box_memory_pool(self.c_context.pool) + + +cdef inline CFunctionDoc _make_function_doc(dict func_doc) except *: + """ + Helper function to generate the FunctionDoc + This function accepts a dictionary and expects the + summary(str), description(str) and arg_names(List[str]) keys. + """ + cdef: + CFunctionDoc f_doc + vector[c_string] c_arg_names + + f_doc.summary = tobytes(func_doc["summary"]) + f_doc.description = tobytes(func_doc["description"]) + for arg_name in func_doc["arg_names"]: + c_arg_names.push_back(tobytes(arg_name)) + f_doc.arg_names = c_arg_names + # UDFOptions integration: + # TODO: https://issues.apache.org/jira/browse/ARROW-16041 + f_doc.options_class = b"" + f_doc.options_required = False + return f_doc + + +cdef object box_udf_context(const CUdfContext& c_context): + cdef UdfContext context = UdfContext.__new__(UdfContext) + context.init(c_context) + return context + + +cdef _udf_callback(user_function, const CUdfContext& c_context, inputs): + """ + Helper callback function used to wrap the UdfContext from Python to C++ + execution. + """ + context = box_udf_context(c_context) + return user_function(context, *inputs) + + +def _get_udf_context(memory_pool, batch_length): + cdef CUdfContext c_context + c_context.pool = maybe_unbox_memory_pool(memory_pool) + c_context.batch_length = batch_length + context = box_udf_context(c_context) + return context + + +ctypedef CStatus (*CRegisterUdf)(PyObject* function, function[CallbackUdf] wrapper, + const CUdfOptions& options, CFunctionRegistry* registry) + +cdef class RegisterUdf(_Weakrefable): + cdef CRegisterUdf register_func + + cdef void init(self, const CRegisterUdf register_func): + self.register_func = register_func + + +cdef get_register_scalar_function(): + cdef RegisterUdf reg = RegisterUdf.__new__(RegisterUdf) + reg.register_func = RegisterScalarFunction + return reg + + +cdef get_register_tabular_function(): + cdef RegisterUdf reg = RegisterUdf.__new__(RegisterUdf) + reg.register_func = RegisterTabularFunction + return reg + + +cdef get_register_aggregate_function(): + cdef RegisterUdf reg = RegisterUdf.__new__(RegisterUdf) + reg.register_func = RegisterAggregateFunction + return reg + +cdef get_register_vector_function(): + cdef RegisterUdf reg = RegisterUdf.__new__(RegisterUdf) + reg.register_func = RegisterVectorFunction + return reg + + +def register_scalar_function(func, function_name, function_doc, in_types, out_type, + func_registry=None): + """ + Register a user-defined scalar function. + + This API is EXPERIMENTAL. + + A scalar function is a function that executes elementwise + operations on arrays or scalars, i.e. 
a scalar function must + be computed row-by-row with no state where each output row + is computed only from its corresponding input row. + In other words, all argument arrays have the same length, + and the output array is of the same length as the arguments. + Scalar functions are the only functions allowed in query engine + expressions. + + Parameters + ---------- + func : callable + A callable implementing the user-defined function. + The first argument is the context argument of type + UdfContext. + Then, it must take arguments equal to the number of + in_types defined. It must return an Array or Scalar + matching the out_type. It must return a Scalar if + all arguments are scalar, else it must return an Array. + + To define a varargs function, pass a callable that takes + *args. The last in_type will be the type of all varargs + arguments. + function_name : str + Name of the function. There should only be one function + registered with this name in the function registry. + function_doc : dict + A dictionary object with keys "summary" (str), + and "description" (str). + in_types : Dict[str, DataType] + A dictionary mapping function argument names to + their respective DataType. + The argument names will be used to generate + documentation for the function. The number of + arguments specified here determines the function + arity. + out_type : DataType + Output type of the function. + func_registry : FunctionRegistry + Optional function registry to use instead of the default global one. + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> + >>> func_doc = {} + >>> func_doc["summary"] = "simple udf" + >>> func_doc["description"] = "add a constant to a scalar" + >>> + >>> def add_constant(ctx, array): + ... return pc.add(array, 1, memory_pool=ctx.memory_pool) + >>> + >>> func_name = "py_add_func" + >>> in_types = {"array": pa.int64()} + >>> out_type = pa.int64() + >>> pc.register_scalar_function(add_constant, func_name, func_doc, + ... in_types, out_type) + >>> + >>> func = pc.get_function(func_name) + >>> func.name + 'py_add_func' + >>> answer = pc.call_function(func_name, [pa.array([20])]) + >>> answer + + [ + 21 + ] + """ + return _register_user_defined_function(get_register_scalar_function(), + func, function_name, function_doc, in_types, + out_type, func_registry) + + +def register_vector_function(func, function_name, function_doc, in_types, out_type, + func_registry=None): + """ + Register a user-defined vector function. + + This API is EXPERIMENTAL. + + A vector function is a function that executes vector + operations on arrays. Vector function is often used + when compute doesn't fit other more specific types of + functions (e.g., scalar and aggregate). + + Parameters + ---------- + func : callable + A callable implementing the user-defined function. + The first argument is the context argument of type + UdfContext. + Then, it must take arguments equal to the number of + in_types defined. It must return an Array or Scalar + matching the out_type. It must return a Scalar if + all arguments are scalar, else it must return an Array. + + To define a varargs function, pass a callable that takes + *args. The last in_type will be the type of all varargs + arguments. + function_name : str + Name of the function. There should only be one function + registered with this name in the function registry. + function_doc : dict + A dictionary object with keys "summary" (str), + and "description" (str). 
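Building on the scalar-function registration described above, a two-argument variant might look like the following sketch; the function and registered name are hypothetical:

    import pyarrow as pa
    import pyarrow.compute as pc

    def udf_ratio(ctx, num, denom):
        # Both inputs always have the same length; use the supplied memory pool.
        return pc.divide(num, denom, memory_pool=ctx.memory_pool)

    pc.register_scalar_function(
        udf_ratio, "py_ratio",
        {"summary": "ratio udf", "description": "elementwise num / denom"},
        {"num": pa.float64(), "denom": pa.float64()}, pa.float64())

    pc.call_function("py_ratio", [pa.array([1.0, 2.0]), pa.array([4.0, 8.0])])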
+ in_types : Dict[str, DataType] + A dictionary mapping function argument names to + their respective DataType. + The argument names will be used to generate + documentation for the function. The number of + arguments specified here determines the function + arity. + out_type : DataType + Output type of the function. + func_registry : FunctionRegistry + Optional function registry to use instead of the default global one. + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> + >>> func_doc = {} + >>> func_doc["summary"] = "percent rank" + >>> func_doc["description"] = "compute percent rank" + >>> + >>> def list_flatten_udf(ctx, x): + ... return pc.list_flatten(x) + >>> + >>> func_name = "list_flatten_udf" + >>> in_types = {"array": pa.list_(pa.int64())} + >>> out_type = pa.int64() + >>> pc.register_vector_function(list_flatten_udf, func_name, func_doc, + ... in_types, out_type) + >>> + >>> answer = pc.call_function(func_name, [pa.array([[1, 2], [3, 4]])]) + >>> answer + + [ + 1, + 2, + 3, + 4 + ] + """ + return _register_user_defined_function(get_register_vector_function(), + func, function_name, function_doc, in_types, + out_type, func_registry) + + +def register_aggregate_function(func, function_name, function_doc, in_types, out_type, + func_registry=None): + """ + Register a user-defined non-decomposable aggregate function. + + This API is EXPERIMENTAL. + + A non-decomposable aggregation function is a function that executes + aggregate operations on the whole data that it is aggregating. + In other words, non-decomposable aggregate function cannot be + split into consume/merge/finalize steps. + + This is often used with ordered or segmented aggregation where groups + can be emit before accumulating all of the input data. + + Note that currently the size of any input column cannot exceed 2 GB + for a single segment (all groups combined). + + Parameters + ---------- + func : callable + A callable implementing the user-defined function. + The first argument is the context argument of type + UdfContext. + Then, it must take arguments equal to the number of + in_types defined. It must return a Scalar matching the + out_type. + To define a varargs function, pass a callable that takes + *args. The in_type needs to match in type of inputs when + the function gets called. + function_name : str + Name of the function. This name must be unique, i.e., + there should only be one function registered with + this name in the function registry. + function_doc : dict + A dictionary object with keys "summary" (str), + and "description" (str). + in_types : Dict[str, DataType] + A dictionary mapping function argument names to + their respective DataType. + The argument names will be used to generate + documentation for the function. The number of + arguments specified here determines the function + arity. + out_type : DataType + Output type of the function. + func_registry : FunctionRegistry + Optional function registry to use instead of the default global one. + + Examples + -------- + >>> import numpy as np + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> + >>> func_doc = {} + >>> func_doc["summary"] = "simple median udf" + >>> func_doc["description"] = "compute median" + >>> + >>> def compute_median(ctx, array): + ... return pa.scalar(np.median(array)) + >>> + >>> func_name = "py_compute_median" + >>> in_types = {"array": pa.int64()} + >>> out_type = pa.float64() + >>> pc.register_aggregate_function(compute_median, func_name, func_doc, + ... 
in_types, out_type) + >>> + >>> func = pc.get_function(func_name) + >>> func.name + 'py_compute_median' + >>> answer = pc.call_function(func_name, [pa.array([20, 40])]) + >>> answer + + >>> table = pa.table([pa.array([1, 1, 2, 2]), pa.array([10, 20, 30, 40])], names=['k', 'v']) + >>> result = table.group_by('k').aggregate([('v', 'py_compute_median')]) + >>> result + pyarrow.Table + k: int64 + v_py_compute_median: double + ---- + k: [[1,2]] + v_py_compute_median: [[15,35]] + """ + return _register_user_defined_function(get_register_aggregate_function(), + func, function_name, function_doc, in_types, + out_type, func_registry) + + +def register_tabular_function(func, function_name, function_doc, in_types, out_type, + func_registry=None): + """ + Register a user-defined tabular function. + + This API is EXPERIMENTAL. + + A tabular function is one accepting a context argument of type + UdfContext and returning a generator of struct arrays. + The in_types argument must be empty and the out_type argument + specifies a schema. Each struct array must have field types + corresponding to the schema. + + Parameters + ---------- + func : callable + A callable implementing the user-defined function. + The only argument is the context argument of type + UdfContext. It must return a callable that + returns on each invocation a StructArray matching + the out_type, where an empty array indicates end. + function_name : str + Name of the function. There should only be one function + registered with this name in the function registry. + function_doc : dict + A dictionary object with keys "summary" (str), + and "description" (str). + in_types : Dict[str, DataType] + Must be an empty dictionary (reserved for future use). + out_type : Union[Schema, DataType] + Schema of the function's output, or a corresponding flat struct type. + func_registry : FunctionRegistry + Optional function registry to use instead of the default global one. + """ + cdef: + shared_ptr[CSchema] c_schema + shared_ptr[CDataType] c_type + + if isinstance(out_type, Schema): + c_schema = pyarrow_unwrap_schema(out_type) + with nogil: + c_type = make_shared[CStructType](deref(c_schema).fields()) + out_type = pyarrow_wrap_data_type(c_type) + return _register_user_defined_function(get_register_tabular_function(), + func, function_name, function_doc, in_types, + out_type, func_registry) + + +def _register_user_defined_function(register_func, func, function_name, function_doc, in_types, + out_type, func_registry=None): + """ + Register a user-defined function. + + This method itself doesn't care about the type of the UDF + (i.e., scalar vs tabular vs aggregate) + + Parameters + ---------- + register_func: object + An object holding a CRegisterUdf in a "register_func" attribute. + func : callable + A callable implementing the user-defined function. + function_name : str + Name of the function. There should only be one function + registered with this name in the function registry. + function_doc : dict + A dictionary object with keys "summary" (str), + and "description" (str). + in_types : Dict[str, DataType] + A dictionary mapping function argument names to + their respective DataType. + out_type : DataType + Output type of the function. + func_registry : FunctionRegistry + Optional function registry to use instead of the default global one. 
+ """ + cdef: + CRegisterUdf c_register_func + c_string c_func_name + CArity c_arity + CFunctionDoc c_func_doc + vector[shared_ptr[CDataType]] c_in_types + PyObject* c_function + shared_ptr[CDataType] c_out_type + CUdfOptions c_options + CFunctionRegistry* c_func_registry + + if callable(func): + c_function = func + else: + raise TypeError("func must be a callable") + + c_func_name = tobytes(function_name) + + func_spec = inspect.getfullargspec(func) + num_args = -1 + if isinstance(in_types, dict): + for in_type in in_types.values(): + c_in_types.push_back( + pyarrow_unwrap_data_type(ensure_type(in_type))) + function_doc["arg_names"] = in_types.keys() + num_args = len(in_types) + else: + raise TypeError( + "in_types must be a dictionary of DataType") + + c_arity = CArity( num_args, func_spec.varargs) + + if "summary" not in function_doc: + raise ValueError("Function doc must contain a summary") + + if "description" not in function_doc: + raise ValueError("Function doc must contain a description") + + if "arg_names" not in function_doc: + raise ValueError("Function doc must contain arg_names") + + c_func_doc = _make_function_doc(function_doc) + + c_out_type = pyarrow_unwrap_data_type(ensure_type(out_type)) + + c_options.func_name = c_func_name + c_options.arity = c_arity + c_options.func_doc = c_func_doc + c_options.input_types = c_in_types + c_options.output_type = c_out_type + + if func_registry is None: + c_func_registry = NULL + else: + c_func_registry = (func_registry).registry + + c_register_func = (register_func).register_func + + check_status(c_register_func(c_function, + &_udf_callback, + c_options, c_func_registry)) + + +def call_tabular_function(function_name, args=None, func_registry=None): + """ + Get a record batch iterator from a tabular function. + + Parameters + ---------- + function_name : str + Name of the function. + args : iterable + The arguments to pass to the function. Accepted types depend + on the specific function. Currently, only an empty args is supported. + func_registry : FunctionRegistry + Optional function registry to use instead of the default global one. + """ + cdef: + c_string c_func_name + vector[CDatum] c_args + CFunctionRegistry* c_func_registry + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader reader + + c_func_name = tobytes(function_name) + if func_registry is None: + c_func_registry = NULL + else: + c_func_registry = (func_registry).registry + if args is None: + args = [] + _pack_compute_args(args, &c_args) + + with nogil: + c_reader = GetResultValue(CallTabularFunction( + c_func_name, c_args, c_func_registry)) + reader = RecordBatchReader.__new__(RecordBatchReader) + reader.reader = c_reader + return RecordBatchReader.from_batches(pyarrow_wrap_schema(deref(c_reader).schema()), reader) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_csv.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_csv.pyx new file mode 100644 index 0000000000000000000000000000000000000000..508488c0c3b3c3bcd2d2157f57f625b1e5b92c2e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_csv.pyx @@ -0,0 +1,1542 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from cython.operator cimport dereference as deref + +from collections import namedtuple +from collections.abc import Mapping + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport * +from pyarrow.lib cimport (check_status, Field, MemoryPool, Schema, + RecordBatchReader, ensure_type, + maybe_unbox_memory_pool, get_input_stream, + get_writer, native_transcoding_input_stream, + pyarrow_unwrap_batch, pyarrow_unwrap_schema, + pyarrow_unwrap_table, pyarrow_wrap_schema, + pyarrow_wrap_table, pyarrow_wrap_data_type, + pyarrow_unwrap_data_type, Table, RecordBatch, + StopToken, _CRecordBatchWriter) +from pyarrow.lib import frombytes, tobytes, SignalStopHandler + + +cdef unsigned char _single_char(s) except 0: + val = ord(s) + if val == 0 or val > 127: + raise ValueError("Expecting an ASCII character") + return val + + +_InvalidRow = namedtuple( + "_InvalidRow", ("expected_columns", "actual_columns", "number", "text"), + module=__name__) + + +class InvalidRow(_InvalidRow): + """ + Description of an invalid row in a CSV file. + + Parameters + ---------- + expected_columns : int + The expected number of columns in the row. + actual_columns : int + The actual number of columns in the row. + number : int or None + The physical row number if known, otherwise None. + text : str + The contents of the row. + """ + __slots__ = () + + +cdef CInvalidRowResult _handle_invalid_row( + handler, const CCSVInvalidRow& c_row) except CInvalidRowResult_Error: + # A negative row number means undetermined (because of parallel reading) + row_number = c_row.number if c_row.number >= 0 else None + row = InvalidRow(c_row.expected_columns, c_row.actual_columns, + row_number, frombytes( c_row.text)) + result = handler(row) + if result == 'error': + return CInvalidRowResult_Error + elif result == 'skip': + return CInvalidRowResult_Skip + else: + raise ValueError("Invalid return value for invalid row handler: " + f"expected 'error' or 'skip', got {result!r}") + + +cdef class ReadOptions(_Weakrefable): + """ + Options for reading CSV files. + + Parameters + ---------- + use_threads : bool, optional (default True) + Whether to use multiple threads to accelerate reading + block_size : int, optional + How much bytes to process at a time from the input stream. + This will determine multi-threading granularity as well as + the size of individual record batches or table chunks. + Minimum valid value for block size is 1 + skip_rows : int, optional (default 0) + The number of rows to skip before the column names (if any) + and the CSV data. + skip_rows_after_names : int, optional (default 0) + The number of rows to skip after the column names. + This number can be larger than the number of rows in one + block, and empty rows are counted. + The order of application is as follows: + - `skip_rows` is applied (if non-zero); + - column names are read (unless `column_names` is set); + - `skip_rows_after_names` is applied (if non-zero). 
+ column_names : list, optional + The column names of the target table. If empty, fall back on + `autogenerate_column_names`. + autogenerate_column_names : bool, optional (default False) + Whether to autogenerate column names if `column_names` is empty. + If true, column names will be of the form "f0", "f1"... + If false, column names will be read from the first CSV row + after `skip_rows`. + encoding : str, optional (default 'utf8') + The character encoding of the CSV data. Columns that cannot + decode using this encoding can still be read as Binary. + + Examples + -------- + + Defining an example data: + + >>> import io + >>> s = "1,2,3\\nFlamingo,2,2022-03-01\\nHorse,4,2022-03-02\\nBrittle stars,5,2022-03-03\\nCentipede,100,2022-03-04" + >>> print(s) + 1,2,3 + Flamingo,2,2022-03-01 + Horse,4,2022-03-02 + Brittle stars,5,2022-03-03 + Centipede,100,2022-03-04 + + Ignore the first numbered row and substitute it with defined + or autogenerated column names: + + >>> from pyarrow import csv + >>> read_options = csv.ReadOptions( + ... column_names=["animals", "n_legs", "entry"], + ... skip_rows=1) + >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options) + pyarrow.Table + animals: string + n_legs: int64 + entry: date32[day] + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + n_legs: [[2,4,5,100]] + entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + + >>> read_options = csv.ReadOptions(autogenerate_column_names=True, + ... skip_rows=1) + >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options) + pyarrow.Table + f0: string + f1: int64 + f2: date32[day] + ---- + f0: [["Flamingo","Horse","Brittle stars","Centipede"]] + f1: [[2,4,5,100]] + f2: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + + Remove the first 2 rows of the data: + + >>> read_options = csv.ReadOptions(skip_rows_after_names=2) + >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options) + pyarrow.Table + 1: string + 2: int64 + 3: date32[day] + ---- + 1: [["Brittle stars","Centipede"]] + 2: [[5,100]] + 3: [[2022-03-03,2022-03-04]] + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + # __init__() is not called when unpickling, initialize storage here + def __cinit__(self, *argw, **kwargs): + self.options.reset(new CCSVReadOptions(CCSVReadOptions.Defaults())) + + def __init__(self, *, use_threads=None, block_size=None, skip_rows=None, + skip_rows_after_names=None, column_names=None, + autogenerate_column_names=None, encoding='utf8'): + if use_threads is not None: + self.use_threads = use_threads + if block_size is not None: + self.block_size = block_size + if skip_rows is not None: + self.skip_rows = skip_rows + if skip_rows_after_names is not None: + self.skip_rows_after_names = skip_rows_after_names + if column_names is not None: + self.column_names = column_names + if autogenerate_column_names is not None: + self.autogenerate_column_names= autogenerate_column_names + # Python-specific option + self.encoding = encoding + + @property + def use_threads(self): + """ + Whether to use multiple threads to accelerate reading. + """ + return deref(self.options).use_threads + + @use_threads.setter + def use_threads(self, value): + deref(self.options).use_threads = value + + @property + def block_size(self): + """ + How much bytes to process at a time from the input stream. + This will determine multi-threading granularity as well as + the size of individual record batches or table chunks. 
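One option not exercised in the examples above is ``encoding``, which transcodes the input before parsing; a minimal sketch:

    import io
    from pyarrow import csv

    data = "café,1\nmaïs,2\n".encode("latin-1")
    opts = csv.ReadOptions(column_names=["word", "n"], encoding="latin-1")
    table = csv.read_csv(io.BytesIO(data), read_options=opts)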
+ """ + return deref(self.options).block_size + + @block_size.setter + def block_size(self, value): + deref(self.options).block_size = value + + @property + def skip_rows(self): + """ + The number of rows to skip before the column names (if any) + and the CSV data. + See `skip_rows_after_names` for interaction description + """ + return deref(self.options).skip_rows + + @skip_rows.setter + def skip_rows(self, value): + deref(self.options).skip_rows = value + + @property + def skip_rows_after_names(self): + """ + The number of rows to skip after the column names. + This number can be larger than the number of rows in one + block, and empty rows are counted. + The order of application is as follows: + - `skip_rows` is applied (if non-zero); + - column names are read (unless `column_names` is set); + - `skip_rows_after_names` is applied (if non-zero). + """ + return deref(self.options).skip_rows_after_names + + @skip_rows_after_names.setter + def skip_rows_after_names(self, value): + deref(self.options).skip_rows_after_names = value + + @property + def column_names(self): + """ + The column names of the target table. If empty, fall back on + `autogenerate_column_names`. + """ + return [frombytes(s) for s in deref(self.options).column_names] + + @column_names.setter + def column_names(self, value): + deref(self.options).column_names.clear() + for item in value: + deref(self.options).column_names.push_back(tobytes(item)) + + @property + def autogenerate_column_names(self): + """ + Whether to autogenerate column names if `column_names` is empty. + If true, column names will be of the form "f0", "f1"... + If false, column names will be read from the first CSV row + after `skip_rows`. + """ + return deref(self.options).autogenerate_column_names + + @autogenerate_column_names.setter + def autogenerate_column_names(self, value): + deref(self.options).autogenerate_column_names = value + + def validate(self): + check_status(deref(self.options).Validate()) + + def equals(self, ReadOptions other): + """ + Parameters + ---------- + other : pyarrow.csv.ReadOptions + + Returns + ------- + bool + """ + return ( + self.use_threads == other.use_threads and + self.block_size == other.block_size and + self.skip_rows == other.skip_rows and + self.skip_rows_after_names == other.skip_rows_after_names and + self.column_names == other.column_names and + self.autogenerate_column_names == + other.autogenerate_column_names and + self.encoding == other.encoding + ) + + @staticmethod + cdef ReadOptions wrap(CCSVReadOptions options): + out = ReadOptions() + out.options.reset(new CCSVReadOptions(move(options))) + out.encoding = 'utf8' # No way to know this + return out + + def __getstate__(self): + return (self.use_threads, self.block_size, self.skip_rows, + self.column_names, self.autogenerate_column_names, + self.encoding, self.skip_rows_after_names) + + def __setstate__(self, state): + (self.use_threads, self.block_size, self.skip_rows, + self.column_names, self.autogenerate_column_names, + self.encoding, self.skip_rows_after_names) = state + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class ParseOptions(_Weakrefable): + """ + Options for parsing CSV files. + + Parameters + ---------- + delimiter : 1-character string, optional (default ',') + The character delimiting individual cells in the CSV data. + quote_char : 1-character string or False, optional (default '"') + The character used optionally for quoting CSV values + (False if quoting is not allowed). 
+ double_quote : bool, optional (default True) + Whether two quotes in a quoted CSV value denote a single quote + in the data. + escape_char : 1-character string or False, optional (default False) + The character used optionally for escaping special characters + (False if escaping is not allowed). + newlines_in_values : bool, optional (default False) + Whether newline characters are allowed in CSV values. + Setting this to True reduces the performance of multi-threaded + CSV reading. + ignore_empty_lines : bool, optional (default True) + Whether empty lines are ignored in CSV input. + If False, an empty line is interpreted as containing a single empty + value (assuming a one-column CSV file). + invalid_row_handler : callable, optional (default None) + If not None, this object is called for each CSV row that fails + parsing (because of a mismatching number of columns). + It should accept a single InvalidRow argument and return either + "skip" or "error" depending on the desired outcome. + + Examples + -------- + + Defining an example file from bytes object: + + >>> import io + >>> s = ( + ... "animals;n_legs;entry\\n" + ... "Flamingo;2;2022-03-01\\n" + ... "# Comment here:\\n" + ... "Horse;4;2022-03-02\\n" + ... "Brittle stars;5;2022-03-03\\n" + ... "Centipede;100;2022-03-04" + ... ) + >>> print(s) + animals;n_legs;entry + Flamingo;2;2022-03-01 + # Comment here: + Horse;4;2022-03-02 + Brittle stars;5;2022-03-03 + Centipede;100;2022-03-04 + >>> source = io.BytesIO(s.encode()) + + Read the data from a file skipping rows with comments + and defining the delimiter: + + >>> from pyarrow import csv + >>> def skip_comment(row): + ... if row.text.startswith("# "): + ... return 'skip' + ... else: + ... return 'error' + ... + >>> parse_options = csv.ParseOptions(delimiter=";", invalid_row_handler=skip_comment) + >>> csv.read_csv(source, parse_options=parse_options) + pyarrow.Table + animals: string + n_legs: int64 + entry: date32[day] + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + n_legs: [[2,4,5,100]] + entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + """ + __slots__ = () + + def __cinit__(self, *argw, **kwargs): + self._invalid_row_handler = None + self.options.reset(new CCSVParseOptions(CCSVParseOptions.Defaults())) + + def __init__(self, *, delimiter=None, quote_char=None, double_quote=None, + escape_char=None, newlines_in_values=None, + ignore_empty_lines=None, invalid_row_handler=None): + if delimiter is not None: + self.delimiter = delimiter + if quote_char is not None: + self.quote_char = quote_char + if double_quote is not None: + self.double_quote = double_quote + if escape_char is not None: + self.escape_char = escape_char + if newlines_in_values is not None: + self.newlines_in_values = newlines_in_values + if ignore_empty_lines is not None: + self.ignore_empty_lines = ignore_empty_lines + if invalid_row_handler is not None: + self.invalid_row_handler = invalid_row_handler + + @property + def delimiter(self): + """ + The character delimiting individual cells in the CSV data. + """ + return chr(deref(self.options).delimiter) + + @delimiter.setter + def delimiter(self, value): + deref(self.options).delimiter = _single_char(value) + + @property + def quote_char(self): + """ + The character used optionally for quoting CSV values + (False if quoting is not allowed). 
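For data that escapes the delimiter instead of quoting it, ``escape_char`` can be combined with a custom delimiter; a minimal sketch:

    import io
    from pyarrow import csv

    data = b"a|b\n1|hello\\|world\n"
    opts = csv.ParseOptions(delimiter="|", escape_char="\\")
    csv.read_csv(io.BytesIO(data), parse_options=opts)
    # the escaped pipe is kept as part of the value in column "b"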
+ """ + if deref(self.options).quoting: + return chr(deref(self.options).quote_char) + else: + return False + + @quote_char.setter + def quote_char(self, value): + if value is False: + deref(self.options).quoting = False + else: + deref(self.options).quote_char = _single_char(value) + deref(self.options).quoting = True + + @property + def double_quote(self): + """ + Whether two quotes in a quoted CSV value denote a single quote + in the data. + """ + return deref(self.options).double_quote + + @double_quote.setter + def double_quote(self, value): + deref(self.options).double_quote = value + + @property + def escape_char(self): + """ + The character used optionally for escaping special characters + (False if escaping is not allowed). + """ + if deref(self.options).escaping: + return chr(deref(self.options).escape_char) + else: + return False + + @escape_char.setter + def escape_char(self, value): + if value is False: + deref(self.options).escaping = False + else: + deref(self.options).escape_char = _single_char(value) + deref(self.options).escaping = True + + @property + def newlines_in_values(self): + """ + Whether newline characters are allowed in CSV values. + Setting this to True reduces the performance of multi-threaded + CSV reading. + """ + return deref(self.options).newlines_in_values + + @newlines_in_values.setter + def newlines_in_values(self, value): + deref(self.options).newlines_in_values = value + + @property + def ignore_empty_lines(self): + """ + Whether empty lines are ignored in CSV input. + If False, an empty line is interpreted as containing a single empty + value (assuming a one-column CSV file). + """ + return deref(self.options).ignore_empty_lines + + @property + def invalid_row_handler(self): + """ + Optional handler for invalid rows. + + If not None, this object is called for each CSV row that fails + parsing (because of a mismatching number of columns). + It should accept a single InvalidRow argument and return either + "skip" or "error" depending on the desired outcome. 
+ """ + return self._invalid_row_handler + + @invalid_row_handler.setter + def invalid_row_handler(self, value): + if value is not None and not callable(value): + raise TypeError("Expected callable or None, " + f"got instance of {type(value)!r}") + self._invalid_row_handler = value + deref(self.options).invalid_row_handler = MakeInvalidRowHandler( + &_handle_invalid_row, value) + + @ignore_empty_lines.setter + def ignore_empty_lines(self, value): + deref(self.options).ignore_empty_lines = value + + def validate(self): + check_status(deref(self.options).Validate()) + + def equals(self, ParseOptions other): + """ + Parameters + ---------- + other : pyarrow.csv.ParseOptions + + Returns + ------- + bool + """ + return ( + self.delimiter == other.delimiter and + self.quote_char == other.quote_char and + self.double_quote == other.double_quote and + self.escape_char == other.escape_char and + self.newlines_in_values == other.newlines_in_values and + self.ignore_empty_lines == other.ignore_empty_lines and + self._invalid_row_handler == other._invalid_row_handler + ) + + @staticmethod + cdef ParseOptions wrap(CCSVParseOptions options): + out = ParseOptions() + out.options.reset(new CCSVParseOptions(move(options))) + return out + + def __getstate__(self): + return (self.delimiter, self.quote_char, self.double_quote, + self.escape_char, self.newlines_in_values, + self.ignore_empty_lines, self.invalid_row_handler) + + def __setstate__(self, state): + (self.delimiter, self.quote_char, self.double_quote, + self.escape_char, self.newlines_in_values, + self.ignore_empty_lines, self.invalid_row_handler) = state + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class _ISO8601(_Weakrefable): + """ + A special object indicating ISO-8601 parsing. + """ + __slots__ = () + + def __str__(self): + return 'ISO8601' + + def __eq__(self, other): + return isinstance(other, _ISO8601) + + +ISO8601 = _ISO8601() + + +cdef class ConvertOptions(_Weakrefable): + """ + Options for converting CSV data. + + Parameters + ---------- + check_utf8 : bool, optional (default True) + Whether to check UTF8 validity of string columns. + column_types : pyarrow.Schema or dict, optional + Explicitly map column names to column types. Passing this argument + disables type inference on the defined columns. + null_values : list, optional + A sequence of strings that denote nulls in the data + (defaults are appropriate in most cases). Note that by default, + string columns are not checked for null values. To enable + null checking for those, specify ``strings_can_be_null=True``. + true_values : list, optional + A sequence of strings that denote true booleans in the data + (defaults are appropriate in most cases). + false_values : list, optional + A sequence of strings that denote false booleans in the data + (defaults are appropriate in most cases). + decimal_point : 1-character string, optional (default '.') + The character used as decimal point in floating-point and decimal + data. + strings_can_be_null : bool, optional (default False) + Whether string / binary columns can have null values. + If true, then strings in null_values are considered null for + string columns. + If false, then all strings are valid string values. + quoted_strings_can_be_null : bool, optional (default True) + Whether quoted values can be null. + If true, then strings in "null_values" are also considered null + when they appear quoted in the CSV file. Otherwise, quoted values + are never considered null. 
+ include_columns : list, optional + The names of columns to include in the Table. + If empty, the Table will include all columns from the CSV file. + If not empty, only these columns will be included, in this order. + include_missing_columns : bool, optional (default False) + If false, columns in `include_columns` but not in the CSV file will + error out. + If true, columns in `include_columns` but not in the CSV file will + produce a column of nulls (whose type is selected using + `column_types`, or null by default). + This option is ignored if `include_columns` is empty. + auto_dict_encode : bool, optional (default False) + Whether to try to automatically dict-encode string / binary data. + If true, then when type inference detects a string or binary column, + it it dict-encoded up to `auto_dict_max_cardinality` distinct values + (per chunk), after which it switches to regular encoding. + This setting is ignored for non-inferred columns (those in + `column_types`). + auto_dict_max_cardinality : int, optional + The maximum dictionary cardinality for `auto_dict_encode`. + This value is per chunk. + timestamp_parsers : list, optional + A sequence of strptime()-compatible format strings, tried in order + when attempting to infer or convert timestamp values (the special + value ISO8601() can also be given). By default, a fast built-in + ISO-8601 parser is used. + + Examples + -------- + + Defining an example data: + + >>> import io + >>> s = ( + ... "animals,n_legs,entry,fast\\n" + ... "Flamingo,2,01/03/2022,Yes\\n" + ... "Horse,4,02/03/2022,Yes\\n" + ... "Brittle stars,5,03/03/2022,No\\n" + ... "Centipede,100,04/03/2022,No\\n" + ... ",6,05/03/2022," + ... ) + >>> print(s) + animals,n_legs,entry,fast + Flamingo,2,01/03/2022,Yes + Horse,4,02/03/2022,Yes + Brittle stars,5,03/03/2022,No + Centipede,100,04/03/2022,No + ,6,05/03/2022, + + Change the type of a column: + + >>> import pyarrow as pa + >>> from pyarrow import csv + >>> convert_options = csv.ConvertOptions(column_types={"n_legs": pa.float64()}) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: double + entry: string + fast: string + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + entry: [["01/03/2022","02/03/2022","03/03/2022","04/03/2022","05/03/2022"]] + fast: [["Yes","Yes","No","No",""]] + + Define a date parsing format to get a timestamp type column + (in case dates are not in ISO format and not converted by default): + + >>> convert_options = csv.ConvertOptions( + ... timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"]) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + entry: timestamp[s] + fast: string + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]] + fast: [["Yes","Yes","No","No",""]] + + Specify a subset of columns to be read: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["animals", "n_legs"]) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + + List additional column to be included as a null typed column: + + >>> convert_options = csv.ConvertOptions( + ... 
include_columns=["animals", "n_legs", "location"], + ... include_missing_columns=True) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + location: null + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + location: [5 nulls] + + Define columns as dictionary type (by default only the + string/binary columns are dictionary encoded): + + >>> convert_options = csv.ConvertOptions( + ... timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"], + ... auto_dict_encode=True) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: dictionary + n_legs: int64 + entry: timestamp[s] + fast: dictionary + ---- + animals: [ -- dictionary: + ["Flamingo","Horse","Brittle stars","Centipede",""] -- indices: + [0,1,2,3,4]] + n_legs: [[2,4,5,100,6]] + entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]] + fast: [ -- dictionary: + ["Yes","No",""] -- indices: + [0,0,1,1,2]] + + Set upper limit for the number of categories. If the categories + is more than the limit, the conversion to dictionary will not + happen: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["animals"], + ... auto_dict_encode=True, + ... auto_dict_max_cardinality=2) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + + Set empty strings to missing values: + + >>> convert_options = csv.ConvertOptions(include_columns=["animals", "n_legs"], + ... strings_can_be_null=True) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",null]] + n_legs: [[2,4,5,100,6]] + + Define values to be True and False when converting a column + into a bool type: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["fast"], + ... false_values=["No"], + ... 
true_values=["Yes"]) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + fast: bool + ---- + fast: [[true,true,false,false,null]] + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + def __cinit__(self, *argw, **kwargs): + self.options.reset( + new CCSVConvertOptions(CCSVConvertOptions.Defaults())) + + def __init__(self, *, check_utf8=None, column_types=None, null_values=None, + true_values=None, false_values=None, decimal_point=None, + strings_can_be_null=None, quoted_strings_can_be_null=None, + include_columns=None, include_missing_columns=None, + auto_dict_encode=None, auto_dict_max_cardinality=None, + timestamp_parsers=None): + if check_utf8 is not None: + self.check_utf8 = check_utf8 + if column_types is not None: + self.column_types = column_types + if null_values is not None: + self.null_values = null_values + if true_values is not None: + self.true_values = true_values + if false_values is not None: + self.false_values = false_values + if decimal_point is not None: + self.decimal_point = decimal_point + if strings_can_be_null is not None: + self.strings_can_be_null = strings_can_be_null + if quoted_strings_can_be_null is not None: + self.quoted_strings_can_be_null = quoted_strings_can_be_null + if include_columns is not None: + self.include_columns = include_columns + if include_missing_columns is not None: + self.include_missing_columns = include_missing_columns + if auto_dict_encode is not None: + self.auto_dict_encode = auto_dict_encode + if auto_dict_max_cardinality is not None: + self.auto_dict_max_cardinality = auto_dict_max_cardinality + if timestamp_parsers is not None: + self.timestamp_parsers = timestamp_parsers + + @property + def check_utf8(self): + """ + Whether to check UTF8 validity of string columns. + """ + return deref(self.options).check_utf8 + + @check_utf8.setter + def check_utf8(self, value): + deref(self.options).check_utf8 = value + + @property + def strings_can_be_null(self): + """ + Whether string / binary columns can have null values. + """ + return deref(self.options).strings_can_be_null + + @strings_can_be_null.setter + def strings_can_be_null(self, value): + deref(self.options).strings_can_be_null = value + + @property + def quoted_strings_can_be_null(self): + """ + Whether quoted values can be null. + """ + return deref(self.options).quoted_strings_can_be_null + + @quoted_strings_can_be_null.setter + def quoted_strings_can_be_null(self, value): + deref(self.options).quoted_strings_can_be_null = value + + @property + def column_types(self): + """ + Explicitly map column names to column types. + """ + d = {frombytes(item.first): pyarrow_wrap_data_type(item.second) + for item in deref(self.options).column_types} + return d + + @column_types.setter + def column_types(self, value): + cdef: + shared_ptr[CDataType] typ + + if isinstance(value, Mapping): + value = value.items() + + deref(self.options).column_types.clear() + for item in value: + if isinstance(item, Field): + k = item.name + v = item.type + else: + k, v = item + typ = pyarrow_unwrap_data_type(ensure_type(v)) + assert typ != NULL + deref(self.options).column_types[tobytes(k)] = typ + + @property + def null_values(self): + """ + A sequence of strings that denote nulls in the data. 
+ """ + return [frombytes(x) for x in deref(self.options).null_values] + + @null_values.setter + def null_values(self, value): + deref(self.options).null_values = [tobytes(x) for x in value] + + @property + def true_values(self): + """ + A sequence of strings that denote true booleans in the data. + """ + return [frombytes(x) for x in deref(self.options).true_values] + + @true_values.setter + def true_values(self, value): + deref(self.options).true_values = [tobytes(x) for x in value] + + @property + def false_values(self): + """ + A sequence of strings that denote false booleans in the data. + """ + return [frombytes(x) for x in deref(self.options).false_values] + + @false_values.setter + def false_values(self, value): + deref(self.options).false_values = [tobytes(x) for x in value] + + @property + def decimal_point(self): + """ + The character used as decimal point in floating-point and decimal + data. + """ + return chr(deref(self.options).decimal_point) + + @decimal_point.setter + def decimal_point(self, value): + deref(self.options).decimal_point = _single_char(value) + + @property + def auto_dict_encode(self): + """ + Whether to try to automatically dict-encode string / binary data. + """ + return deref(self.options).auto_dict_encode + + @auto_dict_encode.setter + def auto_dict_encode(self, value): + deref(self.options).auto_dict_encode = value + + @property + def auto_dict_max_cardinality(self): + """ + The maximum dictionary cardinality for `auto_dict_encode`. + + This value is per chunk. + """ + return deref(self.options).auto_dict_max_cardinality + + @auto_dict_max_cardinality.setter + def auto_dict_max_cardinality(self, value): + deref(self.options).auto_dict_max_cardinality = value + + @property + def include_columns(self): + """ + The names of columns to include in the Table. + + If empty, the Table will include all columns from the CSV file. + If not empty, only these columns will be included, in this order. + """ + return [frombytes(s) for s in deref(self.options).include_columns] + + @include_columns.setter + def include_columns(self, value): + deref(self.options).include_columns.clear() + for item in value: + deref(self.options).include_columns.push_back(tobytes(item)) + + @property + def include_missing_columns(self): + """ + If false, columns in `include_columns` but not in the CSV file will + error out. + If true, columns in `include_columns` but not in the CSV file will + produce a null column (whose type is selected using `column_types`, + or null by default). + This option is ignored if `include_columns` is empty. + """ + return deref(self.options).include_missing_columns + + @include_missing_columns.setter + def include_missing_columns(self, value): + deref(self.options).include_missing_columns = value + + @property + def timestamp_parsers(self): + """ + A sequence of strptime()-compatible format strings, tried in order + when attempting to infer or convert timestamp values (the special + value ISO8601() can also be given). By default, a fast built-in + ISO-8601 parser is used. 
+ """ + cdef: + shared_ptr[CTimestampParser] c_parser + c_string kind + + parsers = [] + for c_parser in deref(self.options).timestamp_parsers: + kind = deref(c_parser).kind() + if kind == b'strptime': + parsers.append(frombytes(deref(c_parser).format())) + else: + assert kind == b'iso8601' + parsers.append(ISO8601) + + return parsers + + @timestamp_parsers.setter + def timestamp_parsers(self, value): + cdef: + vector[shared_ptr[CTimestampParser]] c_parsers + + for v in value: + if isinstance(v, str): + c_parsers.push_back(CTimestampParser.MakeStrptime(tobytes(v))) + elif v == ISO8601: + c_parsers.push_back(CTimestampParser.MakeISO8601()) + else: + raise TypeError("Expected list of str or ISO8601 objects") + + deref(self.options).timestamp_parsers = move(c_parsers) + + @staticmethod + cdef ConvertOptions wrap(CCSVConvertOptions options): + out = ConvertOptions() + out.options.reset(new CCSVConvertOptions(move(options))) + return out + + def validate(self): + check_status(deref(self.options).Validate()) + + def equals(self, ConvertOptions other): + """ + Parameters + ---------- + other : pyarrow.csv.ConvertOptions + + Returns + ------- + bool + """ + return ( + self.check_utf8 == other.check_utf8 and + self.column_types == other.column_types and + self.null_values == other.null_values and + self.true_values == other.true_values and + self.false_values == other.false_values and + self.decimal_point == other.decimal_point and + self.timestamp_parsers == other.timestamp_parsers and + self.strings_can_be_null == other.strings_can_be_null and + self.quoted_strings_can_be_null == + other.quoted_strings_can_be_null and + self.auto_dict_encode == other.auto_dict_encode and + self.auto_dict_max_cardinality == + other.auto_dict_max_cardinality and + self.include_columns == other.include_columns and + self.include_missing_columns == other.include_missing_columns + ) + + def __getstate__(self): + return (self.check_utf8, self.column_types, self.null_values, + self.true_values, self.false_values, self.decimal_point, + self.timestamp_parsers, self.strings_can_be_null, + self.quoted_strings_can_be_null, self.auto_dict_encode, + self.auto_dict_max_cardinality, self.include_columns, + self.include_missing_columns) + + def __setstate__(self, state): + (self.check_utf8, self.column_types, self.null_values, + self.true_values, self.false_values, self.decimal_point, + self.timestamp_parsers, self.strings_can_be_null, + self.quoted_strings_can_be_null, self.auto_dict_encode, + self.auto_dict_max_cardinality, self.include_columns, + self.include_missing_columns) = state + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef _get_reader(input_file, ReadOptions read_options, + shared_ptr[CInputStream]* out): + use_memory_map = False + get_input_stream(input_file, use_memory_map, out) + if read_options is not None: + out[0] = native_transcoding_input_stream(out[0], + read_options.encoding, + 'utf8') + + +cdef _get_read_options(ReadOptions read_options, CCSVReadOptions* out): + if read_options is None: + out[0] = CCSVReadOptions.Defaults() + else: + out[0] = deref(read_options.options) + + +cdef _get_parse_options(ParseOptions parse_options, CCSVParseOptions* out): + if parse_options is None: + out[0] = CCSVParseOptions.Defaults() + else: + out[0] = deref(parse_options.options) + + +cdef _get_convert_options(ConvertOptions convert_options, + CCSVConvertOptions* out): + if convert_options is None: + out[0] = CCSVConvertOptions.Defaults() + else: + out[0] = 
deref(convert_options.options) + + +cdef class CSVStreamingReader(RecordBatchReader): + """An object that reads record batches incrementally from a CSV file. + + Should not be instantiated directly by user code. + """ + cdef readonly: + Schema schema + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, " + "use pyarrow.csv.open_csv() instead." + .format(self.__class__.__name__)) + + # Note about cancellation: we cannot create a SignalStopHandler + # by default here, as several CSVStreamingReader instances may be + # created (including by the same thread). Handling cancellation + # would require having the user pass the SignalStopHandler. + # (in addition to solving ARROW-11853) + + cdef _open(self, shared_ptr[CInputStream] stream, + CCSVReadOptions c_read_options, + CCSVParseOptions c_parse_options, + CCSVConvertOptions c_convert_options, + MemoryPool memory_pool): + cdef: + shared_ptr[CSchema] c_schema + CIOContext io_context + + io_context = CIOContext(maybe_unbox_memory_pool(memory_pool)) + + with nogil: + self.reader = GetResultValue( + CCSVStreamingReader.Make( + io_context, stream, + move(c_read_options), move(c_parse_options), + move(c_convert_options))) + c_schema = self.reader.get().schema() + + self.schema = pyarrow_wrap_schema(c_schema) + + +def read_csv(input_file, read_options=None, parse_options=None, + convert_options=None, MemoryPool memory_pool=None): + """ + Read a Table from a stream of CSV data. + + Parameters + ---------- + input_file : string, path or file-like object + The location of CSV data. If a string or path, and if it ends + with a recognized compressed file extension (e.g. ".gz" or ".bz2"), + the data is automatically decompressed when reading. + read_options : pyarrow.csv.ReadOptions, optional + Options for the CSV reader (see pyarrow.csv.ReadOptions constructor + for defaults) + parse_options : pyarrow.csv.ParseOptions, optional + Options for the CSV parser + (see pyarrow.csv.ParseOptions constructor for defaults) + convert_options : pyarrow.csv.ConvertOptions, optional + Options for converting CSV data + (see pyarrow.csv.ConvertOptions constructor for defaults) + memory_pool : MemoryPool, optional + Pool to allocate Table memory from + + Returns + ------- + :class:`pyarrow.Table` + Contents of the CSV file as a in-memory table. + + Examples + -------- + + Defining an example file from bytes object: + + >>> import io + >>> s = ( + ... "animals,n_legs,entry\\n" + ... "Flamingo,2,2022-03-01\\n" + ... "Horse,4,2022-03-02\\n" + ... "Brittle stars,5,2022-03-03\\n" + ... "Centipede,100,2022-03-04" + ... 
) + >>> print(s) + animals,n_legs,entry + Flamingo,2,2022-03-01 + Horse,4,2022-03-02 + Brittle stars,5,2022-03-03 + Centipede,100,2022-03-04 + >>> source = io.BytesIO(s.encode()) + + Reading from the file + + >>> from pyarrow import csv + >>> csv.read_csv(source) + pyarrow.Table + animals: string + n_legs: int64 + entry: date32[day] + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + n_legs: [[2,4,5,100]] + entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + """ + cdef: + shared_ptr[CInputStream] stream + CCSVReadOptions c_read_options + CCSVParseOptions c_parse_options + CCSVConvertOptions c_convert_options + CIOContext io_context + SharedPtrNoGIL[CCSVReader] reader + shared_ptr[CTable] table + + _get_reader(input_file, read_options, &stream) + _get_read_options(read_options, &c_read_options) + _get_parse_options(parse_options, &c_parse_options) + _get_convert_options(convert_options, &c_convert_options) + + with SignalStopHandler() as stop_handler: + io_context = CIOContext( + maybe_unbox_memory_pool(memory_pool), + ( stop_handler.stop_token).stop_token) + reader = GetResultValue(CCSVReader.Make( + io_context, stream, + c_read_options, c_parse_options, c_convert_options)) + + with nogil: + table = GetResultValue(reader.get().Read()) + + return pyarrow_wrap_table(table) + + +def open_csv(input_file, read_options=None, parse_options=None, + convert_options=None, MemoryPool memory_pool=None): + """ + Open a streaming reader of CSV data. + + Reading using this function is always single-threaded. + + Parameters + ---------- + input_file : string, path or file-like object + The location of CSV data. If a string or path, and if it ends + with a recognized compressed file extension (e.g. ".gz" or ".bz2"), + the data is automatically decompressed when reading. 
+ read_options : pyarrow.csv.ReadOptions, optional + Options for the CSV reader (see pyarrow.csv.ReadOptions constructor + for defaults) + parse_options : pyarrow.csv.ParseOptions, optional + Options for the CSV parser + (see pyarrow.csv.ParseOptions constructor for defaults) + convert_options : pyarrow.csv.ConvertOptions, optional + Options for converting CSV data + (see pyarrow.csv.ConvertOptions constructor for defaults) + memory_pool : MemoryPool, optional + Pool to allocate Table memory from + + Returns + ------- + :class:`pyarrow.csv.CSVStreamingReader` + """ + cdef: + shared_ptr[CInputStream] stream + CCSVReadOptions c_read_options + CCSVParseOptions c_parse_options + CCSVConvertOptions c_convert_options + CSVStreamingReader reader + + _get_reader(input_file, read_options, &stream) + _get_read_options(read_options, &c_read_options) + _get_parse_options(parse_options, &c_parse_options) + _get_convert_options(convert_options, &c_convert_options) + + reader = CSVStreamingReader.__new__(CSVStreamingReader) + reader._open(stream, move(c_read_options), move(c_parse_options), + move(c_convert_options), memory_pool) + return reader + + +def _raise_invalid_function_option(value, description, *, + exception_class=ValueError): + raise exception_class(f"\"{value}\" is not a valid {description}") + + +cdef CQuotingStyle unwrap_quoting_style(quoting_style) except *: + if quoting_style == "needed": + return CQuotingStyle_Needed + elif quoting_style == "all_valid": + return CQuotingStyle_AllValid + elif quoting_style == "none": + return CQuotingStyle_None + _raise_invalid_function_option(quoting_style, "quoting style") + + +cdef wrap_quoting_style(quoting_style): + if quoting_style == CQuotingStyle_Needed: + return 'needed' + elif quoting_style == CQuotingStyle_AllValid: + return 'all_valid' + elif quoting_style == CQuotingStyle_None: + return 'none' + + +cdef class WriteOptions(_Weakrefable): + """ + Options for writing CSV files. + + Parameters + ---------- + include_header : bool, optional (default True) + Whether to write an initial header line with column names + batch_size : int, optional (default 1024) + How many rows to process together when converting and writing + CSV data + delimiter : 1-character string, optional (default ",") + The character delimiting individual cells in the CSV data. + quoting_style : str, optional (default "needed") + Whether to quote values, and if so, which quoting style to use. + The following values are accepted: + + - "needed" (default): only enclose values in quotes when needed. + - "all_valid": enclose all valid values in quotes; nulls are not quoted. + - "none": do not enclose any values in quotes; values containing + special characters (such as quotes, cell delimiters or line endings) + will raise an error. + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, *, include_header=None, batch_size=None, + delimiter=None, quoting_style=None): + self.options.reset(new CCSVWriteOptions(CCSVWriteOptions.Defaults())) + if include_header is not None: + self.include_header = include_header + if batch_size is not None: + self.batch_size = batch_size + if delimiter is not None: + self.delimiter = delimiter + if quoting_style is not None: + self.quoting_style = quoting_style + + @property + def include_header(self): + """ + Whether to write an initial header line with column names. 
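+
+        A minimal illustration of toggling this option (added here only as a
+        small sketch; it uses the public ``pyarrow.csv`` entry point already
+        referenced elsewhere in this module):
+
+        >>> from pyarrow import csv
+        >>> csv.WriteOptions(include_header=False).include_header
+        False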
+ """ + return deref(self.options).include_header + + @include_header.setter + def include_header(self, value): + deref(self.options).include_header = value + + @property + def batch_size(self): + """ + How many rows to process together when converting and writing + CSV data. + """ + return deref(self.options).batch_size + + @batch_size.setter + def batch_size(self, value): + deref(self.options).batch_size = value + + @property + def delimiter(self): + """ + The character delimiting individual cells in the CSV data. + """ + return chr(deref(self.options).delimiter) + + @delimiter.setter + def delimiter(self, value): + deref(self.options).delimiter = _single_char(value) + + @property + def quoting_style(self): + """ + Whether to quote values, and if so, which quoting style to use. + The following values are accepted: + + - "needed" (default): only enclose values in quotes when needed. + - "all_valid": enclose all valid values in quotes; nulls are not quoted. + - "none": do not enclose any values in quotes; values containing + special characters (such as quotes, cell delimiters or line endings) + will raise an error. + """ + return wrap_quoting_style(deref(self.options).quoting_style) + + @quoting_style.setter + def quoting_style(self, value): + deref(self.options).quoting_style = unwrap_quoting_style(value) + + @staticmethod + cdef WriteOptions wrap(CCSVWriteOptions options): + out = WriteOptions() + out.options.reset(new CCSVWriteOptions(move(options))) + return out + + def validate(self): + check_status(self.options.get().Validate()) + + +cdef _get_write_options(WriteOptions write_options, CCSVWriteOptions* out): + if write_options is None: + out[0] = CCSVWriteOptions.Defaults() + else: + out[0] = deref(write_options.options) + + +def write_csv(data, output_file, write_options=None, + MemoryPool memory_pool=None): + """ + Write record batch or table to a CSV file. + + Parameters + ---------- + data : pyarrow.RecordBatch or pyarrow.Table + The data to write. + output_file : string, path, pyarrow.NativeFile, or file-like object + The location where to write the CSV data. + write_options : pyarrow.csv.WriteOptions + Options to configure writing the CSV data. + memory_pool : MemoryPool, optional + Pool for temporary allocations. + + Examples + -------- + + >>> import pyarrow as pa + >>> from pyarrow import csv + + >>> legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> entry_date = pa.array(["01/03/2022", "02/03/2022", + ... "03/03/2022", "04/03/2022"]) + >>> table = pa.table([animals, legs, entry_date], + ... 
names=["animals", "n_legs", "entry"]) + + >>> csv.write_csv(table, "animals.csv") + + >>> write_options = csv.WriteOptions(include_header=False) + >>> csv.write_csv(table, "animals.csv", write_options=write_options) + + >>> write_options = csv.WriteOptions(delimiter=";") + >>> csv.write_csv(table, "animals.csv", write_options=write_options) + """ + cdef: + shared_ptr[COutputStream] stream + CCSVWriteOptions c_write_options + CMemoryPool* c_memory_pool + CRecordBatch* batch + CTable* table + _get_write_options(write_options, &c_write_options) + + get_writer(output_file, &stream) + c_memory_pool = maybe_unbox_memory_pool(memory_pool) + c_write_options.io_context = CIOContext(c_memory_pool) + if isinstance(data, RecordBatch): + batch = pyarrow_unwrap_batch(data).get() + with nogil: + check_status(WriteCSV(deref(batch), c_write_options, stream.get())) + elif isinstance(data, Table): + table = pyarrow_unwrap_table(data).get() + with nogil: + check_status(WriteCSV(deref(table), c_write_options, stream.get())) + else: + raise TypeError(f"Expected Table or RecordBatch, got '{type(data)}'") + + +cdef class CSVWriter(_CRecordBatchWriter): + """ + Writer to create a CSV file. + + Parameters + ---------- + sink : str, path, pyarrow.OutputStream or file-like object + The location where to write the CSV data. + schema : pyarrow.Schema + The schema of the data to be written. + write_options : pyarrow.csv.WriteOptions + Options to configure writing the CSV data. + memory_pool : MemoryPool, optional + Pool for temporary allocations. + """ + + def __init__(self, sink, Schema schema, *, + WriteOptions write_options=None, MemoryPool memory_pool=None): + cdef: + shared_ptr[COutputStream] c_stream + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + CCSVWriteOptions c_write_options + CMemoryPool* c_memory_pool = maybe_unbox_memory_pool(memory_pool) + _get_write_options(write_options, &c_write_options) + c_write_options.io_context = CIOContext(c_memory_pool) + get_writer(sink, &c_stream) + with nogil: + self.writer = GetResultValue(MakeCSVWriter( + c_stream, c_schema, c_write_options)) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_cuda.pxd b/parrot/lib/python3.10/site-packages/pyarrow/_cuda.pxd new file mode 100644 index 0000000000000000000000000000000000000000..6acb8826d1789ab2c9e5213f16f2851c9e3dc22b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_cuda.pxd @@ -0,0 +1,67 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# cython: language_level = 3 + +from pyarrow.lib cimport * +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_cuda cimport * + + +cdef class Context(_Weakrefable): + cdef: + shared_ptr[CCudaContext] context + int device_number + + cdef void init(self, const shared_ptr[CCudaContext]& ctx) + + +cdef class IpcMemHandle(_Weakrefable): + cdef: + shared_ptr[CCudaIpcMemHandle] handle + + cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h) + + +cdef class CudaBuffer(Buffer): + cdef: + shared_ptr[CCudaBuffer] cuda_buffer + object base + + cdef void init_cuda(self, + const shared_ptr[CCudaBuffer]& buffer, + object base) + + +cdef class HostBuffer(Buffer): + cdef: + shared_ptr[CCudaHostBuffer] host_buffer + + cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer) + + +cdef class BufferReader(NativeFile): + cdef: + CCudaBufferReader* reader + CudaBuffer buffer + + +cdef class BufferWriter(NativeFile): + cdef: + CCudaBufferWriter* writer + CudaBuffer buffer diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_cuda.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_cuda.pyx new file mode 100644 index 0000000000000000000000000000000000000000..ba799a105e7e15ab8414988cdefdaa4dc315cad8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_cuda.pyx @@ -0,0 +1,1058 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +from pyarrow.lib cimport * +from pyarrow.includes.libarrow_cuda cimport * +from pyarrow.lib import allocate_buffer, as_buffer, ArrowTypeError +from pyarrow.util import get_contiguous_span +cimport cpython as cp + + +cdef class Context(_Weakrefable): + """ + CUDA driver context. + """ + + def __init__(self, *args, **kwargs): + """ + Create a CUDA driver context for a particular device. + + If a CUDA context handle is passed, it is wrapped, otherwise + a default CUDA context for the given device is requested. + + Parameters + ---------- + device_number : int (default 0) + Specify the GPU device for which the CUDA driver context is + requested. + handle : int, optional + Specify CUDA handle for a shared context that has been created + by another library. 
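+
+        Examples
+        --------
+        A minimal sketch, assuming a machine with at least one CUDA device
+        and a pyarrow build with CUDA enabled; the reported device count
+        depends on the machine, so it is not run as a doctest:
+
+        >>> from pyarrow import cuda               # doctest: +SKIP
+        >>> cuda.Context.get_num_devices()         # doctest: +SKIP
+        1
+        >>> ctx = cuda.Context(device_number=0)    # doctest: +SKIP
+        >>> ctx.device_number                      # doctest: +SKIP
+        0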
+ """ + # This method exposed because autodoc doesn't pick __cinit__ + + def __cinit__(self, int device_number=0, uintptr_t handle=0): + cdef CCudaDeviceManager* manager + manager = GetResultValue(CCudaDeviceManager.Instance()) + cdef int n = manager.num_devices() + if device_number >= n or device_number < 0: + self.context.reset() + raise ValueError('device_number argument must be ' + 'non-negative less than %s' % (n)) + if handle == 0: + self.context = GetResultValue(manager.GetContext(device_number)) + else: + self.context = GetResultValue(manager.GetSharedContext( + device_number, handle)) + self.device_number = device_number + + @staticmethod + def from_numba(context=None): + """ + Create a Context instance from a Numba CUDA context. + + Parameters + ---------- + context : {numba.cuda.cudadrv.driver.Context, None} + A Numba CUDA context instance. + If None, the current Numba context is used. + + Returns + ------- + shared_context : pyarrow.cuda.Context + Context instance. + """ + if context is None: + import numba.cuda + context = numba.cuda.current_context() + return Context(device_number=context.device.id, + handle=context.handle.value) + + def to_numba(self): + """ + Convert Context to a Numba CUDA context. + + Returns + ------- + context : numba.cuda.cudadrv.driver.Context + Numba CUDA context instance. + """ + import ctypes + import numba.cuda + device = numba.cuda.gpus[self.device_number] + handle = ctypes.c_void_p(self.handle) + context = numba.cuda.cudadrv.driver.Context(device, handle) + + class DummyPendingDeallocs(object): + # Context is managed by pyarrow + def add_item(self, *args, **kwargs): + pass + + context.deallocations = DummyPendingDeallocs() + return context + + @staticmethod + def get_num_devices(): + """ Return the number of GPU devices. + """ + cdef CCudaDeviceManager* manager + manager = GetResultValue(CCudaDeviceManager.Instance()) + return manager.num_devices() + + @property + def device_number(self): + """ Return context device number. + """ + return self.device_number + + @property + def handle(self): + """ Return pointer to context handle. + """ + return self.context.get().handle() + + cdef void init(self, const shared_ptr[CCudaContext]& ctx): + self.context = ctx + + def synchronize(self): + """Blocks until the device has completed all preceding requested + tasks. + """ + check_status(self.context.get().Synchronize()) + + @property + def bytes_allocated(self): + """Return the number of allocated bytes. + """ + return self.context.get().bytes_allocated() + + def get_device_address(self, uintptr_t address): + """Return the device address that is reachable from kernels running in + the context + + Parameters + ---------- + address : int + Specify memory address value + + Returns + ------- + device_address : int + Device address accessible from device context + + Notes + ----- + The device address is defined as a memory address accessible + by device. While it is often a device memory address but it + can be also a host memory address, for instance, when the + memory is allocated as host memory (using cudaMallocHost or + cudaHostAlloc) or as managed memory (using cudaMallocManaged) + or the host memory is page-locked (using cudaHostRegister). + """ + return GetResultValue(self.context.get().GetDeviceAddress(address)) + + def new_buffer(self, int64_t nbytes): + """Return new device buffer. + + Parameters + ---------- + nbytes : int + Specify the number of bytes to be allocated. + + Returns + ------- + buf : CudaBuffer + Allocated buffer. 
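+
+        Examples
+        --------
+        A small sketch of allocating device memory (assumes a CUDA-capable
+        device; not run as a doctest):
+
+        >>> from pyarrow import cuda       # doctest: +SKIP
+        >>> ctx = cuda.Context()           # doctest: +SKIP
+        >>> buf = ctx.new_buffer(1024)     # doctest: +SKIP
+        >>> buf.size                       # doctest: +SKIP
+        1024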
+ """ + cdef: + shared_ptr[CCudaBuffer] cudabuf + with nogil: + cudabuf = GetResultValue(self.context.get().Allocate(nbytes)) + return pyarrow_wrap_cudabuffer(cudabuf) + + def foreign_buffer(self, address, size, base=None): + """ + Create device buffer from address and size as a view. + + The caller is responsible for allocating and freeing the + memory. When `address==size==0` then a new zero-sized buffer + is returned. + + Parameters + ---------- + address : int + Specify the starting address of the buffer. The address can + refer to both device or host memory but it must be + accessible from device after mapping it with + `get_device_address` method. + size : int + Specify the size of device buffer in bytes. + base : {None, object} + Specify object that owns the referenced memory. + + Returns + ------- + cbuf : CudaBuffer + Device buffer as a view of device reachable memory. + + """ + if not address and size == 0: + return self.new_buffer(0) + cdef: + uintptr_t c_addr = self.get_device_address(address) + int64_t c_size = size + shared_ptr[CCudaBuffer] cudabuf + + cudabuf = GetResultValue(self.context.get().View( + c_addr, c_size)) + return pyarrow_wrap_cudabuffer_base(cudabuf, base) + + def open_ipc_buffer(self, ipc_handle): + """ Open existing CUDA IPC memory handle + + Parameters + ---------- + ipc_handle : IpcMemHandle + Specify opaque pointer to CUipcMemHandle (driver API). + + Returns + ------- + buf : CudaBuffer + referencing device buffer + """ + handle = pyarrow_unwrap_cudaipcmemhandle(ipc_handle) + cdef shared_ptr[CCudaBuffer] cudabuf + with nogil: + cudabuf = GetResultValue( + self.context.get().OpenIpcBuffer(handle.get()[0])) + return pyarrow_wrap_cudabuffer(cudabuf) + + def buffer_from_data(self, object data, int64_t offset=0, int64_t size=-1): + """Create device buffer and initialize with data. + + Parameters + ---------- + data : {CudaBuffer, HostBuffer, Buffer, array-like} + Specify data to be copied to device buffer. + offset : int + Specify the offset of input buffer for device data + buffering. Default: 0. + size : int + Specify the size of device buffer in bytes. Default: all + (starting from input offset) + + Returns + ------- + cbuf : CudaBuffer + Device buffer with copied data. + """ + is_host_data = not pyarrow_is_cudabuffer(data) + buf = as_buffer(data) if is_host_data else data + + bsize = buf.size + if offset < 0 or (bsize and offset >= bsize): + raise ValueError('offset argument is out-of-range') + if size < 0: + size = bsize - offset + elif offset + size > bsize: + raise ValueError( + 'requested larger slice than available in device buffer') + + if offset != 0 or size != bsize: + buf = buf.slice(offset, size) + + result = self.new_buffer(size) + if is_host_data: + result.copy_from_host(buf, position=0, nbytes=size) + else: + result.copy_from_device(buf, position=0, nbytes=size) + return result + + def buffer_from_object(self, obj): + """Create device buffer view of arbitrary object that references + device accessible memory. + + When the object contains a non-contiguous view of device + accessible memory then the returned device buffer will contain + contiguous view of the memory, that is, including the + intermediate data that is otherwise invisible to the input + object. + + Parameters + ---------- + obj : {object, Buffer, HostBuffer, CudaBuffer, ...} + Specify an object that holds (device or host) address that + can be accessed from device. 
This includes objects with + types defined in pyarrow.cuda as well as arbitrary objects + that implement the CUDA array interface as defined by numba. + + Returns + ------- + cbuf : CudaBuffer + Device buffer as a view of device accessible memory. + + """ + if isinstance(obj, HostBuffer): + return self.foreign_buffer(obj.address, obj.size, base=obj) + elif isinstance(obj, Buffer): + return CudaBuffer.from_buffer(obj) + elif isinstance(obj, CudaBuffer): + return obj + elif hasattr(obj, '__cuda_array_interface__'): + desc = obj.__cuda_array_interface__ + addr = desc['data'][0] + if addr is None: + return self.new_buffer(0) + import numpy as np + start, end = get_contiguous_span( + desc['shape'], desc.get('strides'), + np.dtype(desc['typestr']).itemsize) + return self.foreign_buffer(addr + start, end - start, base=obj) + raise ArrowTypeError('cannot create device buffer view from' + ' `%s` object' % (type(obj))) + + +cdef class IpcMemHandle(_Weakrefable): + """A serializable container for a CUDA IPC handle. + """ + cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h): + self.handle = h + + @staticmethod + def from_buffer(Buffer opaque_handle): + """Create IpcMemHandle from opaque buffer (e.g. from another + process) + + Parameters + ---------- + opaque_handle : + a CUipcMemHandle as a const void* + + Returns + ------- + ipc_handle : IpcMemHandle + """ + c_buf = pyarrow_unwrap_buffer(opaque_handle) + cdef: + shared_ptr[CCudaIpcMemHandle] handle + + handle = GetResultValue( + CCudaIpcMemHandle.FromBuffer(c_buf.get().data())) + return pyarrow_wrap_cudaipcmemhandle(handle) + + def serialize(self, pool=None): + """Write IpcMemHandle to a Buffer + + Parameters + ---------- + pool : {MemoryPool, None} + Specify a pool to allocate memory from + + Returns + ------- + buf : Buffer + The serialized buffer. + """ + cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool) + cdef shared_ptr[CBuffer] buf + cdef CCudaIpcMemHandle* h = self.handle.get() + with nogil: + buf = GetResultValue(h.Serialize(pool_)) + return pyarrow_wrap_buffer(buf) + + +cdef class CudaBuffer(Buffer): + """An Arrow buffer with data located in a GPU device. + + To create a CudaBuffer instance, use Context.device_buffer(). + + The memory allocated in a CudaBuffer is freed when the buffer object + is deleted. + """ + + def __init__(self): + raise TypeError("Do not call CudaBuffer's constructor directly, use " + "`.device_buffer`" + " method instead.") + + cdef void init_cuda(self, + const shared_ptr[CCudaBuffer]& buffer, + object base): + self.cuda_buffer = buffer + self.init( buffer) + self.base = base + + @staticmethod + def from_buffer(buf): + """ Convert back generic buffer into CudaBuffer + + Parameters + ---------- + buf : Buffer + Specify buffer containing CudaBuffer + + Returns + ------- + dbuf : CudaBuffer + Resulting device buffer. + """ + c_buf = pyarrow_unwrap_buffer(buf) + cuda_buffer = GetResultValue(CCudaBuffer.FromBuffer(c_buf)) + return pyarrow_wrap_cudabuffer(cuda_buffer) + + @staticmethod + def from_numba(mem): + """Create a CudaBuffer view from numba MemoryPointer instance. + + Parameters + ---------- + mem : numba.cuda.cudadrv.driver.MemoryPointer + + Returns + ------- + cbuf : CudaBuffer + Device buffer as a view of numba MemoryPointer. 
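+
+        Examples
+        --------
+        A rough sketch of the numba interoperability path. It assumes numba
+        is installed and a CUDA device is present; ``memalloc`` is numba's
+        device allocation call, used here purely for illustration (not run
+        as a doctest):
+
+        >>> import numba.cuda                                  # doctest: +SKIP
+        >>> from pyarrow import cuda                           # doctest: +SKIP
+        >>> mem = numba.cuda.current_context().memalloc(16)   # doctest: +SKIP
+        >>> cbuf = cuda.CudaBuffer.from_numba(mem)             # doctest: +SKIP
+        >>> cbuf.size                                          # doctest: +SKIP
+        16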
+ """ + ctx = Context.from_numba(mem.context) + if mem.device_pointer.value is None and mem.size==0: + return ctx.new_buffer(0) + return ctx.foreign_buffer(mem.device_pointer.value, mem.size, base=mem) + + def to_numba(self): + """Return numba memory pointer of CudaBuffer instance. + """ + import ctypes + from numba.cuda.cudadrv.driver import MemoryPointer + return MemoryPointer(self.context.to_numba(), + pointer=ctypes.c_void_p(self.address), + size=self.size) + + cdef getitem(self, int64_t i): + return self.copy_to_host(position=i, nbytes=1)[0] + + def copy_to_host(self, int64_t position=0, int64_t nbytes=-1, + Buffer buf=None, + MemoryPool memory_pool=None, c_bool resizable=False): + """Copy memory from GPU device to CPU host + + Caller is responsible for ensuring that all tasks affecting + the memory are finished. Use + + `.context.synchronize()` + + when needed. + + Parameters + ---------- + position : int + Specify the starting position of the source data in GPU + device buffer. Default: 0. + nbytes : int + Specify the number of bytes to copy. Default: -1 (all from + the position until host buffer is full). + buf : Buffer + Specify a pre-allocated output buffer in host. Default: None + (allocate new output buffer). + memory_pool : MemoryPool + resizable : bool + Specify extra arguments to allocate_buffer. Used only when + buf is None. + + Returns + ------- + buf : Buffer + Output buffer in host. + + """ + if position < 0 or (self.size and position > self.size) \ + or (self.size == 0 and position != 0): + raise ValueError('position argument is out-of-range') + cdef: + int64_t c_nbytes + if buf is None: + if nbytes < 0: + # copy all starting from position to new host buffer + c_nbytes = self.size - position + else: + if nbytes > self.size - position: + raise ValueError( + 'requested more to copy than available from ' + 'device buffer') + # copy nbytes starting from position to new host buffer + c_nbytes = nbytes + buf = allocate_buffer(c_nbytes, memory_pool=memory_pool, + resizable=resizable) + else: + if nbytes < 0: + # copy all from position until given host buffer is full + c_nbytes = min(self.size - position, buf.size) + else: + if nbytes > buf.size: + raise ValueError( + 'requested copy does not fit into host buffer') + # copy nbytes from position to given host buffer + c_nbytes = nbytes + + cdef: + shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf) + int64_t c_position = position + with nogil: + check_status(self.cuda_buffer.get() + .CopyToHost(c_position, c_nbytes, + c_buf.get().mutable_data())) + return buf + + def copy_from_host(self, data, int64_t position=0, int64_t nbytes=-1): + """Copy data from host to device. + + The device buffer must be pre-allocated. + + Parameters + ---------- + data : {Buffer, array-like} + Specify data in host. It can be array-like that is valid + argument to py_buffer + position : int + Specify the starting position of the copy in device buffer. + Default: 0. + nbytes : int + Specify the number of bytes to copy. Default: -1 (all from + source until device buffer, starting from position, is full) + + Returns + ------- + nbytes : int + Number of bytes copied. 
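+
+        Examples
+        --------
+        A short host-to-device round-trip sketch (assumes a CUDA-capable
+        device; not run as a doctest):
+
+        >>> from pyarrow import cuda            # doctest: +SKIP
+        >>> ctx = cuda.Context()                # doctest: +SKIP
+        >>> dbuf = ctx.new_buffer(8)            # doctest: +SKIP
+        >>> dbuf.copy_from_host(b"01234567")    # doctest: +SKIP
+        8
+        >>> dbuf.copy_to_host().to_pybytes()    # doctest: +SKIP
+        b'01234567'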
+ """ + if position < 0 or position > self.size: + raise ValueError('position argument is out-of-range') + cdef: + int64_t c_nbytes + buf = as_buffer(data) + + if nbytes < 0: + # copy from host buffer to device buffer starting from + # position until device buffer is full + c_nbytes = min(self.size - position, buf.size) + else: + if nbytes > buf.size: + raise ValueError( + 'requested more to copy than available from host buffer') + if nbytes > self.size - position: + raise ValueError( + 'requested more to copy than available in device buffer') + # copy nbytes from host buffer to device buffer starting + # from position + c_nbytes = nbytes + + cdef: + shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf) + int64_t c_position = position + with nogil: + check_status(self.cuda_buffer.get(). + CopyFromHost(c_position, c_buf.get().data(), + c_nbytes)) + return c_nbytes + + def copy_from_device(self, buf, int64_t position=0, int64_t nbytes=-1): + """Copy data from device to device. + + Parameters + ---------- + buf : CudaBuffer + Specify source device buffer. + position : int + Specify the starting position of the copy in device buffer. + Default: 0. + nbytes : int + Specify the number of bytes to copy. Default: -1 (all from + source until device buffer, starting from position, is full) + + Returns + ------- + nbytes : int + Number of bytes copied. + + """ + if position < 0 or position > self.size: + raise ValueError('position argument is out-of-range') + cdef: + int64_t c_nbytes + + if nbytes < 0: + # copy from source device buffer to device buffer starting + # from position until device buffer is full + c_nbytes = min(self.size - position, buf.size) + else: + if nbytes > buf.size: + raise ValueError( + 'requested more to copy than available from device buffer') + if nbytes > self.size - position: + raise ValueError( + 'requested more to copy than available in device buffer') + # copy nbytes from source device buffer to device buffer + # starting from position + c_nbytes = nbytes + + cdef: + shared_ptr[CCudaBuffer] c_buf = pyarrow_unwrap_cudabuffer(buf) + int64_t c_position = position + shared_ptr[CCudaContext] c_src_ctx = pyarrow_unwrap_cudacontext( + buf.context) + void* c_source_data = (c_buf.get().address()) + + if self.context.handle != buf.context.handle: + with nogil: + check_status(self.cuda_buffer.get(). + CopyFromAnotherDevice(c_src_ctx, c_position, + c_source_data, c_nbytes)) + else: + with nogil: + check_status(self.cuda_buffer.get(). + CopyFromDevice(c_position, c_source_data, + c_nbytes)) + return c_nbytes + + def export_for_ipc(self): + """ + Expose this device buffer as IPC memory which can be used in other + processes. + + After calling this function, this device memory will not be + freed when the CudaBuffer is destructed. + + Returns + ------- + ipc_handle : IpcMemHandle + The exported IPC handle + + """ + cdef shared_ptr[CCudaIpcMemHandle] handle + with nogil: + handle = GetResultValue(self.cuda_buffer.get().ExportForIpc()) + return pyarrow_wrap_cudaipcmemhandle(handle) + + @property + def context(self): + """Returns the CUDA driver context of this buffer. + """ + return pyarrow_wrap_cudacontext(self.cuda_buffer.get().context()) + + def slice(self, offset=0, length=None): + """Return slice of device buffer + + Parameters + ---------- + offset : int, default 0 + Specify offset from the start of device buffer to slice + length : int, default None + Specify the length of slice (default is until end of device + buffer starting from offset). 
If the length is larger than + the data available, the returned slice will have a size of + the available data starting from the offset. + + Returns + ------- + sliced : CudaBuffer + Zero-copy slice of device buffer. + + """ + if offset < 0 or (self.size and offset >= self.size): + raise ValueError('offset argument is out-of-range') + cdef int64_t offset_ = offset + cdef int64_t size + if length is None: + size = self.size - offset_ + elif offset + length <= self.size: + size = length + else: + size = self.size - offset + parent = pyarrow_unwrap_cudabuffer(self) + return pyarrow_wrap_cudabuffer(make_shared[CCudaBuffer](parent, + offset_, size)) + + def to_pybytes(self): + """Return device buffer content as Python bytes. + """ + return self.copy_to_host().to_pybytes() + + def __getbuffer__(self, cp.Py_buffer* buffer, int flags): + # Device buffer contains data pointers on the device. Hence, + # cannot support buffer protocol PEP-3118 for CudaBuffer. + raise BufferError('buffer protocol for device buffer not supported') + + +cdef class HostBuffer(Buffer): + """Device-accessible CPU memory created using cudaHostAlloc. + + To create a HostBuffer instance, use + + cuda.new_host_buffer() + """ + + def __init__(self): + raise TypeError("Do not call HostBuffer's constructor directly," + " use `cuda.new_host_buffer` function instead.") + + cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer): + self.host_buffer = buffer + self.init( buffer) + + @property + def size(self): + return self.host_buffer.get().size() + + +cdef class BufferReader(NativeFile): + """File interface for zero-copy read from CUDA buffers. + + Note: Read methods return pointers to device memory. This means + you must be careful using this interface with any Arrow code which + may expect to be able to do anything other than pointer arithmetic + on the returned buffers. + """ + + def __cinit__(self, CudaBuffer obj): + self.buffer = obj + self.reader = new CCudaBufferReader(self.buffer.buffer) + self.set_random_access_file( + shared_ptr[CRandomAccessFile](self.reader)) + self.is_readable = True + + def read_buffer(self, nbytes=None): + """Return a slice view of the underlying device buffer. + + The slice will start at the current reader position and will + have specified size in bytes. + + Parameters + ---------- + nbytes : int, default None + Specify the number of bytes to read. Default: None (read all + remaining bytes). + + Returns + ------- + cbuf : CudaBuffer + New device buffer. + + """ + cdef: + int64_t c_nbytes + shared_ptr[CCudaBuffer] output + + if nbytes is None: + c_nbytes = self.size() - self.tell() + else: + c_nbytes = nbytes + + with nogil: + output = static_pointer_cast[CCudaBuffer, CBuffer]( + GetResultValue(self.reader.Read(c_nbytes))) + + return pyarrow_wrap_cudabuffer(output) + + +cdef class BufferWriter(NativeFile): + """File interface for writing to CUDA buffers. + + By default writes are unbuffered. Use set_buffer_size to enable + buffering. + """ + + def __cinit__(self, CudaBuffer buffer): + self.buffer = buffer + self.writer = new CCudaBufferWriter(self.buffer.cuda_buffer) + self.set_output_stream(shared_ptr[COutputStream](self.writer)) + self.is_writable = True + + def writeat(self, int64_t position, object data): + """Write data to buffer starting from position. + + Parameters + ---------- + position : int + Specify device buffer position where the data will be + written. + data : array-like + Specify data, the data instance must implement buffer + protocol. 
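+
+        Examples
+        --------
+        A minimal sketch that writes four bytes at offset 4 of a device
+        buffer through the writer interface (assumes a CUDA-capable device;
+        not run as a doctest):
+
+        >>> from pyarrow import cuda             # doctest: +SKIP
+        >>> ctx = cuda.Context()                 # doctest: +SKIP
+        >>> dbuf = ctx.new_buffer(8)             # doctest: +SKIP
+        >>> writer = cuda.BufferWriter(dbuf)     # doctest: +SKIP
+        >>> writer.writeat(4, b"abcd")           # doctest: +SKIP
+        >>> writer.flush()                       # doctest: +SKIP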
+ """ + cdef: + Buffer buf = as_buffer(data) + const uint8_t* c_data = buf.buffer.get().data() + int64_t c_size = buf.buffer.get().size() + + with nogil: + check_status(self.writer.WriteAt(position, c_data, c_size)) + + def flush(self): + """ Flush the buffer stream """ + with nogil: + check_status(self.writer.Flush()) + + def seek(self, int64_t position, int whence=0): + # TODO: remove this method after NativeFile.seek supports + # writable files. + cdef int64_t offset + + with nogil: + if whence == 0: + offset = position + elif whence == 1: + offset = GetResultValue(self.writer.Tell()) + offset = offset + position + else: + with gil: + raise ValueError("Invalid value of whence: {0}" + .format(whence)) + check_status(self.writer.Seek(offset)) + return self.tell() + + @property + def buffer_size(self): + """Returns size of host (CPU) buffer, 0 for unbuffered + """ + return self.writer.buffer_size() + + @buffer_size.setter + def buffer_size(self, int64_t buffer_size): + """Set CPU buffer size to limit calls to cudaMemcpy + + Parameters + ---------- + buffer_size : int + Specify the size of CPU buffer to allocate in bytes. + """ + with nogil: + check_status(self.writer.SetBufferSize(buffer_size)) + + @property + def num_bytes_buffered(self): + """Returns number of bytes buffered on host + """ + return self.writer.num_bytes_buffered() + +# Functions + + +def new_host_buffer(const int64_t size, int device=0): + """Return buffer with CUDA-accessible memory on CPU host + + Parameters + ---------- + size : int + Specify the number of bytes to be allocated. + device : int + Specify GPU device number. + + Returns + ------- + dbuf : HostBuffer + Allocated host buffer + """ + cdef shared_ptr[CCudaHostBuffer] buffer + with nogil: + buffer = GetResultValue(AllocateCudaHostBuffer(device, size)) + return pyarrow_wrap_cudahostbuffer(buffer) + + +def serialize_record_batch(object batch, object ctx): + """ Write record batch message to GPU device memory + + Parameters + ---------- + batch : RecordBatch + Record batch to write + ctx : Context + CUDA Context to allocate device memory from + + Returns + ------- + dbuf : CudaBuffer + device buffer which contains the record batch message + """ + cdef shared_ptr[CCudaBuffer] buffer + cdef CRecordBatch* batch_ = pyarrow_unwrap_batch(batch).get() + cdef CCudaContext* ctx_ = pyarrow_unwrap_cudacontext(ctx).get() + with nogil: + buffer = GetResultValue(CudaSerializeRecordBatch(batch_[0], ctx_)) + return pyarrow_wrap_cudabuffer(buffer) + + +def read_message(object source, pool=None): + """ Read Arrow IPC message located on GPU device + + Parameters + ---------- + source : {CudaBuffer, cuda.BufferReader} + Device buffer or reader of device buffer. + pool : MemoryPool (optional) + Pool to allocate CPU memory for the metadata + + Returns + ------- + message : Message + The deserialized message, body still on device + """ + cdef: + Message result = Message.__new__(Message) + cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool) + if not isinstance(source, BufferReader): + reader = BufferReader(source) + with nogil: + result.message = move( + GetResultValue(ReadMessage(reader.reader, pool_))) + return result + + +def read_record_batch(object buffer, object schema, *, + DictionaryMemo dictionary_memo=None, pool=None): + """Construct RecordBatch referencing IPC message located on CUDA device. + + While the metadata is copied to host memory for deserialization, + the record batch data remains on the device. 
+ + Parameters + ---------- + buffer : + Device buffer containing the complete IPC message + schema : Schema + The schema for the record batch + dictionary_memo : DictionaryMemo, optional + If message contains dictionaries, must pass a populated + DictionaryMemo + pool : MemoryPool (optional) + Pool to allocate metadata from + + Returns + ------- + batch : RecordBatch + Reconstructed record batch, with device pointers + + """ + cdef: + shared_ptr[CSchema] schema_ = pyarrow_unwrap_schema(schema) + shared_ptr[CCudaBuffer] buffer_ = pyarrow_unwrap_cudabuffer(buffer) + CDictionaryMemo temp_memo + CDictionaryMemo* arg_dict_memo + CMemoryPool* pool_ = maybe_unbox_memory_pool(pool) + shared_ptr[CRecordBatch] batch + + if dictionary_memo is not None: + arg_dict_memo = dictionary_memo.memo + else: + arg_dict_memo = &temp_memo + + with nogil: + batch = GetResultValue(CudaReadRecordBatch( + schema_, arg_dict_memo, buffer_, pool_)) + return pyarrow_wrap_batch(batch) + + +# Public API + + +cdef public api bint pyarrow_is_buffer(object buffer): + return isinstance(buffer, Buffer) + +# cudabuffer + +cdef public api bint pyarrow_is_cudabuffer(object buffer): + return isinstance(buffer, CudaBuffer) + + +cdef public api object \ + pyarrow_wrap_cudabuffer_base(const shared_ptr[CCudaBuffer]& buf, base): + cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer) + result.init_cuda(buf, base) + return result + + +cdef public api object \ + pyarrow_wrap_cudabuffer(const shared_ptr[CCudaBuffer]& buf): + cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer) + result.init_cuda(buf, None) + return result + + +cdef public api shared_ptr[CCudaBuffer] pyarrow_unwrap_cudabuffer(object obj): + if pyarrow_is_cudabuffer(obj): + return (obj).cuda_buffer + raise TypeError('expected CudaBuffer instance, got %s' + % (type(obj).__name__)) + +# cudahostbuffer + +cdef public api bint pyarrow_is_cudahostbuffer(object buffer): + return isinstance(buffer, HostBuffer) + + +cdef public api object \ + pyarrow_wrap_cudahostbuffer(const shared_ptr[CCudaHostBuffer]& buf): + cdef HostBuffer result = HostBuffer.__new__(HostBuffer) + result.init_host(buf) + return result + + +cdef public api shared_ptr[CCudaHostBuffer] \ + pyarrow_unwrap_cudahostbuffer(object obj): + if pyarrow_is_cudahostbuffer(obj): + return (obj).host_buffer + raise TypeError('expected HostBuffer instance, got %s' + % (type(obj).__name__)) + +# cudacontext + +cdef public api bint pyarrow_is_cudacontext(object ctx): + return isinstance(ctx, Context) + + +cdef public api object \ + pyarrow_wrap_cudacontext(const shared_ptr[CCudaContext]& ctx): + cdef Context result = Context.__new__(Context) + result.init(ctx) + return result + + +cdef public api shared_ptr[CCudaContext] \ + pyarrow_unwrap_cudacontext(object obj): + if pyarrow_is_cudacontext(obj): + return (obj).context + raise TypeError('expected Context instance, got %s' + % (type(obj).__name__)) + +# cudaipcmemhandle + +cdef public api bint pyarrow_is_cudaipcmemhandle(object handle): + return isinstance(handle, IpcMemHandle) + + +cdef public api object \ + pyarrow_wrap_cudaipcmemhandle(shared_ptr[CCudaIpcMemHandle]& h): + cdef IpcMemHandle result = IpcMemHandle.__new__(IpcMemHandle) + result.init(h) + return result + + +cdef public api shared_ptr[CCudaIpcMemHandle] \ + pyarrow_unwrap_cudaipcmemhandle(object obj): + if pyarrow_is_cudaipcmemhandle(obj): + return (obj).handle + raise TypeError('expected IpcMemHandle instance, got %s' + % (type(obj).__name__)) diff --git 
a/parrot/lib/python3.10/site-packages/pyarrow/_dataset.pxd b/parrot/lib/python3.10/site-packages/pyarrow/_dataset.pxd new file mode 100644 index 0000000000000000000000000000000000000000..220ab6b19affe6b520db3a3501fad2772919f5e4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_dataset.pxd @@ -0,0 +1,183 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset is currently unstable. APIs subject to change without notice.""" + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow.lib cimport * +from pyarrow._fs cimport FileSystem, FileInfo + + +cdef CFileSource _make_file_source(object file, FileSystem filesystem=*, object file_size=*) + +cdef class DatasetFactory(_Weakrefable): + + cdef: + SharedPtrNoGIL[CDatasetFactory] wrapped + CDatasetFactory* factory + + cdef init(self, const shared_ptr[CDatasetFactory]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CDatasetFactory]& sp) + + cdef inline shared_ptr[CDatasetFactory] unwrap(self) nogil + + +cdef class Dataset(_Weakrefable): + + cdef: + SharedPtrNoGIL[CDataset] wrapped + CDataset* dataset + public dict _scan_options + + cdef void init(self, const shared_ptr[CDataset]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CDataset]& sp) + + cdef shared_ptr[CDataset] unwrap(self) nogil + + +cdef class Scanner(_Weakrefable): + cdef: + SharedPtrNoGIL[CScanner] wrapped + CScanner* scanner + + cdef void init(self, const shared_ptr[CScanner]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CScanner]& sp) + + cdef shared_ptr[CScanner] unwrap(self) + + @staticmethod + cdef shared_ptr[CScanOptions] _make_scan_options(Dataset dataset, dict py_scanoptions) except * + + +cdef class FragmentScanOptions(_Weakrefable): + + cdef: + shared_ptr[CFragmentScanOptions] wrapped + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CFragmentScanOptions]& sp) + + +cdef class FileFormat(_Weakrefable): + + cdef: + shared_ptr[CFileFormat] wrapped + CFileFormat* format + + cdef void init(self, const shared_ptr[CFileFormat]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CFileFormat]& sp) + + cdef inline shared_ptr[CFileFormat] unwrap(self) + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options) + + # Return a WrittenFile after a file was written. + # May be overridden by subclasses, e.g. to add metadata. 
+ cdef WrittenFile _finish_write(self, path, base_dir, + CFileWriter* file_writer) + + +cdef class FileWriteOptions(_Weakrefable): + + cdef: + shared_ptr[CFileWriteOptions] wrapped + CFileWriteOptions* c_options + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CFileWriteOptions]& sp) + + cdef inline shared_ptr[CFileWriteOptions] unwrap(self) + + +cdef class Fragment(_Weakrefable): + + cdef: + SharedPtrNoGIL[CFragment] wrapped + CFragment* fragment + + cdef void init(self, const shared_ptr[CFragment]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CFragment]& sp) + + cdef inline shared_ptr[CFragment] unwrap(self) + + +cdef class FileFragment(Fragment): + + cdef: + CFileFragment* file_fragment + + cdef void init(self, const shared_ptr[CFragment]& sp) + + +cdef class Partitioning(_Weakrefable): + + cdef: + shared_ptr[CPartitioning] wrapped + CPartitioning* partitioning + + cdef init(self, const shared_ptr[CPartitioning]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CPartitioning]& sp) + + cdef inline shared_ptr[CPartitioning] unwrap(self) + + +cdef class PartitioningFactory(_Weakrefable): + + cdef: + shared_ptr[CPartitioningFactory] wrapped + CPartitioningFactory* factory + object constructor + object options + + cdef init(self, const shared_ptr[CPartitioningFactory]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CPartitioningFactory]& sp, + object constructor, object options) + + cdef inline shared_ptr[CPartitioningFactory] unwrap(self) + + +cdef class WrittenFile(_Weakrefable): + + # The full path to the created file + cdef public str path + # Optional Parquet metadata + # This metadata will have the file path attribute set to the path of + # the written file. + cdef public object metadata + # The size of the file in bytes + cdef public int64_t size diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_dataset.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_dataset.pyx new file mode 100644 index 0000000000000000000000000000000000000000..3583a3213ccbc4c46a5a05056ccb3c0c3882a444 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_dataset.pyx @@ -0,0 +1,4052 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset is currently unstable. 
APIs subject to change without notice.""" + +from cython.operator cimport dereference as deref + +import codecs +import collections +from libcpp cimport bool + +import pyarrow as pa +from pyarrow.lib cimport * +from pyarrow.lib import ArrowTypeError, frombytes, tobytes, _pac +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow._acero cimport ExecNodeOptions +from pyarrow._compute cimport Expression, _bind +from pyarrow._compute import _forbid_instantiation +from pyarrow._fs cimport FileSystem, FileSelector, FileInfo +from pyarrow._csv cimport ( + ConvertOptions, ParseOptions, ReadOptions, WriteOptions) +from pyarrow.util import _is_iterable, _is_path_like, _stringify_path +from pyarrow._json cimport ParseOptions as JsonParseOptions +from pyarrow._json cimport ReadOptions as JsonReadOptions + + +_DEFAULT_BATCH_SIZE = 2**17 +_DEFAULT_BATCH_READAHEAD = 16 +_DEFAULT_FRAGMENT_READAHEAD = 4 + + +# Initialise support for Datasets in ExecPlan +Initialize() + + +_orc_fileformat = None +_orc_imported = False + + +def _get_orc_fileformat(): + """ + Import OrcFileFormat on first usage (to avoid circular import issue + when `pyarrow._dataset_orc` would be imported first) + """ + global _orc_fileformat + global _orc_imported + if not _orc_imported: + try: + from pyarrow._dataset_orc import OrcFileFormat + _orc_fileformat = OrcFileFormat + except ImportError as e: + _orc_fileformat = None + finally: + _orc_imported = True + return _orc_fileformat + + +_dataset_pq = False + + +def _get_parquet_classes(): + """ + Import Parquet class files on first usage (to avoid circular import issue + when `pyarrow._dataset_parquet` would be imported first) + """ + global _dataset_pq + if _dataset_pq is False: + try: + import pyarrow._dataset_parquet as _dataset_pq + except ImportError: + _dataset_pq = None + + +def _get_parquet_symbol(name): + """ + Get a symbol from pyarrow.parquet if the latter is importable, otherwise + return None. 
+ """ + _get_parquet_classes() + return _dataset_pq and getattr(_dataset_pq, name) + + +cdef CFileSource _make_file_source(object file, FileSystem filesystem=None, object file_size=None): + + cdef: + CFileSource c_source + shared_ptr[CFileSystem] c_filesystem + CFileInfo c_info + c_string c_path + shared_ptr[CRandomAccessFile] c_file + shared_ptr[CBuffer] c_buffer + int64_t c_size + + if isinstance(file, Buffer): + c_buffer = pyarrow_unwrap_buffer(file) + c_source = CFileSource(move(c_buffer)) + elif _is_path_like(file): + if filesystem is None: + raise ValueError("cannot construct a FileSource from " + "a path without a FileSystem") + c_filesystem = filesystem.unwrap() + c_path = tobytes(_stringify_path(file)) + + if file_size is not None: + c_size = file_size + c_info = FileInfo(c_path, size=c_size).unwrap() + c_source = CFileSource(move(c_info), move(c_filesystem)) + else: + c_source = CFileSource(move(c_path), move(c_filesystem)) + elif hasattr(file, 'read'): + # Optimistically hope this is file-like + c_file = get_native_file(file, False).get_random_access_file() + c_source = CFileSource(move(c_file)) + + else: + raise TypeError("cannot construct a FileSource " + "from " + str(file)) + + return c_source + + +cdef CSegmentEncoding _get_segment_encoding(str segment_encoding): + if segment_encoding == "none": + return CSegmentEncoding_None + elif segment_encoding == "uri": + return CSegmentEncoding_Uri + raise ValueError(f"Unknown segment encoding: {segment_encoding}") + + +cdef str _wrap_segment_encoding(CSegmentEncoding segment_encoding): + if segment_encoding == CSegmentEncoding_None: + return "none" + elif segment_encoding == CSegmentEncoding_Uri: + return "uri" + raise ValueError("Unknown segment encoding") + + +cdef Expression _true = Expression._scalar(True) + + +cdef class Dataset(_Weakrefable): + """ + Collection of data fragments and potentially child datasets. + + Arrow Datasets allow you to query against data that has been split across + multiple files. This sharding of data may indicate partitioning, which + can accelerate queries that only touch some partitions (files). + """ + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CDataset]& sp): + self.wrapped = sp + self.dataset = sp.get() + self._scan_options = dict() + + @staticmethod + cdef wrap(const shared_ptr[CDataset]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + 'union': UnionDataset, + 'filesystem': FileSystemDataset, + 'in-memory': InMemoryDataset, + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef Dataset self = class_.__new__(class_) + self.init(sp) + return self + + cdef shared_ptr[CDataset] unwrap(self) nogil: + return self.wrapped + + @property + def partition_expression(self): + """ + An Expression which evaluates to true for all data viewed by this + Dataset. + """ + return Expression.wrap(self.dataset.partition_expression()) + + def replace_schema(self, Schema schema not None): + """ + Return a copy of this Dataset with a different schema. + + The copy will view the same Fragments. If the new schema is not + compatible with the original dataset's schema then an error will + be raised. + + Parameters + ---------- + schema : Schema + The new dataset schema. + """ + cdef shared_ptr[CDataset] copy = GetResultValue( + self.dataset.ReplaceSchema(pyarrow_unwrap_schema(schema)) + ) + + d = Dataset.wrap(move(copy)) + if self._scan_options: + # Preserve scan options if set. 
+ d._scan_options = self._scan_options.copy() + return d + + def get_fragments(self, Expression filter=None): + """Returns an iterator over the fragments in this dataset. + + Parameters + ---------- + filter : Expression, default None + Return fragments matching the optional filter, either using the + partition_expression or internal information like Parquet's + statistics. + + Returns + ------- + fragments : iterator of Fragment + """ + if self._scan_options.get("filter") is not None: + # Accessing fragments of a filtered dataset is not supported. + # It would be unclear if you wanted to filter the fragments + # or the rows in those fragments. + raise ValueError( + "Retrieving fragments of a filtered or projected " + "dataset is not allowed. Remove the filtering." + ) + + return self._get_fragments(filter) + + def _get_fragments(self, Expression filter): + cdef: + CExpression c_filter + + if filter is None: + c_fragments = move(GetResultValue(self.dataset.GetFragments())) + else: + c_filter = _bind(filter, self.schema) + c_fragments = move(GetResultValue( + self.dataset.GetFragments(c_filter))) + + for maybe_fragment in c_fragments: + yield Fragment.wrap(GetResultValue(move(maybe_fragment))) + + def _scanner_options(self, options): + """Returns the default options to create a new Scanner. + + This is automatically invoked by :meth:`Dataset.scanner` + and there is no need to use it. + """ + new_options = options.copy() + + # at the moment only support filter + requested_filter = options.get("filter") + current_filter = self._scan_options.get("filter") + if requested_filter is not None and current_filter is not None: + new_options["filter"] = current_filter & requested_filter + elif current_filter is not None: + new_options["filter"] = current_filter + + return new_options + + def scanner(self, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Build a scan operation against the dataset. + + Data is not loaded immediately. Instead, this produces a Scanner, + which exposes further operations (e.g. loading all data as a + table, counting rows). + + See the :meth:`Scanner.from_dataset` method for further information. + + Parameters + ---------- + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. 
+ If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + scanner : Scanner + + Examples + -------- + >>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, "dataset_scanner.parquet") + + >>> import pyarrow.dataset as ds + >>> dataset = ds.dataset("dataset_scanner.parquet") + + Selecting a subset of the columns: + + >>> dataset.scanner(columns=["year", "n_legs"]).to_table() + pyarrow.Table + year: int64 + n_legs: int64 + ---- + year: [[2020,2022,2021,2022,2019,2021]] + n_legs: [[2,2,4,4,5,100]] + + Projecting selected columns using an expression: + + >>> dataset.scanner(columns={ + ... "n_legs_uint": ds.field("n_legs").cast("uint8"), + ... }).to_table() + pyarrow.Table + n_legs_uint: uint8 + ---- + n_legs_uint: [[2,2,4,4,5,100]] + + Filtering rows while scanning: + + >>> dataset.scanner(filter=ds.field("year") > 2020).to_table() + pyarrow.Table + year: int64 + n_legs: int64 + animal: string + ---- + year: [[2022,2021,2022,2021]] + n_legs: [[2,4,4,100]] + animal: [["Parrot","Dog","Horse","Centipede"]] + """ + return Scanner.from_dataset( + self, + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ) + + def to_batches(self, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Read the dataset as materialized record batches. + + Parameters + ---------- + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. 
+ + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + record_batches : iterator of RecordBatch + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).to_batches() + + def to_table(self, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Read the dataset to an Arrow table. + + Note that this method reads all the selected data from the dataset + into memory. + + Parameters + ---------- + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). 
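+
+ For example (a sketch; ``__filename`` is one of the special fields
+ listed above, and ``dataset``/``ds`` are reused from the
+ :meth:`Dataset.scanner` examples):
+
+ >>> t = dataset.to_table(
+ ... columns={"source": ds.field("__filename"),
+ ... "year": ds.field("year")})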
+ + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + table : Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).to_table() + + def take(self, + object indices, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Select rows of data by index. + + Parameters + ---------- + indices : Array or array-like + indices of rows to select in the dataset. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. 
+ If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + table : Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).take(indices) + + def head(self, + int num_rows, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Load the first N rows of the dataset. + + Parameters + ---------- + num_rows : int + The number of rows to load. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. 
+ batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + table : Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).head(num_rows) + + def count_rows(self, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Count rows matching the scanner filter. + + Parameters + ---------- + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + count : int + """ + return self.scanner( + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).count_rows() + + @property + def schema(self): + """The common schema of the full Dataset""" + return pyarrow_wrap_schema(self.dataset.schema()) + + def filter(self, expression not None): + """ + Apply a row filter to the dataset. + + Parameters + ---------- + expression : Expression + The filter that should be applied to the dataset. 
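+
+ Successive filters compose: a sketch (reusing the ``dataset`` and
+ ``ds`` names from the :meth:`Dataset.scanner` examples; the two
+ calls below behave like a single filter combining both predicates
+ with ``&``):
+
+ >>> filtered = dataset.filter(ds.field("year") > 2019)
+ >>> filtered = filtered.filter(ds.field("n_legs") < 6)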
+
+ Returns
+ -------
+ Dataset
+ """
+ cdef:
+ Dataset filtered_dataset
+
+ new_filter = expression
+ current_filter = self._scan_options.get("filter")
+ if current_filter is not None and new_filter is not None:
+ new_filter = current_filter & new_filter
+
+ filtered_dataset = self.__class__.__new__(self.__class__)
+ filtered_dataset.init(self.wrapped)
+ filtered_dataset._scan_options = dict(filter=new_filter)
+ return filtered_dataset
+
+ def sort_by(self, sorting, **kwargs):
+ """
+ Sort the Dataset by one or multiple columns.
+
+ Parameters
+ ----------
+ sorting : str or list[tuple(name, order)]
+ Name of the column to use to sort (ascending), or
+ a list of multiple sorting conditions where
+ each entry is a tuple with column name
+ and sorting order ("ascending" or "descending")
+ **kwargs : dict, optional
+ Additional sorting options.
+ As allowed by :class:`SortOptions`
+
+ Returns
+ -------
+ InMemoryDataset
+ A new dataset sorted according to the sort keys.
+ """
+ if isinstance(sorting, str):
+ sorting = [(sorting, "ascending")]
+
+ res = _pac()._sort_source(
+ self, output_type=InMemoryDataset, sort_keys=sorting, **kwargs
+ )
+ return res
+
+ def join(self, right_dataset, keys, right_keys=None, join_type="left outer",
+ left_suffix=None, right_suffix=None, coalesce_keys=True,
+ use_threads=True):
+ """
+ Perform a join between this dataset and another one.
+
+ Result of the join will be a new dataset, where further
+ operations can be applied.
+
+ Parameters
+ ----------
+ right_dataset : dataset
+ The dataset to join to the current one, acting as the right dataset
+ in the join operation.
+ keys : str or list[str]
+ The columns from current dataset that should be used as keys
+ of the join operation left side.
+ right_keys : str or list[str], default None
+ The columns from the right_dataset that should be used as keys
+ on the join operation right side.
+ When ``None`` use the same key names as the left dataset.
+ join_type : str, default "left outer"
+ The kind of join that should be performed, one of
+ ("left semi", "right semi", "left anti", "right anti",
+ "inner", "left outer", "right outer", "full outer")
+ left_suffix : str, default None
+ Which suffix to add to left column names. This prevents confusion
+ when the columns in left and right datasets have colliding names.
+ right_suffix : str, default None
+ Which suffix to add to the right column names. This prevents confusion
+ when the columns in left and right datasets have colliding names.
+ coalesce_keys : bool, default True
+ If the duplicated keys should be omitted from one of the sides
+ in the join result.
+ use_threads : bool, default True
+ Whether to use multithreading or not.
+
+ Returns
+ -------
+ InMemoryDataset
+ """
+ if right_keys is None:
+ right_keys = keys
+ return _pac()._perform_join(
+ join_type, self, keys, right_dataset, right_keys,
+ left_suffix=left_suffix, right_suffix=right_suffix,
+ use_threads=use_threads, coalesce_keys=coalesce_keys,
+ output_type=InMemoryDataset
+ )
+
+ def join_asof(self, right_dataset, on, by, tolerance, right_on=None, right_by=None):
+ """
+ Perform an asof join between this dataset and another one.
+
+ This is similar to a left-join except that we match on nearest key rather
+ than equal keys. Both datasets must be sorted by the key. This type of join
+ is most useful for time series data that are not perfectly aligned.
+
+ Optionally match on equivalent keys with "by" before searching with "on".
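+
+ For example (an illustrative sketch; ``trades`` and ``quotes`` are
+ hypothetical datasets, both sorted by "timestamp"):
+
+ >>> joined = trades.join_asof(quotes, on="timestamp",
+ ... by="ticker", tolerance=-100)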
+ + Result of the join will be a new Dataset, where further + operations can be applied. + + Parameters + ---------- + right_dataset : dataset + The dataset to join to the current one, acting as the right dataset + in the join operation. + on : str + The column from current dataset that should be used as the "on" key + of the join operation left side. + + An inexact match is used on the "on" key, i.e. a row is considered a + match if and only if left_on - tolerance <= right_on <= left_on. + + The input table must be sorted by the "on" key. Must be a single + field of a common type. + + Currently, the "on" key must be an integer, date, or timestamp type. + by : str or list[str] + The columns from current dataset that should be used as the keys + of the join operation left side. The join operation is then done + only for the matches in these columns. + tolerance : int + The tolerance for inexact "on" key matching. A right row is considered + a match with the left row `right.on - left.on <= tolerance`. The + `tolerance` may be: + + - negative, in which case a past-as-of-join occurs; + - or positive, in which case a future-as-of-join occurs; + - or zero, in which case an exact-as-of-join occurs. + + The tolerance is interpreted in the same units as the "on" key. + right_on : str or list[str], default None + The columns from the right_dataset that should be used as the on key + on the join operation right side. + When ``None`` use the same key name as the left dataset. + right_by : str or list[str], default None + The columns from the right_dataset that should be used as by keys + on the join operation right side. + When ``None`` use the same key names as the left dataset. + + Returns + ------- + InMemoryDataset + """ + if right_on is None: + right_on = on + if right_by is None: + right_by = by + return _pac()._perform_join_asof(self, on, by, + right_dataset, right_on, right_by, + tolerance, output_type=InMemoryDataset) + + +cdef class InMemoryDataset(Dataset): + """ + A Dataset wrapping in-memory data. + + Parameters + ---------- + source : RecordBatch, Table, list, tuple + The data for this dataset. Can be a RecordBatch, Table, list of + RecordBatch/Table, iterable of RecordBatch, or a RecordBatchReader + If an iterable is provided, the schema must also be provided. + schema : Schema, optional + Only required if passing an iterable as the source + """ + + cdef: + CInMemoryDataset* in_memory_dataset + + def __init__(self, source, Schema schema=None): + cdef: + shared_ptr[CInMemoryDataset] in_memory_dataset + + if isinstance(source, (pa.RecordBatch, pa.Table)): + source = [source] + + if isinstance(source, (list, tuple)): + batches = [] + for item in source: + if isinstance(item, pa.RecordBatch): + batches.append(item) + elif isinstance(item, pa.Table): + batches.extend(item.to_batches()) + else: + raise TypeError( + 'Expected a list of tables or batches. 
The given list '
+ 'contains a ' + type(item).__name__)
+ if schema is None:
+ schema = item.schema
+ elif not schema.equals(item.schema):
+ raise ArrowTypeError(
+ f'Item has schema\n{item.schema}\nwhich does not '
+ f'match expected schema\n{schema}')
+ if not batches and schema is None:
+ raise ValueError('Must provide schema to construct in-memory '
+ 'dataset from an empty list')
+ table = pa.Table.from_batches(batches, schema=schema)
+ in_memory_dataset = make_shared[CInMemoryDataset](
+ pyarrow_unwrap_table(table))
+ else:
+ raise TypeError(
+ 'Expected a table, batch, or list of tables/batches '
+ 'instead of the given type: ' +
+ type(source).__name__
+ )
+
+ self.init(<shared_ptr[CDataset]> in_memory_dataset)
+
+ cdef void init(self, const shared_ptr[CDataset]& sp):
+ Dataset.init(self, sp)
+ self.in_memory_dataset = <CInMemoryDataset*> sp.get()
+
+
+cdef class UnionDataset(Dataset):
+ """
+ A Dataset wrapping child datasets.
+
+ Children's schemas must agree with the provided schema.
+
+ Parameters
+ ----------
+ schema : Schema
+ A known schema to conform to.
+ children : list of Dataset
+ One or more input children
+ """
+
+ cdef:
+ CUnionDataset* union_dataset
+
+ def __init__(self, Schema schema not None, children):
+ cdef:
+ Dataset child
+ CDatasetVector c_children
+ shared_ptr[CUnionDataset] union_dataset
+
+ for child in children:
+ c_children.push_back(child.wrapped)
+
+ union_dataset = GetResultValue(CUnionDataset.Make(
+ pyarrow_unwrap_schema(schema), move(c_children)))
+ self.init(<shared_ptr[CDataset]> union_dataset)
+
+ cdef void init(self, const shared_ptr[CDataset]& sp):
+ Dataset.init(self, sp)
+ self.union_dataset = <CUnionDataset*> sp.get()
+
+ def __reduce__(self):
+ return UnionDataset, (self.schema, self.children)
+
+ @property
+ def children(self):
+ cdef CDatasetVector children = self.union_dataset.children()
+ return [Dataset.wrap(children[i]) for i in range(children.size())]
+
+
+cdef class FileSystemDataset(Dataset):
+ """
+ A Dataset of file fragments.
+
+ A FileSystemDataset is composed of one or more FileFragment.
+
+ Parameters
+ ----------
+ fragments : list[Fragment]
+ List of fragments to consume.
+ schema : Schema
+ The top-level schema of the Dataset.
+ format : FileFormat
+ File format of the fragments, currently only ParquetFileFormat,
+ IpcFileFormat, CsvFileFormat, and JsonFileFormat are supported.
+ filesystem : FileSystem
+ FileSystem of the fragments.
+ root_partition : Expression, optional
+ The top-level partition of the Dataset.
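+
+ Examples
+ --------
+ A construction sketch (the paths are hypothetical; assumes the
+ Parquet module is available and a matching ``schema`` was built
+ beforehand):
+
+ >>> import pyarrow.dataset as ds
+ >>> from pyarrow import fs
+ >>> parquet_format = ds.ParquetFileFormat()
+ >>> fragments = [parquet_format.make_fragment(p, fs.LocalFileSystem())
+ ... for p in ["data/part-0.parquet", "data/part-1.parquet"]]
+ >>> dataset = ds.FileSystemDataset(
+ ... fragments, schema, parquet_format, fs.LocalFileSystem())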
+ """ + + cdef: + CFileSystemDataset* filesystem_dataset + + def __init__(self, fragments, Schema schema, FileFormat format, + FileSystem filesystem=None, root_partition=None): + cdef: + FileFragment fragment=None + vector[shared_ptr[CFileFragment]] c_fragments + CResult[shared_ptr[CDataset]] result + shared_ptr[CFileSystem] c_filesystem + + if root_partition is None: + root_partition = _true + elif not isinstance(root_partition, Expression): + raise TypeError( + "Argument 'root_partition' has incorrect type (expected " + "Expression, got {0})".format(type(root_partition)) + ) + + for fragment in fragments: + c_fragments.push_back( + static_pointer_cast[CFileFragment, CFragment]( + fragment.unwrap())) + + if filesystem is None: + filesystem = fragment.filesystem + + if filesystem is not None: + c_filesystem = filesystem.unwrap() + + result = CFileSystemDataset.Make( + pyarrow_unwrap_schema(schema), + ( root_partition).unwrap(), + format.unwrap(), + c_filesystem, + c_fragments + ) + self.init(GetResultValue(result)) + + @property + def filesystem(self): + return FileSystem.wrap(self.filesystem_dataset.filesystem()) + + @property + def partitioning(self): + """ + The partitioning of the Dataset source, if discovered. + + If the FileSystemDataset is created using the ``dataset()`` factory + function with a partitioning specified, this will return the + finalized Partitioning object from the dataset discovery. In all + other cases, this returns None. + """ + c_partitioning = self.filesystem_dataset.partitioning() + if c_partitioning.get() == nullptr: + return None + try: + return Partitioning.wrap(c_partitioning) + except TypeError: + # e.g. type_name "default" + return None + + cdef void init(self, const shared_ptr[CDataset]& sp): + Dataset.init(self, sp) + self.filesystem_dataset = sp.get() + + def __reduce__(self): + return FileSystemDataset, ( + list(self.get_fragments()), + self.schema, + self.format, + self.filesystem, + self.partition_expression + ) + + @classmethod + def from_paths(cls, paths, schema=None, format=None, + filesystem=None, partitions=None, root_partition=None): + """ + A Dataset created from a list of paths on a particular filesystem. + + Parameters + ---------- + paths : list of str + List of file paths to create the fragments from. + schema : Schema + The top-level schema of the DataDataset. + format : FileFormat + File format to create fragments from, currently only + ParquetFileFormat, IpcFileFormat, CsvFileFormat, and JsonFileFormat are supported. + filesystem : FileSystem + The filesystem which files are from. + partitions : list[Expression], optional + Attach additional partition information for the file paths. + root_partition : Expression, optional + The top-level partition of the DataDataset. + """ + if root_partition is None: + root_partition = _true + + for arg, class_, name in [ + (schema, Schema, 'schema'), + (format, FileFormat, 'format'), + (filesystem, FileSystem, 'filesystem'), + (root_partition, Expression, 'root_partition') + ]: + if not isinstance(arg, class_): + raise TypeError( + "Argument '{0}' has incorrect type (expected {1}, " + "got {2})".format(name, class_.__name__, type(arg)) + ) + + partitions = partitions or [_true] * len(paths) + + if len(paths) != len(partitions): + raise ValueError( + 'The number of files resulting from paths_or_selector ' + 'must be equal to the number of partitions.' 
+ ) + + fragments = [ + format.make_fragment(path, filesystem, partitions[i]) + for i, path in enumerate(paths) + ] + return FileSystemDataset(fragments, schema, format, + filesystem, root_partition) + + @property + def files(self): + """List of the files""" + cdef vector[c_string] files = self.filesystem_dataset.files() + return [frombytes(f) for f in files] + + @property + def format(self): + """The FileFormat of this source.""" + return FileFormat.wrap(self.filesystem_dataset.format()) + + +cdef class FileWriteOptions(_Weakrefable): + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp): + self.wrapped = sp + self.c_options = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CFileWriteOptions]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + 'csv': CsvFileWriteOptions, + 'ipc': IpcFileWriteOptions, + 'parquet': _get_parquet_symbol('ParquetFileWriteOptions'), + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef FileWriteOptions self = class_.__new__(class_) + self.init(sp) + return self + + @property + def format(self): + return FileFormat.wrap(self.c_options.format()) + + cdef inline shared_ptr[CFileWriteOptions] unwrap(self): + return self.wrapped + + +cdef class FileFormat(_Weakrefable): + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CFileFormat]& sp): + self.wrapped = sp + self.format = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CFileFormat]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + 'ipc': IpcFileFormat, + 'csv': CsvFileFormat, + 'json': JsonFileFormat, + 'parquet': _get_parquet_symbol('ParquetFileFormat'), + 'orc': _get_orc_fileformat(), + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef FileFormat self = class_.__new__(class_) + self.init(sp) + return self + + cdef WrittenFile _finish_write(self, path, base_dir, + CFileWriter* file_writer): + parquet_metadata = None + size = GetResultValue(file_writer.GetBytesWritten()) + return WrittenFile(path, parquet_metadata, size) + + cdef inline shared_ptr[CFileFormat] unwrap(self): + return self.wrapped + + def inspect(self, file, filesystem=None): + """ + Infer the schema of a file. + + Parameters + ---------- + file : file-like object, path-like or str + The file or file path to infer a schema from. + filesystem : Filesystem, optional + If `filesystem` is given, `file` must be a string and specifies + the path of the file to read from the filesystem. + + Returns + ------- + schema : Schema + The schema inferred from the file + """ + cdef: + CFileSource c_source = _make_file_source(file, filesystem, file_size=None) + CResult[shared_ptr[CSchema]] c_result + with nogil: + c_result = self.format.Inspect(c_source) + c_schema = GetResultValue(c_result) + return pyarrow_wrap_schema(move(c_schema)) + + def make_fragment(self, file, filesystem=None, + Expression partition_expression=None, + *, file_size=None): + """ + Make a FileFragment from a given file. + + Parameters + ---------- + file : file-like object, path-like or str + The file or file path to make a fragment from. + filesystem : Filesystem, optional + If `filesystem` is given, `file` must be a string and specifies + the path of the file to read from the filesystem. + partition_expression : Expression, optional + An expression that is guaranteed true for all rows in the fragment. 
Allows + fragment to be potentially skipped while scanning with a filter. + file_size : int, optional + The size of the file in bytes. Can improve performance with high-latency filesystems + when file size needs to be known before reading. + + Returns + ------- + fragment : Fragment + The file fragment + """ + if partition_expression is None: + partition_expression = _true + c_source = _make_file_source(file, filesystem, file_size) + c_fragment = GetResultValue( + self.format.MakeFragment(move(c_source), + partition_expression.unwrap(), + nullptr)) + return Fragment.wrap(move(c_fragment)) + + def make_write_options(self): + sp_write_options = self.format.DefaultWriteOptions() + if sp_write_options.get() == nullptr: + # DefaultWriteOptions() may return `nullptr` which means that + # the format does not yet support writing datasets. + raise NotImplementedError( + "Writing datasets not yet implemented for this file format." + ) + return FileWriteOptions.wrap(sp_write_options) + + @property + def default_extname(self): + return frombytes(self.format.type_name()) + + @property + def default_fragment_scan_options(self): + dfso = FragmentScanOptions.wrap( + self.wrapped.get().default_fragment_scan_options) + # CsvFileFormat stores a Python-specific encoding field that needs + # to be restored because it does not exist in the C++ struct + if isinstance(self, CsvFileFormat): + if self._read_options_py is not None: + dfso.read_options = self._read_options_py + return dfso + + @default_fragment_scan_options.setter + def default_fragment_scan_options(self, FragmentScanOptions options): + if options is None: + self.wrapped.get().default_fragment_scan_options =\ + nullptr + else: + self._set_default_fragment_scan_options(options) + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options): + raise ValueError(f"Cannot set fragment scan options for " + f"'{options.type_name}' on {self.__class__.__name__}") + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class Fragment(_Weakrefable): + """Fragment of data from a Dataset.""" + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CFragment]& sp): + self.wrapped = sp + self.fragment = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CFragment]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + # IpcFileFormat, CsvFileFormat, JsonFileFormat and OrcFileFormat do not have + # corresponding subclasses of FileFragment + 'ipc': FileFragment, + 'csv': FileFragment, + 'json': FileFragment, + 'orc': FileFragment, + 'parquet': _get_parquet_symbol('ParquetFileFragment'), + } + + class_ = classes.get(type_name, None) + if class_ is None: + class_ = Fragment + + cdef Fragment self = class_.__new__(class_) + self.init(sp) + return self + + cdef inline shared_ptr[CFragment] unwrap(self): + return self.wrapped + + @property + def physical_schema(self): + """Return the physical schema of this Fragment. This schema can be + different from the dataset read schema.""" + cdef: + CResult[shared_ptr[CSchema]] maybe_schema + with nogil: + maybe_schema = self.fragment.ReadPhysicalSchema() + return pyarrow_wrap_schema(GetResultValue(maybe_schema)) + + @property + def partition_expression(self): + """An Expression which evaluates to true for all data viewed by this + Fragment. 
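+
+ A sketch (assumes ``frag`` comes from a partitioned dataset, and
+ that ``get_partition_keys``, which this module uses internally, is
+ exported by pyarrow.dataset; it turns the expression back into a
+ plain dict):
+
+ >>> from pyarrow.dataset import get_partition_keys
+ >>> keys = get_partition_keys(frag.partition_expression)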
+ """ + return Expression.wrap(self.fragment.partition_expression()) + + def scanner(self, + Schema schema=None, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Build a scan operation against the fragment. + + Data is not loaded immediately. Instead, this produces a Scanner, + which exposes further operations (e.g. loading all data as a + table, counting rows). + + Parameters + ---------- + schema : Schema + Schema to use for scanning. This is used to unify a Fragment to + its Dataset's schema. If not specified this will use the + Fragment's physical schema which might differ for each Fragment. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + scanner : Scanner + """ + return Scanner.from_fragment( + self, + schema=schema, + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ) + + def to_batches(self, + Schema schema=None, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Read the fragment as materialized record batches. + + Parameters + ---------- + schema : Schema, optional + Concrete schema to use for scanning. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + record_batches : iterator of RecordBatch + """ + return Scanner.from_fragment( + self, + schema=schema, + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).to_batches() + + def to_table(self, + Schema schema=None, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Convert this Fragment into a Table. + + Use this convenience utility with care. This will serially materialize + the Scan result in memory before creating the Table. + + Parameters + ---------- + schema : Schema, optional + Concrete schema to use for scanning. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
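+
+ For example (a sketch, assuming a ``fragment`` obtained via
+ :meth:`Dataset.get_fragments` and the ``ds`` alias from earlier
+ examples):
+
+ >>> table = fragment.to_table(filter=ds.field("year") > 2020)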
+ + Returns + ------- + table : Table + """ + return self.scanner( + schema=schema, + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).to_table() + + def take(self, + object indices, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Select rows of data by index. + + Parameters + ---------- + indices : Array or array-like + The indices of row to select in the dataset. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).take(indices) + + def head(self, + int num_rows, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Load the first N rows of the fragment. + + Parameters + ---------- + num_rows : int + The number of rows to load. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+
+ Returns
+ -------
+ Table
+ """
+ return self.scanner(
+ columns=columns,
+ filter=filter,
+ batch_size=batch_size,
+ batch_readahead=batch_readahead,
+ fragment_readahead=fragment_readahead,
+ fragment_scan_options=fragment_scan_options,
+ use_threads=use_threads,
+ memory_pool=memory_pool
+ ).head(num_rows)
+
+ def count_rows(self,
+ Expression filter=None,
+ int batch_size=_DEFAULT_BATCH_SIZE,
+ int batch_readahead=_DEFAULT_BATCH_READAHEAD,
+ int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD,
+ FragmentScanOptions fragment_scan_options=None,
+ bint use_threads=True,
+ MemoryPool memory_pool=None):
+ """
+ Count rows matching the scanner filter.
+
+ Parameters
+ ----------
+ filter : Expression, default None
+ Scan will return only the rows matching the filter.
+ If possible the predicate will be pushed down to exploit the
+ partition information or internal metadata found in the data
+ source, e.g. Parquet statistics. Otherwise filters the loaded
+ RecordBatches before yielding them.
+ batch_size : int, default 131_072
+ The maximum row count for scanned record batches. If scanned
+ record batches are overflowing memory then this method can be
+ called to reduce their size.
+ batch_readahead : int, default 16
+ The number of batches to read ahead in a file. This might not work
+ for all file formats. Increasing this number will increase
+ RAM usage but could also improve IO utilization.
+ fragment_readahead : int, default 4
+ The number of files to read ahead. Increasing this number will increase
+ RAM usage but could also improve IO utilization.
+ fragment_scan_options : FragmentScanOptions, default None
+ Options specific to a particular scan and fragment type, which
+ can change between different scans of the same dataset.
+ use_threads : bool, default True
+ If enabled, then maximum parallelism will be used determined by
+ the number of available CPU cores.
+ memory_pool : MemoryPool, default None
+ For memory allocations, if required. If not specified, uses the
+ default pool.
+
+ Returns
+ -------
+ count : int
+ """
+ return self.scanner(
+ filter=filter,
+ batch_size=batch_size,
+ batch_readahead=batch_readahead,
+ fragment_readahead=fragment_readahead,
+ fragment_scan_options=fragment_scan_options,
+ use_threads=use_threads,
+ memory_pool=memory_pool
+ ).count_rows()
+
+
+cdef class FileFragment(Fragment):
+ """A Fragment representing a data file."""
+
+ cdef void init(self, const shared_ptr[CFragment]& sp):
+ Fragment.init(self, sp)
+ self.file_fragment = <CFileFragment*> sp.get()
+
+ def __repr__(self):
+ type_name = frombytes(self.fragment.type_name())
+ if type_name != "parquet":
+ typ = f" type={type_name}"
+ else:
+ # parquet has a subclass -> type embedded in class name
+ typ = ""
+ partition_dict = get_partition_keys(self.partition_expression)
+ partition = ", ".join(
+ [f"{key}={val}" for key, val in partition_dict.items()]
+ )
+ if partition:
+ partition = f" partition=[{partition}]"
+ return "<pyarrow.dataset.{0}{1} path={2}{3}>".format(
+ self.__class__.__name__, typ, self.path, partition
+ )
+
+ def __reduce__(self):
+ buffer = self.buffer
+ return self.format.make_fragment, (
+ self.path if buffer is None else buffer,
+ self.filesystem,
+ self.partition_expression
+ )
+
+ def open(self):
+ """
+ Open a NativeFile of the buffer or file viewed by this fragment.
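+
+ A sketch (assumes ``frag`` is a FileFragment backed by a local
+ file; the returned NativeFile supports the context manager
+ protocol):
+
+ >>> with frag.open() as f:
+ ... header = f.read(4)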
+ """ + cdef: + shared_ptr[CFileSystem] c_filesystem + shared_ptr[CRandomAccessFile] opened + c_string c_path + NativeFile out = NativeFile() + + if self.buffer is not None: + return pa.BufferReader(self.buffer) + + c_path = tobytes(self.file_fragment.source().path()) + with nogil: + c_filesystem = self.file_fragment.source().filesystem() + opened = GetResultValue(c_filesystem.get().OpenInputFile(c_path)) + + out.set_random_access_file(opened) + out.is_readable = True + return out + + @property + def path(self): + """ + The path of the data file viewed by this fragment, if it views a + file. If instead it views a buffer, this will be "". + """ + return frombytes(self.file_fragment.source().path()) + + @property + def filesystem(self): + """ + The FileSystem containing the data file viewed by this fragment, if + it views a file. If instead it views a buffer, this will be None. + """ + cdef: + shared_ptr[CFileSystem] c_fs + c_fs = self.file_fragment.source().filesystem() + + if c_fs.get() == nullptr: + return None + + return FileSystem.wrap(c_fs) + + @property + def buffer(self): + """ + The buffer viewed by this fragment, if it views a buffer. If + instead it views a file, this will be None. + """ + cdef: + shared_ptr[CBuffer] c_buffer + c_buffer = self.file_fragment.source().buffer() + + if c_buffer.get() == nullptr: + return None + + return pyarrow_wrap_buffer(c_buffer) + + @property + def format(self): + """ + The format of the data file viewed by this fragment. + """ + return FileFormat.wrap(self.file_fragment.format()) + + +cdef class FragmentScanOptions(_Weakrefable): + """Scan options specific to a particular fragment and scan operation.""" + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp): + self.wrapped = sp + + @staticmethod + cdef wrap(const shared_ptr[CFragmentScanOptions]& sp): + if not sp: + return None + + type_name = frombytes(sp.get().type_name()) + + classes = { + 'csv': CsvFragmentScanOptions, + 'json': JsonFragmentScanOptions, + 'parquet': _get_parquet_symbol('ParquetFragmentScanOptions'), + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef FragmentScanOptions self = class_.__new__(class_) + self.init(sp) + return self + + @property + def type_name(self): + return frombytes(self.wrapped.get().type_name()) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class IpcFileWriteOptions(FileWriteOptions): + cdef: + CIpcFileWriteOptions* ipc_options + + def __init__(self): + _forbid_instantiation(self.__class__) + + @property + def write_options(self): + out = IpcWriteOptions() + out.c_options = CIpcWriteOptions(deref(self.ipc_options.options)) + return out + + @write_options.setter + def write_options(self, IpcWriteOptions write_options not None): + self.ipc_options.options.reset( + new CIpcWriteOptions(write_options.c_options)) + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp): + FileWriteOptions.init(self, sp) + self.ipc_options = sp.get() + + +cdef class IpcFileFormat(FileFormat): + + def __init__(self): + self.init(shared_ptr[CFileFormat](new CIpcFileFormat())) + + def equals(self, IpcFileFormat other): + """ + Parameters + ---------- + other : pyarrow.dataset.IpcFileFormat + + Returns + ------- + True + """ + return True + + def make_write_options(self, **kwargs): + """ + Parameters + ---------- + **kwargs : dict + + Returns + ------- + pyarrow.ipc.IpcWriteOptions + 
""" + cdef IpcFileWriteOptions opts = \ + FileFormat.make_write_options(self) + opts.write_options = IpcWriteOptions(**kwargs) + return opts + + @property + def default_extname(self): + return "arrow" + + def __reduce__(self): + return IpcFileFormat, tuple() + + +cdef class FeatherFileFormat(IpcFileFormat): + + @property + def default_extname(self): + return "feather" + + +cdef class CsvFileFormat(FileFormat): + """ + FileFormat for CSV files. + + Parameters + ---------- + parse_options : pyarrow.csv.ParseOptions + Options regarding CSV parsing. + default_fragment_scan_options : CsvFragmentScanOptions + Default options for fragments scan. + convert_options : pyarrow.csv.ConvertOptions + Options regarding value conversion. + read_options : pyarrow.csv.ReadOptions + General read options. + """ + cdef: + CCsvFileFormat* csv_format + # The encoding field in ReadOptions does not exist in the C++ struct. + # We need to store it here and override it when reading + # default_fragment_scan_options.read_options + public ReadOptions _read_options_py + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, ParseOptions parse_options=None, + default_fragment_scan_options=None, + ConvertOptions convert_options=None, + ReadOptions read_options=None): + self.init(shared_ptr[CFileFormat](new CCsvFileFormat())) + if parse_options is not None: + self.parse_options = parse_options + if convert_options is not None or read_options is not None: + if default_fragment_scan_options: + raise ValueError('If `default_fragment_scan_options` is ' + 'given, cannot specify convert_options ' + 'or read_options') + self.default_fragment_scan_options = CsvFragmentScanOptions( + convert_options=convert_options, read_options=read_options) + elif isinstance(default_fragment_scan_options, dict): + self.default_fragment_scan_options = CsvFragmentScanOptions( + **default_fragment_scan_options) + elif isinstance(default_fragment_scan_options, CsvFragmentScanOptions): + self.default_fragment_scan_options = default_fragment_scan_options + elif default_fragment_scan_options is not None: + raise TypeError('`default_fragment_scan_options` must be either ' + 'a dictionary or an instance of ' + 'CsvFragmentScanOptions') + if read_options is not None: + self._read_options_py = read_options + + cdef void init(self, const shared_ptr[CFileFormat]& sp): + FileFormat.init(self, sp) + self.csv_format = sp.get() + + def make_write_options(self, **kwargs): + """ + Parameters + ---------- + **kwargs : dict + + Returns + ------- + pyarrow.csv.WriteOptions + """ + cdef CsvFileWriteOptions opts = \ + FileFormat.make_write_options(self) + opts.write_options = WriteOptions(**kwargs) + return opts + + @property + def parse_options(self): + return ParseOptions.wrap(self.csv_format.parse_options) + + @parse_options.setter + def parse_options(self, ParseOptions parse_options not None): + self.csv_format.parse_options = deref(parse_options.options) + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options): + if options.type_name == 'csv': + self.csv_format.default_fragment_scan_options = options.wrapped + self.default_fragment_scan_options.read_options = options.read_options + self._read_options_py = options.read_options + else: + super()._set_default_fragment_scan_options(options) + + def equals(self, CsvFileFormat other): + """ + Parameters + ---------- + other : pyarrow.dataset.CsvFileFormat + + Returns + ------- + bool + """ + return ( + self.parse_options.equals(other.parse_options) and + 
self.default_fragment_scan_options == + other.default_fragment_scan_options) + + def __reduce__(self): + return CsvFileFormat, (self.parse_options, + self.default_fragment_scan_options) + + def __repr__(self): + return f"" + + +cdef class CsvFragmentScanOptions(FragmentScanOptions): + """ + Scan-specific options for CSV fragments. + + Parameters + ---------- + convert_options : pyarrow.csv.ConvertOptions + Options regarding value conversion. + read_options : pyarrow.csv.ReadOptions + General read options. + """ + + cdef: + CCsvFragmentScanOptions* csv_options + # The encoding field in ReadOptions does not exist in the C++ struct. + # We need to store it here and override it when reading read_options + ReadOptions _read_options_py + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, ConvertOptions convert_options=None, + ReadOptions read_options=None): + self.init(shared_ptr[CFragmentScanOptions]( + new CCsvFragmentScanOptions())) + if convert_options is not None: + self.convert_options = convert_options + if read_options is not None: + self.read_options = read_options + self._read_options_py = read_options + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp): + FragmentScanOptions.init(self, sp) + self.csv_options = sp.get() + + @property + def convert_options(self): + return ConvertOptions.wrap(self.csv_options.convert_options) + + @convert_options.setter + def convert_options(self, ConvertOptions convert_options not None): + self.csv_options.convert_options = deref(convert_options.options) + + @property + def read_options(self): + read_options = ReadOptions.wrap(self.csv_options.read_options) + if self._read_options_py is not None: + read_options.encoding = self._read_options_py.encoding + return read_options + + @read_options.setter + def read_options(self, ReadOptions read_options not None): + self.csv_options.read_options = deref(read_options.options) + self._read_options_py = read_options + if codecs.lookup(read_options.encoding).name != 'utf-8': + self.csv_options.stream_transform_func = deref( + make_streamwrap_func(read_options.encoding, 'utf-8')) + + def equals(self, CsvFragmentScanOptions other): + """ + Parameters + ---------- + other : pyarrow.dataset.CsvFragmentScanOptions + + Returns + ------- + bool + """ + return ( + other and + self.convert_options.equals(other.convert_options) and + self.read_options.equals(other.read_options)) + + def __reduce__(self): + return CsvFragmentScanOptions, (self.convert_options, + self.read_options) + + +cdef class CsvFileWriteOptions(FileWriteOptions): + cdef: + CCsvFileWriteOptions* csv_options + object _properties + + def __init__(self): + _forbid_instantiation(self.__class__) + + @property + def write_options(self): + return WriteOptions.wrap(deref(self.csv_options.write_options)) + + @write_options.setter + def write_options(self, WriteOptions write_options not None): + self.csv_options.write_options.reset( + new CCSVWriteOptions(deref(write_options.options))) + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp): + FileWriteOptions.init(self, sp) + self.csv_options = sp.get() + + +cdef class JsonFileFormat(FileFormat): + """ + FileFormat for JSON files. + + Parameters + ---------- + default_fragment_scan_options : JsonFragmentScanOptions + Default options for fragments scan. + parse_options : pyarrow.json.ParseOptions + Options regarding json parsing. + read_options : pyarrow.json.ReadOptions + General read options. 
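+
+    Examples
+    --------
+    A minimal usage sketch (the JSON file path is hypothetical and not
+    part of the upstream docstring):
+
+    >>> import pyarrow.dataset as ds
+    >>> dataset = ds.dataset("/tmp/records.json",
+    ...                      format=ds.JsonFileFormat())  # doctest: +SKIP
+    >>> table = dataset.to_table()  # doctest: +SKIP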
+ """ + cdef: + CJsonFileFormat* json_format + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, default_fragment_scan_options=None, + JsonParseOptions parse_options=None, + JsonReadOptions read_options=None): + self.init(shared_ptr[CFileFormat](new CJsonFileFormat())) + if parse_options is not None or read_options is not None: + if default_fragment_scan_options is not None: + raise ValueError('If `default_fragment_scan_options` is ' + 'given, cannot specify read_options') + self.default_fragment_scan_options = JsonFragmentScanOptions( + parse_options=parse_options, + read_options=read_options) + elif isinstance(default_fragment_scan_options, dict): + self.default_fragment_scan_options = JsonFragmentScanOptions( + **default_fragment_scan_options) + elif isinstance(default_fragment_scan_options, JsonFragmentScanOptions): + self.default_fragment_scan_options = default_fragment_scan_options + elif default_fragment_scan_options is not None: + raise TypeError('`default_fragment_scan_options` must be either ' + 'a dictionary or an instance of ' + 'JsonFragmentScanOptions') + + cdef void init(self, const shared_ptr[CFileFormat]& sp): + FileFormat.init(self, sp) + self.json_format = sp.get() + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options): + if options.type_name == 'json': + self.json_format.default_fragment_scan_options = options.wrapped + self.default_fragment_scan_options.read_options = options.read_options + self.default_fragment_scan_options.parse_options = options.parse_options + else: + super()._set_default_fragment_scan_options(options) + + def equals(self, JsonFileFormat other): + """ + Parameters + ---------- + other : pyarrow.dataset.JsonFileFormat + + Returns + ------- + bool + """ + return (other and + self.default_fragment_scan_options == + other.default_fragment_scan_options) + + def __reduce__(self): + return JsonFileFormat, (self.default_fragment_scan_options,) + + def __repr__(self): + return "" + + +cdef class JsonFragmentScanOptions(FragmentScanOptions): + """ + Scan-specific options for JSON fragments. + + Parameters + ---------- + parse_options : pyarrow.json.ParseOptions + Options regarding JSON parsing. + read_options : pyarrow.json.ReadOptions + General read options. 
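+
+    Examples
+    --------
+    A minimal sketch (the block size value is illustrative only), raising
+    the JSON reader's block size for large records:
+
+    >>> import pyarrow.dataset as ds
+    >>> import pyarrow.json as pj
+    >>> scan_options = ds.JsonFragmentScanOptions(
+    ...     read_options=pj.ReadOptions(block_size=1 << 20))
+    >>> fmt = ds.JsonFileFormat(default_fragment_scan_options=scan_options)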
+ """ + cdef: + CJsonFragmentScanOptions* json_options + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, JsonParseOptions parse_options=None, + JsonReadOptions read_options=None): + self.init(shared_ptr[CFragmentScanOptions]( + new CJsonFragmentScanOptions())) + if parse_options is not None: + self.parse_options = parse_options + if read_options is not None: + self.read_options = read_options + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp): + FragmentScanOptions.init(self, sp) + self.json_options = sp.get() + + @property + def parse_options(self): + return JsonParseOptions.wrap(self.json_options.parse_options) + + @parse_options.setter + def parse_options(self, JsonParseOptions parse_options not None): + self.json_options.parse_options = parse_options.options + + @property + def read_options(self): + return JsonReadOptions.wrap(self.json_options.read_options) + + @read_options.setter + def read_options(self, JsonReadOptions read_options not None): + self.json_options.read_options = read_options.options + + def equals(self, JsonFragmentScanOptions other): + """ + Parameters + ---------- + other : pyarrow.dataset.JsonFragmentScanOptions + + Returns + ------- + bool + """ + return ( + other and + self.read_options.equals(other.read_options) and + self.parse_options.equals(other.parse_options)) + + def __reduce__(self): + return JsonFragmentScanOptions, (self.parse_options, self.read_options) + + +cdef class Partitioning(_Weakrefable): + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + self.wrapped = sp + self.partitioning = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CPartitioning]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + 'directory': DirectoryPartitioning, + 'hive': HivePartitioning, + 'filename': FilenamePartitioning, + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef Partitioning self = class_.__new__(class_) + self.init(sp) + return self + + cdef inline shared_ptr[CPartitioning] unwrap(self): + return self.wrapped + + def __eq__(self, other): + if isinstance(other, Partitioning): + return self.partitioning.Equals(deref((other).unwrap())) + return False + + def parse(self, path): + """ + Parse a path into a partition expression. 
+ + Parameters + ---------- + path : str + + Returns + ------- + pyarrow.dataset.Expression + """ + cdef CResult[CExpression] result + result = self.partitioning.Parse(tobytes(path)) + return Expression.wrap(GetResultValue(result)) + + @property + def schema(self): + """The arrow Schema attached to the partitioning.""" + return pyarrow_wrap_schema(self.partitioning.schema()) + + +cdef class PartitioningFactory(_Weakrefable): + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef init(self, const shared_ptr[CPartitioningFactory]& sp): + self.wrapped = sp + self.factory = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CPartitioningFactory]& sp, + object constructor, object options): + cdef PartitioningFactory self = PartitioningFactory.__new__( + PartitioningFactory + ) + self.init(sp) + self.constructor = constructor + self.options = options + return self + + cdef inline shared_ptr[CPartitioningFactory] unwrap(self): + return self.wrapped + + def __reduce__(self): + return self.constructor, self.options + + @property + def type_name(self): + return frombytes(self.factory.type_name()) + + +cdef vector[shared_ptr[CArray]] _partitioning_dictionaries( + Schema schema, dictionaries) except *: + cdef: + vector[shared_ptr[CArray]] c_dictionaries + + dictionaries = dictionaries or {} + + for field in schema: + dictionary = dictionaries.get(field.name) + + if (isinstance(field.type, pa.DictionaryType) and + dictionary is not None): + c_dictionaries.push_back(pyarrow_unwrap_array(dictionary)) + else: + c_dictionaries.push_back( nullptr) + + return c_dictionaries + + +cdef class KeyValuePartitioning(Partitioning): + + cdef: + CKeyValuePartitioning* keyvalue_partitioning + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + Partitioning.init(self, sp) + self.keyvalue_partitioning = sp.get() + self.wrapped = sp + self.partitioning = sp.get() + + def __reduce__(self): + dictionaries = self.dictionaries + if dictionaries: + dictionaries = dict(zip(self.schema.names, dictionaries)) + segment_encoding = _wrap_segment_encoding( + deref(self.keyvalue_partitioning).segment_encoding() + ) + return self.__class__, (self.schema, dictionaries, segment_encoding) + + @property + def dictionaries(self): + """ + The unique values for each partition field, if available. + + Those values are only available if the Partitioning object was + created through dataset discovery from a PartitioningFactory, or + if the dictionaries were manually specified in the constructor. + If no dictionary field is available, this returns an empty list. + """ + cdef vector[shared_ptr[CArray]] c_arrays + c_arrays = self.keyvalue_partitioning.dictionaries() + res = [] + for arr in c_arrays: + if arr.get() == nullptr: + # Partitioning object has not been created through + # inspected Factory + res.append(None) + else: + res.append(pyarrow_wrap_array(arr)) + return res + + +def _constructor_directory_partitioning_factory(*args): + return DirectoryPartitioning.discover(*args) + + +cdef class DirectoryPartitioning(KeyValuePartitioning): + """ + A Partitioning based on a specified Schema. + + The DirectoryPartitioning expects one segment in the file path for each + field in the schema (all fields are required to be present). + For example given schema the path "/2009/11" would + be parsed to ("year"_ == 2009 and "month"_ == 11). + + Parameters + ---------- + schema : Schema + The schema that describes the partitions present in the file path. 
+ dictionaries : dict[str, Array] + If the type of any field of `schema` is a dictionary type, the + corresponding entry of `dictionaries` must be an array containing + every value which may be taken by the corresponding column or an + error will be raised in parsing. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + DirectoryPartitioning + + Examples + -------- + >>> from pyarrow.dataset import DirectoryPartitioning + >>> partitioning = DirectoryPartitioning( + ... pa.schema([("year", pa.int16()), ("month", pa.int8())])) + >>> print(partitioning.parse("/2009/11/")) + ((year == 2009) and (month == 11)) + """ + + cdef: + CDirectoryPartitioning* directory_partitioning + + def __init__(self, Schema schema not None, dictionaries=None, + segment_encoding="uri"): + cdef: + shared_ptr[CDirectoryPartitioning] c_partitioning + CKeyValuePartitioningOptions c_options + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + c_partitioning = make_shared[CDirectoryPartitioning]( + pyarrow_unwrap_schema(schema), + _partitioning_dictionaries(schema, dictionaries), + c_options, + ) + self.init( c_partitioning) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + KeyValuePartitioning.init(self, sp) + self.directory_partitioning = sp.get() + + @staticmethod + def discover(field_names=None, infer_dictionary=False, + max_partition_dictionary_size=0, + schema=None, segment_encoding="uri"): + """ + Discover a DirectoryPartitioning. + + Parameters + ---------- + field_names : list of str + The names to associate with the values from the subdirectory names. + If schema is given, will be populated from the schema. + infer_dictionary : bool, default False + When inferring a schema for partition fields, yield dictionary + encoded types instead of plain types. This can be more efficient + when materializing virtual columns, and Expressions parsed by the + finished Partitioning will include dictionaries of all unique + inspected values for each field. + max_partition_dictionary_size : int, default 0 + Synonymous with infer_dictionary for backwards compatibility with + 1.0: setting this to -1 or None is equivalent to passing + infer_dictionary=True. + schema : Schema, default None + Use this schema instead of inferring a schema from partition + values. Partition values will be validated against this schema + before accumulation into the Partitioning's dictionary. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + PartitioningFactory + To be used in the FileSystemFactoryOptions. 
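+
+        Examples
+        --------
+        A minimal sketch (the dataset path is hypothetical); the partition
+        field types are inferred from the discovered directory names:
+
+        >>> import pyarrow.dataset as ds
+        >>> factory = ds.DirectoryPartitioning.discover(["year", "month"])
+        >>> dataset = ds.dataset("/tmp/partitioned_data", format="parquet",
+        ...                      partitioning=factory)  # doctest: +SKIP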
+ """ + cdef: + CPartitioningFactoryOptions c_options + vector[c_string] c_field_names + + if max_partition_dictionary_size in {-1, None}: + infer_dictionary = True + elif max_partition_dictionary_size != 0: + raise NotImplementedError("max_partition_dictionary_size must be " + "0, -1, or None") + + if infer_dictionary: + c_options.infer_dictionary = True + + if schema: + c_options.schema = pyarrow_unwrap_schema(schema) + c_field_names = [tobytes(f.name) for f in schema] + elif not field_names: + raise ValueError( + "Neither field_names nor schema was passed; " + "cannot infer field_names") + else: + c_field_names = [tobytes(s) for s in field_names] + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + + return PartitioningFactory.wrap( + CDirectoryPartitioning.MakeFactory(c_field_names, c_options), + _constructor_directory_partitioning_factory, + (field_names, infer_dictionary, max_partition_dictionary_size, + schema, segment_encoding) + ) + + +def _constructor_hive_partitioning_factory(*args): + return HivePartitioning.discover(*args) + + +cdef class HivePartitioning(KeyValuePartitioning): + """ + A Partitioning for "/$key=$value/" nested directories as found in + Apache Hive. + + Multi-level, directory based partitioning scheme originating from + Apache Hive with all data files stored in the leaf directories. Data is + partitioned by static values of a particular column in the schema. + Partition keys are represented in the form $key=$value in directory names. + Field order is ignored, as are missing or unrecognized field names. + + For example, given schema, a possible + path would be "/year=2009/month=11/day=15". + + Parameters + ---------- + schema : Schema + The schema that describes the partitions present in the file path. + dictionaries : dict[str, Array] + If the type of any field of `schema` is a dictionary type, the + corresponding entry of `dictionaries` must be an array containing + every value which may be taken by the corresponding column or an + error will be raised in parsing. + null_fallback : str, default "__HIVE_DEFAULT_PARTITION__" + If any field is None then this fallback will be used as a label + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + HivePartitioning + + Examples + -------- + >>> from pyarrow.dataset import HivePartitioning + >>> partitioning = HivePartitioning( + ... 
pa.schema([("year", pa.int16()), ("month", pa.int8())])) + >>> print(partitioning.parse("/year=2009/month=11/")) + ((year == 2009) and (month == 11)) + + """ + + cdef: + CHivePartitioning* hive_partitioning + + def __init__(self, + Schema schema not None, + dictionaries=None, + null_fallback="__HIVE_DEFAULT_PARTITION__", + segment_encoding="uri"): + + cdef: + shared_ptr[CHivePartitioning] c_partitioning + CHivePartitioningOptions c_options + + c_options.null_fallback = tobytes(null_fallback) + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + + c_partitioning = make_shared[CHivePartitioning]( + pyarrow_unwrap_schema(schema), + _partitioning_dictionaries(schema, dictionaries), + c_options, + ) + self.init( c_partitioning) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + KeyValuePartitioning.init(self, sp) + self.hive_partitioning = sp.get() + + def __reduce__(self): + dictionaries = self.dictionaries + if dictionaries: + dictionaries = dict(zip(self.schema.names, dictionaries)) + segment_encoding = _wrap_segment_encoding( + deref(self.keyvalue_partitioning).segment_encoding() + ) + null_fallback = frombytes(deref(self.hive_partitioning).null_fallback()) + return HivePartitioning, ( + self.schema, dictionaries, null_fallback, segment_encoding + ) + + @staticmethod + def discover(infer_dictionary=False, + max_partition_dictionary_size=0, + null_fallback="__HIVE_DEFAULT_PARTITION__", + schema=None, + segment_encoding="uri"): + """ + Discover a HivePartitioning. + + Parameters + ---------- + infer_dictionary : bool, default False + When inferring a schema for partition fields, yield dictionary + encoded types instead of plain. This can be more efficient when + materializing virtual columns, and Expressions parsed by the + finished Partitioning will include dictionaries of all unique + inspected values for each field. + max_partition_dictionary_size : int, default 0 + Synonymous with infer_dictionary for backwards compatibility with + 1.0: setting this to -1 or None is equivalent to passing + infer_dictionary=True. + null_fallback : str, default "__HIVE_DEFAULT_PARTITION__" + When inferring a schema for partition fields this value will be + replaced by null. The default is set to __HIVE_DEFAULT_PARTITION__ + for compatibility with Spark + schema : Schema, default None + Use this schema instead of inferring a schema from partition + values. Partition values will be validated against this schema + before accumulation into the Partitioning's dictionary. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + PartitioningFactory + To be used in the FileSystemFactoryOptions. 
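+
+        Examples
+        --------
+        A minimal sketch (the dataset path is hypothetical); segments such
+        as "year=2009/month=11" are parsed into partition columns:
+
+        >>> import pyarrow.dataset as ds
+        >>> factory = ds.HivePartitioning.discover(infer_dictionary=True)
+        >>> dataset = ds.dataset("/tmp/hive_data", format="parquet",
+        ...                      partitioning=factory)  # doctest: +SKIP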
+ """ + cdef: + CHivePartitioningFactoryOptions c_options + + if max_partition_dictionary_size in {-1, None}: + infer_dictionary = True + elif max_partition_dictionary_size != 0: + raise NotImplementedError("max_partition_dictionary_size must be " + "0, -1, or None") + + if infer_dictionary: + c_options.infer_dictionary = True + + c_options.null_fallback = tobytes(null_fallback) + + if schema: + c_options.schema = pyarrow_unwrap_schema(schema) + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + + return PartitioningFactory.wrap( + CHivePartitioning.MakeFactory(c_options), + _constructor_hive_partitioning_factory, + (infer_dictionary, max_partition_dictionary_size, null_fallback, + schema, segment_encoding), + ) + + +def _constructor_filename_partitioning_factory(*args): + return FilenamePartitioning.discover(*args) + + +cdef class FilenamePartitioning(KeyValuePartitioning): + """ + A Partitioning based on a specified Schema. + + The FilenamePartitioning expects one segment in the file name for each + field in the schema (all fields are required to be present) separated + by '_'. For example given schema the name + ``"2009_11_"`` would be parsed to ("year" == 2009 and "month" == 11). + + Parameters + ---------- + schema : Schema + The schema that describes the partitions present in the file path. + dictionaries : dict[str, Array] + If the type of any field of `schema` is a dictionary type, the + corresponding entry of `dictionaries` must be an array containing + every value which may be taken by the corresponding column or an + error will be raised in parsing. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + FilenamePartitioning + + Examples + -------- + >>> from pyarrow.dataset import FilenamePartitioning + >>> partitioning = FilenamePartitioning( + ... pa.schema([("year", pa.int16()), ("month", pa.int8())])) + >>> print(partitioning.parse("2009_11_data.parquet")) + ((year == 2009) and (month == 11)) + """ + + cdef: + CFilenamePartitioning* filename_partitioning + + def __init__(self, Schema schema not None, dictionaries=None, + segment_encoding="uri"): + cdef: + shared_ptr[CFilenamePartitioning] c_partitioning + CKeyValuePartitioningOptions c_options + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + c_partitioning = make_shared[CFilenamePartitioning]( + pyarrow_unwrap_schema(schema), + _partitioning_dictionaries(schema, dictionaries), + c_options, + ) + self.init( c_partitioning) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + KeyValuePartitioning.init(self, sp) + self.filename_partitioning = sp.get() + + @staticmethod + def discover(field_names=None, infer_dictionary=False, + schema=None, segment_encoding="uri"): + """ + Discover a FilenamePartitioning. + + Parameters + ---------- + field_names : list of str + The names to associate with the values from the subdirectory names. + If schema is given, will be populated from the schema. + infer_dictionary : bool, default False + When inferring a schema for partition fields, yield dictionary + encoded types instead of plain types. This can be more efficient + when materializing virtual columns, and Expressions parsed by the + finished Partitioning will include dictionaries of all unique + inspected values for each field. + schema : Schema, default None + Use this schema instead of inferring a schema from partition + values. 
Partition values will be validated against this schema + before accumulation into the Partitioning's dictionary. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + PartitioningFactory + To be used in the FileSystemFactoryOptions. + """ + cdef: + CPartitioningFactoryOptions c_options + vector[c_string] c_field_names + + if infer_dictionary: + c_options.infer_dictionary = True + + if schema: + c_options.schema = pyarrow_unwrap_schema(schema) + c_field_names = [tobytes(f.name) for f in schema] + elif not field_names: + raise TypeError( + "Neither field_names nor schema was passed; " + "cannot infer field_names") + else: + c_field_names = [tobytes(s) for s in field_names] + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + + return PartitioningFactory.wrap( + CFilenamePartitioning.MakeFactory(c_field_names, c_options), + _constructor_filename_partitioning_factory, + (field_names, infer_dictionary, schema, segment_encoding) + ) + + +cdef class DatasetFactory(_Weakrefable): + """ + DatasetFactory is used to create a Dataset, inspect the Schema + of the fragments contained in it, and declare a partitioning. + """ + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef init(self, const shared_ptr[CDatasetFactory]& sp): + self.wrapped = sp + self.factory = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CDatasetFactory]& sp): + cdef DatasetFactory self = \ + DatasetFactory.__new__(DatasetFactory) + self.init(sp) + return self + + cdef inline shared_ptr[CDatasetFactory] unwrap(self) nogil: + return self.wrapped + + @property + def root_partition(self): + return Expression.wrap(self.factory.root_partition()) + + @root_partition.setter + def root_partition(self, Expression expr): + check_status(self.factory.SetRootPartition(expr.unwrap())) + + def inspect_schemas(self): + cdef CResult[vector[shared_ptr[CSchema]]] result + cdef CInspectOptions options + with nogil: + result = self.factory.InspectSchemas(options) + + schemas = [] + for s in GetResultValue(result): + schemas.append(pyarrow_wrap_schema(s)) + return schemas + + def inspect(self): + """ + Inspect all data fragments and return a common Schema. + + Returns + ------- + Schema + """ + cdef: + CInspectOptions options + CResult[shared_ptr[CSchema]] result + with nogil: + result = self.factory.Inspect(options) + return pyarrow_wrap_schema(GetResultValue(result)) + + def finish(self, Schema schema=None): + """ + Create a Dataset using the inspected schema or an explicit schema + (if given). + + Parameters + ---------- + schema : Schema, default None + The schema to conform the source to. If None, the inspected + schema is used. + + Returns + ------- + Dataset + """ + cdef: + shared_ptr[CSchema] sp_schema + CResult[shared_ptr[CDataset]] result + + if schema is not None: + sp_schema = pyarrow_unwrap_schema(schema) + with nogil: + result = self.factory.FinishWithSchema(sp_schema) + else: + with nogil: + result = self.factory.Finish() + + return Dataset.wrap(GetResultValue(result)) + + +cdef class FileSystemFactoryOptions(_Weakrefable): + """ + Influences the discovery of filesystem paths. + + Parameters + ---------- + partition_base_dir : str, optional + For the purposes of applying the partitioning, paths will be + stripped of the partition_base_dir. Files not matching the + partition_base_dir prefix will be skipped for partitioning discovery. 
+ The ignored files will still be part of the Dataset, but will not + have partition information. + partitioning : Partitioning/PartitioningFactory, optional + Apply the Partitioning to every discovered Fragment. See Partitioning or + PartitioningFactory documentation. + exclude_invalid_files : bool, optional (default True) + If True, invalid files will be excluded (file format specific check). + This will incur IO for each files in a serial and single threaded + fashion. Disabling this feature will skip the IO, but unsupported + files may be present in the Dataset (resulting in an error at scan + time). + selector_ignore_prefixes : list, optional + When discovering from a Selector (and not from an explicit file list), + ignore files and directories matching any of these prefixes. + By default this is ['.', '_']. + """ + + cdef: + CFileSystemFactoryOptions options + + __slots__ = () # avoid mistakingly creating attributes + + def __init__(self, partition_base_dir=None, partitioning=None, + exclude_invalid_files=None, + list selector_ignore_prefixes=None): + if isinstance(partitioning, PartitioningFactory): + self.partitioning_factory = partitioning + elif isinstance(partitioning, Partitioning): + self.partitioning = partitioning + + if partition_base_dir is not None: + self.partition_base_dir = partition_base_dir + if exclude_invalid_files is not None: + self.exclude_invalid_files = exclude_invalid_files + if selector_ignore_prefixes is not None: + self.selector_ignore_prefixes = selector_ignore_prefixes + + cdef inline CFileSystemFactoryOptions unwrap(self): + return self.options + + @property + def partitioning(self): + """Partitioning to apply to discovered files. + + NOTE: setting this property will overwrite partitioning_factory. + """ + c_partitioning = self.options.partitioning.partitioning() + if c_partitioning.get() == nullptr: + return None + return Partitioning.wrap(c_partitioning) + + @partitioning.setter + def partitioning(self, Partitioning value): + self.options.partitioning = ( value).unwrap() + + @property + def partitioning_factory(self): + """PartitioningFactory to apply to discovered files and + discover a Partitioning. + + NOTE: setting this property will overwrite partitioning. + """ + c_factory = self.options.partitioning.factory() + if c_factory.get() == nullptr: + return None + return PartitioningFactory.wrap(c_factory, None, None) + + @partitioning_factory.setter + def partitioning_factory(self, PartitioningFactory value): + self.options.partitioning = ( value).unwrap() + + @property + def partition_base_dir(self): + """ + Base directory to strip paths before applying the partitioning. + """ + return frombytes(self.options.partition_base_dir) + + @partition_base_dir.setter + def partition_base_dir(self, value): + self.options.partition_base_dir = tobytes(value) + + @property + def exclude_invalid_files(self): + """Whether to exclude invalid files.""" + return self.options.exclude_invalid_files + + @exclude_invalid_files.setter + def exclude_invalid_files(self, bint value): + self.options.exclude_invalid_files = value + + @property + def selector_ignore_prefixes(self): + """ + List of prefixes. Files matching one of those prefixes will be + ignored by the discovery process. 
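+
+        For example (the extra prefix is purely illustrative), to also skip
+        temporary files during discovery:
+
+        >>> from pyarrow.dataset import FileSystemFactoryOptions
+        >>> options = FileSystemFactoryOptions()
+        >>> options.selector_ignore_prefixes = [".", "_", "tmp-"]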
+ """ + return [frombytes(p) for p in self.options.selector_ignore_prefixes] + + @selector_ignore_prefixes.setter + def selector_ignore_prefixes(self, values): + self.options.selector_ignore_prefixes = [tobytes(v) for v in values] + + +cdef vector[CFileInfo] unwrap_finfos(finfos): + cdef vector[CFileInfo] o_vect + for fi in finfos: + o_vect.push_back(( fi).unwrap()) + return o_vect + + +cdef class FileSystemDatasetFactory(DatasetFactory): + """ + Create a DatasetFactory from a list of paths with schema inspection. + + Parameters + ---------- + filesystem : pyarrow.fs.FileSystem + Filesystem to discover. + paths_or_selector : pyarrow.fs.FileSelector or list of path-likes + Either a Selector object or a list of path-like objects. + format : FileFormat + Currently only ParquetFileFormat and IpcFileFormat are supported. + options : FileSystemFactoryOptions, optional + Various flags influencing the discovery of filesystem paths. + """ + + cdef: + CFileSystemDatasetFactory* filesystem_factory + + def __init__(self, FileSystem filesystem not None, paths_or_selector, + FileFormat format not None, + FileSystemFactoryOptions options=None): + cdef: + vector[c_string] paths + vector[CFileInfo] finfos + CFileSelector c_selector + CResult[shared_ptr[CDatasetFactory]] result + shared_ptr[CFileSystem] c_filesystem + shared_ptr[CFileFormat] c_format + CFileSystemFactoryOptions c_options + + options = options or FileSystemFactoryOptions() + c_options = options.unwrap() + c_filesystem = filesystem.unwrap() + c_format = format.unwrap() + + if isinstance(paths_or_selector, FileSelector): + with nogil: + c_selector = ( paths_or_selector).selector + result = CFileSystemDatasetFactory.MakeFromSelector( + c_filesystem, + c_selector, + c_format, + c_options + ) + elif isinstance(paths_or_selector, (list, tuple)): + if len(paths_or_selector) > 0 and isinstance(paths_or_selector[0], FileInfo): + finfos = unwrap_finfos(paths_or_selector) + with nogil: + result = CFileSystemDatasetFactory.MakeFromFileInfos( + c_filesystem, + finfos, + c_format, + c_options + ) + else: + paths = [tobytes(s) for s in paths_or_selector] + with nogil: + result = CFileSystemDatasetFactory.MakeFromPaths( + c_filesystem, + paths, + c_format, + c_options + ) + else: + raise TypeError('Must pass either paths or a FileSelector, but ' + 'passed {}'.format(type(paths_or_selector))) + + self.init(GetResultValue(result)) + + cdef init(self, shared_ptr[CDatasetFactory]& sp): + DatasetFactory.init(self, sp) + self.filesystem_factory = sp.get() + + +cdef class UnionDatasetFactory(DatasetFactory): + """ + Provides a way to inspect/discover a Dataset's expected schema before + materialization. + + Parameters + ---------- + factories : list of DatasetFactory + """ + + cdef: + CUnionDatasetFactory* union_factory + + def __init__(self, list factories): + cdef: + DatasetFactory factory + vector[shared_ptr[CDatasetFactory]] c_factories + for factory in factories: + c_factories.push_back(factory.unwrap()) + self.init(GetResultValue(CUnionDatasetFactory.Make(c_factories))) + + cdef init(self, const shared_ptr[CDatasetFactory]& sp): + DatasetFactory.init(self, sp) + self.union_factory = sp.get() + + +cdef class RecordBatchIterator(_Weakrefable): + """An iterator over a sequence of record batches.""" + cdef: + # An object that must be kept alive with the iterator. 
+ object iterator_owner + # Iterator is a non-POD type and Cython uses offsetof, leading + # to a compiler warning unless wrapped like so + SharedPtrNoGIL[CRecordBatchIterator] iterator + + def __init__(self): + _forbid_instantiation(self.__class__, subclasses_instead=False) + + @staticmethod + cdef wrap(object owner, CRecordBatchIterator iterator): + cdef RecordBatchIterator self = \ + RecordBatchIterator.__new__(RecordBatchIterator) + self.iterator_owner = owner + self.iterator = make_shared[CRecordBatchIterator](move(iterator)) + return self + + cdef inline shared_ptr[CRecordBatchIterator] unwrap(self) nogil: + return self.iterator + + def __iter__(self): + return self + + def __next__(self): + cdef shared_ptr[CRecordBatch] record_batch + with nogil: + record_batch = GetResultValue(move(self.iterator.get().Next())) + if record_batch == NULL: + raise StopIteration + return pyarrow_wrap_batch(record_batch) + + +class TaggedRecordBatch(collections.namedtuple( + "TaggedRecordBatch", ["record_batch", "fragment"])): + """ + A combination of a record batch and the fragment it came from. + + Parameters + ---------- + record_batch : RecordBatch + The record batch. + fragment : Fragment + Fragment of the record batch. + """ + + +cdef class TaggedRecordBatchIterator(_Weakrefable): + """An iterator over a sequence of record batches with fragments.""" + cdef: + object iterator_owner + SharedPtrNoGIL[CTaggedRecordBatchIterator] iterator + + def __init__(self): + _forbid_instantiation(self.__class__, subclasses_instead=False) + + @staticmethod + cdef wrap(object owner, CTaggedRecordBatchIterator iterator): + cdef TaggedRecordBatchIterator self = \ + TaggedRecordBatchIterator.__new__(TaggedRecordBatchIterator) + self.iterator_owner = owner + self.iterator = make_shared[CTaggedRecordBatchIterator]( + move(iterator)) + return self + + def __iter__(self): + return self + + def __next__(self): + cdef CTaggedRecordBatch batch + with nogil: + batch = GetResultValue(move(self.iterator.get().Next())) + if batch.record_batch == NULL: + raise StopIteration + return TaggedRecordBatch( + record_batch=pyarrow_wrap_batch(batch.record_batch), + fragment=Fragment.wrap(batch.fragment)) + + +cdef void _populate_builder(const shared_ptr[CScannerBuilder]& ptr, + object columns=None, Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + bint use_threads=True, MemoryPool memory_pool=None, + FragmentScanOptions fragment_scan_options=None)\ + except *: + cdef: + CScannerBuilder *builder + vector[CExpression] c_exprs + + builder = ptr.get() + + check_status(builder.Filter(_bind( + filter, pyarrow_wrap_schema(builder.schema())))) + + if columns is not None: + if isinstance(columns, dict): + for expr in columns.values(): + if not isinstance(expr, Expression): + raise TypeError( + "Expected an Expression for a 'column' dictionary " + "value, got {} instead".format(type(expr)) + ) + c_exprs.push_back(( expr).unwrap()) + + check_status( + builder.Project(c_exprs, [tobytes(c) for c in columns.keys()]) + ) + elif isinstance(columns, list): + check_status(builder.ProjectColumns([tobytes(c) for c in columns])) + else: + raise ValueError( + "Expected a list or a dict for 'columns', " + "got {} instead.".format(type(columns)) + ) + + check_status(builder.BatchSize(batch_size)) + check_status(builder.BatchReadahead(batch_readahead)) + check_status(builder.FragmentReadahead(fragment_readahead)) + 
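+    # Batch/fragment readahead (set above) trades memory for better IO
+    # utilization; UseThreads below controls CPU parallelism of the scan
+    # itself.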
check_status(builder.UseThreads(use_threads)) + check_status(builder.Pool(maybe_unbox_memory_pool(memory_pool))) + if fragment_scan_options: + check_status( + builder.FragmentScanOptions(fragment_scan_options.wrapped)) + + +cdef class Scanner(_Weakrefable): + """A materialized scan operation with context and options bound. + + A scanner is the class that glues the scan tasks, data fragments and data + sources together. + """ + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CScanner]& sp): + self.wrapped = sp + self.scanner = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CScanner]& sp): + cdef Scanner self = Scanner.__new__(Scanner) + self.init(sp) + return self + + cdef inline shared_ptr[CScanner] unwrap(self): + return self.wrapped + + @staticmethod + cdef shared_ptr[CScanOptions] _make_scan_options(Dataset dataset, dict py_scanoptions) except *: + cdef: + shared_ptr[CScannerBuilder] builder = make_shared[CScannerBuilder](dataset.unwrap()) + + py_scanoptions = dataset._scanner_options(py_scanoptions) + + # Need to explicitly expand the arguments as Cython doesn't support + # keyword expansion in cdef functions. + _populate_builder( + builder, + columns=py_scanoptions.get("columns"), + filter=py_scanoptions.get("filter"), + batch_size=py_scanoptions.get("batch_size", _DEFAULT_BATCH_SIZE), + batch_readahead=py_scanoptions.get( + "batch_readahead", _DEFAULT_BATCH_READAHEAD), + fragment_readahead=py_scanoptions.get( + "fragment_readahead", _DEFAULT_FRAGMENT_READAHEAD), + use_threads=py_scanoptions.get("use_threads", True), + memory_pool=py_scanoptions.get("memory_pool"), + fragment_scan_options=py_scanoptions.get("fragment_scan_options")) + + return GetResultValue(deref(builder).GetScanOptions()) + + @staticmethod + def from_dataset(Dataset dataset not None, *, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, MemoryPool memory_pool=None): + """ + Create Scanner from Dataset, + + Parameters + ---------- + dataset : Dataset + Dataset to scan. + columns : list[str] or dict[str, Expression], default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. 
+ batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + """ + cdef: + shared_ptr[CScanOptions] options + shared_ptr[CScannerBuilder] builder + shared_ptr[CScanner] scanner + + options = Scanner._make_scan_options( + dataset, + dict(columns=columns, filter=filter, batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, use_threads=use_threads, + memory_pool=memory_pool, fragment_scan_options=fragment_scan_options) + ) + builder = make_shared[CScannerBuilder](dataset.unwrap(), options) + scanner = GetResultValue(builder.get().Finish()) + return Scanner.wrap(scanner) + + @staticmethod + def from_fragment(Fragment fragment not None, *, Schema schema=None, + object columns=None, Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, MemoryPool memory_pool=None): + """ + Create Scanner from Fragment, + + Parameters + ---------- + fragment : Fragment + fragment to scan. + schema : Schema, optional + The schema of the fragment. + columns : list[str] or dict[str, Expression], default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. 
If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + """ + cdef: + shared_ptr[CScanOptions] options = make_shared[CScanOptions]() + shared_ptr[CScannerBuilder] builder + shared_ptr[CScanner] scanner + + schema = schema or fragment.physical_schema + + builder = make_shared[CScannerBuilder](pyarrow_unwrap_schema(schema), + fragment.unwrap(), options) + _populate_builder(builder, columns=columns, filter=filter, + batch_size=batch_size, batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + use_threads=use_threads, + memory_pool=memory_pool, + fragment_scan_options=fragment_scan_options) + + scanner = GetResultValue(builder.get().Finish()) + return Scanner.wrap(scanner) + + @staticmethod + def from_batches(source, *, Schema schema=None, object columns=None, + Expression filter=None, int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, MemoryPool memory_pool=None): + """ + Create a Scanner from an iterator of batches. + + This creates a scanner which can be used only once. It is + intended to support writing a dataset (which takes a scanner) + from a source which can be read only once (e.g. a + RecordBatchReader or generator). + + Parameters + ---------- + source : Iterator + The iterator of Batches. + schema : Schema + The schema of the batches. + columns : list[str] or dict[str, Expression], default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. 
Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + """ + cdef: + shared_ptr[CScannerBuilder] builder + shared_ptr[CScanner] scanner + RecordBatchReader reader + if isinstance(source, pa.ipc.RecordBatchReader): + if schema: + raise ValueError('Cannot specify a schema when providing ' + 'a RecordBatchReader') + reader = source + elif _is_iterable(source): + if schema is None: + raise ValueError('Must provide schema to construct scanner ' + 'from an iterable') + reader = pa.ipc.RecordBatchReader.from_batches(schema, source) + else: + raise TypeError('Expected a RecordBatchReader or an iterable of ' + 'batches instead of the given type: ' + + type(source).__name__) + builder = CScannerBuilder.FromRecordBatchReader(reader.reader) + _populate_builder(builder, columns=columns, filter=filter, + batch_size=batch_size, batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, use_threads=use_threads, + memory_pool=memory_pool, + fragment_scan_options=fragment_scan_options) + scanner = GetResultValue(builder.get().Finish()) + return Scanner.wrap(scanner) + + @property + def dataset_schema(self): + """The schema with which batches will be read from fragments.""" + return pyarrow_wrap_schema( + self.scanner.options().get().dataset_schema) + + @property + def projected_schema(self): + """ + The materialized schema of the data, accounting for projections. + + This is the schema of any data returned from the scanner. + """ + return pyarrow_wrap_schema( + self.scanner.options().get().projected_schema) + + def to_batches(self): + """ + Consume a Scanner in record batches. + + Returns + ------- + record_batches : iterator of RecordBatch + """ + def _iterator(batch_iter): + for batch in batch_iter: + yield batch.record_batch + # Don't make ourselves a generator so errors are raised immediately + return _iterator(self.scan_batches()) + + def scan_batches(self): + """ + Consume a Scanner in record batches with corresponding fragments. + + Returns + ------- + record_batches : iterator of TaggedRecordBatch + """ + cdef CTaggedRecordBatchIterator iterator + with nogil: + iterator = move(GetResultValue(self.scanner.ScanBatches())) + # Don't make ourselves a generator so errors are raised immediately + return TaggedRecordBatchIterator.wrap(self, move(iterator)) + + def to_table(self): + """ + Convert a Scanner into a Table. + + Use this convenience utility with care. This will serially materialize + the Scan result in memory before creating the Table. 
+ + Returns + ------- + Table + """ + cdef CResult[shared_ptr[CTable]] result + + with nogil: + result = self.scanner.ToTable() + + return pyarrow_wrap_table(GetResultValue(result)) + + def take(self, object indices): + """ + Select rows of data by index. + + Will only consume as many batches of the underlying dataset as + needed. Otherwise, this is equivalent to + ``to_table().take(indices)``. + + Parameters + ---------- + indices : Array or array-like + indices of rows to select in the dataset. + + Returns + ------- + Table + """ + cdef CResult[shared_ptr[CTable]] result + cdef shared_ptr[CArray] c_indices + + if not isinstance(indices, pa.Array): + indices = pa.array(indices) + c_indices = pyarrow_unwrap_array(indices) + + with nogil: + result = self.scanner.TakeRows(deref(c_indices)) + return pyarrow_wrap_table(GetResultValue(result)) + + def head(self, int num_rows): + """ + Load the first N rows of the dataset. + + Parameters + ---------- + num_rows : int + The number of rows to load. + + Returns + ------- + Table + """ + cdef CResult[shared_ptr[CTable]] result + with nogil: + result = self.scanner.Head(num_rows) + return pyarrow_wrap_table(GetResultValue(result)) + + def count_rows(self): + """ + Count rows matching the scanner filter. + + Returns + ------- + count : int + """ + cdef CResult[int64_t] result + with nogil: + result = self.scanner.CountRows() + return GetResultValue(result) + + def to_reader(self): + """Consume this scanner as a RecordBatchReader. + + Returns + ------- + RecordBatchReader + """ + cdef RecordBatchReader reader + reader = RecordBatchReader.__new__(RecordBatchReader) + reader.reader = GetResultValue(self.scanner.ToRecordBatchReader()) + return reader + + +def get_partition_keys(Expression partition_expression): + """ + Extract partition keys (equality constraints between a field and a scalar) + from an expression as a dict mapping the field's name to its value. + + NB: All expressions yielded by a HivePartitioning or DirectoryPartitioning + will be conjunctions of equality conditions and are accessible through this + function. Other subexpressions will be ignored. + + Parameters + ---------- + partition_expression : pyarrow.dataset.Expression + + Returns + ------- + dict + + Examples + -------- + + For example, an expression of + + is converted to {'part': 'A', 'year': 2016} + """ + cdef: + CExpression expr = partition_expression.unwrap() + pair[CFieldRef, CDatum] ref_val + + out = {} + for ref_val in GetResultValue(CExtractKnownFieldValues(expr)).map: + assert ref_val.first.name() != nullptr + assert ref_val.second.kind() == DatumType_SCALAR + val = pyarrow_wrap_scalar(ref_val.second.scalar()) + out[frombytes(deref(ref_val.first.name()))] = val.as_py() + return out + + +cdef class WrittenFile(_Weakrefable): + """ + Metadata information about files written as + part of a dataset write operation + + Parameters + ---------- + path : str + Path to the file. + metadata : pyarrow.parquet.FileMetaData, optional + For Parquet files, the Parquet file metadata. + size : int + The size of the file in bytes. 
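+
+    Examples
+    --------
+    A sketch of a ``file_visitor`` callback receiving WrittenFile objects
+    (the output path is hypothetical):
+
+    >>> import pyarrow as pa
+    >>> import pyarrow.dataset as ds
+    >>> def file_visitor(written_file):
+    ...     print(written_file.path, written_file.size)
+    >>> ds.write_dataset(pa.table({"a": [1, 2]}), "/tmp/out",
+    ...                  format="parquet",
+    ...                  file_visitor=file_visitor)  # doctest: +SKIP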
+ """ + + def __init__(self, path, metadata, size): + self.path = path + self.metadata = metadata + self.size = size + + +cdef void _filesystemdataset_write_visitor( + dict visit_args, + CFileWriter* file_writer): + cdef: + str path + str base_dir + WrittenFile written_file + FileFormat file_format + + path = frombytes(deref(file_writer).destination().path) + base_dir = frombytes(visit_args['base_dir']) + file_format = FileFormat.wrap(file_writer.format()) + written_file = file_format._finish_write(path, base_dir, file_writer) + visit_args['file_visitor'](written_file) + + +def _filesystemdataset_write( + Scanner data not None, + object base_dir not None, + str basename_template not None, + FileSystem filesystem not None, + Partitioning partitioning not None, + FileWriteOptions file_options not None, + int max_partitions, + object file_visitor, + str existing_data_behavior not None, + int max_open_files, + int max_rows_per_file, + int min_rows_per_group, + int max_rows_per_group, + bool create_dir +): + """ + CFileSystemDataset.Write wrapper + """ + cdef: + CFileSystemDatasetWriteOptions c_options + shared_ptr[CScanner] c_scanner + dict visit_args + + c_options.file_write_options = file_options.unwrap() + c_options.filesystem = filesystem.unwrap() + c_options.base_dir = tobytes(_stringify_path(base_dir)) + c_options.partitioning = partitioning.unwrap() + c_options.max_partitions = max_partitions + c_options.max_open_files = max_open_files + c_options.max_rows_per_file = max_rows_per_file + c_options.max_rows_per_group = max_rows_per_group + c_options.min_rows_per_group = min_rows_per_group + c_options.basename_template = tobytes(basename_template) + if existing_data_behavior == 'error': + c_options.existing_data_behavior = ExistingDataBehavior_ERROR + elif existing_data_behavior == 'overwrite_or_ignore': + c_options.existing_data_behavior =\ + ExistingDataBehavior_OVERWRITE_OR_IGNORE + elif existing_data_behavior == 'delete_matching': + c_options.existing_data_behavior = ExistingDataBehavior_DELETE_MATCHING + else: + raise ValueError( + ("existing_data_behavior must be one of 'error', ", + "'overwrite_or_ignore' or 'delete_matching'") + ) + c_options.create_dir = create_dir + + if file_visitor is not None: + visit_args = {'base_dir': c_options.base_dir, + 'file_visitor': file_visitor} + # Need to use post_finish because parquet metadata is not available + # until after Finish has been called + c_options.writer_post_finish = BindFunction[cb_writer_finish_internal]( + &_filesystemdataset_write_visitor, visit_args) + + c_scanner = data.unwrap() + with nogil: + check_status(CFileSystemDataset.Write(c_options, c_scanner)) + + +cdef class _ScanNodeOptions(ExecNodeOptions): + + def _set_options(self, Dataset dataset, dict scan_options): + cdef: + shared_ptr[CScanOptions] c_scan_options + + c_scan_options = Scanner._make_scan_options(dataset, scan_options) + + self.wrapped.reset( + new CScanNodeOptions(dataset.unwrap(), c_scan_options) + ) + + +class ScanNodeOptions(_ScanNodeOptions): + """ + A Source node which yields batches from a Dataset scan. + + This is the option class for the "scan" node factory. + + This node is capable of applying pushdown projections or filters + to the file readers which reduce the amount of data that needs to + be read (if supported by the file format). But note that this does not + construct associated filter or project nodes to perform the final + filtering or projection. 
Rather, you may supply the same filter + expression or projection to the scan node that you also supply + to the filter or project node. + + Yielded batches will be augmented with fragment/batch indices to + enable stable ordering for simple ExecPlans. + + Parameters + ---------- + dataset : pyarrow.dataset.Dataset + The table which acts as the data source. + **kwargs : dict, optional + Scan options. See `Scanner.from_dataset` for possible arguments. + """ + + def __init__(self, Dataset dataset, **kwargs): + self._set_options(dataset, kwargs) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..b1ad527d8c486d3b1285992af5794e6cf1b08d98 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so differ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd b/parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0a3a2ff526ea48d7160763791ec3531a3b249bca --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset support for Parquet file format.""" + +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow.includes.libarrow_dataset_parquet cimport * + +from pyarrow._dataset cimport FragmentScanOptions, FileWriteOptions + + +cdef class ParquetFragmentScanOptions(FragmentScanOptions): + cdef: + CParquetFragmentScanOptions* parquet_options + object _parquet_decryption_config + object _decryption_properties + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp) + cdef CReaderProperties* reader_properties(self) + cdef ArrowReaderProperties* arrow_reader_properties(self) + + +cdef class ParquetFileWriteOptions(FileWriteOptions): + + cdef: + CParquetFileWriteOptions* parquet_options + object _properties diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx new file mode 100644 index 0000000000000000000000000000000000000000..8fe9f30d33af9bc5cbf7cb25978334292f5ae9dc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx @@ -0,0 +1,1053 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset support for Parquet file format.""" + +from cython cimport binding +from cython.operator cimport dereference as deref + +import os +import warnings + +import pyarrow as pa +from pyarrow.lib cimport * +from pyarrow.lib import frombytes, tobytes, is_threading_enabled +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow.includes.libarrow_dataset_parquet cimport * +from pyarrow._fs cimport FileSystem + +from pyarrow._compute cimport Expression, _bind +from pyarrow._dataset cimport ( + _make_file_source, + DatasetFactory, + FileFormat, + FileFragment, + FileWriteOptions, + Fragment, + FragmentScanOptions, + CacheOptions, + Partitioning, + PartitioningFactory, + WrittenFile +) + +from pyarrow._parquet cimport ( + _create_writer_properties, _create_arrow_writer_properties, + FileMetaData, +) + + +try: + from pyarrow._dataset_parquet_encryption import ( + set_encryption_config, set_decryption_config, set_decryption_properties + ) + parquet_encryption_enabled = True +except ImportError: + parquet_encryption_enabled = False + + +cdef Expression _true = Expression._scalar(True) + +ctypedef CParquetFileWriter* _CParquetFileWriterPtr + + +cdef class ParquetFileFormat(FileFormat): + """ + FileFormat for Parquet + + Parameters + ---------- + read_options : ParquetReadOptions + Read options for the file. + default_fragment_scan_options : ParquetFragmentScanOptions + Scan Options for the file. 
+ **kwargs : dict + Additional options for read option or scan option + """ + + cdef: + CParquetFileFormat* parquet_format + + def __init__(self, read_options=None, + default_fragment_scan_options=None, + **kwargs): + cdef: + shared_ptr[CParquetFileFormat] wrapped + CParquetFileFormatReaderOptions* options + + # Read/scan options + read_options_args = {option: kwargs[option] for option in kwargs + if option in _PARQUET_READ_OPTIONS} + scan_args = {option: kwargs[option] for option in kwargs + if option not in _PARQUET_READ_OPTIONS} + if read_options and read_options_args: + duplicates = ', '.join(sorted(read_options_args)) + raise ValueError(f'If `read_options` is given, ' + f'cannot specify {duplicates}') + if default_fragment_scan_options and scan_args: + duplicates = ', '.join(sorted(scan_args)) + raise ValueError(f'If `default_fragment_scan_options` is given, ' + f'cannot specify {duplicates}') + + if read_options is None: + read_options = ParquetReadOptions(**read_options_args) + elif isinstance(read_options, dict): + # For backwards compatibility + duplicates = [] + for option, value in read_options.items(): + if option in _PARQUET_READ_OPTIONS: + read_options_args[option] = value + else: + duplicates.append(option) + scan_args[option] = value + if duplicates: + duplicates = ", ".join(duplicates) + warnings.warn(f'The scan options {duplicates} should be ' + 'specified directly as keyword arguments') + read_options = ParquetReadOptions(**read_options_args) + elif not isinstance(read_options, ParquetReadOptions): + raise TypeError('`read_options` must be either a dictionary or an ' + 'instance of ParquetReadOptions') + + if default_fragment_scan_options is None: + default_fragment_scan_options = ParquetFragmentScanOptions(**scan_args) + elif isinstance(default_fragment_scan_options, dict): + default_fragment_scan_options = ParquetFragmentScanOptions( + **default_fragment_scan_options) + elif not isinstance(default_fragment_scan_options, + ParquetFragmentScanOptions): + raise TypeError('`default_fragment_scan_options` must be either a ' + 'dictionary or an instance of ' + 'ParquetFragmentScanOptions') + + wrapped = make_shared[CParquetFileFormat]() + + options = &(wrapped.get().reader_options) + if read_options.dictionary_columns is not None: + for column in read_options.dictionary_columns: + options.dict_columns.insert(tobytes(column)) + options.coerce_int96_timestamp_unit = \ + read_options._coerce_int96_timestamp_unit + + self.init( wrapped) + self.default_fragment_scan_options = default_fragment_scan_options + + cdef void init(self, const shared_ptr[CFileFormat]& sp): + FileFormat.init(self, sp) + self.parquet_format = sp.get() + + cdef WrittenFile _finish_write(self, path, base_dir, + CFileWriter* file_writer): + cdef: + FileMetaData parquet_metadata + CParquetFileWriter* parquet_file_writer + + parquet_metadata = None + parquet_file_writer = dynamic_cast[_CParquetFileWriterPtr](file_writer) + with nogil: + metadata = deref( + deref(parquet_file_writer).parquet_writer()).metadata() + if metadata: + parquet_metadata = FileMetaData() + parquet_metadata.init(metadata) + parquet_metadata.set_file_path(os.path.relpath(path, base_dir)) + + size = GetResultValue(file_writer.GetBytesWritten()) + + return WrittenFile(path, parquet_metadata, size) + + @property + def read_options(self): + cdef CParquetFileFormatReaderOptions* options + options = &self.parquet_format.reader_options + parquet_read_options = ParquetReadOptions( + dictionary_columns={frombytes(col) + for col in 
options.dict_columns}, + ) + # Read options getter/setter works with strings so setting + # the private property which uses the C Type + parquet_read_options._coerce_int96_timestamp_unit = \ + options.coerce_int96_timestamp_unit + return parquet_read_options + + def make_write_options(self, **kwargs): + """ + Parameters + ---------- + **kwargs : dict + + Returns + ------- + pyarrow.dataset.FileWriteOptions + """ + # Safeguard from calling make_write_options as a static class method + if not isinstance(self, ParquetFileFormat): + raise TypeError("make_write_options() should be called on " + "an instance of ParquetFileFormat") + opts = FileFormat.make_write_options(self) + ( opts).update(**kwargs) + return opts + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options): + if options.type_name == 'parquet': + self.parquet_format.default_fragment_scan_options = options.wrapped + else: + super()._set_default_fragment_scan_options(options) + + def equals(self, ParquetFileFormat other): + """ + Parameters + ---------- + other : pyarrow.dataset.ParquetFileFormat + + Returns + ------- + bool + """ + return ( + self.read_options.equals(other.read_options) and + self.default_fragment_scan_options == + other.default_fragment_scan_options + ) + + @property + def default_extname(self): + return "parquet" + + def __reduce__(self): + return ParquetFileFormat, (self.read_options, + self.default_fragment_scan_options) + + def __repr__(self): + return f"" + + def make_fragment(self, file, filesystem=None, + Expression partition_expression=None, row_groups=None, *, file_size=None): + """ + Make a FileFragment from a given file. + + Parameters + ---------- + file : file-like object, path-like or str + The file or file path to make a fragment from. + filesystem : Filesystem, optional + If `filesystem` is given, `file` must be a string and specifies + the path of the file to read from the filesystem. + partition_expression : Expression, optional + An expression that is guaranteed true for all rows in the fragment. Allows + fragment to be potentially skipped while scanning with a filter. + row_groups : Iterable, optional + The indices of the row groups to include + file_size : int, optional + The size of the file in bytes. Can improve performance with high-latency filesystems + when file size needs to be known before reading. + + Returns + ------- + fragment : Fragment + The file fragment + """ + cdef: + vector[int] c_row_groups + if partition_expression is None: + partition_expression = _true + if row_groups is None: + return super().make_fragment(file, filesystem, + partition_expression, file_size=file_size) + + c_source = _make_file_source(file, filesystem, file_size) + c_row_groups = [ row_group for row_group in set(row_groups)] + + c_fragment = GetResultValue( + self.parquet_format.MakeFragment(move(c_source), + partition_expression.unwrap(), + nullptr, + move(c_row_groups))) + return Fragment.wrap(move(c_fragment)) + + +class RowGroupInfo: + """ + A wrapper class for RowGroup information + + Parameters + ---------- + id : integer + The group ID. + metadata : FileMetaData + The rowgroup metadata. + schema : Schema + Schema of the rows. 
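+
+    Examples
+    --------
+    A minimal sketch, assuming ``dataset`` is an existing Parquet dataset
+    opened with :func:`pyarrow.dataset.dataset`:
+
+    >>> fragment = next(iter(dataset.get_fragments()))  # doctest: +SKIP
+    >>> info = fragment.row_groups[0]  # doctest: +SKIP
+    >>> info.num_rows, info.total_byte_size  # doctest: +SKIP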
+ """ + + def __init__(self, id, metadata, schema): + self.id = id + self.metadata = metadata + self.schema = schema + + @property + def num_rows(self): + return self.metadata.num_rows + + @property + def total_byte_size(self): + return self.metadata.total_byte_size + + @property + def statistics(self): + def name_stats(i): + col = self.metadata.column(i) + + stats = col.statistics + if stats is None or not stats.has_min_max: + return None, None + + name = col.path_in_schema + field_index = self.schema.get_field_index(name) + if field_index < 0: + return None, None + + typ = self.schema.field(field_index).type + return col.path_in_schema, { + 'min': pa.scalar(stats.min, type=typ).as_py(), + 'max': pa.scalar(stats.max, type=typ).as_py() + } + + return { + name: stats for name, stats + in map(name_stats, range(self.metadata.num_columns)) + if stats is not None + } + + def __repr__(self): + return "RowGroupInfo({})".format(self.id) + + def __eq__(self, other): + if isinstance(other, int): + return self.id == other + if not isinstance(other, RowGroupInfo): + return False + return self.id == other.id + + +cdef class ParquetFileFragment(FileFragment): + """A Fragment representing a parquet file.""" + + cdef: + CParquetFileFragment* parquet_file_fragment + + cdef void init(self, const shared_ptr[CFragment]& sp): + FileFragment.init(self, sp) + self.parquet_file_fragment = sp.get() + + def __reduce__(self): + buffer = self.buffer + # parquet_file_fragment.row_groups() is empty if the metadata + # information of the file is not yet populated + if not bool(self.parquet_file_fragment.row_groups()): + row_groups = None + else: + row_groups = [row_group.id for row_group in self.row_groups] + + return self.format.make_fragment, ( + self.path if buffer is None else buffer, + self.filesystem, + self.partition_expression, + row_groups + ) + + def ensure_complete_metadata(self): + """ + Ensure that all metadata (statistics, physical schema, ...) have + been read and cached in this fragment. + """ + with nogil: + check_status(self.parquet_file_fragment.EnsureCompleteMetadata()) + + @property + def row_groups(self): + metadata = self.metadata + cdef vector[int] row_groups = self.parquet_file_fragment.row_groups() + return [RowGroupInfo(i, metadata.row_group(i), self.physical_schema) + for i in row_groups] + + @property + def metadata(self): + self.ensure_complete_metadata() + cdef FileMetaData metadata = FileMetaData() + metadata.init(self.parquet_file_fragment.metadata()) + return metadata + + @property + def num_row_groups(self): + """ + Return the number of row groups viewed by this fragment (not the + number of row groups in the origin file). + """ + self.ensure_complete_metadata() + return self.parquet_file_fragment.row_groups().size() + + def split_by_row_group(self, Expression filter=None, + Schema schema=None): + """ + Split the fragment into multiple fragments. + + Yield a Fragment wrapping each row group in this ParquetFileFragment. + Row groups will be excluded whose metadata contradicts the optional + filter. + + Parameters + ---------- + filter : Expression, default None + Only include the row groups which satisfy this predicate (using + the Parquet RowGroup statistics). + schema : Schema, default None + Schema to use when filtering row groups. 
Defaults to the + Fragment's physical schema + + Returns + ------- + A list of Fragments + """ + cdef: + vector[shared_ptr[CFragment]] c_fragments + CExpression c_filter + shared_ptr[CFragment] c_fragment + + schema = schema or self.physical_schema + c_filter = _bind(filter, schema) + with nogil: + c_fragments = move(GetResultValue( + self.parquet_file_fragment.SplitByRowGroup(move(c_filter)))) + + return [Fragment.wrap(c_fragment) for c_fragment in c_fragments] + + def subset(self, Expression filter=None, Schema schema=None, + object row_group_ids=None): + """ + Create a subset of the fragment (viewing a subset of the row groups). + + Subset can be specified by either a filter predicate (with optional + schema) or by a list of row group IDs. Note that when using a filter, + the resulting fragment can be empty (viewing no row groups). + + Parameters + ---------- + filter : Expression, default None + Only include the row groups which satisfy this predicate (using + the Parquet RowGroup statistics). + schema : Schema, default None + Schema to use when filtering row groups. Defaults to the + Fragment's physical schema + row_group_ids : list of ints + The row group IDs to include in the subset. Can only be specified + if `filter` is None. + + Returns + ------- + ParquetFileFragment + """ + cdef: + CExpression c_filter + vector[int] c_row_group_ids + shared_ptr[CFragment] c_fragment + + if filter is not None and row_group_ids is not None: + raise ValueError( + "Cannot specify both 'filter' and 'row_group_ids'." + ) + + if filter is not None: + schema = schema or self.physical_schema + c_filter = _bind(filter, schema) + with nogil: + c_fragment = move(GetResultValue( + self.parquet_file_fragment.SubsetWithFilter( + move(c_filter)))) + elif row_group_ids is not None: + c_row_group_ids = [ + row_group for row_group in sorted(set(row_group_ids)) + ] + with nogil: + c_fragment = move(GetResultValue( + self.parquet_file_fragment.SubsetWithIds( + move(c_row_group_ids)))) + else: + raise ValueError( + "Need to specify one of 'filter' or 'row_group_ids'" + ) + + return Fragment.wrap(c_fragment) + + +cdef class ParquetReadOptions(_Weakrefable): + """ + Parquet format specific options for reading. + + Parameters + ---------- + dictionary_columns : list of string, default None + Names of columns which should be dictionary encoded as + they are read + coerce_int96_timestamp_unit : str, default None + Cast timestamps that are stored in INT96 format to a particular + resolution (e.g. 'ms'). 
Setting to None is equivalent to 'ns' + and therefore INT96 timestamps will be inferred as timestamps + in nanoseconds + """ + + cdef public: + set dictionary_columns + TimeUnit _coerce_int96_timestamp_unit + + # Also see _PARQUET_READ_OPTIONS + def __init__(self, dictionary_columns=None, + coerce_int96_timestamp_unit=None): + self.dictionary_columns = set(dictionary_columns or set()) + self.coerce_int96_timestamp_unit = coerce_int96_timestamp_unit + + @property + def coerce_int96_timestamp_unit(self): + return timeunit_to_string(self._coerce_int96_timestamp_unit) + + @coerce_int96_timestamp_unit.setter + def coerce_int96_timestamp_unit(self, unit): + if unit is not None: + self._coerce_int96_timestamp_unit = string_to_timeunit(unit) + else: + self._coerce_int96_timestamp_unit = TimeUnit_NANO + + def equals(self, ParquetReadOptions other): + """ + Parameters + ---------- + other : pyarrow.dataset.ParquetReadOptions + + Returns + ------- + bool + """ + return (self.dictionary_columns == other.dictionary_columns and + self.coerce_int96_timestamp_unit == + other.coerce_int96_timestamp_unit) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + def __repr__(self): + return ( + f"" + ) + + +cdef class ParquetFileWriteOptions(FileWriteOptions): + + def update(self, **kwargs): + """ + Parameters + ---------- + **kwargs : dict + """ + arrow_fields = { + "use_deprecated_int96_timestamps", + "coerce_timestamps", + "allow_truncated_timestamps", + "use_compliant_nested_type", + } + + setters = set() + for name, value in kwargs.items(): + if name not in self._properties: + raise TypeError("unexpected parquet write option: " + name) + self._properties[name] = value + if name in arrow_fields: + setters.add(self._set_arrow_properties) + elif name == "encryption_config" and value is not None: + setters.add(self._set_encryption_config) + else: + setters.add(self._set_properties) + + for setter in setters: + setter() + + def _set_properties(self): + cdef CParquetFileWriteOptions* opts = self.parquet_options + + opts.writer_properties = _create_writer_properties( + use_dictionary=self._properties["use_dictionary"], + compression=self._properties["compression"], + version=self._properties["version"], + write_statistics=self._properties["write_statistics"], + data_page_size=self._properties["data_page_size"], + compression_level=self._properties["compression_level"], + use_byte_stream_split=( + self._properties["use_byte_stream_split"] + ), + column_encoding=self._properties["column_encoding"], + data_page_version=self._properties["data_page_version"], + encryption_properties=self._properties["encryption_properties"], + write_batch_size=self._properties["write_batch_size"], + dictionary_pagesize_limit=self._properties["dictionary_pagesize_limit"], + write_page_index=self._properties["write_page_index"], + write_page_checksum=self._properties["write_page_checksum"], + sorting_columns=self._properties["sorting_columns"], + store_decimal_as_integer=self._properties["store_decimal_as_integer"], + ) + + def _set_arrow_properties(self): + cdef CParquetFileWriteOptions* opts = self.parquet_options + + opts.arrow_writer_properties = _create_arrow_writer_properties( + use_deprecated_int96_timestamps=( + self._properties["use_deprecated_int96_timestamps"] + ), + coerce_timestamps=self._properties["coerce_timestamps"], + allow_truncated_timestamps=( + self._properties["allow_truncated_timestamps"] + ), + writer_engine_version="V2", + use_compliant_nested_type=( + 
self._properties["use_compliant_nested_type"] + ) + ) + + def _set_encryption_config(self): + if not parquet_encryption_enabled: + raise NotImplementedError( + "Encryption is not enabled in your installation of pyarrow, but an " + "encryption_config was provided." + ) + set_encryption_config(self, self._properties["encryption_config"]) + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp): + FileWriteOptions.init(self, sp) + self.parquet_options = sp.get() + self._properties = dict( + use_dictionary=True, + compression="snappy", + version="2.6", + write_statistics=None, + data_page_size=None, + compression_level=None, + use_byte_stream_split=False, + column_encoding=None, + data_page_version="1.0", + use_deprecated_int96_timestamps=False, + coerce_timestamps=None, + allow_truncated_timestamps=False, + use_compliant_nested_type=True, + encryption_properties=None, + write_batch_size=None, + dictionary_pagesize_limit=None, + write_page_index=False, + encryption_config=None, + write_page_checksum=False, + sorting_columns=None, + store_decimal_as_integer=False, + ) + + self._set_properties() + self._set_arrow_properties() + + def __repr__(self): + return "".format( + " ".join([f"{key}={value}" for key, value in self._properties.items()]) + ) + + +cdef set _PARQUET_READ_OPTIONS = { + 'dictionary_columns', 'coerce_int96_timestamp_unit' +} + + +cdef class ParquetFragmentScanOptions(FragmentScanOptions): + """ + Scan-specific options for Parquet fragments. + + Parameters + ---------- + use_buffered_stream : bool, default False + Read files through buffered input streams rather than loading entire + row groups at once. This may be enabled to reduce memory overhead. + Disabled by default. + buffer_size : int, default 8192 + Size of buffered stream, if enabled. Default is 8KB. + pre_buffer : bool, default True + If enabled, pre-buffer the raw Parquet data instead of issuing one + read per column chunk. This can improve performance on high-latency + filesystems (e.g. S3, GCS) by coalescing and issuing file reads in + parallel using a background I/O thread pool. + Set to False if you want to prioritize minimal memory usage + over maximum speed. + cache_options : pyarrow.CacheOptions, default None + Cache options used when pre_buffer is enabled. The default values should + be good for most use cases. You may want to adjust these for example if + you have exceptionally high latency to the file system. + thrift_string_size_limit : int, default None + If not None, override the maximum total string size allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. + thrift_container_size_limit : int, default None + If not None, override the maximum total size of containers allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. + decryption_config : pyarrow.dataset.ParquetDecryptionConfig, default None + If not None, use the provided ParquetDecryptionConfig to decrypt the + Parquet file. + decryption_properties : pyarrow.parquet.FileDecryptionProperties, default None + If not None, use the provided FileDecryptionProperties to decrypt encrypted + Parquet file. + page_checksum_verification : bool, default False + If True, verify the page checksum for each page read from the file. 
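+
+    Examples
+    --------
+    A minimal sketch; the dataset path is a placeholder. Scan options are
+    typically attached to a ``ParquetFileFormat`` (or passed per scan via
+    ``fragment_scan_options``):
+
+    >>> import pyarrow.dataset as ds
+    >>> scan_options = ds.ParquetFragmentScanOptions(
+    ...     use_buffered_stream=True, buffer_size=64 * 1024)
+    >>> fmt = ds.ParquetFileFormat(default_fragment_scan_options=scan_options)
+    >>> dataset = ds.dataset("/tmp/data", format=fmt)  # doctest: +SKIP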
+ """ + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, *, bint use_buffered_stream=False, + buffer_size=8192, + bint pre_buffer=True, + cache_options=None, + thrift_string_size_limit=None, + thrift_container_size_limit=None, + decryption_config=None, + decryption_properties=None, + bint page_checksum_verification=False): + self.init(shared_ptr[CFragmentScanOptions]( + new CParquetFragmentScanOptions())) + self.use_buffered_stream = use_buffered_stream + self.buffer_size = buffer_size + if pre_buffer and not is_threading_enabled(): + pre_buffer = False + self.pre_buffer = pre_buffer + if cache_options is not None: + self.cache_options = cache_options + if thrift_string_size_limit is not None: + self.thrift_string_size_limit = thrift_string_size_limit + if thrift_container_size_limit is not None: + self.thrift_container_size_limit = thrift_container_size_limit + if decryption_config is not None: + self.parquet_decryption_config = decryption_config + if decryption_properties is not None: + self.decryption_properties = decryption_properties + self.page_checksum_verification = page_checksum_verification + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp): + FragmentScanOptions.init(self, sp) + self.parquet_options = sp.get() + + cdef CReaderProperties* reader_properties(self): + return self.parquet_options.reader_properties.get() + + cdef ArrowReaderProperties* arrow_reader_properties(self): + return self.parquet_options.arrow_reader_properties.get() + + @property + def use_buffered_stream(self): + return self.reader_properties().is_buffered_stream_enabled() + + @use_buffered_stream.setter + def use_buffered_stream(self, bint use_buffered_stream): + if use_buffered_stream: + self.reader_properties().enable_buffered_stream() + else: + self.reader_properties().disable_buffered_stream() + + @property + def buffer_size(self): + return self.reader_properties().buffer_size() + + @buffer_size.setter + def buffer_size(self, buffer_size): + if buffer_size <= 0: + raise ValueError("Buffer size must be larger than zero") + self.reader_properties().set_buffer_size(buffer_size) + + @property + def pre_buffer(self): + return self.arrow_reader_properties().pre_buffer() + + @pre_buffer.setter + def pre_buffer(self, bint pre_buffer): + if pre_buffer and not is_threading_enabled(): + return + self.arrow_reader_properties().set_pre_buffer(pre_buffer) + + @property + def cache_options(self): + return CacheOptions.wrap(self.arrow_reader_properties().cache_options()) + + @cache_options.setter + def cache_options(self, CacheOptions options): + self.arrow_reader_properties().set_cache_options(options.unwrap()) + + @property + def thrift_string_size_limit(self): + return self.reader_properties().thrift_string_size_limit() + + @thrift_string_size_limit.setter + def thrift_string_size_limit(self, size): + if size <= 0: + raise ValueError("size must be larger than zero") + self.reader_properties().set_thrift_string_size_limit(size) + + @property + def thrift_container_size_limit(self): + return self.reader_properties().thrift_container_size_limit() + + @thrift_container_size_limit.setter + def thrift_container_size_limit(self, size): + if size <= 0: + raise ValueError("size must be larger than zero") + self.reader_properties().set_thrift_container_size_limit(size) + + @property + def decryption_properties(self): + if not parquet_encryption_enabled: + raise NotImplementedError( + "Unable to access encryption features. 
" + "Encryption is not enabled in your installation of pyarrow." + ) + return self._decryption_properties + + @decryption_properties.setter + def decryption_properties(self, config): + if not parquet_encryption_enabled: + raise NotImplementedError( + "Encryption is not enabled in your installation of pyarrow, but " + "decryption_properties were provided." + ) + set_decryption_properties(self, config) + self._decryption_properties = config + + @property + def parquet_decryption_config(self): + if not parquet_encryption_enabled: + raise NotImplementedError( + "Unable to access encryption features. " + "Encryption is not enabled in your installation of pyarrow." + ) + return self._parquet_decryption_config + + @parquet_decryption_config.setter + def parquet_decryption_config(self, config): + if not parquet_encryption_enabled: + raise NotImplementedError( + "Encryption is not enabled in your installation of pyarrow, but a " + "decryption_config was provided." + ) + set_decryption_config(self, config) + self._parquet_decryption_config = config + + @property + def page_checksum_verification(self): + return self.reader_properties().page_checksum_verification() + + @page_checksum_verification.setter + def page_checksum_verification(self, bint page_checksum_verification): + self.reader_properties().set_page_checksum_verification(page_checksum_verification) + + def equals(self, ParquetFragmentScanOptions other): + """ + Parameters + ---------- + other : pyarrow.dataset.ParquetFragmentScanOptions + + Returns + ------- + bool + """ + attrs = ( + self.use_buffered_stream, self.buffer_size, self.pre_buffer, self.cache_options, + self.thrift_string_size_limit, self.thrift_container_size_limit, + self.page_checksum_verification) + other_attrs = ( + other.use_buffered_stream, other.buffer_size, other.pre_buffer, other.cache_options, + other.thrift_string_size_limit, + other.thrift_container_size_limit, other.page_checksum_verification) + return attrs == other_attrs + + @staticmethod + @binding(True) # Required for Cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. + return ParquetFragmentScanOptions(**kwargs) + + def __reduce__(self): + kwargs = dict( + use_buffered_stream=self.use_buffered_stream, + buffer_size=self.buffer_size, + pre_buffer=self.pre_buffer, + cache_options=self.cache_options, + thrift_string_size_limit=self.thrift_string_size_limit, + thrift_container_size_limit=self.thrift_container_size_limit, + page_checksum_verification=self.page_checksum_verification + ) + return ParquetFragmentScanOptions._reconstruct, (kwargs,) + + +cdef class ParquetFactoryOptions(_Weakrefable): + """ + Influences the discovery of parquet dataset. + + Parameters + ---------- + partition_base_dir : str, optional + For the purposes of applying the partitioning, paths will be + stripped of the partition_base_dir. Files not matching the + partition_base_dir prefix will be skipped for partitioning discovery. + The ignored files will still be part of the Dataset, but will not + have partition information. + partitioning : Partitioning, PartitioningFactory, optional + The partitioning scheme applied to fragments, see ``Partitioning``. + validate_column_chunk_paths : bool, default False + Assert that all ColumnChunk paths are consistent. The parquet spec + allows for ColumnChunk data to be stored in multiple files, but + ParquetDatasetFactory supports only a single file with all ColumnChunk + data. 
If this flag is set construction of a ParquetDatasetFactory will + raise an error if ColumnChunk data is not resident in a single file. + """ + + cdef: + CParquetFactoryOptions options + + __slots__ = () # avoid mistakingly creating attributes + + def __init__(self, partition_base_dir=None, partitioning=None, + validate_column_chunk_paths=False): + if isinstance(partitioning, PartitioningFactory): + self.partitioning_factory = partitioning + elif isinstance(partitioning, Partitioning): + self.partitioning = partitioning + + if partition_base_dir is not None: + self.partition_base_dir = partition_base_dir + + self.options.validate_column_chunk_paths = validate_column_chunk_paths + + cdef inline CParquetFactoryOptions unwrap(self): + return self.options + + @property + def partitioning(self): + """Partitioning to apply to discovered files. + + NOTE: setting this property will overwrite partitioning_factory. + """ + c_partitioning = self.options.partitioning.partitioning() + if c_partitioning.get() == nullptr: + return None + return Partitioning.wrap(c_partitioning) + + @partitioning.setter + def partitioning(self, Partitioning value): + self.options.partitioning = ( value).unwrap() + + @property + def partitioning_factory(self): + """PartitioningFactory to apply to discovered files and + discover a Partitioning. + + NOTE: setting this property will overwrite partitioning. + """ + c_factory = self.options.partitioning.factory() + if c_factory.get() == nullptr: + return None + return PartitioningFactory.wrap(c_factory, None, None) + + @partitioning_factory.setter + def partitioning_factory(self, PartitioningFactory value): + self.options.partitioning = ( value).unwrap() + + @property + def partition_base_dir(self): + """ + Base directory to strip paths before applying the partitioning. + """ + return frombytes(self.options.partition_base_dir) + + @partition_base_dir.setter + def partition_base_dir(self, value): + self.options.partition_base_dir = tobytes(value) + + @property + def validate_column_chunk_paths(self): + """ + Base directory to strip paths before applying the partitioning. + """ + return self.options.validate_column_chunk_paths + + @validate_column_chunk_paths.setter + def validate_column_chunk_paths(self, value): + self.options.validate_column_chunk_paths = value + + +cdef class ParquetDatasetFactory(DatasetFactory): + """ + Create a ParquetDatasetFactory from a Parquet `_metadata` file. + + Parameters + ---------- + metadata_path : str + Path to the `_metadata` parquet metadata-only file generated with + `pyarrow.parquet.write_metadata`. + filesystem : pyarrow.fs.FileSystem + Filesystem to read the metadata_path from, and subsequent parquet + files. + format : ParquetFileFormat + Parquet format options. + options : ParquetFactoryOptions, optional + Various flags influencing the discovery of filesystem paths. 
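+
+    Examples
+    --------
+    A minimal sketch; the ``_metadata`` path is a placeholder. Most users
+    reach this factory through :func:`pyarrow.dataset.parquet_dataset`:
+
+    >>> import pyarrow.dataset as ds
+    >>> dataset = ds.parquet_dataset("/tmp/data/_metadata")  # doctest: +SKIP
+    >>> table = dataset.to_table()  # doctest: +SKIP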
+ """ + + cdef: + CParquetDatasetFactory* parquet_factory + + def __init__(self, metadata_path, FileSystem filesystem not None, + FileFormat format not None, + ParquetFactoryOptions options=None): + cdef: + c_string c_path + shared_ptr[CFileSystem] c_filesystem + shared_ptr[CParquetFileFormat] c_format + CResult[shared_ptr[CDatasetFactory]] result + CParquetFactoryOptions c_options + + c_path = tobytes(metadata_path) + c_filesystem = filesystem.unwrap() + c_format = static_pointer_cast[CParquetFileFormat, CFileFormat]( + format.unwrap()) + options = options or ParquetFactoryOptions() + c_options = options.unwrap() + + with nogil: + result = CParquetDatasetFactory.MakeFromMetaDataPath( + c_path, c_filesystem, c_format, c_options) + self.init(GetResultValue(result)) + + cdef init(self, shared_ptr[CDatasetFactory]& sp): + DatasetFactory.init(self, sp) + self.parquet_factory = sp.get() diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_flight.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_flight.pyx new file mode 100644 index 0000000000000000000000000000000000000000..8289215de2e29c6cd7e09affd7ec5d377ee0fa9c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_flight.pyx @@ -0,0 +1,3189 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# cython: language_level = 3 + +import collections +import enum +import re +import time +import warnings +import weakref + +from cython.operator cimport dereference as deref +from cython.operator cimport postincrement +from libcpp cimport bool as c_bool + +from pyarrow.lib cimport * +from pyarrow.lib import (ArrowCancelled, ArrowException, ArrowInvalid, + SignalStopHandler) +from pyarrow.lib import as_buffer, frombytes, tobytes +from pyarrow.includes.libarrow_flight cimport * +from pyarrow.ipc import _get_legacy_format_default, _ReadPandasMixin +import pyarrow.lib as lib + + +cdef CFlightCallOptions DEFAULT_CALL_OPTIONS + + +cdef int check_flight_status(const CStatus& status) except -1 nogil: + cdef shared_ptr[FlightStatusDetail] detail + + if status.ok(): + return 0 + + detail = FlightStatusDetail.UnwrapStatus(status) + if detail: + with gil: + message = frombytes(status.message(), safe=True) + detail_msg = detail.get().extra_info() + if detail.get().code() == CFlightStatusInternal: + raise FlightInternalError(message, detail_msg) + elif detail.get().code() == CFlightStatusFailed: + message = _munge_grpc_python_error(message) + raise FlightServerError(message, detail_msg) + elif detail.get().code() == CFlightStatusTimedOut: + raise FlightTimedOutError(message, detail_msg) + elif detail.get().code() == CFlightStatusCancelled: + raise FlightCancelledError(message, detail_msg) + elif detail.get().code() == CFlightStatusUnauthenticated: + raise FlightUnauthenticatedError(message, detail_msg) + elif detail.get().code() == CFlightStatusUnauthorized: + raise FlightUnauthorizedError(message, detail_msg) + elif detail.get().code() == CFlightStatusUnavailable: + raise FlightUnavailableError(message, detail_msg) + + size_detail = FlightWriteSizeStatusDetail.UnwrapStatus(status) + if size_detail: + with gil: + message = frombytes(status.message(), safe=True) + raise FlightWriteSizeExceededError( + message, + size_detail.get().limit(), size_detail.get().actual()) + + return check_status(status) + + +_FLIGHT_SERVER_ERROR_REGEX = re.compile( + r'Flight RPC failed with message: (.*). Detail: ' + r'Python exception: (.*)', + re.DOTALL +) + + +def _munge_grpc_python_error(message): + m = _FLIGHT_SERVER_ERROR_REGEX.match(message) + if m: + return ('Flight RPC failed with Python exception \"{}: {}\"' + .format(m.group(2), m.group(1))) + else: + return message + + +cdef IpcWriteOptions _get_options(options): + return _get_legacy_format_default( + use_legacy_format=None, options=options) + + +cdef class FlightCallOptions(_Weakrefable): + """RPC-layer options for a Flight call.""" + + cdef: + CFlightCallOptions options + + def __init__(self, timeout=None, write_options=None, headers=None, + IpcReadOptions read_options=None): + """Create call options. + + Parameters + ---------- + timeout : float, None + A timeout for the call, in seconds. None means that the + timeout defaults to an implementation-specific value. + write_options : pyarrow.ipc.IpcWriteOptions, optional + IPC write options. The default options can be controlled + by environment variables (see pyarrow.ipc). + headers : List[Tuple[str, str]], optional + A list of arbitrary headers as key, value tuples + read_options : pyarrow.ipc.IpcReadOptions, optional + Serialization options for reading IPC format. 
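+
+        Examples
+        --------
+        A minimal sketch; the server location is a placeholder:
+
+        >>> import pyarrow.flight as flight
+        >>> options = flight.FlightCallOptions(timeout=5.0)
+        >>> client = flight.connect("grpc://localhost:8815")  # doctest: +SKIP
+        >>> info = client.get_flight_info(
+        ...     flight.FlightDescriptor.for_path("example"),
+        ...     options)  # doctest: +SKIP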
+ """ + cdef IpcWriteOptions c_write_options + + if timeout is not None: + self.options.timeout = CTimeoutDuration(timeout) + if write_options is not None: + c_write_options = _get_options(write_options) + self.options.write_options = c_write_options.c_options + if read_options is not None: + if not isinstance(read_options, IpcReadOptions): + raise TypeError("expected IpcReadOptions, got {}" + .format(type(read_options))) + self.options.read_options = read_options.c_options + if headers is not None: + self.options.headers = headers + + @staticmethod + cdef CFlightCallOptions* unwrap(obj): + if not obj: + return &DEFAULT_CALL_OPTIONS + elif isinstance(obj, FlightCallOptions): + return &(( obj).options) + raise TypeError("Expected a FlightCallOptions object, not " + "'{}'".format(type(obj))) + + +_CertKeyPair = collections.namedtuple('_CertKeyPair', ['cert', 'key']) + + +class CertKeyPair(_CertKeyPair): + """A TLS certificate and key for use in Flight.""" + + +cdef class FlightError(Exception): + """ + The base class for Flight-specific errors. + + A server may raise this class or one of its subclasses to provide + a more detailed error to clients. + + Parameters + ---------- + message : str, optional + The error message. + extra_info : bytes, optional + Extra binary error details that were provided by the + server/will be sent to the client. + + Attributes + ---------- + extra_info : bytes + Extra binary error details that were provided by the + server/will be sent to the client. + """ + + cdef dict __dict__ + + def __init__(self, message='', extra_info=b''): + super().__init__(message) + self.extra_info = tobytes(extra_info) + + cdef CStatus to_status(self): + message = tobytes("Flight error: {}".format(str(self))) + return CStatus_UnknownError(message) + + +cdef class FlightInternalError(FlightError, ArrowException): + """An error internal to the Flight server occurred.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusInternal, + tobytes(str(self)), self.extra_info) + + +cdef class FlightTimedOutError(FlightError, ArrowException): + """The Flight RPC call timed out.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusTimedOut, + tobytes(str(self)), self.extra_info) + + +cdef class FlightCancelledError(FlightError, ArrowCancelled): + """The operation was cancelled.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusCancelled, tobytes(str(self)), + self.extra_info) + + +cdef class FlightServerError(FlightError, ArrowException): + """A server error occurred.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusFailed, tobytes(str(self)), + self.extra_info) + + +cdef class FlightUnauthenticatedError(FlightError, ArrowException): + """The client is not authenticated.""" + + cdef CStatus to_status(self): + return MakeFlightError( + CFlightStatusUnauthenticated, tobytes(str(self)), self.extra_info) + + +cdef class FlightUnauthorizedError(FlightError, ArrowException): + """The client is not authorized to perform the given operation.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusUnauthorized, tobytes(str(self)), + self.extra_info) + + +cdef class FlightUnavailableError(FlightError, ArrowException): + """The server is not reachable or available.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusUnavailable, tobytes(str(self)), + self.extra_info) + + +class FlightWriteSizeExceededError(ArrowInvalid): + """A write operation exceeded the client-configured 
limit.""" + + def __init__(self, message, limit, actual): + super().__init__(message) + self.limit = limit + self.actual = actual + + +cdef class Action(_Weakrefable): + """An action executable on a Flight service.""" + cdef: + CAction action + + def __init__(self, action_type, buf): + """Create an action from a type and a buffer. + + Parameters + ---------- + action_type : bytes or str + buf : Buffer or bytes-like object + """ + self.action.type = tobytes(action_type) + self.action.body = pyarrow_unwrap_buffer(as_buffer(buf)) + + @property + def type(self): + """The action type.""" + return frombytes(self.action.type) + + @property + def body(self): + """The action body (arguments for the action).""" + return pyarrow_wrap_buffer(self.action.body) + + @staticmethod + cdef CAction unwrap(action) except *: + if not isinstance(action, Action): + raise TypeError("Must provide Action, not '{}'".format( + type(action))) + return ( action).action + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.action.SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + cdef Action action = Action.__new__(Action) + action.action = GetResultValue( + CAction.Deserialize(tobytes(serialized))) + return action + + def __eq__(self, Action other): + return self.action == other.action + + def __repr__(self): + return (f"") + + +_ActionType = collections.namedtuple('_ActionType', ['type', 'description']) + + +class ActionType(_ActionType): + """A type of action that is executable on a Flight service.""" + + def make_action(self, buf): + """Create an Action with this type. + + Parameters + ---------- + buf : obj + An Arrow buffer or Python bytes or bytes-like object. + """ + return Action(self.type, buf) + + +cdef class Result(_Weakrefable): + """A result from executing an Action.""" + cdef: + unique_ptr[CFlightResult] result + + def __init__(self, buf): + """Create a new result. + + Parameters + ---------- + buf : Buffer or bytes-like object + """ + self.result.reset(new CFlightResult()) + self.result.get().body = pyarrow_unwrap_buffer(as_buffer(buf)) + + @property + def body(self): + """Get the Buffer containing the result.""" + return pyarrow_wrap_buffer(self.result.get().body) + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.result.get().SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. 
+ + """ + cdef Result result = Result.__new__(Result) + result.result.reset(new CFlightResult(GetResultValue( + CFlightResult.Deserialize(tobytes(serialized))))) + return result + + def __eq__(self, Result other): + return deref(self.result.get()) == deref(other.result.get()) + + def __repr__(self): + return f"" + + +cdef class BasicAuth(_Weakrefable): + """A container for basic auth.""" + cdef: + unique_ptr[CBasicAuth] basic_auth + + def __init__(self, username=None, password=None): + """Create a new basic auth object. + + Parameters + ---------- + username : string + password : string + """ + self.basic_auth.reset(new CBasicAuth()) + if username: + self.basic_auth.get().username = tobytes(username) + if password: + self.basic_auth.get().password = tobytes(password) + + @property + def username(self): + """Get the username.""" + return self.basic_auth.get().username + + @property + def password(self): + """Get the password.""" + return self.basic_auth.get().password + + @staticmethod + def deserialize(serialized): + auth = BasicAuth() + auth.basic_auth.reset(new CBasicAuth(GetResultValue( + CBasicAuth.Deserialize(tobytes(serialized))))) + return auth + + def serialize(self): + return GetResultValue(self.basic_auth.get().SerializeToString()) + + def __eq__(self, BasicAuth other): + return deref(self.basic_auth.get()) == deref(other.basic_auth.get()) + + def __repr__(self): + return (f"") + + +class DescriptorType(enum.Enum): + """ + The type of a FlightDescriptor. + + Attributes + ---------- + + UNKNOWN + An unknown descriptor type. + + PATH + A Flight stream represented by a path. + + CMD + A Flight stream represented by an application-defined command. + + """ + + UNKNOWN = 0 + PATH = 1 + CMD = 2 + + +class FlightMethod(enum.Enum): + """The implemented methods in Flight.""" + + INVALID = 0 + HANDSHAKE = 1 + LIST_FLIGHTS = 2 + GET_FLIGHT_INFO = 3 + GET_SCHEMA = 4 + DO_GET = 5 + DO_PUT = 6 + DO_ACTION = 7 + LIST_ACTIONS = 8 + DO_EXCHANGE = 9 + + +cdef wrap_flight_method(CFlightMethod method): + if method == CFlightMethodHandshake: + return FlightMethod.HANDSHAKE + elif method == CFlightMethodListFlights: + return FlightMethod.LIST_FLIGHTS + elif method == CFlightMethodGetFlightInfo: + return FlightMethod.GET_FLIGHT_INFO + elif method == CFlightMethodGetSchema: + return FlightMethod.GET_SCHEMA + elif method == CFlightMethodDoGet: + return FlightMethod.DO_GET + elif method == CFlightMethodDoPut: + return FlightMethod.DO_PUT + elif method == CFlightMethodDoAction: + return FlightMethod.DO_ACTION + elif method == CFlightMethodListActions: + return FlightMethod.LIST_ACTIONS + elif method == CFlightMethodDoExchange: + return FlightMethod.DO_EXCHANGE + return FlightMethod.INVALID + + +cdef class FlightDescriptor(_Weakrefable): + """A description of a data stream available from a Flight service.""" + cdef: + CFlightDescriptor descriptor + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "`pyarrow.flight.FlightDescriptor.for_{path,command}` " + "function instead." 
+ .format(self.__class__.__name__)) + + @staticmethod + def for_path(*path): + """Create a FlightDescriptor for a resource path.""" + cdef FlightDescriptor result = \ + FlightDescriptor.__new__(FlightDescriptor) + result.descriptor.type = CDescriptorTypePath + result.descriptor.path = [tobytes(p) for p in path] + return result + + @staticmethod + def for_command(command): + """Create a FlightDescriptor for an opaque command.""" + cdef FlightDescriptor result = \ + FlightDescriptor.__new__(FlightDescriptor) + result.descriptor.type = CDescriptorTypeCmd + result.descriptor.cmd = tobytes(command) + return result + + @property + def descriptor_type(self): + """Get the type of this descriptor.""" + if self.descriptor.type == CDescriptorTypeUnknown: + return DescriptorType.UNKNOWN + elif self.descriptor.type == CDescriptorTypePath: + return DescriptorType.PATH + elif self.descriptor.type == CDescriptorTypeCmd: + return DescriptorType.CMD + raise RuntimeError("Invalid descriptor type!") + + @property + def command(self): + """Get the command for this descriptor.""" + if self.descriptor_type != DescriptorType.CMD: + return None + return self.descriptor.cmd + + @property + def path(self): + """Get the path for this descriptor.""" + if self.descriptor_type != DescriptorType.PATH: + return None + return self.descriptor.path + + def __repr__(self): + if self.descriptor_type == DescriptorType.PATH: + return f"" + elif self.descriptor_type == DescriptorType.CMD: + return f"" + else: + return "" + + @staticmethod + cdef CFlightDescriptor unwrap(descriptor) except *: + if not isinstance(descriptor, FlightDescriptor): + raise TypeError("Must provide a FlightDescriptor, not '{}'".format( + type(descriptor))) + return ( descriptor).descriptor + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.descriptor.SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + cdef FlightDescriptor descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + descriptor.descriptor = GetResultValue( + CFlightDescriptor.Deserialize(tobytes(serialized))) + return descriptor + + def __eq__(self, FlightDescriptor other): + return self.descriptor == other.descriptor + + +cdef class Ticket(_Weakrefable): + """A ticket for requesting a Flight stream.""" + + cdef: + CTicket c_ticket + + def __init__(self, ticket): + self.c_ticket.ticket = tobytes(ticket) + + @property + def ticket(self): + return self.c_ticket.ticket + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.c_ticket.SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. 
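+
+        Examples
+        --------
+        A round-trip sketch; the ticket contents are a placeholder:
+
+        >>> from pyarrow import flight
+        >>> ticket = flight.Ticket(b"table-123")
+        >>> flight.Ticket.deserialize(ticket.serialize()).ticket
+        b'table-123'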
+ + """ + cdef Ticket ticket = Ticket.__new__(Ticket) + ticket.c_ticket = GetResultValue( + CTicket.Deserialize(tobytes(serialized))) + return ticket + + def __eq__(self, Ticket other): + return self.c_ticket == other.c_ticket + + def __repr__(self): + return f"" + + +cdef class Location(_Weakrefable): + """The location of a Flight service.""" + cdef: + CLocation location + + def __init__(self, uri): + check_flight_status(CLocation.Parse(tobytes(uri)).Value(&self.location)) + + def __repr__(self): + return f'' + + @property + def uri(self): + return self.location.ToString() + + def equals(self, Location other): + return self == other + + def __eq__(self, other): + if not isinstance(other, Location): + return NotImplemented + return self.location.Equals(( other).location) + + @staticmethod + def for_grpc_tcp(host, port): + """Create a Location for a TCP-based gRPC service.""" + cdef: + c_string c_host = tobytes(host) + int c_port = port + Location result = Location.__new__(Location) + check_flight_status( + CLocation.ForGrpcTcp(c_host, c_port).Value(&result.location)) + return result + + @staticmethod + def for_grpc_tls(host, port): + """Create a Location for a TLS-based gRPC service.""" + cdef: + c_string c_host = tobytes(host) + int c_port = port + Location result = Location.__new__(Location) + check_flight_status( + CLocation.ForGrpcTls(c_host, c_port).Value(&result.location)) + return result + + @staticmethod + def for_grpc_unix(path): + """Create a Location for a domain socket-based gRPC service.""" + cdef: + c_string c_path = tobytes(path) + Location result = Location.__new__(Location) + check_flight_status(CLocation.ForGrpcUnix(c_path).Value(&result.location)) + return result + + @staticmethod + cdef Location wrap(CLocation location): + cdef Location result = Location.__new__(Location) + result.location = location + return result + + @staticmethod + cdef CLocation unwrap(object location) except *: + cdef CLocation c_location + if isinstance(location, str): + check_flight_status( + CLocation.Parse(tobytes(location)).Value(&c_location)) + return c_location + elif not isinstance(location, Location): + raise TypeError("Must provide a Location, not '{}'".format( + type(location))) + return ( location).location + + +cdef class FlightEndpoint(_Weakrefable): + """A Flight stream, along with the ticket and locations to access it.""" + cdef: + CFlightEndpoint endpoint + + def __init__(self, ticket, locations): + """Create a FlightEndpoint from a ticket and list of locations. + + Parameters + ---------- + ticket : Ticket or bytes + the ticket needed to access this flight + locations : list of string URIs + locations where this flight is available + + Raises + ------ + ArrowException + If one of the location URIs is not a valid URI. + """ + cdef: + CLocation c_location + + if isinstance(ticket, Ticket): + self.endpoint.ticket.ticket = tobytes(ticket.ticket) + else: + self.endpoint.ticket.ticket = tobytes(ticket) + + for location in locations: + if isinstance(location, Location): + c_location = ( location).location + else: + c_location = CLocation() + check_flight_status( + CLocation.Parse(tobytes(location)).Value(&c_location)) + self.endpoint.locations.push_back(c_location) + + @property + def ticket(self): + """Get the ticket in this endpoint.""" + return Ticket(self.endpoint.ticket.ticket) + + @property + def locations(self): + return [Location.wrap(location) + for location in self.endpoint.locations] + + def serialize(self): + """Get the wire-format representation of this type. 
+ + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.endpoint.SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + cdef FlightEndpoint endpoint = FlightEndpoint.__new__(FlightEndpoint) + endpoint.endpoint = GetResultValue( + CFlightEndpoint.Deserialize(tobytes(serialized))) + return endpoint + + def __repr__(self): + return (f"") + + def __eq__(self, FlightEndpoint other): + return self.endpoint == other.endpoint + + +cdef class SchemaResult(_Weakrefable): + """The serialized schema returned from a GetSchema request.""" + cdef: + unique_ptr[CSchemaResult] result + + def __init__(self, Schema schema): + """Create a SchemaResult from a schema. + + Parameters + ---------- + schema: Schema + the schema of the data in this flight. + """ + cdef: + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + check_flight_status(CreateSchemaResult(c_schema, &self.result)) + + @property + def schema(self): + """The schema of the data in this flight.""" + cdef: + shared_ptr[CSchema] schema + CDictionaryMemo dummy_memo + + check_flight_status(self.result.get().GetSchema(&dummy_memo).Value(&schema)) + return pyarrow_wrap_schema(schema) + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.result.get().SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + cdef SchemaResult result = SchemaResult.__new__(SchemaResult) + result.result.reset(new CSchemaResult(GetResultValue( + CSchemaResult.Deserialize(tobytes(serialized))))) + return result + + def __eq__(self, SchemaResult other): + return deref(self.result.get()) == deref(other.result.get()) + + def __repr__(self): + return f"" + + +cdef class FlightInfo(_Weakrefable): + """A description of a Flight stream.""" + cdef: + unique_ptr[CFlightInfo] info + + @staticmethod + cdef wrap(CFlightInfo c_info): + cdef FlightInfo obj = FlightInfo.__new__(FlightInfo) + obj.info.reset(new CFlightInfo(move(c_info))) + return obj + + def __init__(self, Schema schema, FlightDescriptor descriptor, endpoints, + total_records, total_bytes): + """Create a FlightInfo object from a schema, descriptor, and endpoints. + + Parameters + ---------- + schema : Schema + the schema of the data in this flight. + descriptor : FlightDescriptor + the descriptor for this flight. + endpoints : list of FlightEndpoint + a list of endpoints where this flight is available. 
+ total_records : int + the total records in this flight, or -1 if unknown + total_bytes : int + the total bytes in this flight, or -1 if unknown + """ + cdef: + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + vector[CFlightEndpoint] c_endpoints + + for endpoint in endpoints: + if isinstance(endpoint, FlightEndpoint): + c_endpoints.push_back(( endpoint).endpoint) + else: + raise TypeError('Endpoint {} is not instance of' + ' FlightEndpoint'.format(endpoint)) + + check_flight_status(CreateFlightInfo(c_schema, + descriptor.descriptor, + c_endpoints, + total_records, + total_bytes, &self.info)) + + @property + def total_records(self): + """The total record count of this flight, or -1 if unknown.""" + return self.info.get().total_records() + + @property + def total_bytes(self): + """The size in bytes of the data in this flight, or -1 if unknown.""" + return self.info.get().total_bytes() + + @property + def schema(self): + """The schema of the data in this flight.""" + cdef: + shared_ptr[CSchema] schema + CDictionaryMemo dummy_memo + + check_flight_status(self.info.get().GetSchema(&dummy_memo).Value(&schema)) + return pyarrow_wrap_schema(schema) + + @property + def descriptor(self): + """The descriptor of the data in this flight.""" + cdef FlightDescriptor result = \ + FlightDescriptor.__new__(FlightDescriptor) + result.descriptor = self.info.get().descriptor() + return result + + @property + def endpoints(self): + """The endpoints where this flight is available.""" + # TODO: get Cython to iterate over reference directly + cdef: + vector[CFlightEndpoint] endpoints = self.info.get().endpoints() + FlightEndpoint py_endpoint + + result = [] + for endpoint in endpoints: + py_endpoint = FlightEndpoint.__new__(FlightEndpoint) + py_endpoint.endpoint = endpoint + result.append(py_endpoint) + return result + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.info.get().SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. 
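A hedged sketch of building a FlightInfo and round-tripping it through the wire format via serialize()/deserialize(), as the docstrings above describe; the schema, descriptor and ticket bytes are illustrative.

import pyarrow as pa
import pyarrow.flight as flight

schema = pa.schema([("ints", pa.int64())])
descriptor = flight.FlightDescriptor.for_path("example.parquet")
endpoint = flight.FlightEndpoint(b"ticket-bytes", ["grpc://localhost:8815"])

# -1 marks the record/byte counts as unknown, as documented above.
info = flight.FlightInfo(schema, descriptor, [endpoint], -1, -1)
restored = flight.FlightInfo.deserialize(info.serialize())
print(restored.total_records, restored.total_bytes, restored.descriptor)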
+ + """ + cdef FlightInfo info = FlightInfo.__new__(FlightInfo) + info.info = move(GetResultValue( + CFlightInfo.Deserialize(tobytes(serialized)))) + return info + + def __eq__(self, FlightInfo other): + return deref(self.info.get()) == deref(other.info.get()) + + def __repr__(self): + return (f"") + + +cdef class FlightStreamChunk(_Weakrefable): + """A RecordBatch with application metadata on the side.""" + cdef: + CFlightStreamChunk chunk + + @property + def data(self): + if self.chunk.data == NULL: + return None + return pyarrow_wrap_batch(self.chunk.data) + + @property + def app_metadata(self): + if self.chunk.app_metadata == NULL: + return None + return pyarrow_wrap_buffer(self.chunk.app_metadata) + + def __iter__(self): + return iter((self.data, self.app_metadata)) + + def __repr__(self): + return "".format( + self.chunk.data != NULL, self.chunk.app_metadata != NULL) + + +cdef class _MetadataRecordBatchReader(_Weakrefable, _ReadPandasMixin): + """A reader for Flight streams.""" + + # Needs to be separate class so the "real" class can subclass the + # pure-Python mixin class + + cdef dict __dict__ + cdef shared_ptr[CMetadataRecordBatchReader] reader + + def __iter__(self): + return self + + def __next__(self): + return self.read_chunk() + + @property + def schema(self): + """Get the schema for this reader.""" + cdef shared_ptr[CSchema] c_schema + with nogil: + check_flight_status(self.reader.get().GetSchema().Value(&c_schema)) + return pyarrow_wrap_schema(c_schema) + + def read_all(self): + """Read the entire contents of the stream as a Table.""" + cdef: + shared_ptr[CTable] c_table + with nogil: + check_flight_status(self.reader.get().ToTable().Value(&c_table)) + return pyarrow_wrap_table(c_table) + + def read_chunk(self): + """Read the next FlightStreamChunk along with any metadata. + + Returns + ------- + chunk : FlightStreamChunk + The next FlightStreamChunk in the stream. + + Raises + ------ + StopIteration + when the stream is finished + """ + cdef: + FlightStreamChunk chunk = FlightStreamChunk() + + with nogil: + check_flight_status(self.reader.get().Next().Value(&chunk.chunk)) + + if chunk.chunk.data == NULL and chunk.chunk.app_metadata == NULL: + raise StopIteration + + return chunk + + def to_reader(self): + """Convert this reader into a regular RecordBatchReader. + + This may fail if the schema cannot be read from the remote end. + + Returns + ------- + RecordBatchReader + """ + cdef RecordBatchReader reader + reader = RecordBatchReader.__new__(RecordBatchReader) + with nogil: + reader.reader = GetResultValue(MakeRecordBatchReader(self.reader)) + + return reader + + +cdef class MetadataRecordBatchReader(_MetadataRecordBatchReader): + """The base class for readers for Flight streams. + + See Also + -------- + FlightStreamReader + """ + + +cdef class FlightStreamReader(MetadataRecordBatchReader): + """A reader that can also be canceled.""" + + def cancel(self): + """Cancel the read operation.""" + with nogil: + ( self.reader.get()).Cancel() + + def read_all(self): + """Read the entire contents of the stream as a Table.""" + cdef: + shared_ptr[CTable] c_table + CStopToken stop_token + with SignalStopHandler() as stop_handler: + stop_token = ( stop_handler.stop_token).stop_token + with nogil: + check_flight_status( + ( self.reader.get()) + .ToTableWithStopToken(stop_token).Value(&c_table)) + return pyarrow_wrap_table(c_table) + + +cdef class MetadataRecordBatchWriter(_CRecordBatchWriter): + """A RecordBatchWriter that also allows writing application metadata. 
+ + This class is a context manager; on exit, close() will be called. + """ + + cdef CMetadataRecordBatchWriter* _writer(self) nogil: + return self.writer.get() + + def begin(self, schema: Schema, options=None): + """Prepare to write data to this stream with the given schema.""" + cdef: + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + CIpcWriteOptions c_options = _get_options(options).c_options + with nogil: + check_flight_status(self._writer().Begin(c_schema, c_options)) + + def write_metadata(self, buf): + """Write Flight metadata by itself.""" + cdef shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(as_buffer(buf)) + with nogil: + check_flight_status( + self._writer().WriteMetadata(c_buf)) + + def write_batch(self, RecordBatch batch): + """ + Write RecordBatch to stream. + + Parameters + ---------- + batch : RecordBatch + """ + cdef: + shared_ptr[const CKeyValueMetadata] custom_metadata + + # Override superclass method to use check_flight_status so we + # can generate FlightWriteSizeExceededError. We don't do this + # for write_table as callers who intend to handle the error + # and retry with a smaller batch should be working with + # individual batches to have control. + + with nogil: + check_flight_status( + self._writer().WriteRecordBatch(deref(batch.batch), custom_metadata)) + + def write_table(self, Table table, max_chunksize=None, **kwargs): + """ + Write Table to stream in (contiguous) RecordBatch objects. + + Parameters + ---------- + table : Table + max_chunksize : int, default None + Maximum number of rows for RecordBatch chunks. Individual chunks may + be smaller depending on the chunk layout of individual columns. + """ + cdef: + # max_chunksize must be > 0 to have any impact + int64_t c_max_chunksize = -1 + + if 'chunksize' in kwargs: + max_chunksize = kwargs['chunksize'] + msg = ('The parameter chunksize is deprecated for the write_table ' + 'methods as of 0.15, please use parameter ' + 'max_chunksize instead') + warnings.warn(msg, FutureWarning) + + if max_chunksize is not None: + c_max_chunksize = max_chunksize + + with nogil: + check_flight_status( + self._writer().WriteTable(table.table[0], c_max_chunksize)) + + def close(self): + """ + Close stream and write end-of-stream 0 marker. + """ + with nogil: + check_flight_status(self._writer().Close()) + + def write_with_metadata(self, RecordBatch batch, buf): + """Write a RecordBatch along with Flight metadata. + + Parameters + ---------- + batch : RecordBatch + The next RecordBatch in the stream. + buf : Buffer + Application-specific metadata for the batch as defined by + Flight. 
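A sketch of the writer methods above as seen from the client side, using the FlightStreamWriter returned by a hypothetical DoPut call; the descriptor and data are placeholders.

import pyarrow as pa
import pyarrow.flight as flight

client = flight.FlightClient("grpc://localhost:8815")
table = pa.table({"ints": [1, 2, 3]})
descriptor = flight.FlightDescriptor.for_path("uploads", "demo")

writer, metadata_reader = client.do_put(descriptor, table.schema)
writer.write_table(table, max_chunksize=1024)   # split into smaller batches
writer.write_with_metadata(table.to_batches()[0], b"batch-0")
writer.close()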
+ """ + cdef shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(as_buffer(buf)) + with nogil: + check_flight_status( + self._writer().WriteWithMetadata(deref(batch.batch), c_buf)) + + +cdef class FlightStreamWriter(MetadataRecordBatchWriter): + """A writer that also allows closing the write side of a stream.""" + + def done_writing(self): + """Indicate that the client is done writing, but not done reading.""" + with nogil: + check_flight_status( + ( self.writer.get()).DoneWriting()) + + +cdef class FlightMetadataReader(_Weakrefable): + """A reader for Flight metadata messages sent during a DoPut.""" + + cdef: + unique_ptr[CFlightMetadataReader] reader + + def read(self): + """Read the next metadata message.""" + cdef shared_ptr[CBuffer] buf + with nogil: + check_flight_status(self.reader.get().ReadMetadata(&buf)) + if buf == NULL: + return None + return pyarrow_wrap_buffer(buf) + + +cdef class FlightMetadataWriter(_Weakrefable): + """A sender for Flight metadata messages during a DoPut.""" + + cdef: + unique_ptr[CFlightMetadataWriter] writer + + def write(self, message): + """Write the next metadata message. + + Parameters + ---------- + message : Buffer + """ + cdef shared_ptr[CBuffer] buf = \ + pyarrow_unwrap_buffer(as_buffer(message)) + with nogil: + check_flight_status(self.writer.get().WriteMetadata(deref(buf))) + + +class AsyncioCall: + """State for an async RPC using asyncio.""" + + def __init__(self) -> None: + import asyncio + self._future = asyncio.get_running_loop().create_future() + + def as_awaitable(self) -> object: + return self._future + + def wakeup(self, result_or_exception) -> None: + # Mark the Future done from within its loop (asyncio + # objects are generally not thread-safe) + loop = self._future.get_loop() + if isinstance(result_or_exception, BaseException): + loop.call_soon_threadsafe( + self._future.set_exception, result_or_exception) + else: + loop.call_soon_threadsafe( + self._future.set_result, result_or_exception) + + +cdef class AsyncioFlightClient: + """ + A FlightClient with an asyncio-based async interface. + + This interface is EXPERIMENTAL. + """ + + cdef: + FlightClient _client + + def __init__(self, FlightClient client) -> None: + self._client = client + + async def get_flight_info( + self, + descriptor: FlightDescriptor, + *, + options: FlightCallOptions = None, + ): + call = AsyncioCall() + self._get_flight_info(call, descriptor, options) + return await call.as_awaitable() + + cdef _get_flight_info(self, call, descriptor, options): + cdef: + CFlightCallOptions* c_options = \ + FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + CFuture[CFlightInfo] c_future + + with nogil: + c_future = self._client.client.get().GetFlightInfoAsync( + deref(c_options), c_descriptor) + + BindFuture(move(c_future), call.wakeup, FlightInfo.wrap) + + +cdef class FlightClient(_Weakrefable): + """A client to a Flight service. + + Connect to a Flight service on the given host and port. + + Parameters + ---------- + location : str, tuple or Location + Location to connect to. Either a gRPC URI like `grpc://localhost:port`, + a tuple of (host, port) pair, or a Location instance. + tls_root_certs : bytes or None + PEM-encoded + cert_chain: bytes or None + Client certificate if using mutual TLS + private_key: bytes or None + Client private key for cert_chain is using mutual TLS + override_hostname : str or None + Override the hostname checked by TLS. Insecure, use with caution. 
+ middleware : list optional, default None + A list of ClientMiddlewareFactory instances. + write_size_limit_bytes : int optional, default None + A soft limit on the size of a data payload sent to the + server. Enabled if positive. If enabled, writing a record + batch that (when serialized) exceeds this limit will raise an + exception; the client can retry the write with a smaller + batch. + disable_server_verification : boolean optional, default False + A flag that indicates that, if the client is connecting + with TLS, that it skips server verification. If this is + enabled, all other TLS settings are overridden. + generic_options : list optional, default None + A list of generic (string, int or string) option tuples passed + to the underlying transport. Effect is implementation + dependent. + """ + cdef: + unique_ptr[CFlightClient] client + + def __init__(self, location, *, tls_root_certs=None, cert_chain=None, + private_key=None, override_hostname=None, middleware=None, + write_size_limit_bytes=None, + disable_server_verification=None, generic_options=None): + if isinstance(location, (bytes, str)): + location = Location(location) + elif isinstance(location, tuple): + host, port = location + if tls_root_certs or disable_server_verification is not None: + location = Location.for_grpc_tls(host, port) + else: + location = Location.for_grpc_tcp(host, port) + elif not isinstance(location, Location): + raise TypeError('`location` argument must be a string, tuple or a ' + 'Location instance') + self.init(location, tls_root_certs, cert_chain, private_key, + override_hostname, middleware, write_size_limit_bytes, + disable_server_verification, generic_options) + + cdef init(self, Location location, tls_root_certs, cert_chain, + private_key, override_hostname, middleware, + write_size_limit_bytes, disable_server_verification, + generic_options): + cdef: + CLocation c_location = Location.unwrap(location) + CFlightClientOptions c_options = CFlightClientOptions.Defaults() + function[cb_client_middleware_start_call] start_call = \ + &_client_middleware_start_call + CIntStringVariant variant + + if tls_root_certs: + c_options.tls_root_certs = tobytes(tls_root_certs) + if cert_chain: + c_options.cert_chain = tobytes(cert_chain) + if private_key: + c_options.private_key = tobytes(private_key) + if override_hostname: + c_options.override_hostname = tobytes(override_hostname) + if disable_server_verification is not None: + c_options.disable_server_verification = disable_server_verification + if middleware: + for factory in middleware: + c_options.middleware.push_back( + + make_shared[CPyClientMiddlewareFactory]( + factory, start_call)) + if write_size_limit_bytes is not None: + c_options.write_size_limit_bytes = write_size_limit_bytes + else: + c_options.write_size_limit_bytes = 0 + if generic_options: + for key, value in generic_options: + if isinstance(value, (str, bytes)): + variant = CIntStringVariant( tobytes(value)) + else: + variant = CIntStringVariant( value) + c_options.generic_options.push_back( + pair[c_string, CIntStringVariant](tobytes(key), variant)) + + with nogil: + check_flight_status(CFlightClient.Connect(c_location, c_options + ).Value(&self.client)) + + @property + def supports_async(self): + return self.client.get().supports_async() + + def as_async(self) -> None: + check_status(self.client.get().CheckAsyncSupport()) + return AsyncioFlightClient(self) + + def wait_for_available(self, timeout=5): + """Block until the server can be contacted. 
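A hedged sketch of the constructor options documented above; the certificate file and the gRPC channel option are illustrative choices, not required settings.

import pyarrow.flight as flight

with open("ca.pem", "rb") as f:
    ca_cert = f.read()

client = flight.FlightClient(
    "grpc+tls://db.example.com:443",
    tls_root_certs=ca_cert,
    generic_options=[("grpc.keepalive_time_ms", 30000)],
)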
+ + Parameters + ---------- + timeout : int, default 5 + The maximum seconds to wait. + """ + deadline = time.time() + timeout + while True: + try: + list(self.list_flights()) + except FlightUnavailableError: + if time.time() < deadline: + time.sleep(0.025) + continue + else: + raise + except NotImplementedError: + # allow if list_flights is not implemented, because + # the server can be contacted nonetheless + break + else: + break + + @classmethod + def connect(cls, location, tls_root_certs=None, cert_chain=None, + private_key=None, override_hostname=None, + disable_server_verification=None): + """Connect to a Flight server. + + .. deprecated:: 0.15.0 + Use the ``FlightClient`` constructor or ``pyarrow.flight.connect`` function instead. + """ + warnings.warn("The 'FlightClient.connect' method is deprecated, use " + "FlightClient constructor or pyarrow.flight.connect " + "function instead") + return FlightClient( + location, tls_root_certs=tls_root_certs, + cert_chain=cert_chain, private_key=private_key, + override_hostname=override_hostname, + disable_server_verification=disable_server_verification + ) + + def authenticate(self, auth_handler, options: FlightCallOptions = None): + """Authenticate to the server. + + Parameters + ---------- + auth_handler : ClientAuthHandler + The authentication mechanism to use. + options : FlightCallOptions + Options for this call. + """ + cdef: + unique_ptr[CClientAuthHandler] handler + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + + if not isinstance(auth_handler, ClientAuthHandler): + raise TypeError( + "FlightClient.authenticate takes a ClientAuthHandler, " + "not '{}'".format(type(auth_handler))) + handler.reset(( auth_handler).to_handler()) + with nogil: + check_flight_status( + self.client.get().Authenticate(deref(c_options), + move(handler))) + + def authenticate_basic_token(self, username, password, + options: FlightCallOptions = None): + """Authenticate to the server with HTTP basic authentication. + + Parameters + ---------- + username : string + Username to authenticate with + password : string + Password to authenticate with + options : FlightCallOptions + Options for this call + + Returns + ------- + tuple : Tuple[str, str] + A tuple representing the FlightCallOptions authorization + header entry of a bearer token. + """ + cdef: + CResult[pair[c_string, c_string]] result + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + c_string user = tobytes(username) + c_string pw = tobytes(password) + + with nogil: + result = self.client.get().AuthenticateBasicToken(deref(c_options), + user, pw) + check_flight_status(result.status()) + + return GetResultValue(result) + + def list_actions(self, options: FlightCallOptions = None): + """List the actions available on a service.""" + cdef: + vector[CActionType] results + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + + with SignalStopHandler() as stop_handler: + c_options.stop_token = \ + ( stop_handler.stop_token).stop_token + with nogil: + check_flight_status( + self.client.get().ListActions(deref(c_options)).Value(&results)) + + result = [] + for action_type in results: + py_action = ActionType(frombytes(action_type.type), + frombytes(action_type.description)) + result.append(py_action) + + return result + + def do_action(self, action, options: FlightCallOptions = None): + """ + Execute an action on a service. 
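A sketch of basic-token authentication as documented above: the returned (header, value) pair is fed back through FlightCallOptions so subsequent calls carry the bearer token. The credentials are placeholders.

import pyarrow.flight as flight

client = flight.FlightClient("grpc://localhost:8815")
token_pair = client.authenticate_basic_token(b"admin", b"password")
options = flight.FlightCallOptions(headers=[token_pair])
for flight_info in client.list_flights(options=options):
    print(flight_info.descriptor)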
+ + Parameters + ---------- + action : str, tuple, or Action + Can be action type name (no body), type and body, or any Action + object + options : FlightCallOptions + RPC options + + Returns + ------- + results : iterator of Result values + """ + cdef: + unique_ptr[CResultStream] results + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + + if isinstance(action, (str, bytes)): + action = Action(action, b'') + elif isinstance(action, tuple): + action = Action(*action) + elif not isinstance(action, Action): + raise TypeError("Action must be Action instance, string, or tuple") + + cdef CAction c_action = Action.unwrap( action) + with nogil: + check_flight_status( + self.client.get().DoAction( + deref(c_options), c_action).Value(&results)) + + def _do_action_response(): + cdef: + Result result + while True: + result = Result.__new__(Result) + with nogil: + check_flight_status(results.get().Next().Value(&result.result)) + if result.result == NULL: + break + yield result + return _do_action_response() + + def list_flights(self, criteria: bytes = None, + options: FlightCallOptions = None): + """List the flights available on a service.""" + cdef: + unique_ptr[CFlightListing] listing + FlightInfo result + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CCriteria c_criteria + + if criteria: + c_criteria.expression = tobytes(criteria) + + with SignalStopHandler() as stop_handler: + c_options.stop_token = \ + ( stop_handler.stop_token).stop_token + with nogil: + check_flight_status( + self.client.get().ListFlights(deref(c_options), + c_criteria).Value(&listing)) + + while True: + result = FlightInfo.__new__(FlightInfo) + with nogil: + check_flight_status(listing.get().Next().Value(&result.info)) + if result.info == NULL: + break + yield result + + def get_flight_info(self, descriptor: FlightDescriptor, + options: FlightCallOptions = None): + """Request information about an available flight.""" + cdef: + FlightInfo result = FlightInfo.__new__(FlightInfo) + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + + with nogil: + check_flight_status(self.client.get().GetFlightInfo( + deref(c_options), c_descriptor).Value(&result.info)) + + return result + + def get_schema(self, descriptor: FlightDescriptor, + options: FlightCallOptions = None): + """Request schema for an available flight.""" + cdef: + SchemaResult result = SchemaResult.__new__(SchemaResult) + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + with nogil: + check_status( + self.client.get() + .GetSchema(deref(c_options), c_descriptor).Value(&result.result) + ) + + return result + + def do_get(self, ticket: Ticket, options: FlightCallOptions = None): + """Request the data for a flight. + + Returns + ------- + reader : FlightStreamReader + """ + cdef: + unique_ptr[CFlightStreamReader] reader + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + + with nogil: + check_flight_status( + self.client.get().DoGet( + deref(c_options), ticket.c_ticket).Value(&reader)) + result = FlightStreamReader() + result.reader.reset(reader.release()) + return result + + def do_put(self, descriptor: FlightDescriptor, Schema schema not None, + options: FlightCallOptions = None): + """Upload data to a flight. 
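A sketch of the typical retrieval flow with the methods above: get_flight_info to discover endpoints, then do_get per endpoint ticket. The address and descriptor are placeholders.

import pyarrow.flight as flight

client = flight.FlightClient("grpc://localhost:8815")
info = client.get_flight_info(flight.FlightDescriptor.for_path("example.parquet"))
tables = []
for endpoint in info.endpoints:
    # A production client would usually dial endpoint.locations rather
    # than reusing the original channel.
    reader = client.do_get(endpoint.ticket)
    tables.append(reader.read_all())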
+ + Returns + ------- + writer : FlightStreamWriter + reader : FlightMetadataReader + """ + cdef: + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + CDoPutResult c_do_put_result + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + + with nogil: + check_flight_status(self.client.get().DoPut( + deref(c_options), + c_descriptor, + c_schema).Value(&c_do_put_result)) + py_writer = FlightStreamWriter() + py_writer.writer.reset(c_do_put_result.writer.release()) + py_reader = FlightMetadataReader() + py_reader.reader.reset(c_do_put_result.reader.release()) + return py_writer, py_reader + + def do_exchange(self, descriptor: FlightDescriptor, + options: FlightCallOptions = None): + """Start a bidirectional data exchange with a server. + + Parameters + ---------- + descriptor : FlightDescriptor + A descriptor for the flight. + options : FlightCallOptions + RPC options. + + Returns + ------- + writer : FlightStreamWriter + reader : FlightStreamReader + """ + cdef: + CDoExchangeResult c_do_exchange_result + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + + with nogil: + check_flight_status(self.client.get().DoExchange( + deref(c_options), + c_descriptor).Value(&c_do_exchange_result)) + py_writer = FlightStreamWriter() + py_writer.writer.reset(c_do_exchange_result.writer.release()) + py_reader = FlightStreamReader() + py_reader.reader.reset(c_do_exchange_result.reader.release()) + return py_writer, py_reader + + def close(self): + """Close the client and disconnect.""" + client = self.client.get() + if client != NULL: + check_flight_status(client.Close()) + + def __del__(self): + # Not ideal, but close() wasn't originally present so + # applications may not be calling it + self.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + +cdef class FlightDataStream(_Weakrefable): + """ + Abstract base class for Flight data streams. + + See Also + -------- + RecordBatchStream + GeneratorStream + """ + + cdef CFlightDataStream* to_stream(self) except *: + """Create the C++ data stream for the backing Python object. + + We don't expose the C++ object to Python, so we can manage its + lifetime from the Cython/C++ side. + """ + raise NotImplementedError + + +cdef class RecordBatchStream(FlightDataStream): + """A Flight data stream backed by RecordBatches. + + The remainder of this DoGet request will be handled in C++, + without having to acquire the GIL. + + """ + cdef: + object data_source + CIpcWriteOptions write_options + + def __init__(self, data_source, options=None): + """Create a RecordBatchStream from a data source. + + Parameters + ---------- + data_source : RecordBatchReader or Table + The data to stream to the client. + options : pyarrow.ipc.IpcWriteOptions, optional + Optional IPC options to control how to write the data. 
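A sketch of a bidirectional DoExchange as described above, assuming a hypothetical server that echoes batches back; the descriptor and data are illustrative.

import pyarrow as pa
import pyarrow.flight as flight

client = flight.FlightClient("grpc://localhost:8815")
writer, reader = client.do_exchange(flight.FlightDescriptor.for_command(b"echo"))
batch = pa.RecordBatch.from_pydict({"ints": [1, 2, 3]})
with writer:
    writer.begin(batch.schema)
    writer.write_batch(batch)
    writer.done_writing()           # half-close the write side
    response = reader.read_chunk()  # read the echoed batch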
+ """ + if (not isinstance(data_source, RecordBatchReader) and + not isinstance(data_source, lib.Table)): + raise TypeError("Expected RecordBatchReader or Table, " + "but got: {}".format(type(data_source))) + self.data_source = data_source + self.write_options = _get_options(options).c_options + + cdef CFlightDataStream* to_stream(self) except *: + cdef: + shared_ptr[CRecordBatchReader] reader + if isinstance(self.data_source, RecordBatchReader): + reader = ( self.data_source).reader + elif isinstance(self.data_source, lib.Table): + table = (
self.data_source).table + reader.reset(new TableBatchReader(deref(table))) + else: + raise RuntimeError("Can't construct RecordBatchStream " + "from type {}".format(type(self.data_source))) + return new CRecordBatchStream(reader, self.write_options) + + +cdef class GeneratorStream(FlightDataStream): + """A Flight data stream backed by a Python generator.""" + cdef: + shared_ptr[CSchema] schema + object generator + # A substream currently being consumed by the client, if + # present. Produced by the generator. + unique_ptr[CFlightDataStream] current_stream + CIpcWriteOptions c_options + + def __init__(self, schema, generator, options=None): + """Create a GeneratorStream from a Python generator. + + Parameters + ---------- + schema : Schema + The schema for the data to be returned. + + generator : iterator or iterable + The generator should yield other FlightDataStream objects, + Tables, RecordBatches, or RecordBatchReaders. + + options : pyarrow.ipc.IpcWriteOptions, optional + """ + self.schema = pyarrow_unwrap_schema(schema) + self.generator = iter(generator) + self.c_options = _get_options(options).c_options + + cdef CFlightDataStream* to_stream(self) except *: + cdef: + function[cb_data_stream_next] callback = &_data_stream_next + return new CPyGeneratorFlightDataStream(self, self.schema, callback, + self.c_options) + + +cdef class ServerCallContext(_Weakrefable): + """Per-call state/context.""" + cdef: + const CServerCallContext* context + + def peer_identity(self): + """Get the identity of the authenticated peer. + + May be the empty string. + """ + return tobytes(self.context.peer_identity()) + + def peer(self): + """Get the address of the peer.""" + # Set safe=True as gRPC on Windows sometimes gives garbage bytes + return frombytes(self.context.peer(), safe=True) + + def is_cancelled(self): + """Check if the current RPC call has been canceled by the client.""" + return self.context.is_cancelled() + + def add_header(self, key, value): + """Add a response header.""" + self.context.AddHeader(tobytes(key), tobytes(value)) + + def add_trailer(self, key, value): + """Add a response trailer.""" + self.context.AddTrailer(tobytes(key), tobytes(value)) + + def get_middleware(self, key): + """ + Get a middleware instance by key. + + Returns None if the middleware was not found. 
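A sketch of a server-side DoGet handler returning the data-stream classes above; the dataset and ticket values are illustrative, and FlightServerBase is defined later in this module. RecordBatchStream suits data that already exists as a Table or reader, while GeneratorStream suits lazily produced batches.

import pyarrow as pa
import pyarrow.flight as flight

class DemoServer(flight.FlightServerBase):
    def do_get(self, context, ticket):
        print("serving", ticket.ticket, "to", context.peer())
        if ticket.ticket == b"eager":
            return flight.RecordBatchStream(pa.table({"ints": [1, 2, 3]}))
        schema = pa.schema([("ints", pa.int64())])
        def gen():
            for i in range(3):
                # Each yielded batch must match the declared schema.
                yield pa.RecordBatch.from_pydict({"ints": [i]})
        return flight.GeneratorStream(schema, gen())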
+ """ + cdef: + CServerMiddleware* c_middleware = \ + self.context.GetMiddleware(CPyServerMiddlewareName) + CPyServerMiddleware* middleware + vector[CTracingServerMiddlewareTraceKey] c_trace_context + if c_middleware == NULL: + c_middleware = self.context.GetMiddleware(tobytes(key)) + + if c_middleware == NULL: + return None + elif c_middleware.name() == CPyServerMiddlewareName: + middleware = c_middleware + py_middleware = <_ServerMiddlewareWrapper> middleware.py_object() + return py_middleware.middleware.get(key) + elif c_middleware.name() == CTracingServerMiddlewareName: + c_trace_context = ( c_middleware + ).GetTraceContext() + trace_context = {pair.key: pair.value for pair in c_trace_context} + return TracingServerMiddleware(trace_context) + return None + + @staticmethod + cdef ServerCallContext wrap(const CServerCallContext& context): + cdef ServerCallContext result = \ + ServerCallContext.__new__(ServerCallContext) + result.context = &context + return result + + +cdef class ServerAuthReader(_Weakrefable): + """A reader for messages from the client during an auth handshake.""" + cdef: + CServerAuthReader* reader + + def read(self): + cdef c_string token + if not self.reader: + raise ValueError("Cannot use ServerAuthReader outside " + "ServerAuthHandler.authenticate") + with nogil: + check_flight_status(self.reader.Read(&token)) + return token + + cdef void poison(self): + """Prevent further usage of this object. + + This object is constructed by taking a pointer to a reference, + so we want to make sure Python users do not access this after + the reference goes away. + """ + self.reader = NULL + + @staticmethod + cdef ServerAuthReader wrap(CServerAuthReader* reader): + cdef ServerAuthReader result = \ + ServerAuthReader.__new__(ServerAuthReader) + result.reader = reader + return result + + +cdef class ServerAuthSender(_Weakrefable): + """A writer for messages to the client during an auth handshake.""" + cdef: + CServerAuthSender* sender + + def write(self, message): + cdef c_string c_message = tobytes(message) + if not self.sender: + raise ValueError("Cannot use ServerAuthSender outside " + "ServerAuthHandler.authenticate") + with nogil: + check_flight_status(self.sender.Write(c_message)) + + cdef void poison(self): + """Prevent further usage of this object. + + This object is constructed by taking a pointer to a reference, + so we want to make sure Python users do not access this after + the reference goes away. + """ + self.sender = NULL + + @staticmethod + cdef ServerAuthSender wrap(CServerAuthSender* sender): + cdef ServerAuthSender result = \ + ServerAuthSender.__new__(ServerAuthSender) + result.sender = sender + return result + + +cdef class ClientAuthReader(_Weakrefable): + """A reader for messages from the server during an auth handshake.""" + cdef: + CClientAuthReader* reader + + def read(self): + cdef c_string token + if not self.reader: + raise ValueError("Cannot use ClientAuthReader outside " + "ClientAuthHandler.authenticate") + with nogil: + check_flight_status(self.reader.Read(&token)) + return token + + cdef void poison(self): + """Prevent further usage of this object. + + This object is constructed by taking a pointer to a reference, + so we want to make sure Python users do not access this after + the reference goes away. 
+ """ + self.reader = NULL + + @staticmethod + cdef ClientAuthReader wrap(CClientAuthReader* reader): + cdef ClientAuthReader result = \ + ClientAuthReader.__new__(ClientAuthReader) + result.reader = reader + return result + + +cdef class ClientAuthSender(_Weakrefable): + """A writer for messages to the server during an auth handshake.""" + cdef: + CClientAuthSender* sender + + def write(self, message): + cdef c_string c_message = tobytes(message) + if not self.sender: + raise ValueError("Cannot use ClientAuthSender outside " + "ClientAuthHandler.authenticate") + with nogil: + check_flight_status(self.sender.Write(c_message)) + + cdef void poison(self): + """Prevent further usage of this object. + + This object is constructed by taking a pointer to a reference, + so we want to make sure Python users do not access this after + the reference goes away. + """ + self.sender = NULL + + @staticmethod + cdef ClientAuthSender wrap(CClientAuthSender* sender): + cdef ClientAuthSender result = \ + ClientAuthSender.__new__(ClientAuthSender) + result.sender = sender + return result + + +cdef CStatus _data_stream_next(void* self, CFlightPayload* payload) except *: + """Callback for implementing FlightDataStream in Python.""" + cdef: + unique_ptr[CFlightDataStream] data_stream + + py_stream = self + if not isinstance(py_stream, GeneratorStream): + raise RuntimeError("self object in callback is not GeneratorStream") + stream = py_stream + + # The generator is allowed to yield a reader or table which we + # yield from; if that sub-generator is empty, we need to reset and + # try again. However, limit the number of attempts so that we + # don't just spin forever. + max_attempts = 128 + for _ in range(max_attempts): + if stream.current_stream != nullptr: + with nogil: + check_flight_status( + stream.current_stream.get().Next().Value(payload)) + # If the stream ended, see if there's another stream from the + # generator + if payload.ipc_message.metadata != nullptr: + return CStatus_OK() + stream.current_stream.reset(nullptr) + + try: + result = next(stream.generator) + except StopIteration: + payload.ipc_message.metadata.reset( nullptr) + return CStatus_OK() + except FlightError as flight_error: + return ( flight_error).to_status() + + if isinstance(result, (list, tuple)): + result, metadata = result + else: + result, metadata = result, None + + if isinstance(result, (Table, RecordBatchReader)): + if metadata: + raise ValueError("Can only return metadata alongside a " + "RecordBatch.") + result = RecordBatchStream(result) + + stream_schema = pyarrow_wrap_schema(stream.schema) + if isinstance(result, FlightDataStream): + if metadata: + raise ValueError("Can only return metadata alongside a " + "RecordBatch.") + data_stream = unique_ptr[CFlightDataStream]( + ( result).to_stream()) + substream_schema = pyarrow_wrap_schema(data_stream.get().schema()) + if substream_schema != stream_schema: + raise ValueError("Got a FlightDataStream whose schema " + "does not match the declared schema of this " + "GeneratorStream. " + "Got: {}\nExpected: {}".format( + substream_schema, stream_schema)) + stream.current_stream.reset( + new CPyFlightDataStream(result, move(data_stream))) + # Loop around and try again + continue + elif isinstance(result, RecordBatch): + batch = result + if batch.schema != stream_schema: + raise ValueError("Got a RecordBatch whose schema does not " + "match the declared schema of this " + "GeneratorStream. 
" + "Got: {}\nExpected: {}".format(batch.schema, + stream_schema)) + check_flight_status(GetRecordBatchPayload( + deref(batch.batch), + stream.c_options, + &payload.ipc_message)) + if metadata: + payload.app_metadata = pyarrow_unwrap_buffer( + as_buffer(metadata)) + else: + raise TypeError("GeneratorStream must be initialized with " + "an iterator of FlightDataStream, Table, " + "RecordBatch, or RecordBatchStreamReader objects, " + "not {}.".format(type(result))) + # Don't loop around + return CStatus_OK() + # Ran out of attempts (the RPC handler kept yielding empty tables/readers) + raise RuntimeError("While getting next payload, ran out of attempts to " + "get something to send " + "(application server implementation error)") + + +cdef CStatus _list_flights(void* self, const CServerCallContext& context, + const CCriteria* c_criteria, + unique_ptr[CFlightListing]* listing) except *: + """Callback for implementing ListFlights in Python.""" + cdef: + vector[CFlightInfo] flights + + try: + result = ( self).list_flights(ServerCallContext.wrap(context), + c_criteria.expression) + for info in result: + if not isinstance(info, FlightInfo): + raise TypeError("FlightServerBase.list_flights must return " + "FlightInfo instances, but got {}".format( + type(info))) + flights.push_back(deref(( info).info.get())) + listing.reset(new CSimpleFlightListing(flights)) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _get_flight_info(void* self, const CServerCallContext& context, + CFlightDescriptor c_descriptor, + unique_ptr[CFlightInfo]* info) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + FlightDescriptor py_descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + py_descriptor.descriptor = c_descriptor + try: + result = ( self).get_flight_info( + ServerCallContext.wrap(context), + py_descriptor) + except FlightError as flight_error: + return ( flight_error).to_status() + if not isinstance(result, FlightInfo): + raise TypeError("FlightServerBase.get_flight_info must return " + "a FlightInfo instance, but got {}".format( + type(result))) + info.reset(new CFlightInfo(deref(( result).info.get()))) + return CStatus_OK() + +cdef CStatus _get_schema(void* self, const CServerCallContext& context, + CFlightDescriptor c_descriptor, + unique_ptr[CSchemaResult]* info) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + FlightDescriptor py_descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + py_descriptor.descriptor = c_descriptor + result = ( self).get_schema(ServerCallContext.wrap(context), + py_descriptor) + if not isinstance(result, SchemaResult): + raise TypeError("FlightServerBase.get_schema_info must return " + "a SchemaResult instance, but got {}".format( + type(result))) + info.reset(new CSchemaResult(deref(( result).result.get()))) + return CStatus_OK() + +cdef CStatus _do_put(void* self, const CServerCallContext& context, + unique_ptr[CFlightMessageReader] reader, + unique_ptr[CFlightMetadataWriter] writer) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + MetadataRecordBatchReader py_reader = MetadataRecordBatchReader() + FlightMetadataWriter py_writer = FlightMetadataWriter() + FlightDescriptor descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + + descriptor.descriptor = reader.get().descriptor() + py_reader.reader.reset(reader.release()) + py_writer.writer.reset(writer.release()) + try: + ( 
self).do_put(ServerCallContext.wrap(context), descriptor, + py_reader, py_writer) + return CStatus_OK() + except FlightError as flight_error: + return ( flight_error).to_status() + + +cdef CStatus _do_get(void* self, const CServerCallContext& context, + CTicket ticket, + unique_ptr[CFlightDataStream]* stream) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + unique_ptr[CFlightDataStream] data_stream + + py_ticket = Ticket(ticket.ticket) + try: + result = ( self).do_get(ServerCallContext.wrap(context), + py_ticket) + except FlightError as flight_error: + return ( flight_error).to_status() + if not isinstance(result, FlightDataStream): + raise TypeError("FlightServerBase.do_get must return " + "a FlightDataStream") + data_stream = unique_ptr[CFlightDataStream]( + ( result).to_stream()) + stream[0] = unique_ptr[CFlightDataStream]( + new CPyFlightDataStream(result, move(data_stream))) + return CStatus_OK() + + +cdef CStatus _do_exchange(void* self, const CServerCallContext& context, + unique_ptr[CFlightMessageReader] reader, + unique_ptr[CFlightMessageWriter] writer) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + MetadataRecordBatchReader py_reader = MetadataRecordBatchReader() + MetadataRecordBatchWriter py_writer = MetadataRecordBatchWriter() + FlightDescriptor descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + + descriptor.descriptor = reader.get().descriptor() + py_reader.reader.reset(reader.release()) + py_writer.writer.reset(writer.release()) + try: + ( self).do_exchange(ServerCallContext.wrap(context), + descriptor, py_reader, py_writer) + return CStatus_OK() + except FlightError as flight_error: + return ( flight_error).to_status() + + +cdef CStatus _do_action_result_next( + void* self, + unique_ptr[CFlightResult]* result +) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + CFlightResult* c_result + + try: + action_result = next( self) + if not isinstance(action_result, Result): + action_result = Result(action_result) + c_result = ( action_result).result.get() + result.reset(new CFlightResult(deref(c_result))) + except StopIteration: + result.reset(nullptr) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _do_action(void* self, const CServerCallContext& context, + const CAction& action, + unique_ptr[CResultStream]* result) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + function[cb_result_next] ptr = &_do_action_result_next + py_action = Action(action.type, pyarrow_wrap_buffer(action.body)) + try: + responses = ( self).do_action(ServerCallContext.wrap(context), + py_action) + except FlightError as flight_error: + return ( flight_error).to_status() + # Let the application return an iterator or anything convertible + # into one + if responses is None: + # Server didn't return anything + responses = [] + result.reset(new CPyFlightResultStream(iter(responses), ptr)) + return CStatus_OK() + + +cdef CStatus _list_actions(void* self, const CServerCallContext& context, + vector[CActionType]* actions) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + CActionType action_type + # Method should return a list of ActionTypes or similar tuple + try: + result = ( self).list_actions(ServerCallContext.wrap(context)) + for action in result: + if not isinstance(action, tuple): + raise TypeError( + "Results of list_actions must be ActionType or tuple") + action_type.type = 
tobytes(action[0]) + action_type.description = tobytes(action[1]) + actions.push_back(action_type) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _server_authenticate(void* self, CServerAuthSender* outgoing, + CServerAuthReader* incoming) except *: + """Callback for implementing authentication in Python.""" + sender = ServerAuthSender.wrap(outgoing) + reader = ServerAuthReader.wrap(incoming) + try: + ( self).authenticate(sender, reader) + except FlightError as flight_error: + return ( flight_error).to_status() + finally: + sender.poison() + reader.poison() + return CStatus_OK() + +cdef CStatus _is_valid(void* self, const c_string& token, + c_string* peer_identity) except *: + """Callback for implementing authentication in Python.""" + cdef c_string c_result + try: + c_result = tobytes(( self).is_valid(token)) + peer_identity[0] = c_result + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _client_authenticate(void* self, CClientAuthSender* outgoing, + CClientAuthReader* incoming) except *: + """Callback for implementing authentication in Python.""" + sender = ClientAuthSender.wrap(outgoing) + reader = ClientAuthReader.wrap(incoming) + try: + ( self).authenticate(sender, reader) + except FlightError as flight_error: + return ( flight_error).to_status() + finally: + sender.poison() + reader.poison() + return CStatus_OK() + + +cdef CStatus _get_token(void* self, c_string* token) except *: + """Callback for implementing authentication in Python.""" + cdef c_string c_result + try: + c_result = tobytes(( self).get_token()) + token[0] = c_result + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _middleware_sending_headers( + void* self, CAddCallHeaders* add_headers) except *: + """Callback for implementing middleware.""" + try: + headers = ( self).sending_headers() + except FlightError as flight_error: + return ( flight_error).to_status() + + if headers: + for header, values in headers.items(): + if isinstance(values, (str, bytes)): + values = (values,) + # Headers in gRPC (and HTTP/1, HTTP/2) are required to be + # valid, lowercase ASCII. + header = header.lower() + if isinstance(header, str): + header = header.encode("ascii") + for value in values: + if isinstance(value, str): + value = value.encode("ascii") + # Allow bytes values to pass through. 
+ add_headers.AddHeader(header, value) + + return CStatus_OK() + + +cdef CStatus _middleware_call_completed( + void* self, + const CStatus& call_status) except *: + """Callback for implementing middleware.""" + try: + try: + check_flight_status(call_status) + except Exception as e: + ( self).call_completed(e) + else: + ( self).call_completed(None) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _middleware_received_headers( + void* self, + const CCallHeaders& c_headers) except *: + """Callback for implementing middleware.""" + try: + headers = convert_headers(c_headers) + ( self).received_headers(headers) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef dict convert_headers(const CCallHeaders& c_headers): + cdef: + CCallHeaders.const_iterator header_iter = c_headers.cbegin() + headers = {} + while header_iter != c_headers.cend(): + header = c_string(deref(header_iter).first).decode("ascii") + value = c_string(deref(header_iter).second) + if not header.endswith("-bin"): + # Text header values in gRPC (and HTTP/1, HTTP/2) are + # required to be valid ASCII. Binary header values are + # exposed as bytes. + value = value.decode("ascii") + headers.setdefault(header, []).append(value) + postincrement(header_iter) + return headers + + +cdef CStatus _server_middleware_start_call( + void* self, + const CCallInfo& c_info, + const CCallHeaders& c_headers, + shared_ptr[CServerMiddleware]* c_instance) except *: + """Callback for implementing server middleware.""" + instance = None + try: + call_info = wrap_call_info(c_info) + headers = convert_headers(c_headers) + instance = ( self).start_call(call_info, headers) + except FlightError as flight_error: + return ( flight_error).to_status() + + if instance: + ServerMiddleware.wrap(instance, c_instance) + + return CStatus_OK() + + +cdef CStatus _client_middleware_start_call( + void* self, + const CCallInfo& c_info, + unique_ptr[CClientMiddleware]* c_instance) except *: + """Callback for implementing client middleware.""" + instance = None + try: + call_info = wrap_call_info(c_info) + instance = ( self).start_call(call_info) + except FlightError as flight_error: + return ( flight_error).to_status() + + if instance: + ClientMiddleware.wrap(instance, c_instance) + + return CStatus_OK() + + +cdef class ServerAuthHandler(_Weakrefable): + """Authentication middleware for a server. + + To implement an authentication mechanism, subclass this class and + override its methods. + + """ + + def authenticate(self, outgoing, incoming): + """Conduct the handshake with the client. + + May raise an error if the client cannot authenticate. + + Parameters + ---------- + outgoing : ServerAuthSender + A channel to send messages to the client. + incoming : ServerAuthReader + A channel to read messages from the client. + """ + raise NotImplementedError + + def is_valid(self, token): + """Validate a client token, returning their identity. + + May return an empty string (if the auth mechanism does not + name the peer) or raise an exception (if the token is + invalid). + + Parameters + ---------- + token : bytes + The authentication token from the client. 
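A toy sketch of the handshake interface above: the server reads one password message, replies with a token, and is_valid later maps that token to an identity. The secret and token values are illustrative and not suitable for real use.

import pyarrow.flight as flight

class ToyServerAuthHandler(flight.ServerAuthHandler):
    def authenticate(self, outgoing, incoming):
        if incoming.read() != b"open-sesame":
            raise flight.FlightUnauthenticatedError("bad password")
        outgoing.write(b"session-token")

    def is_valid(self, token):
        if token != b"session-token":
            raise flight.FlightUnauthenticatedError("invalid token")
        return b"the-only-user"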
+ + """ + raise NotImplementedError + + cdef PyServerAuthHandler* to_handler(self): + cdef PyServerAuthHandlerVtable vtable + vtable.authenticate = _server_authenticate + vtable.is_valid = _is_valid + return new PyServerAuthHandler(self, vtable) + + +cdef class ClientAuthHandler(_Weakrefable): + """Authentication plugin for a client.""" + + def authenticate(self, outgoing, incoming): + """Conduct the handshake with the server. + + Parameters + ---------- + outgoing : ClientAuthSender + A channel to send messages to the server. + incoming : ClientAuthReader + A channel to read messages from the server. + """ + raise NotImplementedError + + def get_token(self): + """Get the auth token for a call.""" + raise NotImplementedError + + cdef PyClientAuthHandler* to_handler(self): + cdef PyClientAuthHandlerVtable vtable + vtable.authenticate = _client_authenticate + vtable.get_token = _get_token + return new PyClientAuthHandler(self, vtable) + + +_CallInfo = collections.namedtuple("_CallInfo", ["method"]) + + +class CallInfo(_CallInfo): + """Information about a particular RPC for Flight middleware.""" + + +cdef wrap_call_info(const CCallInfo& c_info): + method = wrap_flight_method(c_info.method) + return CallInfo(method=method) + + +cdef class ClientMiddlewareFactory(_Weakrefable): + """A factory for new middleware instances. + + All middleware methods will be called from the same thread as the + RPC method implementation. That is, thread-locals set in the + client are accessible from the middleware itself. + + """ + + def start_call(self, info): + """Called at the start of an RPC. + + This must be thread-safe and must not raise exceptions. + + Parameters + ---------- + info : CallInfo + Information about the call. + + Returns + ------- + instance : ClientMiddleware + An instance of ClientMiddleware (the instance to use for + the call), or None if this call is not intercepted. + + """ + + +cdef class ClientMiddleware(_Weakrefable): + """Client-side middleware for a call, instantiated per RPC. + + Methods here should be fast and must be infallible: they should + not raise exceptions or stall indefinitely. + + """ + + def sending_headers(self): + """A callback before headers are sent. + + Returns + ------- + headers : dict + A dictionary of header values to add to the request, or + None if no headers are to be added. The dictionary should + have string keys and string or list-of-string values. + + Bytes values are allowed, but the underlying transport may + not support them or may restrict them. For gRPC, binary + values are only allowed on headers ending in "-bin". + + Header names must be lowercase ASCII. + + """ + + def received_headers(self, headers): + """A callback when headers are received. + + The default implementation does nothing. + + Parameters + ---------- + headers : dict + A dictionary of headers from the server. Keys are strings + and values are lists of strings (for text headers) or + bytes (for binary headers). + + """ + + def call_completed(self, exception): + """A callback when the call finishes. + + The default implementation does nothing. + + Parameters + ---------- + exception : ArrowException + If the call errored, this is the equivalent + exception. Will be None if the call succeeded. 
+ + """ + + @staticmethod + cdef void wrap(object py_middleware, + unique_ptr[CClientMiddleware]* c_instance): + cdef PyClientMiddlewareVtable vtable + vtable.sending_headers = _middleware_sending_headers + vtable.received_headers = _middleware_received_headers + vtable.call_completed = _middleware_call_completed + c_instance[0].reset(new CPyClientMiddleware(py_middleware, vtable)) + + +cdef class ServerMiddlewareFactory(_Weakrefable): + """A factory for new middleware instances. + + All middleware methods will be called from the same thread as the + RPC method implementation. That is, thread-locals set in the + middleware are accessible from the method itself. + + """ + + def start_call(self, info, headers): + """Called at the start of an RPC. + + This must be thread-safe. + + Parameters + ---------- + info : CallInfo + Information about the call. + headers : dict + A dictionary of headers from the client. Keys are strings + and values are lists of strings (for text headers) or + bytes (for binary headers). + + Returns + ------- + instance : ServerMiddleware + An instance of ServerMiddleware (the instance to use for + the call), or None if this call is not intercepted. + + Raises + ------ + exception : pyarrow.ArrowException + If an exception is raised, the call will be rejected with + the given error. + + """ + + +cdef class TracingServerMiddlewareFactory(ServerMiddlewareFactory): + """A factory for tracing middleware instances. + + This enables OpenTelemetry support in Arrow (if Arrow was compiled + with OpenTelemetry support enabled). A new span will be started on + each RPC call. The TracingServerMiddleware instance can then be + retrieved within an RPC handler to get the propagated context, + which can be used to start a new span on the Python side. + + Because the Python/C++ OpenTelemetry libraries do not + interoperate, spans on the C++ side are not directly visible to + the Python side and vice versa. + + """ + + +cdef class ServerMiddleware(_Weakrefable): + """Server-side middleware for a call, instantiated per RPC. + + Methods here should be fast and must be infallible: they should + not raise exceptions or stall indefinitely. + + """ + + def sending_headers(self): + """A callback before headers are sent. + + Returns + ------- + headers : dict + A dictionary of header values to add to the response, or + None if no headers are to be added. The dictionary should + have string keys and string or list-of-string values. + + Bytes values are allowed, but the underlying transport may + not support them or may restrict them. For gRPC, binary + values are only allowed on headers ending in "-bin". + + Header names must be lowercase ASCII. + + """ + + def call_completed(self, exception): + """A callback when the call finishes. + + Parameters + ---------- + exception : pyarrow.ArrowException + If the call errored, this is the equivalent + exception. Will be None if the call succeeded. 
+ + """ + + @staticmethod + cdef void wrap(object py_middleware, + shared_ptr[CServerMiddleware]* c_instance): + cdef PyServerMiddlewareVtable vtable + vtable.sending_headers = _middleware_sending_headers + vtable.call_completed = _middleware_call_completed + c_instance[0].reset(new CPyServerMiddleware(py_middleware, vtable)) + + +class TracingServerMiddleware(ServerMiddleware): + __slots__ = ["trace_context"] + + def __init__(self, trace_context): + self.trace_context = trace_context + + +cdef class _ServerMiddlewareFactoryWrapper(ServerMiddlewareFactory): + """Wrapper to bundle server middleware into a single C++ one.""" + + cdef: + dict factories + + def __init__(self, dict factories): + self.factories = factories + + def start_call(self, info, headers): + instances = {} + for key, factory in self.factories.items(): + instance = factory.start_call(info, headers) + if instance: + # TODO: prevent duplicate keys + instances[key] = instance + if instances: + wrapper = _ServerMiddlewareWrapper(instances) + return wrapper + return None + + +cdef class _ServerMiddlewareWrapper(ServerMiddleware): + cdef: + dict middleware + + def __init__(self, dict middleware): + self.middleware = middleware + + def sending_headers(self): + headers = collections.defaultdict(list) + for instance in self.middleware.values(): + more_headers = instance.sending_headers() + if not more_headers: + continue + # Manually merge with existing headers (since headers are + # multi-valued) + for key, values in more_headers.items(): + # ARROW-16606 gRPC aborts given non-lowercase headers + key = key.lower() + if isinstance(values, (bytes, str)): + values = (values,) + headers[key].extend(values) + return headers + + def call_completed(self, exception): + for instance in self.middleware.values(): + instance.call_completed(exception) + + +cdef class _FlightServerFinalizer(_Weakrefable): + """ + A finalizer that shuts down the server on destruction. + + See ARROW-16597. If the server is still active at interpreter + exit, the process may segfault. + """ + + cdef: + shared_ptr[PyFlightServer] server + + def finalize(self): + cdef: + PyFlightServer* server = self.server.get() + CStatus status + if server == NULL: + return + try: + with nogil: + status = server.Shutdown() + if status.ok(): + status = server.Wait() + check_flight_status(status) + finally: + self.server.reset() + + +cdef class FlightServerBase(_Weakrefable): + """A Flight service definition. + + To start the server, create an instance of this class with an + appropriate location. The server will be running as soon as the + instance is created; it is not required to call :meth:`serve`. + + Override methods to define your Flight service. + + Parameters + ---------- + location : str, tuple or Location optional, default None + Location to serve on. Either a gRPC URI like `grpc://localhost:port`, + a tuple of (host, port) pair, or a Location instance. + If None is passed then the server will be started on localhost with a + system provided random port. + auth_handler : ServerAuthHandler optional, default None + An authentication mechanism to use. May be None. + tls_certificates : list optional, default None + A list of (certificate, key) pairs. + verify_client : boolean optional, default False + If True, then enable mutual TLS: require the client to present + a client certificate, and validate the certificate. 
+ root_certificates : bytes optional, default None + If enabling mutual TLS, this specifies the PEM-encoded root + certificate used to validate client certificates. + middleware : dict optional, default None + A dictionary of :class:`ServerMiddlewareFactory` instances. The + string keys can be used to retrieve the middleware instance within + RPC handlers (see :meth:`ServerCallContext.get_middleware`). + + """ + + cdef: + shared_ptr[PyFlightServer] server + object finalizer + + def __init__(self, location=None, auth_handler=None, + tls_certificates=None, verify_client=None, + root_certificates=None, middleware=None): + self.finalizer = None + if isinstance(location, (bytes, str)): + location = Location(location) + elif isinstance(location, (tuple, type(None))): + if location is None: + location = ('localhost', 0) + host, port = location + if tls_certificates: + location = Location.for_grpc_tls(host, port) + else: + location = Location.for_grpc_tcp(host, port) + elif not isinstance(location, Location): + raise TypeError('`location` argument must be a string, tuple or a ' + 'Location instance') + self.init(location, auth_handler, tls_certificates, verify_client, + tobytes(root_certificates or b""), middleware) + + cdef init(self, Location location, ServerAuthHandler auth_handler, + list tls_certificates, c_bool verify_client, + bytes root_certificates, dict middleware): + cdef: + PyFlightServerVtable vtable = PyFlightServerVtable() + PyFlightServer* c_server + unique_ptr[CFlightServerOptions] c_options + CCertKeyPair c_cert + function[cb_server_middleware_start_call] start_call = \ + &_server_middleware_start_call + pair[c_string, shared_ptr[CServerMiddlewareFactory]] c_middleware + + c_options.reset(new CFlightServerOptions(Location.unwrap(location))) + # mTLS configuration + c_options.get().verify_client = verify_client + c_options.get().root_certificates = root_certificates + + if auth_handler: + if not isinstance(auth_handler, ServerAuthHandler): + raise TypeError("auth_handler must be a ServerAuthHandler, " + "not a '{}'".format(type(auth_handler))) + c_options.get().auth_handler.reset( + ( auth_handler).to_handler()) + + if tls_certificates: + for cert, key in tls_certificates: + c_cert.pem_cert = tobytes(cert) + c_cert.pem_key = tobytes(key) + c_options.get().tls_certificates.push_back(c_cert) + + if middleware: + non_tracing_middleware = {} + enable_tracing = None + for key, factory in middleware.items(): + if isinstance(factory, TracingServerMiddlewareFactory): + if enable_tracing is not None: + raise ValueError( + "Can only provide " + "TracingServerMiddlewareFactory once") + if tobytes(key) == CPyServerMiddlewareName: + raise ValueError(f"Middleware key cannot be {key}") + enable_tracing = key + else: + non_tracing_middleware[key] = factory + + if enable_tracing: + c_middleware.first = tobytes(enable_tracing) + c_middleware.second = MakeTracingServerMiddlewareFactory() + c_options.get().middleware.push_back(c_middleware) + + py_middleware = _ServerMiddlewareFactoryWrapper( + non_tracing_middleware) + c_middleware.first = CPyServerMiddlewareName + c_middleware.second.reset(new CPyServerMiddlewareFactory( + py_middleware, + start_call)) + c_options.get().middleware.push_back(c_middleware) + + vtable.list_flights = &_list_flights + vtable.get_flight_info = &_get_flight_info + vtable.get_schema = &_get_schema + vtable.do_put = &_do_put + vtable.do_get = &_do_get + vtable.do_exchange = &_do_exchange + vtable.list_actions = &_list_actions + vtable.do_action = &_do_action + + c_server = 
new PyFlightServer(self, vtable) + self.server.reset(c_server) + with nogil: + check_flight_status(c_server.Init(deref(c_options))) + cdef _FlightServerFinalizer finalizer = _FlightServerFinalizer() + finalizer.server = self.server + self.finalizer = weakref.finalize(self, finalizer.finalize) + + @property + def port(self): + """ + Get the port that this server is listening on. + + Returns a non-positive value if the operation is invalid + (e.g. init() was not called or server is listening on a domain + socket). + """ + return self.server.get().port() + + def list_flights(self, context, criteria): + """List flights available on this service. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + criteria : bytes + Filter criteria provided by the client. + + Returns + ------- + iterator of FlightInfo + + """ + raise NotImplementedError + + def get_flight_info(self, context, descriptor): + """Get information about a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + descriptor : FlightDescriptor + The descriptor for the flight provided by the client. + + Returns + ------- + FlightInfo + + """ + raise NotImplementedError + + def get_schema(self, context, descriptor): + """Get the schema of a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + descriptor : FlightDescriptor + The descriptor for the flight provided by the client. + + Returns + ------- + Schema + + """ + raise NotImplementedError + + def do_put(self, context, descriptor, reader: MetadataRecordBatchReader, + writer: FlightMetadataWriter): + """Write data to a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + descriptor : FlightDescriptor + The descriptor for the flight provided by the client. + reader : MetadataRecordBatchReader + A reader for data uploaded by the client. + writer : FlightMetadataWriter + A writer to send responses to the client. + + """ + raise NotImplementedError + + def do_get(self, context, ticket): + """Write data to a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + ticket : Ticket + The ticket for the flight. + + Returns + ------- + FlightDataStream + A stream of data to send back to the client. + + """ + raise NotImplementedError + + def do_exchange(self, context, descriptor, reader, writer): + """Write data to a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + descriptor : FlightDescriptor + The descriptor for the flight provided by the client. + reader : MetadataRecordBatchReader + A reader for data uploaded by the client. 
+ writer : MetadataRecordBatchWriter + A writer to send responses to the client. + + """ + raise NotImplementedError + + def list_actions(self, context): + """List custom actions available on this server. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + + Returns + ------- + iterator of ActionType or tuple + + """ + raise NotImplementedError + + def do_action(self, context, action): + """Execute a custom action. + + This method should return an iterator, or it should be a + generator. Applications should override this method to + implement their own behavior. The default method raises a + NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + action : Action + The action to execute. + + Returns + ------- + iterator of bytes + + """ + raise NotImplementedError + + def serve(self): + """Block until the server shuts down. + + This method only returns if shutdown() is called or a signal is + received. + """ + if self.server.get() == nullptr: + raise ValueError("run() on uninitialized FlightServerBase") + with nogil: + check_flight_status(self.server.get().ServeWithSignals()) + + def run(self): + """Block until the server shuts down. + + .. deprecated:: 0.15.0 + Use the ``FlightServer.serve`` method instead + """ + warnings.warn("The 'FlightServer.run' method is deprecated, use " + "FlightServer.serve method instead") + self.serve() + + def shutdown(self): + """Shut down the server, blocking until current requests finish. + + Do not call this directly from the implementation of a Flight + method, as then the server will block forever waiting for that + request to finish. Instead, call this method from a background + thread. + + This method should only be called once. + """ + # Must not hold the GIL: shutdown waits for pending RPCs to + # complete. Holding the GIL means Python-implemented Flight + # methods will never get to run, so this will hang + # indefinitely. + if self.server.get() == nullptr: + raise ValueError("shutdown() on uninitialized FlightServerBase") + with nogil: + check_flight_status(self.server.get().Shutdown()) + + def wait(self): + """Block until server is terminated with shutdown.""" + with nogil: + self.server.get().Wait() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + if self.finalizer: + self.finalizer() + + +def connect(location, **kwargs): + """ + Connect to a Flight server. + + Parameters + ---------- + location : str, tuple, or Location + Location to connect to. Either a URI like "grpc://localhost:port", + a tuple of (host, port), or a Location instance. + tls_root_certs : bytes or None + PEM-encoded. + cert_chain: str or None + If provided, enables TLS mutual authentication. + private_key: str or None + If provided, enables TLS mutual authentication. + override_hostname : str or None + Override the hostname checked by TLS. Insecure, use with caution. + middleware : list or None + A list of ClientMiddlewareFactory instances to apply. + write_size_limit_bytes : int or None + A soft limit on the size of a data payload sent to the + server. Enabled if positive. If enabled, writing a record + batch that (when serialized) exceeds this limit will raise an + exception; the client can retry the write with a smaller + batch. 
+ disable_server_verification : boolean or None + Disable verifying the server when using TLS. + Insecure, use with caution. + generic_options : list or None + A list of generic (string, int or string) options to pass to + the underlying transport. + + Returns + ------- + client : FlightClient + """ + return FlightClient(location, **kwargs) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_fs.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_fs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..dbfb6ed114553bc8435d0f61090c554e4ede5632 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_fs.pyx @@ -0,0 +1,1628 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from cpython.datetime cimport datetime, PyDateTime_DateTime +from cython cimport binding + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow_python cimport PyDateTime_to_TimePoint +from pyarrow.lib import _detect_compression, frombytes, tobytes +from pyarrow.lib cimport * +from pyarrow.util import _stringify_path + +from abc import ABC, abstractmethod +from datetime import datetime, timezone +import os +import pathlib +import sys + + +cdef _init_ca_paths(): + cdef CFileSystemGlobalOptions options + + import ssl + paths = ssl.get_default_verify_paths() + if paths.cafile: + options.tls_ca_file_path = os.fsencode(paths.cafile) + if paths.capath: + options.tls_ca_dir_path = os.fsencode(paths.capath) + check_status(CFileSystemsInitialize(options)) + + +if sys.platform == 'linux': + # ARROW-9261: On Linux, we may need to fixup the paths to TLS CA certs + # (especially in manylinux packages) since the values hardcoded at + # compile-time in libcurl may be wrong. + _init_ca_paths() + + +cdef inline c_string _path_as_bytes(path) except *: + # handle only abstract paths, not bound to any filesystem like pathlib is, + # so we only accept plain strings + if not isinstance(path, (bytes, str)): + raise TypeError('Path must be a string') + # tobytes always uses utf-8, which is more or less ok, at least on Windows + # since the C++ side then decodes from utf-8. On Unix, os.fsencode may be + # better. 
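+    # (for example, os.fsencode(path) would be one way to honor the
+    # filesystem encoding there)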
+ return tobytes(path) + + +cdef object _wrap_file_type(CFileType ty): + return FileType( ty) + + +cdef CFileType _unwrap_file_type(FileType ty) except *: + if ty == FileType.Unknown: + return CFileType_Unknown + elif ty == FileType.NotFound: + return CFileType_NotFound + elif ty == FileType.File: + return CFileType_File + elif ty == FileType.Directory: + return CFileType_Directory + assert 0 + + +def _file_type_to_string(ty): + # Python 3.11 changed str(IntEnum) to return the string representation + # of the integer value: https://github.com/python/cpython/issues/94763 + return f"{ty.__class__.__name__}.{ty._name_}" + + +cdef class FileInfo(_Weakrefable): + """ + FileSystem entry info. + + Parameters + ---------- + path : str + The full path to the filesystem entry. + type : FileType + The type of the filesystem entry. + mtime : datetime or float, default None + If given, the modification time of the filesystem entry. + If a float is given, it is the number of seconds since the + Unix epoch. + mtime_ns : int, default None + If given, the modification time of the filesystem entry, + in nanoseconds since the Unix epoch. + `mtime` and `mtime_ns` are mutually exclusive. + size : int, default None + If given, the filesystem entry size in bytes. This should only + be given if `type` is `FileType.File`. + + Examples + -------- + Generate a file: + + >>> from pyarrow import fs + >>> local = fs.LocalFileSystem() + >>> path_fs = local_path + '/pyarrow-fs-example.dat' + >>> with local.open_output_stream(path_fs) as stream: + ... stream.write(b'data') + 4 + + Get FileInfo object using ``get_file_info()``: + + >>> file_info = local.get_file_info(path_fs) + >>> file_info + + + Inspect FileInfo attributes: + + >>> file_info.type + + + >>> file_info.is_file + True + + >>> file_info.path + '/.../pyarrow-fs-example.dat' + + >>> file_info.base_name + 'pyarrow-fs-example.dat' + + >>> file_info.size + 4 + + >>> file_info.extension + 'dat' + + >>> file_info.mtime # doctest: +SKIP + datetime.datetime(2022, 6, 29, 7, 56, 10, 873922, tzinfo=datetime.timezone.utc) + + >>> file_info.mtime_ns # doctest: +SKIP + 1656489370873922073 + """ + + def __init__(self, path, FileType type=FileType.Unknown, *, + mtime=None, mtime_ns=None, size=None): + self.info.set_path(tobytes(path)) + self.info.set_type(_unwrap_file_type(type)) + if mtime is not None: + if mtime_ns is not None: + raise TypeError("Only one of mtime and mtime_ns " + "can be given") + if isinstance(mtime, datetime): + self.info.set_mtime(PyDateTime_to_TimePoint( + mtime)) + else: + self.info.set_mtime(TimePoint_from_s(mtime)) + elif mtime_ns is not None: + self.info.set_mtime(TimePoint_from_ns(mtime_ns)) + if size is not None: + self.info.set_size(size) + + @staticmethod + cdef wrap(CFileInfo info): + cdef FileInfo self = FileInfo.__new__(FileInfo) + self.info = move(info) + return self + + cdef inline CFileInfo unwrap(self) nogil: + return self.info + + @staticmethod + cdef CFileInfo unwrap_safe(obj): + if not isinstance(obj, FileInfo): + raise TypeError("Expected FileInfo instance, got {0}" + .format(type(obj))) + return ( obj).unwrap() + + def __repr__(self): + def getvalue(attr): + try: + return getattr(self, attr) + except ValueError: + return '' + + s = (f'>> file_info = local.get_file_info(path) + >>> file_info.path + '/.../pyarrow-fs-example.dat' + """ + return frombytes(self.info.path()) + + @property + def base_name(self): + """ + The file base name. + + Component after the last directory separator. 
+ + Examples + -------- + >>> file_info = local.get_file_info(path) + >>> file_info.base_name + 'pyarrow-fs-example.dat' + """ + return frombytes(self.info.base_name()) + + @property + def size(self): + """ + The size in bytes, if available. + + Only regular files are guaranteed to have a size. + + Returns + ------- + size : int or None + """ + cdef int64_t size + size = self.info.size() + return (size if size != -1 else None) + + @property + def extension(self): + """ + The file extension. + + Examples + -------- + >>> file_info = local.get_file_info(path) + >>> file_info.extension + 'dat' + """ + return frombytes(self.info.extension()) + + @property + def mtime(self): + """ + The time of last modification, if available. + + Returns + ------- + mtime : datetime.datetime or None + + Examples + -------- + >>> file_info = local.get_file_info(path) + >>> file_info.mtime # doctest: +SKIP + datetime.datetime(2022, 6, 29, 7, 56, 10, 873922, tzinfo=datetime.timezone.utc) + """ + cdef int64_t nanoseconds + nanoseconds = TimePoint_to_ns(self.info.mtime()) + return (datetime.fromtimestamp(nanoseconds / 1.0e9, timezone.utc) + if nanoseconds != -1 else None) + + @property + def mtime_ns(self): + """ + The time of last modification, if available, expressed in nanoseconds + since the Unix epoch. + + Returns + ------- + mtime_ns : int or None + + Examples + -------- + >>> file_info = local.get_file_info(path) + >>> file_info.mtime_ns # doctest: +SKIP + 1656489370873922073 + """ + cdef int64_t nanoseconds + nanoseconds = TimePoint_to_ns(self.info.mtime()) + return (nanoseconds if nanoseconds != -1 else None) + + +cdef class FileSelector(_Weakrefable): + """ + File and directory selector. + + It contains a set of options that describes how to search for files and + directories. + + Parameters + ---------- + base_dir : str + The directory in which to select files. Relative paths also work, use + '.' for the current directory and '..' for the parent. + allow_not_found : bool, default False + The behavior if `base_dir` doesn't exist in the filesystem. + If false, an error is returned. + If true, an empty selection is returned. + recursive : bool, default False + Whether to recurse into subdirectories. + + Examples + -------- + List the contents of a directory and subdirectories: + + >>> selector_1 = fs.FileSelector(local_path, recursive=True) + >>> local.get_file_info(selector_1) # doctest: +SKIP + [, + , + ] + + List only the contents of the base directory: + + >>> selector_2 = fs.FileSelector(local_path) + >>> local.get_file_info(selector_2) # doctest: +SKIP + [, + ] + + Return empty selection if the directory doesn't exist: + + >>> selector_not_found = fs.FileSelector(local_path + '/missing', + ... recursive=True, + ... 
allow_not_found=True) + >>> local.get_file_info(selector_not_found) + [] + """ + + def __init__(self, base_dir, bint allow_not_found=False, + bint recursive=False): + self.base_dir = base_dir + self.recursive = recursive + self.allow_not_found = allow_not_found + + @staticmethod + cdef FileSelector wrap(CFileSelector wrapped): + cdef FileSelector self = FileSelector.__new__(FileSelector) + self.selector = move(wrapped) + return self + + cdef inline CFileSelector unwrap(self) nogil: + return self.selector + + @property + def base_dir(self): + return frombytes(self.selector.base_dir) + + @base_dir.setter + def base_dir(self, base_dir): + self.selector.base_dir = _path_as_bytes(base_dir) + + @property + def allow_not_found(self): + return self.selector.allow_not_found + + @allow_not_found.setter + def allow_not_found(self, bint allow_not_found): + self.selector.allow_not_found = allow_not_found + + @property + def recursive(self): + return self.selector.recursive + + @recursive.setter + def recursive(self, bint recursive): + self.selector.recursive = recursive + + def __repr__(self): + return ("".format(self)) + + +cdef class FileSystem(_Weakrefable): + """ + Abstract file system API. + """ + + def __init__(self): + raise TypeError("FileSystem is an abstract class, instantiate one of " + "the subclasses instead: LocalFileSystem or " + "SubTreeFileSystem") + + @staticmethod + @binding(True) # Required for cython < 3 + def _from_uri(uri): + fs, _path = FileSystem.from_uri(uri) + return fs + + @staticmethod + def from_uri(uri): + """ + Create a new FileSystem from URI or Path. + + Recognized URI schemes are "file", "mock", "s3fs", "gs", "gcs", "hdfs" and "viewfs". + In addition, the argument can be a pathlib.Path object, or a string + describing an absolute local path. + + Parameters + ---------- + uri : string + URI-based path, for example: file:///some/local/path. + + Returns + ------- + tuple of (FileSystem, str path) + With (filesystem, path) tuple where path is the abstract path + inside the FileSystem instance. 
+ + Examples + -------- + Create a new FileSystem subclass from a URI: + + >>> uri = 'file:///{}/pyarrow-fs-example.dat'.format(local_path) + >>> local_new, path_new = fs.FileSystem.from_uri(uri) + >>> local_new + >> path_new + '/.../pyarrow-fs-example.dat' + + Or from a s3 bucket: + + >>> fs.FileSystem.from_uri("s3://usgs-landsat/collection02/") + (, 'usgs-landsat/collection02') + """ + cdef: + c_string c_path + c_string c_uri + CResult[shared_ptr[CFileSystem]] result + + if isinstance(uri, pathlib.Path): + # Make absolute + uri = uri.resolve().absolute() + c_uri = tobytes(_stringify_path(uri)) + with nogil: + result = CFileSystemFromUriOrPath(c_uri, &c_path) + return FileSystem.wrap(GetResultValue(result)), frombytes(c_path) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + self.wrapped = wrapped + self.fs = wrapped.get() + + @staticmethod + cdef wrap(const shared_ptr[CFileSystem]& sp): + cdef FileSystem self + + typ = frombytes(sp.get().type_name()) + if typ == 'local': + self = LocalFileSystem.__new__(LocalFileSystem) + elif typ == 'mock': + self = _MockFileSystem.__new__(_MockFileSystem) + elif typ == 'subtree': + self = SubTreeFileSystem.__new__(SubTreeFileSystem) + elif typ == 's3': + from pyarrow._s3fs import S3FileSystem + self = S3FileSystem.__new__(S3FileSystem) + elif typ == 'gcs': + from pyarrow._gcsfs import GcsFileSystem + self = GcsFileSystem.__new__(GcsFileSystem) + elif typ == 'abfs': + from pyarrow._azurefs import AzureFileSystem + self = AzureFileSystem.__new__(AzureFileSystem) + elif typ == 'hdfs': + from pyarrow._hdfs import HadoopFileSystem + self = HadoopFileSystem.__new__(HadoopFileSystem) + elif typ.startswith('py::'): + self = PyFileSystem.__new__(PyFileSystem) + else: + raise TypeError('Cannot wrap FileSystem pointer') + + self.init(sp) + return self + + cdef inline shared_ptr[CFileSystem] unwrap(self) nogil: + return self.wrapped + + def equals(self, FileSystem other not None): + """ + Parameters + ---------- + other : pyarrow.fs.FileSystem + + Returns + ------- + bool + """ + return self.fs.Equals(other.unwrap()) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + @property + def type_name(self): + """ + The filesystem's type name. + """ + return frombytes(self.fs.type_name()) + + def get_file_info(self, paths_or_selector): + """ + Get info for the given files. + + Any symlink is automatically dereferenced, recursively. A non-existing + or unreachable file returns a FileStat object and has a FileType of + value NotFound. An exception indicates a truly exceptional condition + (low-level I/O error, etc.). + + Parameters + ---------- + paths_or_selector : FileSelector, path-like or list of path-likes + Either a selector object, a path-like object or a list of + path-like objects. The selector's base directory will not be + part of the results, even if it exists. If it doesn't exist, + use `allow_not_found`. + + Returns + ------- + FileInfo or list of FileInfo + Single FileInfo object is returned for a single path, otherwise + a list of FileInfo objects is returned. 
+ + Examples + -------- + >>> local + + >>> local.get_file_info("/{}/pyarrow-fs-example.dat".format(local_path)) + + """ + cdef: + CFileInfo info + c_string path + vector[CFileInfo] infos + vector[c_string] paths + CFileSelector selector + + if isinstance(paths_or_selector, FileSelector): + with nogil: + selector = (paths_or_selector).selector + infos = GetResultValue(self.fs.GetFileInfo(selector)) + elif isinstance(paths_or_selector, (list, tuple)): + paths = [_path_as_bytes(s) for s in paths_or_selector] + with nogil: + infos = GetResultValue(self.fs.GetFileInfo(paths)) + elif isinstance(paths_or_selector, (bytes, str)): + path =_path_as_bytes(paths_or_selector) + with nogil: + info = GetResultValue(self.fs.GetFileInfo(path)) + return FileInfo.wrap(info) + else: + raise TypeError('Must pass either path(s) or a FileSelector') + + return [FileInfo.wrap(info) for info in infos] + + def create_dir(self, path, *, bint recursive=True): + """ + Create a directory and subdirectories. + + This function succeeds if the directory already exists. + + Parameters + ---------- + path : str + The path of the new directory. + recursive : bool, default True + Create nested directories as well. + """ + cdef c_string directory = _path_as_bytes(path) + with nogil: + check_status(self.fs.CreateDir(directory, recursive=recursive)) + + def delete_dir(self, path): + """ + Delete a directory and its contents, recursively. + + Parameters + ---------- + path : str + The path of the directory to be deleted. + """ + cdef c_string directory = _path_as_bytes(path) + with nogil: + check_status(self.fs.DeleteDir(directory)) + + def delete_dir_contents(self, path, *, + bint accept_root_dir=False, + bint missing_dir_ok=False): + """ + Delete a directory's contents, recursively. + + Like delete_dir, but doesn't delete the directory itself. + + Parameters + ---------- + path : str + The path of the directory to be deleted. + accept_root_dir : boolean, default False + Allow deleting the root directory's contents + (if path is empty or "/") + missing_dir_ok : boolean, default False + If False then an error is raised if path does + not exist + """ + cdef c_string directory = _path_as_bytes(path) + if accept_root_dir and directory.strip(b"/") == b"": + with nogil: + check_status(self.fs.DeleteRootDirContents()) + else: + with nogil: + check_status(self.fs.DeleteDirContents(directory, + missing_dir_ok)) + + def move(self, src, dest): + """ + Move / rename a file or directory. + + If the destination exists: + - if it is a non-empty directory, an error is returned + - otherwise, if it has the same type as the source, it is replaced + - otherwise, behavior is unspecified (implementation-dependent). + + Parameters + ---------- + src : str + The path of the file or the directory to be moved. + dest : str + The destination path where the file or directory is moved to. + + Examples + -------- + Create a new folder with a file: + + >>> local.create_dir('/tmp/other_dir') + >>> local.copy_file(path,'/tmp/move_example.dat') + + Move the file: + + >>> local.move('/tmp/move_example.dat', + ... 
'/tmp/other_dir/move_example_2.dat') + + Inspect the file info: + + >>> local.get_file_info('/tmp/other_dir/move_example_2.dat') + + >>> local.get_file_info('/tmp/move_example.dat') + + + Delete the folder: + >>> local.delete_dir('/tmp/other_dir') + """ + cdef: + c_string source = _path_as_bytes(src) + c_string destination = _path_as_bytes(dest) + with nogil: + check_status(self.fs.Move(source, destination)) + + def copy_file(self, src, dest): + """ + Copy a file. + + If the destination exists and is a directory, an error is returned. + Otherwise, it is replaced. + + Parameters + ---------- + src : str + The path of the file to be copied from. + dest : str + The destination path where the file is copied to. + + Examples + -------- + >>> local.copy_file(path, + ... local_path + '/pyarrow-fs-example_copy.dat') + + Inspect the file info: + + >>> local.get_file_info(local_path + '/pyarrow-fs-example_copy.dat') + + >>> local.get_file_info(path) + + """ + cdef: + c_string source = _path_as_bytes(src) + c_string destination = _path_as_bytes(dest) + with nogil: + check_status(self.fs.CopyFile(source, destination)) + + def delete_file(self, path): + """ + Delete a file. + + Parameters + ---------- + path : str + The path of the file to be deleted. + """ + cdef c_string file = _path_as_bytes(path) + with nogil: + check_status(self.fs.DeleteFile(file)) + + def _wrap_input_stream(self, stream, path, compression, buffer_size): + if buffer_size is not None and buffer_size != 0: + stream = BufferedInputStream(stream, buffer_size) + if compression == 'detect': + compression = _detect_compression(path) + if compression is not None: + stream = CompressedInputStream(stream, compression) + return stream + + def _wrap_output_stream(self, stream, path, compression, buffer_size): + if buffer_size is not None and buffer_size != 0: + stream = BufferedOutputStream(stream, buffer_size) + if compression == 'detect': + compression = _detect_compression(path) + if compression is not None: + stream = CompressedOutputStream(stream, compression) + return stream + + def open_input_file(self, path): + """ + Open an input file for random access reading. + + Parameters + ---------- + path : str + The source to open for reading. + + Returns + ------- + stream : NativeFile + + Examples + -------- + Print the data from the file with `open_input_file()`: + + >>> with local.open_input_file(path) as f: + ... print(f.readall()) + b'data' + """ + cdef: + c_string pathstr = _path_as_bytes(path) + NativeFile stream = NativeFile() + shared_ptr[CRandomAccessFile] in_handle + + with nogil: + in_handle = GetResultValue(self.fs.OpenInputFile(pathstr)) + + stream.set_random_access_file(in_handle) + stream.is_readable = True + return stream + + def open_input_stream(self, path, compression='detect', buffer_size=None): + """ + Open an input stream for sequential reading. + + Parameters + ---------- + path : str + The source to open for reading. + compression : str optional, default 'detect' + The compression algorithm to use for on-the-fly decompression. + If "detect" and source is a file path, then compression will be + chosen based on the file extension. + If None, no compression will be applied. Otherwise, a well-known + algorithm name must be supplied (e.g. "gzip"). + buffer_size : int optional, default None + If None or 0, no buffering will happen. Otherwise the size of the + temporary read buffer. 
+ + Returns + ------- + stream : NativeFile + + Examples + -------- + Print the data from the file with `open_input_stream()`: + + >>> with local.open_input_stream(path) as f: + ... print(f.readall()) + b'data' + """ + cdef: + c_string pathstr = _path_as_bytes(path) + NativeFile stream = NativeFile() + shared_ptr[CInputStream] in_handle + + with nogil: + in_handle = GetResultValue(self.fs.OpenInputStream(pathstr)) + + stream.set_input_stream(in_handle) + stream.is_readable = True + + return self._wrap_input_stream( + stream, path=path, compression=compression, buffer_size=buffer_size + ) + + def open_output_stream(self, path, compression='detect', + buffer_size=None, metadata=None): + """ + Open an output stream for sequential writing. + + If the target already exists, existing data is truncated. + + Parameters + ---------- + path : str + The source to open for writing. + compression : str optional, default 'detect' + The compression algorithm to use for on-the-fly compression. + If "detect" and source is a file path, then compression will be + chosen based on the file extension. + If None, no compression will be applied. Otherwise, a well-known + algorithm name must be supplied (e.g. "gzip"). + buffer_size : int optional, default None + If None or 0, no buffering will happen. Otherwise the size of the + temporary write buffer. + metadata : dict optional, default None + If not None, a mapping of string keys to string values. + Some filesystems support storing metadata along the file + (such as "Content-Type"). + Unsupported metadata keys will be ignored. + + Returns + ------- + stream : NativeFile + + Examples + -------- + >>> local = fs.LocalFileSystem() + >>> with local.open_output_stream(path) as stream: + ... stream.write(b'data') + 4 + """ + cdef: + c_string pathstr = _path_as_bytes(path) + NativeFile stream = NativeFile() + shared_ptr[COutputStream] out_handle + shared_ptr[const CKeyValueMetadata] c_metadata + + if metadata is not None: + c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(metadata)) + + with nogil: + out_handle = GetResultValue( + self.fs.OpenOutputStream(pathstr, c_metadata)) + + stream.set_output_stream(out_handle) + stream.is_writable = True + + return self._wrap_output_stream( + stream, path=path, compression=compression, buffer_size=buffer_size + ) + + def open_append_stream(self, path, compression='detect', + buffer_size=None, metadata=None): + """ + Open an output stream for appending. + + If the target doesn't exist, a new empty file is created. + + .. note:: + Some filesystem implementations do not support efficient + appending to an existing file, in which case this method will + raise NotImplementedError. + Consider writing to multiple files (using e.g. the dataset layer) + instead. + + Parameters + ---------- + path : str + The source to open for writing. + compression : str optional, default 'detect' + The compression algorithm to use for on-the-fly compression. + If "detect" and source is a file path, then compression will be + chosen based on the file extension. + If None, no compression will be applied. Otherwise, a well-known + algorithm name must be supplied (e.g. "gzip"). + buffer_size : int optional, default None + If None or 0, no buffering will happen. Otherwise the size of the + temporary write buffer. + metadata : dict optional, default None + If not None, a mapping of string keys to string values. + Some filesystems support storing metadata along the file + (such as "Content-Type"). + Unsupported metadata keys will be ignored. 
+ + Returns + ------- + stream : NativeFile + + Examples + -------- + Append new data to a FileSystem subclass with nonempty file: + + >>> with local.open_append_stream(path) as f: + ... f.write(b'+newly added') + 12 + + Print out the content fo the file: + + >>> with local.open_input_file(path) as f: + ... print(f.readall()) + b'data+newly added' + """ + cdef: + c_string pathstr = _path_as_bytes(path) + NativeFile stream = NativeFile() + shared_ptr[COutputStream] out_handle + shared_ptr[const CKeyValueMetadata] c_metadata + + if metadata is not None: + c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(metadata)) + + with nogil: + out_handle = GetResultValue( + self.fs.OpenAppendStream(pathstr, c_metadata)) + + stream.set_output_stream(out_handle) + stream.is_writable = True + + return self._wrap_output_stream( + stream, path=path, compression=compression, buffer_size=buffer_size + ) + + def normalize_path(self, path): + """ + Normalize filesystem path. + + Parameters + ---------- + path : str + The path to normalize + + Returns + ------- + normalized_path : str + The normalized path + """ + cdef: + c_string c_path = _path_as_bytes(path) + c_string c_path_normalized + + c_path_normalized = GetResultValue(self.fs.NormalizePath(c_path)) + return frombytes(c_path_normalized) + + +cdef class LocalFileSystem(FileSystem): + """ + A FileSystem implementation accessing files on the local machine. + + Details such as symlinks are abstracted away (symlinks are always followed, + except when deleting an entry). + + Parameters + ---------- + use_mmap : bool, default False + Whether open_input_stream and open_input_file should return + a mmap'ed file or a regular file. + + Examples + -------- + Create a FileSystem object with LocalFileSystem constructor: + + >>> from pyarrow import fs + >>> local = fs.LocalFileSystem() + >>> local + + + and write data on to the file: + + >>> with local.open_output_stream('/tmp/local_fs.dat') as stream: + ... stream.write(b'data') + 4 + >>> with local.open_input_stream('/tmp/local_fs.dat') as stream: + ... print(stream.readall()) + b'data' + + Create a FileSystem object inferred from a URI of the saved file: + + >>> local_new, path = fs.LocalFileSystem().from_uri('/tmp/local_fs.dat') + >>> local_new + >> path + '/tmp/local_fs.dat' + + Check if FileSystems `local` and `local_new` are equal: + + >>> local.equals(local_new) + True + + Compare two different FileSystems: + + >>> local2 = fs.LocalFileSystem(use_mmap=True) + >>> local.equals(local2) + False + + Copy a file and print out the data: + + >>> local.copy_file('/tmp/local_fs.dat', '/tmp/local_fs-copy.dat') + >>> with local.open_input_stream('/tmp/local_fs-copy.dat') as stream: + ... print(stream.readall()) + ... + b'data' + + Open an output stream for appending, add text and print the new data: + + >>> with local.open_append_stream('/tmp/local_fs-copy.dat') as f: + ... f.write(b'+newly added') + 12 + + >>> with local.open_input_stream('/tmp/local_fs-copy.dat') as f: + ... 
print(f.readall()) + b'data+newly added' + + Create a directory, copy a file into it and then delete the whole directory: + + >>> local.create_dir('/tmp/new_folder') + >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat') + >>> local.get_file_info('/tmp/new_folder') + + >>> local.delete_dir('/tmp/new_folder') + >>> local.get_file_info('/tmp/new_folder') + + + Create a directory, copy a file into it and then delete + the content of the directory: + + >>> local.create_dir('/tmp/new_folder') + >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat') + >>> local.get_file_info('/tmp/new_folder/local_fs.dat') + + >>> local.delete_dir_contents('/tmp/new_folder') + >>> local.get_file_info('/tmp/new_folder') + + >>> local.get_file_info('/tmp/new_folder/local_fs.dat') + + + Create a directory, copy a file into it and then delete + the file from the directory: + + >>> local.create_dir('/tmp/new_folder') + >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat') + >>> local.delete_file('/tmp/new_folder/local_fs.dat') + >>> local.get_file_info('/tmp/new_folder/local_fs.dat') + + >>> local.get_file_info('/tmp/new_folder') + + + Move the file: + + >>> local.move('/tmp/local_fs-copy.dat', '/tmp/new_folder/local_fs-copy.dat') + >>> local.get_file_info('/tmp/new_folder/local_fs-copy.dat') + + >>> local.get_file_info('/tmp/local_fs-copy.dat') + + + To finish delete the file left: + >>> local.delete_file('/tmp/local_fs.dat') + """ + + def __init__(self, *, use_mmap=False): + cdef: + shared_ptr[CFileSystem] fs + c_string c_uri + + # from_uri needs a non-empty path, so just use a placeholder of /_ + c_uri = tobytes(f"file:///_?use_mmap={int(use_mmap)}") + with nogil: + fs = GetResultValue(CFileSystemFromUri(c_uri)) + self.init( fs) + + def __reduce__(self): + uri = frombytes(GetResultValue(self.fs.MakeUri(b"/_"))) + return FileSystem._from_uri, (uri,) + + +cdef class SubTreeFileSystem(FileSystem): + """ + Delegates to another implementation after prepending a fixed base path. + + This is useful to expose a logical view of a subtree of a filesystem, + for example a directory in a LocalFileSystem. + + Note, that this makes no security guarantee. For example, symlinks may + allow to "escape" the subtree and access other parts of the underlying + filesystem. + + Parameters + ---------- + base_path : str + The root of the subtree. + base_fs : FileSystem + FileSystem object the operations delegated to. + + Examples + -------- + Create a LocalFileSystem instance: + + >>> from pyarrow import fs + >>> local = fs.LocalFileSystem() + >>> with local.open_output_stream('/tmp/local_fs.dat') as stream: + ... stream.write(b'data') + 4 + + Create a directory and a SubTreeFileSystem instance: + + >>> local.create_dir('/tmp/sub_tree') + >>> subtree = fs.SubTreeFileSystem('/tmp/sub_tree', local) + + Write data into the existing file: + + >>> with subtree.open_append_stream('sub_tree_fs.dat') as f: + ... f.write(b'+newly added') + 12 + + Print out the attributes: + + >>> subtree.base_fs + + >>> subtree.base_path + '/tmp/sub_tree/' + + Get info for the given directory or given file: + + >>> subtree.get_file_info('') + + >>> subtree.get_file_info('sub_tree_fs.dat') + + + Delete the file and directory: + + >>> subtree.delete_file('sub_tree_fs.dat') + >>> local.delete_dir('/tmp/sub_tree') + >>> local.delete_file('/tmp/local_fs.dat') + + For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`. 
+ """ + + def __init__(self, base_path, FileSystem base_fs): + cdef: + c_string pathstr + shared_ptr[CSubTreeFileSystem] wrapped + + pathstr = _path_as_bytes(base_path) + wrapped = make_shared[CSubTreeFileSystem](pathstr, base_fs.wrapped) + + self.init( wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.subtreefs = wrapped.get() + + def __repr__(self): + return ("SubTreeFileSystem(base_path={}, base_fs={}" + .format(self.base_path, self.base_fs)) + + def __reduce__(self): + return SubTreeFileSystem, ( + frombytes(self.subtreefs.base_path()), + FileSystem.wrap(self.subtreefs.base_fs()) + ) + + @property + def base_path(self): + return frombytes(self.subtreefs.base_path()) + + @property + def base_fs(self): + return FileSystem.wrap(self.subtreefs.base_fs()) + + +cdef class _MockFileSystem(FileSystem): + + def __init__(self, datetime current_time=None): + cdef shared_ptr[CMockFileSystem] wrapped + + current_time = current_time or datetime.now() + wrapped = make_shared[CMockFileSystem]( + PyDateTime_to_TimePoint( current_time) + ) + + self.init( wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.mockfs = wrapped.get() + + +cdef class PyFileSystem(FileSystem): + """ + A FileSystem with behavior implemented in Python. + + Parameters + ---------- + handler : FileSystemHandler + The handler object implementing custom filesystem behavior. + + Examples + -------- + Create an fsspec-based filesystem object for GitHub: + + >>> from fsspec.implementations import github + >>> gfs = github.GithubFileSystem('apache', 'arrow') # doctest: +SKIP + + Get a PyArrow FileSystem object: + + >>> from pyarrow.fs import PyFileSystem, FSSpecHandler + >>> pa_fs = PyFileSystem(FSSpecHandler(gfs)) # doctest: +SKIP + + Use :func:`~pyarrow.fs.FileSystem` functionality ``get_file_info()``: + + >>> pa_fs.get_file_info('README.md') # doctest: +SKIP + + """ + + def __init__(self, handler): + cdef: + CPyFileSystemVtable vtable + shared_ptr[CPyFileSystem] wrapped + + if not isinstance(handler, FileSystemHandler): + raise TypeError("Expected a FileSystemHandler instance, got {0}" + .format(type(handler))) + + vtable.get_type_name = _cb_get_type_name + vtable.equals = _cb_equals + vtable.get_file_info = _cb_get_file_info + vtable.get_file_info_vector = _cb_get_file_info_vector + vtable.get_file_info_selector = _cb_get_file_info_selector + vtable.create_dir = _cb_create_dir + vtable.delete_dir = _cb_delete_dir + vtable.delete_dir_contents = _cb_delete_dir_contents + vtable.delete_root_dir_contents = _cb_delete_root_dir_contents + vtable.delete_file = _cb_delete_file + vtable.move = _cb_move + vtable.copy_file = _cb_copy_file + vtable.open_input_stream = _cb_open_input_stream + vtable.open_input_file = _cb_open_input_file + vtable.open_output_stream = _cb_open_output_stream + vtable.open_append_stream = _cb_open_append_stream + vtable.normalize_path = _cb_normalize_path + + wrapped = CPyFileSystem.Make(handler, move(vtable)) + self.init( wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.pyfs = wrapped.get() + + @property + def handler(self): + """ + The filesystem's underlying handler. + + Returns + ------- + handler : FileSystemHandler + """ + return self.pyfs.handler() + + def __reduce__(self): + return PyFileSystem, (self.handler,) + + +class FileSystemHandler(ABC): + """ + An abstract class exposing methods to implement PyFileSystem's behavior. 
+ """ + + @abstractmethod + def get_type_name(self): + """ + Implement PyFileSystem.type_name. + """ + + @abstractmethod + def get_file_info(self, paths): + """ + Implement PyFileSystem.get_file_info(paths). + + Parameters + ---------- + paths : list of str + paths for which we want to retrieve the info. + """ + + @abstractmethod + def get_file_info_selector(self, selector): + """ + Implement PyFileSystem.get_file_info(selector). + + Parameters + ---------- + selector : FileSelector + selector for which we want to retrieve the info. + """ + + @abstractmethod + def create_dir(self, path, recursive): + """ + Implement PyFileSystem.create_dir(...). + + Parameters + ---------- + path : str + path of the directory. + recursive : bool + if the parent directories should be created too. + """ + + @abstractmethod + def delete_dir(self, path): + """ + Implement PyFileSystem.delete_dir(...). + + Parameters + ---------- + path : str + path of the directory. + """ + + @abstractmethod + def delete_dir_contents(self, path, missing_dir_ok=False): + """ + Implement PyFileSystem.delete_dir_contents(...). + + Parameters + ---------- + path : str + path of the directory. + missing_dir_ok : bool + if False an error should be raised if path does not exist + """ + + @abstractmethod + def delete_root_dir_contents(self): + """ + Implement PyFileSystem.delete_dir_contents("/", accept_root_dir=True). + """ + + @abstractmethod + def delete_file(self, path): + """ + Implement PyFileSystem.delete_file(...). + + Parameters + ---------- + path : str + path of the file. + """ + + @abstractmethod + def move(self, src, dest): + """ + Implement PyFileSystem.move(...). + + Parameters + ---------- + src : str + path of what should be moved. + dest : str + path of where it should be moved to. + """ + + @abstractmethod + def copy_file(self, src, dest): + """ + Implement PyFileSystem.copy_file(...). + + Parameters + ---------- + src : str + path of what should be copied. + dest : str + path of where it should be copied to. + """ + + @abstractmethod + def open_input_stream(self, path): + """ + Implement PyFileSystem.open_input_stream(...). + + Parameters + ---------- + path : str + path of what should be opened. + """ + + @abstractmethod + def open_input_file(self, path): + """ + Implement PyFileSystem.open_input_file(...). + + Parameters + ---------- + path : str + path of what should be opened. + """ + + @abstractmethod + def open_output_stream(self, path, metadata): + """ + Implement PyFileSystem.open_output_stream(...). + + Parameters + ---------- + path : str + path of what should be opened. + metadata : mapping + Mapping of string keys to string values. + Some filesystems support storing metadata along the file + (such as "Content-Type"). + """ + + @abstractmethod + def open_append_stream(self, path, metadata): + """ + Implement PyFileSystem.open_append_stream(...). + + Parameters + ---------- + path : str + path of what should be opened. + metadata : mapping + Mapping of string keys to string values. + Some filesystems support storing metadata along the file + (such as "Content-Type"). + """ + + @abstractmethod + def normalize_path(self, path): + """ + Implement PyFileSystem.normalize_path(...). + + Parameters + ---------- + path : str + path of what should be normalized. 
+ """ + +# Callback definitions for CPyFileSystemVtable + + +cdef void _cb_get_type_name(handler, c_string* out) except *: + out[0] = tobytes("py::" + handler.get_type_name()) + +cdef c_bool _cb_equals(handler, const CFileSystem& c_other) except False: + if c_other.type_name().startswith(b"py::"): + return ( c_other).handler() == handler + + return False + +cdef void _cb_get_file_info(handler, const c_string& path, + CFileInfo* out) except *: + infos = handler.get_file_info([frombytes(path)]) + if not isinstance(infos, list) or len(infos) != 1: + raise TypeError("get_file_info should have returned a 1-element list") + out[0] = FileInfo.unwrap_safe(infos[0]) + +cdef void _cb_get_file_info_vector(handler, const vector[c_string]& paths, + vector[CFileInfo]* out) except *: + py_paths = [frombytes(paths[i]) for i in range(len(paths))] + infos = handler.get_file_info(py_paths) + if not isinstance(infos, list): + raise TypeError("get_file_info should have returned a list") + out[0].clear() + out[0].reserve(len(infos)) + for info in infos: + out[0].push_back(FileInfo.unwrap_safe(info)) + +cdef void _cb_get_file_info_selector(handler, const CFileSelector& selector, + vector[CFileInfo]* out) except *: + infos = handler.get_file_info_selector(FileSelector.wrap(selector)) + if not isinstance(infos, list): + raise TypeError("get_file_info_selector should have returned a list") + out[0].clear() + out[0].reserve(len(infos)) + for info in infos: + out[0].push_back(FileInfo.unwrap_safe(info)) + +cdef void _cb_create_dir(handler, const c_string& path, + c_bool recursive) except *: + handler.create_dir(frombytes(path), recursive) + +cdef void _cb_delete_dir(handler, const c_string& path) except *: + handler.delete_dir(frombytes(path)) + +cdef void _cb_delete_dir_contents(handler, const c_string& path, + c_bool missing_dir_ok) except *: + handler.delete_dir_contents(frombytes(path), missing_dir_ok) + +cdef void _cb_delete_root_dir_contents(handler) except *: + handler.delete_root_dir_contents() + +cdef void _cb_delete_file(handler, const c_string& path) except *: + handler.delete_file(frombytes(path)) + +cdef void _cb_move(handler, const c_string& src, + const c_string& dest) except *: + handler.move(frombytes(src), frombytes(dest)) + +cdef void _cb_copy_file(handler, const c_string& src, + const c_string& dest) except *: + handler.copy_file(frombytes(src), frombytes(dest)) + +cdef void _cb_open_input_stream(handler, const c_string& path, + shared_ptr[CInputStream]* out) except *: + stream = handler.open_input_stream(frombytes(path)) + if not isinstance(stream, NativeFile): + raise TypeError("open_input_stream should have returned " + "a PyArrow file") + out[0] = ( stream).get_input_stream() + +cdef void _cb_open_input_file(handler, const c_string& path, + shared_ptr[CRandomAccessFile]* out) except *: + stream = handler.open_input_file(frombytes(path)) + if not isinstance(stream, NativeFile): + raise TypeError("open_input_file should have returned " + "a PyArrow file") + out[0] = ( stream).get_random_access_file() + +cdef void _cb_open_output_stream( + handler, const c_string& path, + const shared_ptr[const CKeyValueMetadata]& metadata, + shared_ptr[COutputStream]* out) except *: + stream = handler.open_output_stream( + frombytes(path), pyarrow_wrap_metadata(metadata)) + if not isinstance(stream, NativeFile): + raise TypeError("open_output_stream should have returned " + "a PyArrow file") + out[0] = ( stream).get_output_stream() + +cdef void _cb_open_append_stream( + handler, const c_string& path, + const 
shared_ptr[const CKeyValueMetadata]& metadata, + shared_ptr[COutputStream]* out) except *: + stream = handler.open_append_stream( + frombytes(path), pyarrow_wrap_metadata(metadata)) + if not isinstance(stream, NativeFile): + raise TypeError("open_append_stream should have returned " + "a PyArrow file") + out[0] = ( stream).get_output_stream() + +cdef void _cb_normalize_path(handler, const c_string& path, + c_string* out) except *: + out[0] = tobytes(handler.normalize_path(frombytes(path))) + + +def _copy_files(FileSystem source_fs, str source_path, + FileSystem destination_fs, str destination_path, + int64_t chunk_size, c_bool use_threads): + # low-level helper exposed through pyarrow/fs.py::copy_files + cdef: + CFileLocator c_source + vector[CFileLocator] c_sources + CFileLocator c_destination + vector[CFileLocator] c_destinations + + c_source.filesystem = source_fs.unwrap() + c_source.path = tobytes(source_path) + c_sources.push_back(c_source) + + c_destination.filesystem = destination_fs.unwrap() + c_destination.path = tobytes(destination_path) + c_destinations.push_back(c_destination) + + with nogil: + check_status(CCopyFiles( + c_sources, c_destinations, + c_default_io_context(), chunk_size, use_threads, + )) + + +def _copy_files_selector(FileSystem source_fs, FileSelector source_sel, + FileSystem destination_fs, str destination_base_dir, + int64_t chunk_size, c_bool use_threads): + # low-level helper exposed through pyarrow/fs.py::copy_files + cdef c_string c_destination_base_dir = tobytes(destination_base_dir) + + with nogil: + check_status(CCopyFilesWithSelector( + source_fs.unwrap(), source_sel.unwrap(), + destination_fs.unwrap(), c_destination_base_dir, + c_default_io_context(), chunk_size, use_threads, + )) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_orc.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_orc.pyx new file mode 100644 index 0000000000000000000000000000000000000000..1dd6848122c2d4d5d2a40faf70bbb4647329f9d8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_orc.pyx @@ -0,0 +1,445 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
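+# ---------------------------------------------------------------------------
+# Editor's note: illustrative sketch, not part of the upstream pyarrow
+# sources. It ties together the pieces defined in _fs.pyx above: a Python
+# class implementing the FileSystemHandler ABC can be wrapped in PyFileSystem,
+# and the _cb_* vtable callbacks then forward each C++ filesystem call to the
+# matching handler method. ``DictHandler`` and its in-memory dict backing are
+# assumptions invented for this example; FileSystemHandler, PyFileSystem,
+# FileInfo, FileType and BufferReader are the real pyarrow objects used.
+
+import pyarrow as pa
+from pyarrow.fs import FileInfo, FileSystemHandler, FileType, PyFileSystem
+
+
+class DictHandler(FileSystemHandler):
+    """Hypothetical read-only handler serving blobs from an in-memory dict."""
+
+    def __init__(self, blobs):
+        self.blobs = blobs  # mapping of path (str) -> bytes
+
+    def get_type_name(self):
+        return "dict"  # reported as "py::dict" by PyFileSystem
+
+    def get_file_info(self, paths):
+        # One FileInfo per requested path; missing paths become NotFound.
+        return [FileInfo(p, FileType.File, size=len(self.blobs[p]))
+                if p in self.blobs else FileInfo(p, FileType.NotFound)
+                for p in paths]
+
+    def get_file_info_selector(self, selector):
+        base = selector.base_dir.rstrip("/")
+        return [FileInfo(p, FileType.File, size=len(b))
+                for p, b in self.blobs.items() if p.startswith(base)]
+
+    def open_input_stream(self, path):
+        # Must return a NativeFile; BufferReader wraps the bytes directly.
+        return pa.BufferReader(self.blobs[path])
+
+    def open_input_file(self, path):
+        return pa.BufferReader(self.blobs[path])
+
+    def normalize_path(self, path):
+        return path
+
+    # The remaining abstract methods must exist for the ABC to be
+    # instantiable, but this sketch is read-only, so they refuse to operate.
+    def create_dir(self, path, recursive):
+        raise NotImplementedError
+
+    def delete_dir(self, path):
+        raise NotImplementedError
+
+    def delete_dir_contents(self, path, missing_dir_ok=False):
+        raise NotImplementedError
+
+    def delete_root_dir_contents(self):
+        raise NotImplementedError
+
+    def delete_file(self, path):
+        raise NotImplementedError
+
+    def move(self, src, dest):
+        raise NotImplementedError
+
+    def copy_file(self, src, dest):
+        raise NotImplementedError
+
+    def open_output_stream(self, path, metadata):
+        raise NotImplementedError
+
+    def open_append_stream(self, path, metadata):
+        raise NotImplementedError
+
+
+if __name__ == "__main__":
+    # Usage: wrap the handler and use the regular FileSystem API on top of it.
+    memfs = PyFileSystem(DictHandler({"data/a.bin": b"hello"}))
+    print(memfs.get_file_info("data/a.bin"))   # type=FileType.File, size=5
+    with memfs.open_input_stream("data/a.bin") as f:
+        assert f.readall() == b"hello"
+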
+ +# cython: profile=False +# distutils: language = c++ + +from cython.operator cimport dereference as deref +from libcpp.vector cimport vector as std_vector +from libcpp.utility cimport move +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport (check_status, _Weakrefable, + MemoryPool, maybe_unbox_memory_pool, + pyarrow_wrap_schema, + pyarrow_wrap_batch, + Table, + pyarrow_wrap_table, + pyarrow_wrap_metadata, + pyarrow_unwrap_table, + get_reader, + get_writer) +from pyarrow.lib import frombytes, tobytes +from pyarrow.util import _stringify_path + + +cdef compression_type_from_enum(CCompressionType compression_type): + compression_map = { + CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED', + CCompressionType_GZIP: 'ZLIB', + CCompressionType_SNAPPY: 'SNAPPY', + CCompressionType_LZ4: 'LZ4', + CCompressionType_ZSTD: 'ZSTD', + } + if compression_type in compression_map: + return compression_map[compression_type] + raise ValueError('Unsupported compression') + + +cdef CCompressionType compression_type_from_name(name) except *: + if not isinstance(name, str): + raise TypeError('compression must be a string') + name = name.upper() + if name == 'ZLIB': + return CCompressionType_GZIP + elif name == 'SNAPPY': + return CCompressionType_SNAPPY + elif name == 'LZ4': + return CCompressionType_LZ4 + elif name == 'ZSTD': + return CCompressionType_ZSTD + elif name == 'UNCOMPRESSED': + return CCompressionType_UNCOMPRESSED + raise ValueError(f'Unknown CompressionKind: {name}') + + +cdef compression_strategy_from_enum( + CompressionStrategy compression_strategy +): + compression_strategy_map = { + _CompressionStrategy_SPEED: 'SPEED', + _CompressionStrategy_COMPRESSION: 'COMPRESSION', + } + if compression_strategy in compression_strategy_map: + return compression_strategy_map[compression_strategy] + raise ValueError('Unsupported compression strategy') + + +cdef CompressionStrategy compression_strategy_from_name(name) except *: + if not isinstance(name, str): + raise TypeError('compression strategy must be a string') + name = name.upper() + if name == 'COMPRESSION': + return _CompressionStrategy_COMPRESSION + elif name == 'SPEED': + return _CompressionStrategy_SPEED + raise ValueError(f'Unknown CompressionStrategy: {name}') + + +cdef file_version_from_class(FileVersion file_version): + return frombytes(file_version.ToString()) + + +cdef writer_id_from_enum(WriterId writer_id): + writer_id_map = { + _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA', + _WriterId_ORC_CPP_WRITER: 'ORC_CPP', + _WriterId_PRESTO_WRITER: 'PRESTO', + _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO', + _WriterId_TRINO_WRITER: 'TRINO', + } + if writer_id in writer_id_map: + return writer_id_map[writer_id] + raise ValueError('Unsupported writer ID') + + +cdef writer_version_from_enum(WriterVersion writer_version): + writer_version_map = { + _WriterVersion_ORIGINAL: 'ORIGINAL', + _WriterVersion_HIVE_8732: 'HIVE_8732', + _WriterVersion_HIVE_4243: 'HIVE_4243', + _WriterVersion_HIVE_12055: 'HIVE_12055', + _WriterVersion_HIVE_13083: 'HIVE_13083', + _WriterVersion_ORC_101: 'ORC_101', + _WriterVersion_ORC_135: 'ORC_135', + _WriterVersion_ORC_517: 'ORC_517', + _WriterVersion_ORC_203: 'ORC_203', + _WriterVersion_ORC_14: 'ORC_14', + } + if writer_version in writer_version_map: + return writer_version_map[writer_version] + raise ValueError('Unsupported writer version') + + +cdef shared_ptr[WriteOptions] _create_write_options( + file_version=None, + batch_size=None, + stripe_size=None, + compression=None, + 
compression_block_size=None, + compression_strategy=None, + row_index_stride=None, + padding_tolerance=None, + dictionary_key_size_threshold=None, + bloom_filter_columns=None, + bloom_filter_fpp=None +) except *: + """General writer options""" + cdef: + shared_ptr[WriteOptions] options + options = make_shared[WriteOptions]() + # batch_size + if batch_size is not None: + if isinstance(batch_size, int) and batch_size > 0: + deref(options).batch_size = batch_size + else: + raise ValueError(f"Invalid ORC writer batch size: {batch_size}") + # file_version + if file_version is not None: + if file_version == "0.12": + deref(options).file_version = FileVersion(0, 12) + elif file_version == "0.11": + deref(options).file_version = FileVersion(0, 11) + else: + raise ValueError(f"Unsupported ORC file version: {file_version}") + # stripe_size + if stripe_size is not None: + if isinstance(stripe_size, int) and stripe_size > 0: + deref(options).stripe_size = stripe_size + else: + raise ValueError(f"Invalid ORC stripe size: {stripe_size}") + # compression + if compression is not None: + if isinstance(compression, str): + deref(options).compression = compression_type_from_name( + compression) + else: + raise TypeError("Unsupported ORC compression type: " + f"{compression}") + # compression_block_size + if compression_block_size is not None: + if (isinstance(compression_block_size, int) and + compression_block_size > 0): + deref(options).compression_block_size = compression_block_size + else: + raise ValueError("Invalid ORC compression block size: " + f"{compression_block_size}") + # compression_strategy + if compression_strategy is not None: + if isinstance(compression_strategy, str): + deref(options).compression_strategy = \ + compression_strategy_from_name(compression_strategy) + else: + raise TypeError("Unsupported ORC compression strategy: " + f"{compression_strategy}") + # row_index_stride + if row_index_stride is not None: + if isinstance(row_index_stride, int) and row_index_stride > 0: + deref(options).row_index_stride = row_index_stride + else: + raise ValueError("Invalid ORC row index stride: " + f"{row_index_stride}") + # padding_tolerance + if padding_tolerance is not None: + try: + padding_tolerance = float(padding_tolerance) + deref(options).padding_tolerance = padding_tolerance + except Exception: + raise ValueError("Invalid ORC padding tolerance: " + f"{padding_tolerance}") + # dictionary_key_size_threshold + if dictionary_key_size_threshold is not None: + try: + dictionary_key_size_threshold = float( + dictionary_key_size_threshold) + assert 0 <= dictionary_key_size_threshold <= 1 + deref(options).dictionary_key_size_threshold = \ + dictionary_key_size_threshold + except Exception: + raise ValueError("Invalid ORC dictionary key size threshold: " + f"{dictionary_key_size_threshold}") + # bloom_filter_columns + if bloom_filter_columns is not None: + try: + bloom_filter_columns = list(bloom_filter_columns) + for col in bloom_filter_columns: + assert isinstance(col, int) and col >= 0 + deref(options).bloom_filter_columns = bloom_filter_columns + except Exception: + raise ValueError("Invalid ORC BloomFilter columns: " + f"{bloom_filter_columns}") + # Max false positive rate of the Bloom Filter + if bloom_filter_fpp is not None: + try: + bloom_filter_fpp = float(bloom_filter_fpp) + assert 0 <= bloom_filter_fpp <= 1 + deref(options).bloom_filter_fpp = bloom_filter_fpp + except Exception: + raise ValueError("Invalid ORC BloomFilter false positive rate: " + f"{bloom_filter_fpp}") + return options + + +cdef
class ORCReader(_Weakrefable): + cdef: + object source + CMemoryPool* allocator + unique_ptr[ORCFileReader] reader + + def __cinit__(self, MemoryPool memory_pool=None): + self.allocator = maybe_unbox_memory_pool(memory_pool) + + def open(self, object source, c_bool use_memory_map=True): + cdef: + shared_ptr[CRandomAccessFile] rd_handle + + self.source = source + + get_reader(source, use_memory_map, &rd_handle) + with nogil: + self.reader = move(GetResultValue( + ORCFileReader.Open(rd_handle, self.allocator) + )) + + def metadata(self): + """ + The arrow metadata for this file. + + Returns + ------- + metadata : pyarrow.KeyValueMetadata + """ + cdef: + shared_ptr[const CKeyValueMetadata] sp_arrow_metadata + + with nogil: + sp_arrow_metadata = GetResultValue( + deref(self.reader).ReadMetadata() + ) + + return pyarrow_wrap_metadata(sp_arrow_metadata) + + def schema(self): + """ + The arrow schema for this file. + + Returns + ------- + schema : pyarrow.Schema + """ + cdef: + shared_ptr[CSchema] sp_arrow_schema + + with nogil: + sp_arrow_schema = GetResultValue(deref(self.reader).ReadSchema()) + + return pyarrow_wrap_schema(sp_arrow_schema) + + def nrows(self): + return deref(self.reader).NumberOfRows() + + def nstripes(self): + return deref(self.reader).NumberOfStripes() + + def file_version(self): + return file_version_from_class(deref(self.reader).GetFileVersion()) + + def software_version(self): + return frombytes(deref(self.reader).GetSoftwareVersion()) + + def compression(self): + return compression_type_from_enum( + GetResultValue(deref(self.reader).GetCompression())) + + def compression_size(self): + return deref(self.reader).GetCompressionSize() + + def row_index_stride(self): + return deref(self.reader).GetRowIndexStride() + + def writer(self): + writer_name = writer_id_from_enum(deref(self.reader).GetWriterId()) + if writer_name == 'UNKNOWN': + return deref(self.reader).GetWriterIdValue() + else: + return writer_name + + def writer_version(self): + return writer_version_from_enum(deref(self.reader).GetWriterVersion()) + + def nstripe_statistics(self): + return deref(self.reader).GetNumberOfStripeStatistics() + + def content_length(self): + return deref(self.reader).GetContentLength() + + def stripe_statistics_length(self): + return deref(self.reader).GetStripeStatisticsLength() + + def file_footer_length(self): + return deref(self.reader).GetFileFooterLength() + + def file_postscript_length(self): + return deref(self.reader).GetFilePostscriptLength() + + def file_length(self): + return deref(self.reader).GetFileLength() + + def serialized_file_tail(self): + return deref(self.reader).GetSerializedFileTail() + + def read_stripe(self, n, columns=None): + cdef: + shared_ptr[CRecordBatch] sp_record_batch + int64_t stripe + std_vector[c_string] c_names + + stripe = n + + if columns is None: + with nogil: + sp_record_batch = GetResultValue( + deref(self.reader).ReadStripe(stripe) + ) + else: + c_names = [tobytes(name) for name in columns] + with nogil: + sp_record_batch = GetResultValue( + deref(self.reader).ReadStripe(stripe, c_names) + ) + + return pyarrow_wrap_batch(sp_record_batch) + + def read(self, columns=None): + cdef: + shared_ptr[CTable] sp_table + std_vector[c_string] c_names + + if columns is None: + with nogil: + sp_table = GetResultValue(deref(self.reader).Read()) + else: + c_names = [tobytes(name) for name in columns] + with nogil: + sp_table = GetResultValue(deref(self.reader).Read(c_names)) + + return pyarrow_wrap_table(sp_table) + + +cdef class ORCWriter(_Weakrefable): + 
cdef: + unique_ptr[ORCFileWriter] writer + shared_ptr[COutputStream] sink + c_bool own_sink + + def open(self, object where, *, + file_version=None, + batch_size=None, + stripe_size=None, + compression=None, + compression_block_size=None, + compression_strategy=None, + row_index_stride=None, + padding_tolerance=None, + dictionary_key_size_threshold=None, + bloom_filter_columns=None, + bloom_filter_fpp=None): + cdef: + shared_ptr[WriteOptions] write_options + c_string c_where + try: + where = _stringify_path(where) + except TypeError: + get_writer(where, &self.sink) + self.own_sink = False + else: + c_where = tobytes(where) + with nogil: + self.sink = GetResultValue(FileOutputStream.Open(c_where)) + self.own_sink = True + + write_options = _create_write_options( + file_version=file_version, + batch_size=batch_size, + stripe_size=stripe_size, + compression=compression, + compression_block_size=compression_block_size, + compression_strategy=compression_strategy, + row_index_stride=row_index_stride, + padding_tolerance=padding_tolerance, + dictionary_key_size_threshold=dictionary_key_size_threshold, + bloom_filter_columns=bloom_filter_columns, + bloom_filter_fpp=bloom_filter_fpp + ) + + with nogil: + self.writer = move(GetResultValue( + ORCFileWriter.Open(self.sink.get(), + deref(write_options)))) + + def write(self, Table table): + cdef: + shared_ptr[CTable] sp_table + sp_table = pyarrow_unwrap_table(table) + with nogil: + check_status(deref(self.writer).Write(deref(sp_table))) + + def close(self): + with nogil: + check_status(deref(self.writer).Close()) + if self.own_sink: + check_status(deref(self.sink).Close()) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_parquet.pxd b/parrot/lib/python3.10/site-packages/pyarrow/_parquet.pxd new file mode 100644 index 0000000000000000000000000000000000000000..35d15227ee5dc579f53396c7285a5ff2cc3976f6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_parquet.pxd @@ -0,0 +1,679 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
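# [editor's note] A minimal sketch, not part of the vendored sources, of how
# the ORCReader / ORCWriter classes above surface through the public
# `pyarrow.orc` module that wraps them; the file name is hypothetical.
#
#     import pyarrow as pa
#     from pyarrow import orc
#
#     table = pa.table({"a": [1, 2, 3]})
#     orc.write_table(table, "example.orc", compression="zstd")
#     f = orc.ORCFile("example.orc")      # uses ORCReader under the hood
#     print(f.nstripes, f.schema)
#     assert f.read().equals(table)       # round-trip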
+ +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport (CChunkedArray, CScalar, CSchema, CStatus, + CTable, CMemoryPool, CBuffer, + CKeyValueMetadata, CRandomAccessFile, + COutputStream, CCacheOptions, + TimeUnit, CRecordBatchReader) +from pyarrow.lib cimport _Weakrefable + + +cdef extern from "parquet/api/schema.h" namespace "parquet::schema" nogil: + cdef cppclass Node: + pass + + cdef cppclass GroupNode(Node): + pass + + cdef cppclass PrimitiveNode(Node): + pass + + cdef cppclass ColumnPath: + c_string ToDotString() + vector[c_string] ToDotVector() + + +cdef extern from "parquet/api/schema.h" namespace "parquet" nogil: + enum ParquetType" parquet::Type::type": + ParquetType_BOOLEAN" parquet::Type::BOOLEAN" + ParquetType_INT32" parquet::Type::INT32" + ParquetType_INT64" parquet::Type::INT64" + ParquetType_INT96" parquet::Type::INT96" + ParquetType_FLOAT" parquet::Type::FLOAT" + ParquetType_DOUBLE" parquet::Type::DOUBLE" + ParquetType_BYTE_ARRAY" parquet::Type::BYTE_ARRAY" + ParquetType_FIXED_LEN_BYTE_ARRAY" parquet::Type::FIXED_LEN_BYTE_ARRAY" + + enum ParquetLogicalTypeId" parquet::LogicalType::Type::type": + ParquetLogicalType_UNDEFINED" parquet::LogicalType::Type::UNDEFINED" + ParquetLogicalType_STRING" parquet::LogicalType::Type::STRING" + ParquetLogicalType_MAP" parquet::LogicalType::Type::MAP" + ParquetLogicalType_LIST" parquet::LogicalType::Type::LIST" + ParquetLogicalType_ENUM" parquet::LogicalType::Type::ENUM" + ParquetLogicalType_DECIMAL" parquet::LogicalType::Type::DECIMAL" + ParquetLogicalType_DATE" parquet::LogicalType::Type::DATE" + ParquetLogicalType_TIME" parquet::LogicalType::Type::TIME" + ParquetLogicalType_TIMESTAMP" parquet::LogicalType::Type::TIMESTAMP" + ParquetLogicalType_INT" parquet::LogicalType::Type::INT" + ParquetLogicalType_FLOAT16" parquet::LogicalType::Type::FLOAT16" + ParquetLogicalType_JSON" parquet::LogicalType::Type::JSON" + ParquetLogicalType_BSON" parquet::LogicalType::Type::BSON" + ParquetLogicalType_UUID" parquet::LogicalType::Type::UUID" + ParquetLogicalType_NONE" parquet::LogicalType::Type::NONE" + + enum ParquetTimeUnit" parquet::LogicalType::TimeUnit::unit": + ParquetTimeUnit_UNKNOWN" parquet::LogicalType::TimeUnit::UNKNOWN" + ParquetTimeUnit_MILLIS" parquet::LogicalType::TimeUnit::MILLIS" + ParquetTimeUnit_MICROS" parquet::LogicalType::TimeUnit::MICROS" + ParquetTimeUnit_NANOS" parquet::LogicalType::TimeUnit::NANOS" + + enum ParquetConvertedType" parquet::ConvertedType::type": + ParquetConvertedType_NONE" parquet::ConvertedType::NONE" + ParquetConvertedType_UTF8" parquet::ConvertedType::UTF8" + ParquetConvertedType_MAP" parquet::ConvertedType::MAP" + ParquetConvertedType_MAP_KEY_VALUE \ + " parquet::ConvertedType::MAP_KEY_VALUE" + ParquetConvertedType_LIST" parquet::ConvertedType::LIST" + ParquetConvertedType_ENUM" parquet::ConvertedType::ENUM" + ParquetConvertedType_DECIMAL" parquet::ConvertedType::DECIMAL" + ParquetConvertedType_DATE" parquet::ConvertedType::DATE" + ParquetConvertedType_TIME_MILLIS" parquet::ConvertedType::TIME_MILLIS" + ParquetConvertedType_TIME_MICROS" parquet::ConvertedType::TIME_MICROS" + ParquetConvertedType_TIMESTAMP_MILLIS \ + " parquet::ConvertedType::TIMESTAMP_MILLIS" + ParquetConvertedType_TIMESTAMP_MICROS \ + " parquet::ConvertedType::TIMESTAMP_MICROS" + ParquetConvertedType_UINT_8" parquet::ConvertedType::UINT_8" + ParquetConvertedType_UINT_16" parquet::ConvertedType::UINT_16" + ParquetConvertedType_UINT_32" 
parquet::ConvertedType::UINT_32" + ParquetConvertedType_UINT_64" parquet::ConvertedType::UINT_64" + ParquetConvertedType_INT_8" parquet::ConvertedType::INT_8" + ParquetConvertedType_INT_16" parquet::ConvertedType::INT_16" + ParquetConvertedType_INT_32" parquet::ConvertedType::INT_32" + ParquetConvertedType_INT_64" parquet::ConvertedType::INT_64" + ParquetConvertedType_JSON" parquet::ConvertedType::JSON" + ParquetConvertedType_BSON" parquet::ConvertedType::BSON" + ParquetConvertedType_INTERVAL" parquet::ConvertedType::INTERVAL" + + enum ParquetRepetition" parquet::Repetition::type": + ParquetRepetition_REQUIRED" parquet::Repetition::REQUIRED" + ParquetRepetition_OPTIONAL" parquet::Repetition::OPTIONAL" + ParquetRepetition_REPEATED" parquet::Repetition::REPEATED" + + enum ParquetEncoding" parquet::Encoding::type": + ParquetEncoding_PLAIN" parquet::Encoding::PLAIN" + ParquetEncoding_PLAIN_DICTIONARY" parquet::Encoding::PLAIN_DICTIONARY" + ParquetEncoding_RLE" parquet::Encoding::RLE" + ParquetEncoding_BIT_PACKED" parquet::Encoding::BIT_PACKED" + ParquetEncoding_DELTA_BINARY_PACKED \ " parquet::Encoding::DELTA_BINARY_PACKED" + ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY \ " parquet::Encoding::DELTA_LENGTH_BYTE_ARRAY" + ParquetEncoding_DELTA_BYTE_ARRAY" parquet::Encoding::DELTA_BYTE_ARRAY" + ParquetEncoding_RLE_DICTIONARY" parquet::Encoding::RLE_DICTIONARY" + ParquetEncoding_BYTE_STREAM_SPLIT \ " parquet::Encoding::BYTE_STREAM_SPLIT" + + enum ParquetCompression" parquet::Compression::type": + ParquetCompression_UNCOMPRESSED" parquet::Compression::UNCOMPRESSED" + ParquetCompression_SNAPPY" parquet::Compression::SNAPPY" + ParquetCompression_GZIP" parquet::Compression::GZIP" + ParquetCompression_LZO" parquet::Compression::LZO" + ParquetCompression_BROTLI" parquet::Compression::BROTLI" + ParquetCompression_LZ4" parquet::Compression::LZ4" + ParquetCompression_ZSTD" parquet::Compression::ZSTD" + + enum ParquetVersion" parquet::ParquetVersion::type": + ParquetVersion_V1" parquet::ParquetVersion::PARQUET_1_0" + ParquetVersion_V2_0" parquet::ParquetVersion::PARQUET_2_0" + ParquetVersion_V2_4" parquet::ParquetVersion::PARQUET_2_4" + ParquetVersion_V2_6" parquet::ParquetVersion::PARQUET_2_6" + + enum ParquetSortOrder" parquet::SortOrder::type": + ParquetSortOrder_SIGNED" parquet::SortOrder::SIGNED" + ParquetSortOrder_UNSIGNED" parquet::SortOrder::UNSIGNED" + ParquetSortOrder_UNKNOWN" parquet::SortOrder::UNKNOWN" + + cdef cppclass CParquetLogicalType" parquet::LogicalType": + c_string ToString() const + c_string ToJSON() const + ParquetLogicalTypeId type() const + + cdef cppclass CParquetDecimalType \ " parquet::DecimalLogicalType"(CParquetLogicalType): + int32_t precision() const + int32_t scale() const + + cdef cppclass CParquetIntType \ " parquet::IntLogicalType"(CParquetLogicalType): + int bit_width() const + c_bool is_signed() const + + cdef cppclass CParquetTimeType \ " parquet::TimeLogicalType"(CParquetLogicalType): + c_bool is_adjusted_to_utc() const + ParquetTimeUnit time_unit() const + + cdef cppclass CParquetTimestampType \ " parquet::TimestampLogicalType"(CParquetLogicalType): + c_bool is_adjusted_to_utc() const + ParquetTimeUnit time_unit() const + + cdef cppclass ColumnDescriptor" parquet::ColumnDescriptor": + c_bool Equals(const ColumnDescriptor& other) + + shared_ptr[ColumnPath] path() + int16_t max_definition_level() + int16_t max_repetition_level() + + ParquetType physical_type() + const shared_ptr[const CParquetLogicalType]& logical_type() + ParquetConvertedType converted_type()
+ const c_string& name() + int type_length() + int type_precision() + int type_scale() + + cdef cppclass SchemaDescriptor: + const ColumnDescriptor* Column(int i) + shared_ptr[Node] schema() + GroupNode* group() + c_bool Equals(const SchemaDescriptor& other) + c_string ToString() + int num_columns() + + cdef c_string FormatStatValue(ParquetType parquet_type, c_string val) + + enum ParquetCipher" parquet::ParquetCipher::type": + ParquetCipher_AES_GCM_V1" parquet::ParquetCipher::AES_GCM_V1" + ParquetCipher_AES_GCM_CTR_V1" parquet::ParquetCipher::AES_GCM_CTR_V1" + + struct AadMetadata: + c_string aad_prefix + c_string aad_file_unique + c_bool supply_aad_prefix + + struct EncryptionAlgorithm: + ParquetCipher algorithm + AadMetadata aad + +cdef extern from "parquet/api/reader.h" namespace "parquet" nogil: + cdef cppclass ColumnReader: + pass + + cdef cppclass BoolReader(ColumnReader): + pass + + cdef cppclass Int32Reader(ColumnReader): + pass + + cdef cppclass Int64Reader(ColumnReader): + pass + + cdef cppclass Int96Reader(ColumnReader): + pass + + cdef cppclass FloatReader(ColumnReader): + pass + + cdef cppclass DoubleReader(ColumnReader): + pass + + cdef cppclass ByteArrayReader(ColumnReader): + pass + + cdef cppclass RowGroupReader: + pass + + cdef cppclass CEncodedStatistics" parquet::EncodedStatistics": + const c_string& max() const + const c_string& min() const + int64_t null_count + int64_t distinct_count + bint has_min + bint has_max + bint has_null_count + bint has_distinct_count + + cdef cppclass ParquetByteArray" parquet::ByteArray": + uint32_t len + const uint8_t* ptr + + cdef cppclass ParquetFLBA" parquet::FLBA": + const uint8_t* ptr + + cdef cppclass CStatistics" parquet::Statistics": + int64_t null_count() const + int64_t distinct_count() const + int64_t num_values() const + bint HasMinMax() + bint HasNullCount() + bint HasDistinctCount() + c_bool Equals(const CStatistics&) const + void Reset() + c_string EncodeMin() + c_string EncodeMax() + CEncodedStatistics Encode() + void SetComparator() + ParquetType physical_type() const + const ColumnDescriptor* descr() const + + cdef cppclass CBoolStatistics" parquet::BoolStatistics"(CStatistics): + c_bool min() + c_bool max() + + cdef cppclass CInt32Statistics" parquet::Int32Statistics"(CStatistics): + int32_t min() + int32_t max() + + cdef cppclass CInt64Statistics" parquet::Int64Statistics"(CStatistics): + int64_t min() + int64_t max() + + cdef cppclass CFloatStatistics" parquet::FloatStatistics"(CStatistics): + float min() + float max() + + cdef cppclass CDoubleStatistics" parquet::DoubleStatistics"(CStatistics): + double min() + double max() + + cdef cppclass CByteArrayStatistics \ + " parquet::ByteArrayStatistics"(CStatistics): + ParquetByteArray min() + ParquetByteArray max() + + cdef cppclass CFLBAStatistics" parquet::FLBAStatistics"(CStatistics): + ParquetFLBA min() + ParquetFLBA max() + + cdef cppclass CColumnCryptoMetaData" parquet::ColumnCryptoMetaData": + shared_ptr[ColumnPath] path_in_schema() const + c_bool encrypted_with_footer_key() const + const c_string& key_metadata() const + + cdef cppclass ParquetIndexLocation" parquet::IndexLocation": + int64_t offset + int32_t length + + cdef cppclass CColumnChunkMetaData" parquet::ColumnChunkMetaData": + int64_t file_offset() const + const c_string& file_path() const + + c_bool is_metadata_set() const + ParquetType type() const + int64_t num_values() const + shared_ptr[ColumnPath] path_in_schema() const + bint is_stats_set() const + shared_ptr[CStatistics] statistics() const + 
ParquetCompression compression() const + const vector[ParquetEncoding]& encodings() const + c_bool Equals(const CColumnChunkMetaData&) const + + int64_t has_dictionary_page() const + int64_t dictionary_page_offset() const + int64_t data_page_offset() const + int64_t index_page_offset() const + int64_t total_compressed_size() const + int64_t total_uncompressed_size() const + unique_ptr[CColumnCryptoMetaData] crypto_metadata() const + optional[ParquetIndexLocation] GetColumnIndexLocation() const + optional[ParquetIndexLocation] GetOffsetIndexLocation() const + + struct CSortingColumn" parquet::SortingColumn": + int column_idx + c_bool descending + c_bool nulls_first + + cdef cppclass CRowGroupMetaData" parquet::RowGroupMetaData": + c_bool Equals(const CRowGroupMetaData&) const + int num_columns() const + int64_t num_rows() const + int64_t total_byte_size() const + vector[CSortingColumn] sorting_columns() const + unique_ptr[CColumnChunkMetaData] ColumnChunk(int i) const + + cdef cppclass CFileMetaData" parquet::FileMetaData": + c_bool Equals(const CFileMetaData&) const + uint32_t size() + int num_columns() + int64_t num_rows() + int num_row_groups() + ParquetVersion version() + const c_string created_by() + int num_schema_elements() + + void set_file_path(const c_string& path) + void AppendRowGroups(const CFileMetaData& other) except + + + unique_ptr[CRowGroupMetaData] RowGroup(int i) + const SchemaDescriptor* schema() + shared_ptr[const CKeyValueMetadata] key_value_metadata() const + void WriteTo(COutputStream* dst) const + + inline c_bool is_encryption_algorithm_set() const + inline EncryptionAlgorithm encryption_algorithm() const + inline const c_string& footer_signing_key_metadata() const + + cdef shared_ptr[CFileMetaData] CFileMetaData_Make \ + " parquet::FileMetaData::Make"(const void* serialized_metadata, + uint32_t* metadata_len) + + cdef cppclass CReaderProperties" parquet::ReaderProperties": + c_bool is_buffered_stream_enabled() const + void enable_buffered_stream() + void disable_buffered_stream() + + void set_buffer_size(int64_t buf_size) + int64_t buffer_size() const + + void set_thrift_string_size_limit(int32_t size) + int32_t thrift_string_size_limit() const + + void set_thrift_container_size_limit(int32_t size) + int32_t thrift_container_size_limit() const + + void file_decryption_properties(shared_ptr[CFileDecryptionProperties] + decryption) + shared_ptr[CFileDecryptionProperties] file_decryption_properties() \ + const + + c_bool page_checksum_verification() const + void set_page_checksum_verification(c_bool check_crc) + + CReaderProperties default_reader_properties() + + cdef cppclass ArrowReaderProperties: + ArrowReaderProperties() + void set_read_dictionary(int column_index, c_bool read_dict) + c_bool read_dictionary() + void set_batch_size(int64_t batch_size) + int64_t batch_size() + void set_pre_buffer(c_bool pre_buffer) + c_bool pre_buffer() const + void set_cache_options(CCacheOptions options) + CCacheOptions cache_options() const + void set_coerce_int96_timestamp_unit(TimeUnit unit) + TimeUnit coerce_int96_timestamp_unit() const + + ArrowReaderProperties default_arrow_reader_properties() + + cdef cppclass ParquetFileReader: + shared_ptr[CFileMetaData] metadata() + + +cdef extern from "parquet/api/writer.h" namespace "parquet" nogil: + cdef cppclass WriterProperties: + cppclass Builder: + Builder* data_page_version(ParquetDataPageVersion version) + Builder* version(ParquetVersion version) + Builder* compression(ParquetCompression codec) + Builder* compression(const 
c_string& path, + ParquetCompression codec) + Builder* compression_level(int compression_level) + Builder* compression_level(const c_string& path, + int compression_level) + Builder* encryption( + shared_ptr[CFileEncryptionProperties] + file_encryption_properties) + Builder* disable_dictionary() + Builder* enable_dictionary() + Builder* enable_dictionary(const c_string& path) + Builder* set_sorting_columns(vector[CSortingColumn] sorting_columns) + Builder* disable_statistics() + Builder* enable_statistics() + Builder* enable_statistics(const c_string& path) + Builder* enable_store_decimal_as_integer() + Builder* disable_store_decimal_as_integer() + Builder* data_pagesize(int64_t size) + Builder* encoding(ParquetEncoding encoding) + Builder* encoding(const c_string& path, + ParquetEncoding encoding) + Builder* max_row_group_length(int64_t size) + Builder* write_batch_size(int64_t batch_size) + Builder* dictionary_pagesize_limit(int64_t dictionary_pagesize_limit) + Builder* enable_write_page_index() + Builder* disable_write_page_index() + Builder* enable_page_checksum() + Builder* disable_page_checksum() + shared_ptr[WriterProperties] build() + + cdef cppclass ArrowWriterProperties: + cppclass Builder: + Builder() + Builder* disable_deprecated_int96_timestamps() + Builder* enable_deprecated_int96_timestamps() + Builder* coerce_timestamps(TimeUnit unit) + Builder* allow_truncated_timestamps() + Builder* disallow_truncated_timestamps() + Builder* store_schema() + Builder* enable_compliant_nested_types() + Builder* disable_compliant_nested_types() + Builder* set_engine_version(ArrowWriterEngineVersion version) + shared_ptr[ArrowWriterProperties] build() + c_bool support_deprecated_int96_timestamps() + + +cdef extern from "parquet/arrow/reader.h" namespace "parquet::arrow" nogil: + cdef cppclass FileReader: + FileReader(CMemoryPool* pool, unique_ptr[ParquetFileReader] reader) + + CStatus GetSchema(shared_ptr[CSchema]* out) + + CStatus ReadColumn(int i, shared_ptr[CChunkedArray]* out) + CStatus ReadSchemaField(int i, shared_ptr[CChunkedArray]* out) + + int num_row_groups() + CStatus ReadRowGroup(int i, shared_ptr[CTable]* out) + CStatus ReadRowGroup(int i, const vector[int]& column_indices, + shared_ptr[CTable]* out) + + CStatus ReadRowGroups(const vector[int]& row_groups, + shared_ptr[CTable]* out) + CStatus ReadRowGroups(const vector[int]& row_groups, + const vector[int]& column_indices, + shared_ptr[CTable]* out) + + CStatus GetRecordBatchReader(const vector[int]& row_group_indices, + const vector[int]& column_indices, + unique_ptr[CRecordBatchReader]* out) + CStatus GetRecordBatchReader(const vector[int]& row_group_indices, + unique_ptr[CRecordBatchReader]* out) + + CStatus ReadTable(shared_ptr[CTable]* out) + CStatus ReadTable(const vector[int]& column_indices, + shared_ptr[CTable]* out) + + CStatus ScanContents(vector[int] columns, int32_t column_batch_size, + int64_t* num_rows) + + const ParquetFileReader* parquet_reader() + + void set_use_threads(c_bool use_threads) + + void set_batch_size(int64_t batch_size) + + cdef cppclass FileReaderBuilder: + FileReaderBuilder() + CStatus Open(const shared_ptr[CRandomAccessFile]& file, + const CReaderProperties& properties, + const shared_ptr[CFileMetaData]& metadata) + + ParquetFileReader* raw_reader() + FileReaderBuilder* memory_pool(CMemoryPool*) + FileReaderBuilder* properties(const ArrowReaderProperties&) + CStatus Build(unique_ptr[FileReader]* out) + + CStatus FromParquetSchema( + const SchemaDescriptor* parquet_schema, + const 
ArrowReaderProperties& properties, + const shared_ptr[const CKeyValueMetadata]& key_value_metadata, + shared_ptr[CSchema]* out) + + CStatus StatisticsAsScalars(const CStatistics& Statistics, + shared_ptr[CScalar]* min, + shared_ptr[CScalar]* max) + +cdef extern from "parquet/arrow/schema.h" namespace "parquet::arrow" nogil: + + CStatus ToParquetSchema( + const CSchema* arrow_schema, + const WriterProperties& properties, + const ArrowWriterProperties& arrow_properties, + shared_ptr[SchemaDescriptor]* out) + + +cdef extern from "parquet/properties.h" namespace "parquet" nogil: + cdef enum ArrowWriterEngineVersion: + V1 "parquet::ArrowWriterProperties::V1", + V2 "parquet::ArrowWriterProperties::V2" + + cdef cppclass ParquetDataPageVersion: + pass + + cdef ParquetDataPageVersion ParquetDataPageVersion_V1 \ + " parquet::ParquetDataPageVersion::V1" + cdef ParquetDataPageVersion ParquetDataPageVersion_V2 \ + " parquet::ParquetDataPageVersion::V2" + +cdef extern from "parquet/arrow/writer.h" namespace "parquet::arrow" nogil: + cdef cppclass FileWriter: + + @staticmethod + CResult[unique_ptr[FileWriter]] Open(const CSchema& schema, CMemoryPool* pool, + const shared_ptr[COutputStream]& sink, + const shared_ptr[WriterProperties]& properties, + const shared_ptr[ArrowWriterProperties]& arrow_properties) + + CStatus WriteTable(const CTable& table, int64_t chunk_size) + CStatus NewRowGroup(int64_t chunk_size) + CStatus Close() + CStatus AddKeyValueMetadata(const shared_ptr[const CKeyValueMetadata]& key_value_metadata) + + const shared_ptr[CFileMetaData] metadata() const + + CStatus WriteMetaDataFile( + const CFileMetaData& file_metadata, + const COutputStream* sink) + +cdef class FileEncryptionProperties: + """File-level encryption properties for the low-level API""" + cdef: + shared_ptr[CFileEncryptionProperties] properties + + @staticmethod + cdef inline FileEncryptionProperties wrap( + shared_ptr[CFileEncryptionProperties] properties): + + result = FileEncryptionProperties() + result.properties = properties + return result + + cdef inline shared_ptr[CFileEncryptionProperties] unwrap(self): + return self.properties + +cdef shared_ptr[WriterProperties] _create_writer_properties( + use_dictionary=*, + compression=*, + version=*, + write_statistics=*, + data_page_size=*, + compression_level=*, + use_byte_stream_split=*, + column_encoding=*, + data_page_version=*, + FileEncryptionProperties encryption_properties=*, + write_batch_size=*, + dictionary_pagesize_limit=*, + write_page_index=*, + write_page_checksum=*, + sorting_columns=*, + store_decimal_as_integer=*, +) except * + + +cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties( + use_deprecated_int96_timestamps=*, + coerce_timestamps=*, + allow_truncated_timestamps=*, + writer_engine_version=*, + use_compliant_nested_type=*, + store_schema=*, +) except * + +cdef class ParquetSchema(_Weakrefable): + cdef: + FileMetaData parent # the FileMetaData owning the SchemaDescriptor + const SchemaDescriptor* schema + +cdef class FileMetaData(_Weakrefable): + cdef: + shared_ptr[CFileMetaData] sp_metadata + CFileMetaData* _metadata + ParquetSchema _schema + + cdef inline init(self, const shared_ptr[CFileMetaData]& metadata): + self.sp_metadata = metadata + self._metadata = metadata.get() + +cdef class RowGroupMetaData(_Weakrefable): + cdef: + int index # for pickling support + unique_ptr[CRowGroupMetaData] up_metadata + CRowGroupMetaData* metadata + FileMetaData parent + +cdef class ColumnChunkMetaData(_Weakrefable): + cdef: + 
unique_ptr[CColumnChunkMetaData] up_metadata + CColumnChunkMetaData* metadata + RowGroupMetaData parent + + cdef inline init(self, RowGroupMetaData parent, int i): + self.up_metadata = parent.metadata.ColumnChunk(i) + self.metadata = self.up_metadata.get() + self.parent = parent + +cdef class Statistics(_Weakrefable): + cdef: + shared_ptr[CStatistics] statistics + ColumnChunkMetaData parent + + cdef inline init(self, const shared_ptr[CStatistics]& statistics, + ColumnChunkMetaData parent): + self.statistics = statistics + self.parent = parent + +cdef extern from "parquet/encryption/encryption.h" namespace "parquet" nogil: + cdef cppclass CFileDecryptionProperties\ + " parquet::FileDecryptionProperties": + pass + + cdef cppclass CFileEncryptionProperties\ + " parquet::FileEncryptionProperties": + pass + +cdef class FileDecryptionProperties: + """File-level decryption properties for the low-level API""" + cdef: + shared_ptr[CFileDecryptionProperties] properties + + @staticmethod + cdef inline FileDecryptionProperties wrap( + shared_ptr[CFileDecryptionProperties] properties): + + result = FileDecryptionProperties() + result.properties = properties + return result + + cdef inline shared_ptr[CFileDecryptionProperties] unwrap(self): + return self.properties diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_parquet.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_parquet.pyx new file mode 100644 index 0000000000000000000000000000000000000000..41b15b633d3d28b9fd9a86720ee11e18205de113 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_parquet.pyx @@ -0,0 +1,2253 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
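# [editor's note] A minimal sketch, not part of the vendored sources, showing
# how the metadata classes declared above and defined below (FileMetaData,
# RowGroupMetaData, ColumnChunkMetaData, Statistics) surface through the
# public `pyarrow.parquet` API; the file name is hypothetical.
#
#     import pyarrow as pa
#     import pyarrow.parquet as pq
#
#     pq.write_table(pa.table({"x": [1, 2, 3]}), "example.parquet")
#     meta = pq.ParquetFile("example.parquet").metadata   # FileMetaData
#     rg = meta.row_group(0)                              # RowGroupMetaData
#     col = rg.column(0)                                  # ColumnChunkMetaData
#     stats = col.statistics                              # Statistics
#     print(stats.min, stats.max, stats.null_count)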
+ +# cython: profile=False +# distutils: language = c++ + +from collections.abc import Sequence +from textwrap import indent +import warnings + +from cython.operator cimport dereference as deref +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport * +from pyarrow.lib cimport (_Weakrefable, Buffer, Schema, + check_status, + MemoryPool, maybe_unbox_memory_pool, + Table, KeyValueMetadata, + pyarrow_wrap_chunked_array, + pyarrow_wrap_schema, + pyarrow_unwrap_metadata, + pyarrow_unwrap_schema, + pyarrow_wrap_table, + pyarrow_wrap_batch, + pyarrow_wrap_scalar, + NativeFile, get_reader, get_writer, + string_to_timeunit) + +from pyarrow.lib import (ArrowException, NativeFile, BufferOutputStream, + _stringify_path, + tobytes, frombytes, is_threading_enabled) + +cimport cpython as cp + +_DEFAULT_ROW_GROUP_SIZE = 1024*1024 +_MAX_ROW_GROUP_SIZE = 64*1024*1024 + +cdef class Statistics(_Weakrefable): + """Statistics for a single column in a single row group.""" + + def __cinit__(self): + pass + + def __repr__(self): + return """{} + has_min_max: {} + min: {} + max: {} + null_count: {} + distinct_count: {} + num_values: {} + physical_type: {} + logical_type: {} + converted_type (legacy): {}""".format(object.__repr__(self), + self.has_min_max, + self.min, + self.max, + self.null_count, + self.distinct_count, + self.num_values, + self.physical_type, + str(self.logical_type), + self.converted_type) + + def to_dict(self): + """ + Get dictionary representation of statistics. + + Returns + ------- + dict + Dictionary with a key for each attribute of this class. + """ + d = dict( + has_min_max=self.has_min_max, + min=self.min, + max=self.max, + null_count=self.null_count, + distinct_count=self.distinct_count, + num_values=self.num_values, + physical_type=self.physical_type + ) + return d + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, Statistics other): + """ + Return whether the two column statistics objects are equal. + + Parameters + ---------- + other : Statistics + Statistics to compare against. + + Returns + ------- + are_equal : bool + """ + return self.statistics.get().Equals(deref(other.statistics.get())) + + @property + def has_min_max(self): + """Whether min and max are present (bool).""" + return self.statistics.get().HasMinMax() + + @property + def has_null_count(self): + """Whether null count is present (bool).""" + return self.statistics.get().HasNullCount() + + @property + def has_distinct_count(self): + """Whether distinct count is present (bool).""" + return self.statistics.get().HasDistinctCount() + + @property + def min_raw(self): + """Min value as physical type (bool, int, float, or bytes).""" + if self.has_min_max: + return _cast_statistic_raw_min(self.statistics.get()) + else: + return None + + @property + def max_raw(self): + """Max value as physical type (bool, int, float, or bytes).""" + if self.has_min_max: + return _cast_statistic_raw_max(self.statistics.get()) + else: + return None + + @property + def min(self): + """ + Min value as logical type. + + Returned as the Python equivalent of logical type, such as datetime.date + for dates and decimal.Decimal for decimals. + """ + if self.has_min_max: + min_scalar, _ = _cast_statistics(self.statistics.get()) + return min_scalar.as_py() + else: + return None + + @property + def max(self): + """ + Max value as logical type.
+ + Returned as the Python equivalent of logical type, such as datetime.date + for dates and decimal.Decimal for decimals. + """ + if self.has_min_max: + _, max_scalar = _cast_statistics(self.statistics.get()) + return max_scalar.as_py() + else: + return None + + @property + def null_count(self): + """Number of null values in chunk (int).""" + if self.has_null_count: + return self.statistics.get().null_count() + else: + return None + + @property + def distinct_count(self): + """Distinct number of values in chunk (int).""" + if self.has_distinct_count: + return self.statistics.get().distinct_count() + else: + return None + + @property + def num_values(self): + """Number of non-null values (int).""" + return self.statistics.get().num_values() + + @property + def physical_type(self): + """Physical type of column (str).""" + raw_physical_type = self.statistics.get().physical_type() + return physical_type_name_from_enum(raw_physical_type) + + @property + def logical_type(self): + """Logical type of column (:class:`ParquetLogicalType`).""" + return wrap_logical_type(self.statistics.get().descr().logical_type()) + + @property + def converted_type(self): + """Legacy converted type (str or None).""" + raw_converted_type = self.statistics.get().descr().converted_type() + return converted_type_name_from_enum(raw_converted_type) + + +cdef class ParquetLogicalType(_Weakrefable): + """Logical type of parquet type.""" + cdef: + shared_ptr[const CParquetLogicalType] type + + def __cinit__(self): + pass + + cdef init(self, const shared_ptr[const CParquetLogicalType]& type): + self.type = type + + def __repr__(self): + return "{}\n {}".format(object.__repr__(self), str(self)) + + def __str__(self): + return frombytes(self.type.get().ToString(), safe=True) + + def to_json(self): + """ + Get a JSON string containing type and type parameters. + + Returns + ------- + json : str + JSON representation of type, with at least a field called 'Type' + which contains the type name. If the type is parameterized, such + as a decimal with scale and precision, will contain those as fields + as well. 
+ """ + return frombytes(self.type.get().ToJSON()) + + @property + def type(self): + """Name of the logical type (str).""" + return logical_type_name_from_enum(self.type.get().type()) + + +cdef wrap_logical_type(const shared_ptr[const CParquetLogicalType]& type): + cdef ParquetLogicalType out = ParquetLogicalType() + out.init(type) + return out + + +cdef _cast_statistic_raw_min(CStatistics* statistics): + cdef ParquetType physical_type = statistics.physical_type() + cdef uint32_t type_length = statistics.descr().type_length() + if physical_type == ParquetType_BOOLEAN: + return ( statistics).min() + elif physical_type == ParquetType_INT32: + return ( statistics).min() + elif physical_type == ParquetType_INT64: + return ( statistics).min() + elif physical_type == ParquetType_FLOAT: + return ( statistics).min() + elif physical_type == ParquetType_DOUBLE: + return ( statistics).min() + elif physical_type == ParquetType_BYTE_ARRAY: + return _box_byte_array(( statistics).min()) + elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY: + return _box_flba(( statistics).min(), type_length) + + +cdef _cast_statistic_raw_max(CStatistics* statistics): + cdef ParquetType physical_type = statistics.physical_type() + cdef uint32_t type_length = statistics.descr().type_length() + if physical_type == ParquetType_BOOLEAN: + return ( statistics).max() + elif physical_type == ParquetType_INT32: + return ( statistics).max() + elif physical_type == ParquetType_INT64: + return ( statistics).max() + elif physical_type == ParquetType_FLOAT: + return ( statistics).max() + elif physical_type == ParquetType_DOUBLE: + return ( statistics).max() + elif physical_type == ParquetType_BYTE_ARRAY: + return _box_byte_array(( statistics).max()) + elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY: + return _box_flba(( statistics).max(), type_length) + + +cdef _cast_statistics(CStatistics* statistics): + cdef: + shared_ptr[CScalar] c_min + shared_ptr[CScalar] c_max + check_status(StatisticsAsScalars(statistics[0], &c_min, &c_max)) + return (pyarrow_wrap_scalar(c_min), pyarrow_wrap_scalar(c_max)) + + +cdef _box_byte_array(ParquetByteArray val): + return cp.PyBytes_FromStringAndSize( val.ptr, val.len) + + +cdef _box_flba(ParquetFLBA val, uint32_t len): + return cp.PyBytes_FromStringAndSize( val.ptr, len) + + +cdef class ColumnChunkMetaData(_Weakrefable): + """Column metadata for a single row group.""" + + def __cinit__(self): + pass + + def __repr__(self): + statistics = indent(repr(self.statistics), 4 * ' ') + return """{0} + file_offset: {1} + file_path: {2} + physical_type: {3} + num_values: {4} + path_in_schema: {5} + is_stats_set: {6} + statistics: +{7} + compression: {8} + encodings: {9} + has_dictionary_page: {10} + dictionary_page_offset: {11} + data_page_offset: {12} + total_compressed_size: {13} + total_uncompressed_size: {14}""".format(object.__repr__(self), + self.file_offset, + self.file_path, + self.physical_type, + self.num_values, + self.path_in_schema, + self.is_stats_set, + statistics, + self.compression, + self.encodings, + self.has_dictionary_page, + self.dictionary_page_offset, + self.data_page_offset, + self.total_compressed_size, + self.total_uncompressed_size) + + def to_dict(self): + """ + Get dictionary representation of the column chunk metadata. + + Returns + ------- + dict + Dictionary with a key for each attribute of this class. 
+ """ + statistics = self.statistics.to_dict() if self.is_stats_set else None + d = dict( + file_offset=self.file_offset, + file_path=self.file_path, + physical_type=self.physical_type, + num_values=self.num_values, + path_in_schema=self.path_in_schema, + is_stats_set=self.is_stats_set, + statistics=statistics, + compression=self.compression, + encodings=self.encodings, + has_dictionary_page=self.has_dictionary_page, + dictionary_page_offset=self.dictionary_page_offset, + data_page_offset=self.data_page_offset, + total_compressed_size=self.total_compressed_size, + total_uncompressed_size=self.total_uncompressed_size + ) + return d + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, ColumnChunkMetaData other): + """ + Return whether the two column chunk metadata objects are equal. + + Parameters + ---------- + other : ColumnChunkMetaData + Metadata to compare against. + + Returns + ------- + are_equal : bool + """ + return self.metadata.Equals(deref(other.metadata)) + + @property + def file_offset(self): + """Offset into file where column chunk is located (int).""" + return self.metadata.file_offset() + + @property + def file_path(self): + """Optional file path if set (str or None).""" + return frombytes(self.metadata.file_path()) + + @property + def physical_type(self): + """Physical type of column (str).""" + return physical_type_name_from_enum(self.metadata.type()) + + @property + def num_values(self): + """Total number of values (int).""" + return self.metadata.num_values() + + @property + def path_in_schema(self): + """Nested path to field, separated by periods (str).""" + path = self.metadata.path_in_schema().get().ToDotString() + return frombytes(path) + + @property + def is_stats_set(self): + """Whether or not statistics are present in metadata (bool).""" + return self.metadata.is_stats_set() + + @property + def statistics(self): + """Statistics for column chunk (:class:`Statistics`).""" + if not self.metadata.is_stats_set(): + return None + statistics = Statistics() + statistics.init(self.metadata.statistics(), self) + return statistics + + @property + def compression(self): + """ + Type of compression used for column (str). + + One of 'UNCOMPRESSED', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', 'ZSTD', + or 'UNKNOWN'. + """ + return compression_name_from_enum(self.metadata.compression()) + + @property + def encodings(self): + """ + Encodings used for column (tuple of str). + + One of 'PLAIN', 'BIT_PACKED', 'RLE', 'BYTE_STREAM_SPLIT', 'DELTA_BINARY_PACKED', + 'DELTA_LENGTH_BYTE_ARRAY', 'DELTA_BYTE_ARRAY'. 
+ """ + return tuple(map(encoding_name_from_enum, self.metadata.encodings())) + + @property + def has_dictionary_page(self): + """Whether there is dictionary data present in the column chunk (bool).""" + return bool(self.metadata.has_dictionary_page()) + + @property + def dictionary_page_offset(self): + """Offset of dictionary page relative to column chunk offset (int).""" + if self.has_dictionary_page: + return self.metadata.dictionary_page_offset() + else: + return None + + @property + def data_page_offset(self): + """Offset of data page relative to column chunk offset (int).""" + return self.metadata.data_page_offset() + + @property + def has_index_page(self): + """Not yet supported.""" + raise NotImplementedError('not supported in parquet-cpp') + + @property + def index_page_offset(self): + """Not yet supported.""" + raise NotImplementedError("parquet-cpp doesn't return valid values") + + @property + def total_compressed_size(self): + """Compressed size in bytes (int).""" + return self.metadata.total_compressed_size() + + @property + def total_uncompressed_size(self): + """Uncompressed size in bytes (int).""" + return self.metadata.total_uncompressed_size() + + @property + def has_offset_index(self): + """Whether the column chunk has an offset index""" + return self.metadata.GetOffsetIndexLocation().has_value() + + @property + def has_column_index(self): + """Whether the column chunk has a column index""" + return self.metadata.GetColumnIndexLocation().has_value() + + +cdef class SortingColumn: + """ + Sorting specification for a single column. + + Returned by :meth:`RowGroupMetaData.sorting_columns` and used in + :class:`ParquetWriter` to specify the sort order of the data. + + Parameters + ---------- + column_index : int + Index of column that data is sorted by. + descending : bool, default False + Whether column is sorted in descending order. + nulls_first : bool, default False + Whether null values appear before valid values. + + Notes + ----- + + Column indices are zero-based, refer only to leaf fields, and are in + depth-first order. This may make the column indices for nested schemas + different from what you expect. In most cases, it will be easier to + specify the sort order using column names instead of column indices + and converting using the ``from_ordering`` method. 
+ + Examples + -------- + + In other APIs, sort order is specified by names, such as: + + >>> sort_order = [('id', 'ascending'), ('timestamp', 'descending')] + + For Parquet, the column index must be used instead: + + >>> import pyarrow.parquet as pq + >>> [pq.SortingColumn(0), pq.SortingColumn(1, descending=True)] + [SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False)] + + Convert the sort_order into the list of sorting columns with + ``from_ordering`` (note that the schema must be provided as well): + + >>> import pyarrow as pa + >>> schema = pa.schema([('id', pa.int64()), ('timestamp', pa.timestamp('ms'))]) + >>> sorting_columns = pq.SortingColumn.from_ordering(schema, sort_order) + >>> sorting_columns + (SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False)) + + Convert back to the sort order with ``to_ordering``: + + >>> pq.SortingColumn.to_ordering(schema, sorting_columns) + ((('id', 'ascending'), ('timestamp', 'descending')), 'at_end') + + See Also + -------- + RowGroupMetaData.sorting_columns + """ + cdef int column_index + cdef c_bool descending + cdef c_bool nulls_first + + def __init__(self, int column_index, c_bool descending=False, c_bool nulls_first=False): + self.column_index = column_index + self.descending = descending + self.nulls_first = nulls_first + + @classmethod + def from_ordering(cls, Schema schema, sort_keys, null_placement='at_end'): + """ + Create a tuple of SortingColumn objects from the same arguments as + :class:`pyarrow.compute.SortOptions`. + + Parameters + ---------- + schema : Schema + Schema of the input data. + sort_keys : Sequence of (name, order) tuples + Names of field/column keys (str) to sort the input on, + along with the order each field/column is sorted in. + Accepted values for `order` are "ascending", "descending". + null_placement : {'at_start', 'at_end'}, default 'at_end' + Where null values should appear in the sort order. + + Returns + ------- + sorting_columns : tuple of SortingColumn + """ + if null_placement == 'at_start': + nulls_first = True + elif null_placement == 'at_end': + nulls_first = False + else: + raise ValueError('null_placement must be "at_start" or "at_end"') + + col_map = _name_to_index_map(schema) + + sorting_columns = [] + + for sort_key in sort_keys: + if isinstance(sort_key, str): + name = sort_key + descending = False + elif (isinstance(sort_key, tuple) and len(sort_key) == 2 and + isinstance(sort_key[0], str) and + isinstance(sort_key[1], str)): + name, descending = sort_key + if descending == "descending": + descending = True + elif descending == "ascending": + descending = False + else: + raise ValueError("Invalid sort key direction: {0}" + .format(descending)) + else: + raise ValueError("Invalid sort key: {0}".format(sort_key)) + + try: + column_index = col_map[name] + except KeyError: + raise ValueError("Sort key name '{0}' not found in schema:\n{1}" + .format(name, schema)) + + sorting_columns.append( + cls(column_index, descending=descending, nulls_first=nulls_first) + ) + + return tuple(sorting_columns) + + @staticmethod + def to_ordering(Schema schema, sorting_columns): + """ + Convert a tuple of SortingColumn objects to the same format as + :class:`pyarrow.compute.SortOptions`. + + Parameters + ---------- + schema : Schema + Schema of the input data. + sorting_columns : tuple of SortingColumn + Columns to sort the input on. 
+ + Returns + ------- + sort_keys : tuple of (name, order) tuples + null_placement : {'at_start', 'at_end'} + """ + col_map = {i: name for name, i in _name_to_index_map(schema).items()} + + sort_keys = [] + nulls_first = None + + for sorting_column in sorting_columns: + name = col_map[sorting_column.column_index] + if sorting_column.descending: + order = "descending" + else: + order = "ascending" + sort_keys.append((name, order)) + if nulls_first is None: + nulls_first = sorting_column.nulls_first + elif nulls_first != sorting_column.nulls_first: + raise ValueError("Sorting columns have inconsistent null placement") + + if nulls_first: + null_placement = "at_start" + else: + null_placement = "at_end" + + return tuple(sort_keys), null_placement + + def __repr__(self): + return """{}(column_index={}, descending={}, nulls_first={})""".format( + self.__class__.__name__, + self.column_index, self.descending, self.nulls_first) + + def __eq__(self, SortingColumn other): + return (self.column_index == other.column_index and + self.descending == other.descending and + self.nulls_first == other.nulls_first) + + def __hash__(self): + return hash((self.column_index, self.descending, self.nulls_first)) + + @property + def column_index(self): + """Index of column data is sorted by (int).""" + return self.column_index + + @property + def descending(self): + """Whether column is sorted in descending order (bool).""" + return self.descending + + @property + def nulls_first(self): + """Whether null values appear before valid values (bool).""" + return self.nulls_first + + def to_dict(self): + """ + Get dictionary representation of the SortingColumn. + + Returns + ------- + dict + Dictionary with a key for each attribute of this class. + """ + d = dict( + column_index=self.column_index, + descending=self.descending, + nulls_first=self.nulls_first + ) + return d + + +cdef class RowGroupMetaData(_Weakrefable): + """Metadata for a single row group.""" + + def __cinit__(self, FileMetaData parent, int index): + if index < 0 or index >= parent.num_row_groups: + raise IndexError('{0} out of bounds'.format(index)) + self.up_metadata = parent._metadata.RowGroup(index) + self.metadata = self.up_metadata.get() + self.parent = parent + self.index = index + + def __reduce__(self): + return RowGroupMetaData, (self.parent, self.index) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, RowGroupMetaData other): + """ + Return whether the two row group metadata objects are equal. + + Parameters + ---------- + other : RowGroupMetaData + Metadata to compare against. + + Returns + ------- + are_equal : bool + """ + return self.metadata.Equals(deref(other.metadata)) + + def column(self, int i): + """ + Get column metadata at given index. + + Parameters + ---------- + i : int + Index of column to get metadata for. + + Returns + ------- + ColumnChunkMetaData + Metadata for column within this chunk. + """ + if i < 0 or i >= self.num_columns: + raise IndexError('{0} out of bounds'.format(i)) + chunk = ColumnChunkMetaData() + chunk.init(self, i) + return chunk + + def __repr__(self): + return """{0} + num_columns: {1} + num_rows: {2} + total_byte_size: {3} + sorting_columns: {4}""".format(object.__repr__(self), + self.num_columns, + self.num_rows, + self.total_byte_size, + self.sorting_columns) + + def to_dict(self): + """ + Get dictionary representation of the row group metadata.
+ + Returns + ------- + dict + Dictionary with a key for each attribute of this class. + """ + columns = [] + d = dict( + num_columns=self.num_columns, + num_rows=self.num_rows, + total_byte_size=self.total_byte_size, + columns=columns, + sorting_columns=[col.to_dict() for col in self.sorting_columns] + ) + for i in range(self.num_columns): + columns.append(self.column(i).to_dict()) + return d + + @property + def num_columns(self): + """Number of columns in this row group (int).""" + return self.metadata.num_columns() + + @property + def num_rows(self): + """Number of rows in this row group (int).""" + return self.metadata.num_rows() + + @property + def total_byte_size(self): + """Total byte size of all the uncompressed column data in this row group (int).""" + return self.metadata.total_byte_size() + + @property + def sorting_columns(self): + """Columns the row group is sorted by (tuple of :class:`SortingColumn`).""" + out = [] + cdef vector[CSortingColumn] sorting_columns = self.metadata.sorting_columns() + for sorting_col in sorting_columns: + out.append(SortingColumn( + sorting_col.column_idx, + sorting_col.descending, + sorting_col.nulls_first + )) + return tuple(out) + + +def _reconstruct_filemetadata(Buffer serialized): + cdef: + FileMetaData metadata = FileMetaData.__new__(FileMetaData) + CBuffer *buffer = serialized.buffer.get() + uint32_t metadata_len = <uint32_t> buffer.size() + + metadata.init(CFileMetaData_Make(buffer.data(), &metadata_len)) + + return metadata + + +cdef class FileMetaData(_Weakrefable): + """Parquet metadata for a single file.""" + + def __cinit__(self): + pass + + def __reduce__(self): + cdef: + NativeFile sink = BufferOutputStream() + COutputStream* c_sink = sink.get_output_stream().get() + with nogil: + self._metadata.WriteTo(c_sink) + + cdef Buffer buffer = sink.getvalue() + return _reconstruct_filemetadata, (buffer,) + + def __hash__(self): + return hash((self.schema, + self.num_rows, + self.num_row_groups, + self.format_version, + self.serialized_size)) + + def __repr__(self): + return """{0} + created_by: {1} + num_columns: {2} + num_rows: {3} + num_row_groups: {4} + format_version: {5} + serialized_size: {6}""".format(object.__repr__(self), + self.created_by, self.num_columns, + self.num_rows, self.num_row_groups, + self.format_version, + self.serialized_size) + + def to_dict(self): + """ + Get dictionary representation of the file metadata. + + Returns + ------- + dict + Dictionary with a key for each attribute of this class. + """ + row_groups = [] + d = dict( + created_by=self.created_by, + num_columns=self.num_columns, + num_rows=self.num_rows, + num_row_groups=self.num_row_groups, + row_groups=row_groups, + format_version=self.format_version, + serialized_size=self.serialized_size + ) + for i in range(self.num_row_groups): + row_groups.append(self.row_group(i).to_dict()) + return d + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, FileMetaData other not None): + """ + Return whether the two file metadata objects are equal. + + Parameters + ---------- + other : FileMetaData + Metadata to compare against.
+ + Returns + ------- + are_equal : bool + """ + return self._metadata.Equals(deref(other._metadata)) + + @property + def schema(self): + """Schema of the file (:class:`ParquetSchema`).""" + if self._schema is None: + self._schema = ParquetSchema(self) + return self._schema + + @property + def serialized_size(self): + """Size of the original thrift encoded metadata footer (int).""" + return self._metadata.size() + + @property + def num_columns(self): + """Number of columns in file (int).""" + return self._metadata.num_columns() + + @property + def num_rows(self): + """Total number of rows in file (int).""" + return self._metadata.num_rows() + + @property + def num_row_groups(self): + """Number of row groups in file (int).""" + return self._metadata.num_row_groups() + + @property + def format_version(self): + """ + Parquet format version used in file (str, such as '1.0', '2.4'). + + If version is missing or unparsable, will default to assuming '2.6'. + """ + cdef ParquetVersion version = self._metadata.version() + if version == ParquetVersion_V1: + return '1.0' + elif version == ParquetVersion_V2_0: + return 'pseudo-2.0' + elif version == ParquetVersion_V2_4: + return '2.4' + elif version == ParquetVersion_V2_6: + return '2.6' + else: + warnings.warn('Unrecognized file version, assuming 2.6: {}' + .format(version)) + return '2.6' + + @property + def created_by(self): + """ + String describing source of the parquet file (str). + + This typically includes library name and version number. For example, Arrow 7.0's + writer returns 'parquet-cpp-arrow version 7.0.0'. + """ + return frombytes(self._metadata.created_by()) + + @property + def metadata(self): + """Additional metadata as key value pairs (dict[bytes, bytes]).""" + cdef: + unordered_map[c_string, c_string] metadata + const CKeyValueMetadata* underlying_metadata + underlying_metadata = self._metadata.key_value_metadata().get() + if underlying_metadata != NULL: + underlying_metadata.ToUnorderedMap(&metadata) + return metadata + else: + return None + + def row_group(self, int i): + """ + Get metadata for row group at index i. + + Parameters + ---------- + i : int + Row group index to get. + + Returns + ------- + row_group_metadata : RowGroupMetaData + """ + return RowGroupMetaData(self, i) + + def set_file_path(self, path): + """ + Set ColumnChunk file paths to the given value. + + This method modifies the ``file_path`` field of each ColumnChunk + in the FileMetaData to be a particular value. + + Parameters + ---------- + path : str + The file path to set on all ColumnChunks. + """ + cdef: + c_string c_path = tobytes(path) + self._metadata.set_file_path(c_path) + + def append_row_groups(self, FileMetaData other): + """ + Append row groups from other FileMetaData object. + + Parameters + ---------- + other : FileMetaData + Other metadata to append row groups from. + """ + cdef shared_ptr[CFileMetaData] c_metadata + + c_metadata = other.sp_metadata + self._metadata.AppendRowGroups(deref(c_metadata)) + + def write_metadata_file(self, where): + """ + Write the metadata to a metadata-only Parquet file. + + Parameters + ---------- + where : path or file-like object + Where to write the metadata. Should be a writable path on + the local filesystem, or a writable file-like object. 
+ """ + cdef: + shared_ptr[COutputStream] sink + c_string c_where + + try: + where = _stringify_path(where) + except TypeError: + get_writer(where, &sink) + else: + c_where = tobytes(where) + with nogil: + sink = GetResultValue(FileOutputStream.Open(c_where)) + + with nogil: + check_status( + WriteMetaDataFile(deref(self._metadata), sink.get())) + + +cdef class ParquetSchema(_Weakrefable): + """A Parquet schema.""" + + def __cinit__(self, FileMetaData container): + self.parent = container + self.schema = container._metadata.schema() + + def __repr__(self): + return "{0}\n{1}".format( + object.__repr__(self), + frombytes(self.schema.ToString(), safe=True)) + + def __reduce__(self): + return ParquetSchema, (self.parent,) + + def __len__(self): + return self.schema.num_columns() + + def __getitem__(self, i): + return self.column(i) + + def __hash__(self): + return hash(self.schema.ToString()) + + @property + def names(self): + """Name of each field (list of str).""" + return [self[i].name for i in range(len(self))] + + def to_arrow_schema(self): + """ + Convert Parquet schema to effective Arrow schema. + + Returns + ------- + schema : Schema + """ + cdef shared_ptr[CSchema] sp_arrow_schema + + with nogil: + check_status(FromParquetSchema( + self.schema, default_arrow_reader_properties(), + self.parent._metadata.key_value_metadata(), + &sp_arrow_schema)) + + return pyarrow_wrap_schema(sp_arrow_schema) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, ParquetSchema other): + """ + Return whether the two schemas are equal. + + Parameters + ---------- + other : ParquetSchema + Schema to compare against. + + Returns + ------- + are_equal : bool + """ + return self.schema.Equals(deref(other.schema)) + + def column(self, i): + """ + Return the schema for a single column. + + Parameters + ---------- + i : int + Index of column in schema. + + Returns + ------- + column_schema : ColumnSchema + """ + if i < 0 or i >= len(self): + raise IndexError('{0} out of bounds'.format(i)) + + return ColumnSchema(self, i) + + +cdef class ColumnSchema(_Weakrefable): + """Schema for a single column.""" + cdef: + int index + ParquetSchema parent + const ColumnDescriptor* descr + + def __cinit__(self, ParquetSchema schema, int index): + self.parent = schema + self.index = index # for pickling support + self.descr = schema.schema.Column(index) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def __reduce__(self): + return ColumnSchema, (self.parent, self.index) + + def equals(self, ColumnSchema other): + """ + Return whether the two column schemas are equal. + + Parameters + ---------- + other : ColumnSchema + Schema to compare against. 
+ + Returns + ------- + are_equal : bool + """ + return self.descr.Equals(deref(other.descr)) + + def __repr__(self): + physical_type = self.physical_type + converted_type = self.converted_type + if converted_type == 'DECIMAL': + converted_type = 'DECIMAL({0}, {1})'.format(self.precision, + self.scale) + elif physical_type == 'FIXED_LEN_BYTE_ARRAY': + converted_type = ('FIXED_LEN_BYTE_ARRAY(length={0})' + .format(self.length)) + + return """ + name: {0} + path: {1} + max_definition_level: {2} + max_repetition_level: {3} + physical_type: {4} + logical_type: {5} + converted_type (legacy): {6}""".format(self.name, self.path, + self.max_definition_level, + self.max_repetition_level, + physical_type, + str(self.logical_type), + converted_type) + + @property + def name(self): + """Name of field (str).""" + return frombytes(self.descr.name()) + + @property + def path(self): + """Nested path to field, separated by periods (str).""" + return frombytes(self.descr.path().get().ToDotString()) + + @property + def max_definition_level(self): + """Maximum definition level (int).""" + return self.descr.max_definition_level() + + @property + def max_repetition_level(self): + """Maximum repetition level (int).""" + return self.descr.max_repetition_level() + + @property + def physical_type(self): + """Name of physical type (str).""" + return physical_type_name_from_enum(self.descr.physical_type()) + + @property + def logical_type(self): + """Logical type of column (:class:`ParquetLogicalType`).""" + return wrap_logical_type(self.descr.logical_type()) + + @property + def converted_type(self): + """Legacy converted type (str or None).""" + return converted_type_name_from_enum(self.descr.converted_type()) + + # FIXED_LEN_BYTE_ARRAY attribute + @property + def length(self): + """Array length if fixed length byte array type, None otherwise (int or None).""" + return self.descr.type_length() + + # Decimal attributes + @property + def precision(self): + """Precision if decimal type, None otherwise (int or None).""" + return self.descr.type_precision() + + @property + def scale(self): + """Scale if decimal type, None otherwise (int or None).""" + return self.descr.type_scale() + + +cdef physical_type_name_from_enum(ParquetType type_): + return { + ParquetType_BOOLEAN: 'BOOLEAN', + ParquetType_INT32: 'INT32', + ParquetType_INT64: 'INT64', + ParquetType_INT96: 'INT96', + ParquetType_FLOAT: 'FLOAT', + ParquetType_DOUBLE: 'DOUBLE', + ParquetType_BYTE_ARRAY: 'BYTE_ARRAY', + ParquetType_FIXED_LEN_BYTE_ARRAY: 'FIXED_LEN_BYTE_ARRAY', + }.get(type_, 'UNKNOWN') + + +cdef logical_type_name_from_enum(ParquetLogicalTypeId type_): + return { + ParquetLogicalType_UNDEFINED: 'UNDEFINED', + ParquetLogicalType_STRING: 'STRING', + ParquetLogicalType_MAP: 'MAP', + ParquetLogicalType_LIST: 'LIST', + ParquetLogicalType_ENUM: 'ENUM', + ParquetLogicalType_DECIMAL: 'DECIMAL', + ParquetLogicalType_DATE: 'DATE', + ParquetLogicalType_TIME: 'TIME', + ParquetLogicalType_TIMESTAMP: 'TIMESTAMP', + ParquetLogicalType_INT: 'INT', + ParquetLogicalType_FLOAT16: 'FLOAT16', + ParquetLogicalType_JSON: 'JSON', + ParquetLogicalType_BSON: 'BSON', + ParquetLogicalType_UUID: 'UUID', + ParquetLogicalType_NONE: 'NONE', + }.get(type_, 'UNKNOWN') + + +cdef converted_type_name_from_enum(ParquetConvertedType type_): + return { + ParquetConvertedType_NONE: 'NONE', + ParquetConvertedType_UTF8: 'UTF8', + ParquetConvertedType_MAP: 'MAP', + ParquetConvertedType_MAP_KEY_VALUE: 'MAP_KEY_VALUE', + ParquetConvertedType_LIST: 'LIST', + ParquetConvertedType_ENUM: 'ENUM', + 
ParquetConvertedType_DECIMAL: 'DECIMAL', + ParquetConvertedType_DATE: 'DATE', + ParquetConvertedType_TIME_MILLIS: 'TIME_MILLIS', + ParquetConvertedType_TIME_MICROS: 'TIME_MICROS', + ParquetConvertedType_TIMESTAMP_MILLIS: 'TIMESTAMP_MILLIS', + ParquetConvertedType_TIMESTAMP_MICROS: 'TIMESTAMP_MICROS', + ParquetConvertedType_UINT_8: 'UINT_8', + ParquetConvertedType_UINT_16: 'UINT_16', + ParquetConvertedType_UINT_32: 'UINT_32', + ParquetConvertedType_UINT_64: 'UINT_64', + ParquetConvertedType_INT_8: 'INT_8', + ParquetConvertedType_INT_16: 'INT_16', + ParquetConvertedType_INT_32: 'INT_32', + ParquetConvertedType_INT_64: 'INT_64', + ParquetConvertedType_JSON: 'JSON', + ParquetConvertedType_BSON: 'BSON', + ParquetConvertedType_INTERVAL: 'INTERVAL', + }.get(type_, 'UNKNOWN') + + +cdef encoding_name_from_enum(ParquetEncoding encoding_): + return { + ParquetEncoding_PLAIN: 'PLAIN', + ParquetEncoding_PLAIN_DICTIONARY: 'PLAIN_DICTIONARY', + ParquetEncoding_RLE: 'RLE', + ParquetEncoding_BIT_PACKED: 'BIT_PACKED', + ParquetEncoding_DELTA_BINARY_PACKED: 'DELTA_BINARY_PACKED', + ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY: 'DELTA_LENGTH_BYTE_ARRAY', + ParquetEncoding_DELTA_BYTE_ARRAY: 'DELTA_BYTE_ARRAY', + ParquetEncoding_RLE_DICTIONARY: 'RLE_DICTIONARY', + ParquetEncoding_BYTE_STREAM_SPLIT: 'BYTE_STREAM_SPLIT', + }.get(encoding_, 'UNKNOWN') + + +cdef encoding_enum_from_name(str encoding_name): + enc = { + 'PLAIN': ParquetEncoding_PLAIN, + 'BIT_PACKED': ParquetEncoding_BIT_PACKED, + 'RLE': ParquetEncoding_RLE, + 'BYTE_STREAM_SPLIT': ParquetEncoding_BYTE_STREAM_SPLIT, + 'DELTA_BINARY_PACKED': ParquetEncoding_DELTA_BINARY_PACKED, + 'DELTA_LENGTH_BYTE_ARRAY': ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY, + 'DELTA_BYTE_ARRAY': ParquetEncoding_DELTA_BYTE_ARRAY, + 'RLE_DICTIONARY': 'dict', + 'PLAIN_DICTIONARY': 'dict', + }.get(encoding_name, None) + if enc is None: + raise ValueError(f"Unsupported column encoding: {encoding_name!r}") + elif enc == 'dict': + raise ValueError(f"{encoding_name!r} is already used by default.") + else: + return enc + + +cdef compression_name_from_enum(ParquetCompression compression_): + return { + ParquetCompression_UNCOMPRESSED: 'UNCOMPRESSED', + ParquetCompression_SNAPPY: 'SNAPPY', + ParquetCompression_GZIP: 'GZIP', + ParquetCompression_LZO: 'LZO', + ParquetCompression_BROTLI: 'BROTLI', + ParquetCompression_LZ4: 'LZ4', + ParquetCompression_ZSTD: 'ZSTD', + }.get(compression_, 'UNKNOWN') + + +cdef int check_compression_name(name) except -1: + if name.upper() not in {'NONE', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', + 'ZSTD'}: + raise ArrowException("Unsupported compression: " + name) + return 0 + + +cdef ParquetCompression compression_from_name(name): + name = name.upper() + if name == 'SNAPPY': + return ParquetCompression_SNAPPY + elif name == 'GZIP': + return ParquetCompression_GZIP + elif name == 'LZO': + return ParquetCompression_LZO + elif name == 'BROTLI': + return ParquetCompression_BROTLI + elif name == 'LZ4': + return ParquetCompression_LZ4 + elif name == 'ZSTD': + return ParquetCompression_ZSTD + else: + return ParquetCompression_UNCOMPRESSED + + +cdef class ParquetReader(_Weakrefable): + cdef: + object source + CMemoryPool* pool + UniquePtrNoGIL[FileReader] reader + FileMetaData _metadata + shared_ptr[CRandomAccessFile] rd_handle + + cdef public: + _column_idx_map + + def __cinit__(self, MemoryPool memory_pool=None): + self.pool = maybe_unbox_memory_pool(memory_pool) + self._metadata = None + + def open(self, object source not None, *, bint use_memory_map=False, + 
read_dictionary=None, FileMetaData metadata=None, + int buffer_size=0, bint pre_buffer=False, + coerce_int96_timestamp_unit=None, + FileDecryptionProperties decryption_properties=None, + thrift_string_size_limit=None, + thrift_container_size_limit=None, + page_checksum_verification=False): + """ + Open a parquet file for reading. + + Parameters + ---------- + source : str, pathlib.Path, pyarrow.NativeFile, or file-like object + use_memory_map : bool, default False + read_dictionary : iterable[int or str], optional + metadata : FileMetaData, optional + buffer_size : int, default 0 + pre_buffer : bool, default False + coerce_int96_timestamp_unit : str, optional + decryption_properties : FileDecryptionProperties, optional + thrift_string_size_limit : int, optional + thrift_container_size_limit : int, optional + page_checksum_verification : bool, default False + """ + cdef: + shared_ptr[CFileMetaData] c_metadata + CReaderProperties properties = default_reader_properties() + ArrowReaderProperties arrow_props = ( + default_arrow_reader_properties()) + FileReaderBuilder builder + + if pre_buffer and not is_threading_enabled(): + pre_buffer = False + + if metadata is not None: + c_metadata = metadata.sp_metadata + + if buffer_size > 0: + properties.enable_buffered_stream() + properties.set_buffer_size(buffer_size) + elif buffer_size == 0: + properties.disable_buffered_stream() + else: + raise ValueError('Buffer size must be larger than zero') + + if thrift_string_size_limit is not None: + if thrift_string_size_limit <= 0: + raise ValueError("thrift_string_size_limit " + "must be larger than zero") + properties.set_thrift_string_size_limit(thrift_string_size_limit) + if thrift_container_size_limit is not None: + if thrift_container_size_limit <= 0: + raise ValueError("thrift_container_size_limit " + "must be larger than zero") + properties.set_thrift_container_size_limit( + thrift_container_size_limit) + + if decryption_properties is not None: + properties.file_decryption_properties( + decryption_properties.unwrap()) + + arrow_props.set_pre_buffer(pre_buffer) + + properties.set_page_checksum_verification(page_checksum_verification) + + if coerce_int96_timestamp_unit is None: + # use the default defined in default_arrow_reader_properties() + pass + else: + arrow_props.set_coerce_int96_timestamp_unit( + string_to_timeunit(coerce_int96_timestamp_unit)) + + self.source = source + get_reader(source, use_memory_map, &self.rd_handle) + + with nogil: + check_status(builder.Open(self.rd_handle, properties, c_metadata)) + + # Set up metadata + with nogil: + c_metadata = builder.raw_reader().metadata() + self._metadata = result = FileMetaData() + result.init(c_metadata) + + if read_dictionary is not None: + self._set_read_dictionary(read_dictionary, &arrow_props) + + with nogil: + check_status(builder.memory_pool(self.pool) + .properties(arrow_props) + .Build(&self.reader)) + + cdef _set_read_dictionary(self, read_dictionary, + ArrowReaderProperties* props): + for column in read_dictionary: + if not isinstance(column, int): + column = self.column_name_idx(column) + props.set_read_dictionary(column, True) + + @property + def column_paths(self): + cdef: + FileMetaData container = self.metadata + const CFileMetaData* metadata = container._metadata + vector[c_string] path + int i = 0 + + paths = [] + for i in range(0, metadata.num_columns()): + path = (metadata.schema().Column(i) + .path().get().ToDotVector()) + paths.append([frombytes(x) for x in path]) + + return paths + + @property + def metadata(self): + 
return self._metadata + + @property + def schema_arrow(self): + cdef shared_ptr[CSchema] out + with nogil: + check_status(self.reader.get().GetSchema(&out)) + return pyarrow_wrap_schema(out) + + @property + def num_row_groups(self): + return self.reader.get().num_row_groups() + + def set_use_threads(self, bint use_threads): + """ + Parameters + ---------- + use_threads : bool + """ + if is_threading_enabled(): + self.reader.get().set_use_threads(use_threads) + else: + self.reader.get().set_use_threads(False) + + def set_batch_size(self, int64_t batch_size): + """ + Parameters + ---------- + batch_size : int64 + """ + self.reader.get().set_batch_size(batch_size) + + def iter_batches(self, int64_t batch_size, row_groups, column_indices=None, + bint use_threads=True): + """ + Parameters + ---------- + batch_size : int64 + row_groups : list[int] + column_indices : list[int], optional + use_threads : bool, default True + + Yields + ------ + next : RecordBatch + """ + cdef: + vector[int] c_row_groups + vector[int] c_column_indices + shared_ptr[CRecordBatch] record_batch + UniquePtrNoGIL[CRecordBatchReader] recordbatchreader + + self.set_batch_size(batch_size) + + if use_threads: + self.set_use_threads(use_threads) + + for row_group in row_groups: + c_row_groups.push_back(row_group) + + if column_indices is not None: + for index in column_indices: + c_column_indices.push_back(index) + with nogil: + check_status( + self.reader.get().GetRecordBatchReader( + c_row_groups, c_column_indices, &recordbatchreader + ) + ) + else: + with nogil: + check_status( + self.reader.get().GetRecordBatchReader( + c_row_groups, &recordbatchreader + ) + ) + + while True: + with nogil: + check_status( + recordbatchreader.get().ReadNext(&record_batch) + ) + if record_batch.get() == NULL: + break + + yield pyarrow_wrap_batch(record_batch) + + def read_row_group(self, int i, column_indices=None, + bint use_threads=True): + """ + Parameters + ---------- + i : int + column_indices : list[int], optional + use_threads : bool, default True + + Returns + ------- + table : pyarrow.Table + """ + return self.read_row_groups([i], column_indices, use_threads) + + def read_row_groups(self, row_groups not None, column_indices=None, + bint use_threads=True): + """ + Parameters + ---------- + row_groups : list[int] + column_indices : list[int], optional + use_threads : bool, default True + + Returns + ------- + table : pyarrow.Table + """ + cdef: + shared_ptr[CTable] ctable + vector[int] c_row_groups + vector[int] c_column_indices + + self.set_use_threads(use_threads) + + for row_group in row_groups: + c_row_groups.push_back(row_group) + + if column_indices is not None: + for index in column_indices: + c_column_indices.push_back(index) + + with nogil: + check_status(self.reader.get() + .ReadRowGroups(c_row_groups, c_column_indices, + &ctable)) + else: + # Read all columns + with nogil: + check_status(self.reader.get() + .ReadRowGroups(c_row_groups, &ctable)) + return pyarrow_wrap_table(ctable) + + def read_all(self, column_indices=None, bint use_threads=True): + """ + Parameters + ---------- + column_indices : list[int], optional + use_threads : bool, default True + + Returns + ------- + table : pyarrow.Table + """ + cdef: + shared_ptr[CTable] ctable + vector[int] c_column_indices + + self.set_use_threads(use_threads) + + if column_indices is not None: + for index in column_indices: + c_column_indices.push_back(index) + + with nogil: + check_status(self.reader.get() + .ReadTable(c_column_indices, &ctable)) + else: + # Read all columns + 
with nogil: + check_status(self.reader.get() + .ReadTable(&ctable)) + return pyarrow_wrap_table(ctable) + + def scan_contents(self, column_indices=None, batch_size=65536): + """ + Parameters + ---------- + column_indices : list[int], optional + batch_size : int32, default 65536 + + Returns + ------- + num_rows : int64 + """ + cdef: + vector[int] c_column_indices + int32_t c_batch_size + int64_t c_num_rows + + if column_indices is not None: + for index in column_indices: + c_column_indices.push_back(index) + + c_batch_size = batch_size + + with nogil: + check_status(self.reader.get() + .ScanContents(c_column_indices, c_batch_size, + &c_num_rows)) + + return c_num_rows + + def column_name_idx(self, column_name): + """ + Find the index of a column by its name. + + Parameters + ---------- + column_name : str + Name of the column; separation of nesting levels is done via ".". + + Returns + ------- + column_idx : int + Integer index of the column in the schema. + """ + cdef: + FileMetaData container = self.metadata + const CFileMetaData* metadata = container._metadata + int i = 0 + + if self._column_idx_map is None: + self._column_idx_map = {} + for i in range(0, metadata.num_columns()): + col_bytes = tobytes(metadata.schema().Column(i) + .path().get().ToDotString()) + self._column_idx_map[col_bytes] = i + + return self._column_idx_map[tobytes(column_name)] + + def read_column(self, int column_index): + """ + Read the column at the specified index. + + Parameters + ---------- + column_index : int + Index of the column. + + Returns + ------- + column : pyarrow.ChunkedArray + """ + cdef shared_ptr[CChunkedArray] out + with nogil: + check_status(self.reader.get() + .ReadColumn(column_index, &out)) + return pyarrow_wrap_chunked_array(out) + + def close(self): + if not self.closed: + with nogil: + check_status(self.rd_handle.get().Close()) + + @property + def closed(self): + if self.rd_handle == NULL: + return True + with nogil: + closed = self.rd_handle.get().closed() + return closed + + +cdef CSortingColumn _convert_sorting_column(SortingColumn sorting_column): + cdef CSortingColumn c_sorting_column + + c_sorting_column.column_idx = sorting_column.column_index + c_sorting_column.descending = sorting_column.descending + c_sorting_column.nulls_first = sorting_column.nulls_first + + return c_sorting_column + + +cdef vector[CSortingColumn] _convert_sorting_columns(sorting_columns) except *: + if not (isinstance(sorting_columns, Sequence) + and all(isinstance(col, SortingColumn) for col in sorting_columns)): + raise ValueError( + "'sorting_columns' must be a list of `SortingColumn`") + + cdef vector[CSortingColumn] c_sorting_columns = [_convert_sorting_column(col) + for col in sorting_columns] + + return c_sorting_columns + + +cdef shared_ptr[WriterProperties] _create_writer_properties( + use_dictionary=None, + compression=None, + version=None, + write_statistics=None, + data_page_size=None, + compression_level=None, + use_byte_stream_split=False, + column_encoding=None, + data_page_version=None, + FileEncryptionProperties encryption_properties=None, + write_batch_size=None, + dictionary_pagesize_limit=None, + write_page_index=False, + write_page_checksum=False, + sorting_columns=None, + store_decimal_as_integer=False) except *: + + """General writer properties""" + cdef: + shared_ptr[WriterProperties] properties + WriterProperties.Builder props + + # data_page_version + + if data_page_version is not None: + if data_page_version == "1.0": + props.data_page_version(ParquetDataPageVersion_V1) + elif 
data_page_version == "2.0": + props.data_page_version(ParquetDataPageVersion_V2) + else: + raise ValueError("Unsupported Parquet data page version: {0}" + .format(data_page_version)) + + # version + + if version is not None: + if version == "1.0": + props.version(ParquetVersion_V1) + elif version in ("2.0", "pseudo-2.0"): + warnings.warn( + "Parquet format '2.0' pseudo version is deprecated, use " + "'2.4' or '2.6' for fine-grained feature selection", + FutureWarning, stacklevel=2) + props.version(ParquetVersion_V2_0) + elif version == "2.4": + props.version(ParquetVersion_V2_4) + elif version == "2.6": + props.version(ParquetVersion_V2_6) + else: + raise ValueError("Unsupported Parquet format version: {0}" + .format(version)) + + # compression + + if isinstance(compression, basestring): + check_compression_name(compression) + props.compression(compression_from_name(compression)) + elif compression is not None: + for column, codec in compression.iteritems(): + check_compression_name(codec) + props.compression(tobytes(column), compression_from_name(codec)) + + if isinstance(compression_level, int): + props.compression_level(compression_level) + elif compression_level is not None: + for column, level in compression_level.iteritems(): + props.compression_level(tobytes(column), level) + + # use_dictionary + + if isinstance(use_dictionary, bool): + if use_dictionary: + props.enable_dictionary() + if column_encoding is not None: + raise ValueError( + "To use 'column_encoding' set 'use_dictionary' to False") + else: + props.disable_dictionary() + elif use_dictionary is not None: + # Deactivate dictionary encoding by default + props.disable_dictionary() + for column in use_dictionary: + props.enable_dictionary(tobytes(column)) + if (column_encoding is not None and + column_encoding.get(column) is not None): + raise ValueError( + "To use 'column_encoding' set 'use_dictionary' to False") + + # write_statistics + + if isinstance(write_statistics, bool): + if write_statistics: + props.enable_statistics() + else: + props.disable_statistics() + elif write_statistics is not None: + # Deactivate statistics by default and enable for specified columns + props.disable_statistics() + for column in write_statistics: + props.enable_statistics(tobytes(column)) + + # sorting_columns + + if sorting_columns is not None: + props.set_sorting_columns(_convert_sorting_columns(sorting_columns)) + + # use_byte_stream_split + + if isinstance(use_byte_stream_split, bool): + if use_byte_stream_split: + if column_encoding is not None: + raise ValueError( + "'use_byte_stream_split' cannot be passed" + "together with 'column_encoding'") + else: + props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT) + elif use_byte_stream_split is not None: + for column in use_byte_stream_split: + if column_encoding is None: + column_encoding = {column: 'BYTE_STREAM_SPLIT'} + elif column_encoding.get(column, None) is None: + column_encoding[column] = 'BYTE_STREAM_SPLIT' + else: + raise ValueError( + "'use_byte_stream_split' cannot be passed" + "together with 'column_encoding'") + + # store_decimal_as_integer + + if isinstance(store_decimal_as_integer, bool): + if store_decimal_as_integer: + props.enable_store_decimal_as_integer() + else: + props.disable_store_decimal_as_integer() + else: + raise TypeError("'store_decimal_as_integer' must be a boolean") + + # column_encoding + # encoding map - encode individual columns + + if column_encoding is not None: + if isinstance(column_encoding, dict): + for column, _encoding in column_encoding.items(): + 
props.encoding(tobytes(column), + encoding_enum_from_name(_encoding)) + elif isinstance(column_encoding, str): + props.encoding(encoding_enum_from_name(column_encoding)) + else: + raise TypeError( + "'column_encoding' should be a dictionary or a string") + + if data_page_size is not None: + props.data_pagesize(data_page_size) + + if write_batch_size is not None: + props.write_batch_size(write_batch_size) + + if dictionary_pagesize_limit is not None: + props.dictionary_pagesize_limit(dictionary_pagesize_limit) + + # encryption + + if encryption_properties is not None: + props.encryption( + (encryption_properties).unwrap()) + + # For backwards compatibility reasons we cap the maximum row group size + # at 64Mi rows. This could be changed in the future, though it would be + # a breaking change. + # + # The user can always specify a smaller row group size (and the default + # is smaller) when calling write_table. If the call to write_table uses + # a size larger than this then it will be latched to this value. + props.max_row_group_length(_MAX_ROW_GROUP_SIZE) + + # checksum + + if write_page_checksum: + props.enable_page_checksum() + else: + props.disable_page_checksum() + + # page index + + if write_page_index: + props.enable_write_page_index() + else: + props.disable_write_page_index() + + properties = props.build() + + return properties + + +cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties( + use_deprecated_int96_timestamps=False, + coerce_timestamps=None, + allow_truncated_timestamps=False, + writer_engine_version=None, + use_compliant_nested_type=True, + store_schema=True) except *: + """Arrow writer properties""" + cdef: + shared_ptr[ArrowWriterProperties] arrow_properties + ArrowWriterProperties.Builder arrow_props + + # Store the original Arrow schema so things like dictionary types can + # be automatically reconstructed + if store_schema: + arrow_props.store_schema() + + # int96 support + + if use_deprecated_int96_timestamps: + arrow_props.enable_deprecated_int96_timestamps() + else: + arrow_props.disable_deprecated_int96_timestamps() + + # coerce_timestamps + + if coerce_timestamps == 'ms': + arrow_props.coerce_timestamps(TimeUnit_MILLI) + elif coerce_timestamps == 'us': + arrow_props.coerce_timestamps(TimeUnit_MICRO) + elif coerce_timestamps is not None: + raise ValueError('Invalid value for coerce_timestamps: {0}' + .format(coerce_timestamps)) + + # allow_truncated_timestamps + + if allow_truncated_timestamps: + arrow_props.allow_truncated_timestamps() + else: + arrow_props.disallow_truncated_timestamps() + + # use_compliant_nested_type + + if use_compliant_nested_type: + arrow_props.enable_compliant_nested_types() + else: + arrow_props.disable_compliant_nested_types() + + # writer_engine_version + + if writer_engine_version == "V1": + warnings.warn("V1 parquet writer engine is a no-op. 
Use V2.") + arrow_props.set_engine_version(ArrowWriterEngineVersion.V1) + elif writer_engine_version != "V2": + raise ValueError("Unsupported Writer Engine Version: {0}" + .format(writer_engine_version)) + + arrow_properties = arrow_props.build() + + return arrow_properties + +cdef _name_to_index_map(Schema arrow_schema): + cdef: + shared_ptr[CSchema] sp_arrow_schema + shared_ptr[SchemaDescriptor] sp_parquet_schema + shared_ptr[WriterProperties] props = _create_writer_properties() + shared_ptr[ArrowWriterProperties] arrow_props = _create_arrow_writer_properties( + use_deprecated_int96_timestamps=False, + coerce_timestamps=None, + allow_truncated_timestamps=False, + writer_engine_version="V2" + ) + + sp_arrow_schema = pyarrow_unwrap_schema(arrow_schema) + + with nogil: + check_status(ToParquetSchema( + sp_arrow_schema.get(), deref(props.get()), deref(arrow_props.get()), &sp_parquet_schema)) + + out = dict() + + cdef SchemaDescriptor* parquet_schema = sp_parquet_schema.get() + + for i in range(parquet_schema.num_columns()): + name = frombytes(parquet_schema.Column(i).path().get().ToDotString()) + out[name] = i + + return out + + +cdef class ParquetWriter(_Weakrefable): + cdef: + unique_ptr[FileWriter] writer + shared_ptr[COutputStream] sink + bint own_sink + + cdef readonly: + object use_dictionary + object use_deprecated_int96_timestamps + object use_byte_stream_split + object column_encoding + object coerce_timestamps + object allow_truncated_timestamps + object compression + object compression_level + object data_page_version + object use_compliant_nested_type + object version + object write_statistics + object writer_engine_version + int row_group_size + int64_t data_page_size + FileEncryptionProperties encryption_properties + int64_t write_batch_size + int64_t dictionary_pagesize_limit + object store_schema + object store_decimal_as_integer + + def __cinit__(self, where, Schema schema not None, use_dictionary=None, + compression=None, version=None, + write_statistics=None, + MemoryPool memory_pool=None, + use_deprecated_int96_timestamps=False, + coerce_timestamps=None, + data_page_size=None, + allow_truncated_timestamps=False, + compression_level=None, + use_byte_stream_split=False, + column_encoding=None, + writer_engine_version=None, + data_page_version=None, + use_compliant_nested_type=True, + encryption_properties=None, + write_batch_size=None, + dictionary_pagesize_limit=None, + store_schema=True, + write_page_index=False, + write_page_checksum=False, + sorting_columns=None, + store_decimal_as_integer=False): + cdef: + shared_ptr[WriterProperties] properties + shared_ptr[ArrowWriterProperties] arrow_properties + c_string c_where + CMemoryPool* pool + + try: + where = _stringify_path(where) + except TypeError: + get_writer(where, &self.sink) + self.own_sink = False + else: + c_where = tobytes(where) + with nogil: + self.sink = GetResultValue(FileOutputStream.Open(c_where)) + self.own_sink = True + + properties = _create_writer_properties( + use_dictionary=use_dictionary, + compression=compression, + version=version, + write_statistics=write_statistics, + data_page_size=data_page_size, + compression_level=compression_level, + use_byte_stream_split=use_byte_stream_split, + column_encoding=column_encoding, + data_page_version=data_page_version, + encryption_properties=encryption_properties, + write_batch_size=write_batch_size, + dictionary_pagesize_limit=dictionary_pagesize_limit, + write_page_index=write_page_index, + write_page_checksum=write_page_checksum, + 
sorting_columns=sorting_columns, + store_decimal_as_integer=store_decimal_as_integer, + ) + arrow_properties = _create_arrow_writer_properties( + use_deprecated_int96_timestamps=use_deprecated_int96_timestamps, + coerce_timestamps=coerce_timestamps, + allow_truncated_timestamps=allow_truncated_timestamps, + writer_engine_version=writer_engine_version, + use_compliant_nested_type=use_compliant_nested_type, + store_schema=store_schema, + ) + + pool = maybe_unbox_memory_pool(memory_pool) + with nogil: + self.writer = move(GetResultValue( + FileWriter.Open(deref(schema.schema), pool, + self.sink, properties, arrow_properties))) + + def close(self): + with nogil: + check_status(self.writer.get().Close()) + if self.own_sink: + check_status(self.sink.get().Close()) + + def write_table(self, Table table, row_group_size=None): + cdef: + CTable* ctable = table.table + int64_t c_row_group_size + + if row_group_size is None or row_group_size == -1: + c_row_group_size = min(ctable.num_rows(), _DEFAULT_ROW_GROUP_SIZE) + elif row_group_size == 0: + raise ValueError('Row group size cannot be 0') + else: + c_row_group_size = row_group_size + + with nogil: + check_status(self.writer.get() + .WriteTable(deref(ctable), c_row_group_size)) + + def add_key_value_metadata(self, key_value_metadata): + cdef: + shared_ptr[const CKeyValueMetadata] c_metadata + + c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(key_value_metadata)) + with nogil: + check_status(self.writer.get() + .AddKeyValueMetadata(c_metadata)) + + @property + def metadata(self): + cdef: + shared_ptr[CFileMetaData] metadata + FileMetaData result + with nogil: + metadata = self.writer.get().metadata() + if metadata: + result = FileMetaData() + result.init(metadata) + return result + raise RuntimeError( + 'file metadata is only available after writer close') diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..476ec87c1441ad6cd6c87af79a8c43d1a9736b6d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so differ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_substrait.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_substrait.pyx new file mode 100644 index 0000000000000000000000000000000000000000..067cb5f91681bacf430945bc5aec2bb04e0cb01b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_substrait.pyx @@ -0,0 +1,349 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
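The _parquet.pyx classes above (FileMetaData, RowGroupMetaData, ParquetSchema, ParquetReader, ParquetWriter) are normally driven through the public pyarrow.parquet module rather than instantiated directly. A minimal sketch of how the metadata objects defined above surface to users, assuming only the documented pyarrow.parquet API; this is an illustrative example, not part of the vendored file:

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"x": [1, 2, 3], "y": ["a", "b", "c"]})
pq.write_table(table, "example.parquet")   # uses ParquetWriter internally

pf = pq.ParquetFile("example.parquet")     # wraps the ParquetReader above
meta = pf.metadata                         # FileMetaData
print(meta.num_rows, meta.num_row_groups, meta.format_version)

rg = meta.row_group(0)                     # RowGroupMetaData
print(rg.num_columns, rg.total_byte_size)
print(pf.schema_arrow)                     # effective Arrow schema
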
+ +# cython: language_level = 3 +from cython.operator cimport dereference as deref +from libcpp.vector cimport vector as std_vector + +from pyarrow import Buffer, py_buffer +from pyarrow._compute cimport Expression +from pyarrow.lib import frombytes, tobytes +from pyarrow.lib cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_substrait cimport * + + +# TODO GH-37235: Fix exception handling +cdef CDeclaration _create_named_table_provider( + dict named_args, const std_vector[c_string]& names, const CSchema& schema +) noexcept: + cdef: + c_string c_name + shared_ptr[CTable] c_in_table + shared_ptr[CTableSourceNodeOptions] c_tablesourceopts + shared_ptr[CExecNodeOptions] c_input_node_opts + vector[CDeclaration.Input] no_c_inputs + + py_names = [] + for i in range(names.size()): + c_name = names[i] + py_names.append(frombytes(c_name)) + py_schema = pyarrow_wrap_schema(make_shared[CSchema](schema)) + + py_table = named_args["provider"](py_names, py_schema) + c_in_table = pyarrow_unwrap_table(py_table) + c_tablesourceopts = make_shared[CTableSourceNodeOptions](c_in_table) + c_input_node_opts = static_pointer_cast[CExecNodeOptions, CTableSourceNodeOptions]( + c_tablesourceopts) + return CDeclaration(tobytes("table_source"), + no_c_inputs, c_input_node_opts) + + +def run_query(plan, *, table_provider=None, use_threads=True): + """ + Execute a Substrait plan and read the results as a RecordBatchReader. + + Parameters + ---------- + plan : Union[Buffer, bytes] + The serialized Substrait plan to execute. + table_provider : object (optional) + A function to resolve any NamedTable relation to a table. + The function will receive two arguments which will be a list + of strings representing the table name and a pyarrow.Schema representing + the expected schema and should return a pyarrow.Table. + use_threads : bool, default True + If True then multiple threads will be used to run the query. If False then + all CPU intensive work will be done on the calling thread. + + Returns + ------- + RecordBatchReader + A reader containing the result of the executed query + + Examples + -------- + >>> import pyarrow as pa + >>> from pyarrow.lib import tobytes + >>> import pyarrow.substrait as substrait + >>> test_table_1 = pa.Table.from_pydict({"x": [1, 2, 3]}) + >>> test_table_2 = pa.Table.from_pydict({"x": [4, 5, 6]}) + >>> def table_provider(names, schema): + ... if not names: + ... raise Exception("No names provided") + ... elif names[0] == "t1": + ... return test_table_1 + ... elif names[1] == "t2": + ... return test_table_2 + ... else: + ... raise Exception("Unrecognized table name") + ... + >>> substrait_query = ''' + ... { + ... "relations": [ + ... {"rel": { + ... "read": { + ... "base_schema": { + ... "struct": { + ... "types": [ + ... {"i64": {}} + ... ] + ... }, + ... "names": [ + ... "x" + ... ] + ... }, + ... "namedTable": { + ... "names": ["t1"] + ... } + ... } + ... }} + ... ] + ... } + ... 
''' + >>> buf = pa._substrait._parse_json_plan(tobytes(substrait_query)) + >>> reader = pa.substrait.run_query(buf, table_provider=table_provider) + >>> reader.read_all() + pyarrow.Table + x: int64 + ---- + x: [[1,2,3]] + """ + + cdef: + CResult[shared_ptr[CRecordBatchReader]] c_res_reader + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader reader + shared_ptr[CBuffer] c_buf_plan + CConversionOptions c_conversion_options + c_bool c_use_threads + + c_use_threads = use_threads + if isinstance(plan, bytes): + c_buf_plan = pyarrow_unwrap_buffer(py_buffer(plan)) + elif isinstance(plan, Buffer): + c_buf_plan = pyarrow_unwrap_buffer(plan) + else: + raise TypeError( + f"Expected 'pyarrow.Buffer' or bytes, got '{type(plan)}'") + + if table_provider is not None: + named_table_args = { + "provider": table_provider + } + c_conversion_options.named_table_provider = BindFunction[CNamedTableProvider]( + &_create_named_table_provider, named_table_args) + + with nogil: + c_res_reader = ExecuteSerializedPlan( + deref(c_buf_plan), default_extension_id_registry(), + GetFunctionRegistry(), c_conversion_options, c_use_threads) + + c_reader = GetResultValue(c_res_reader) + + reader = RecordBatchReader.__new__(RecordBatchReader) + reader.reader = c_reader + return reader + + +def _parse_json_plan(plan): + """ + Parse a JSON plan into equivalent serialized Protobuf. + + Parameters + ---------- + plan : bytes + Substrait plan in JSON. + + Returns + ------- + Buffer + A buffer containing the serialized Protobuf plan. + """ + + cdef: + CResult[shared_ptr[CBuffer]] c_res_buffer + c_string c_str_plan + shared_ptr[CBuffer] c_buf_plan + + c_str_plan = plan + c_res_buffer = SerializeJsonPlan(c_str_plan) + with nogil: + c_buf_plan = GetResultValue(c_res_buffer) + return pyarrow_wrap_buffer(c_buf_plan) + + +def serialize_expressions(exprs, names, schema, *, allow_arrow_extensions=False): + """ + Serialize a collection of expressions into Substrait + + Substrait expressions must be bound to a schema. For example, + the Substrait expression ``a:i32 + b:i32`` is different from the + Substrait expression ``a:i64 + b:i64``. Pyarrow expressions are + typically unbound. For example, both of the above expressions + would be represented as ``a + b`` in pyarrow. + + This means a schema must be provided when serializing an expression. + It also means that the serialization may fail if a matching function + call cannot be found for the expression. + + Parameters + ---------- + exprs : list of Expression + The expressions to serialize + names : list of str + Names for the expressions + schema : Schema + The schema the expressions will be bound to + allow_arrow_extensions : bool, default False + If False then only functions that are part of the core Substrait function + definitions will be allowed. Set this to True to allow pyarrow-specific functions + and user defined functions but the result may not be accepted by other + compute libraries. 
+ + Returns + ------- + Buffer + An ExtendedExpression message containing the serialized expressions + """ + cdef: + CResult[shared_ptr[CBuffer]] c_res_buffer + shared_ptr[CBuffer] c_buffer + CNamedExpression c_named_expr + CBoundExpressions c_bound_exprs + CConversionOptions c_conversion_options + + if len(exprs) != len(names): + raise ValueError("exprs and names need to have the same length") + for expr, name in zip(exprs, names): + if not isinstance(expr, Expression): + raise TypeError(f"Expected Expression, got '{type(expr)}' in exprs") + if not isinstance(name, str): + raise TypeError(f"Expected str, got '{type(name)}' in names") + c_named_expr.expression = ( expr).unwrap() + c_named_expr.name = tobytes( name) + c_bound_exprs.named_expressions.push_back(c_named_expr) + + c_bound_exprs.schema = ( schema).sp_schema + + c_conversion_options.allow_arrow_extensions = allow_arrow_extensions + + with nogil: + c_res_buffer = SerializeExpressions(c_bound_exprs, c_conversion_options) + c_buffer = GetResultValue(c_res_buffer) + return pyarrow_wrap_buffer(c_buffer) + + +cdef class BoundExpressions(_Weakrefable): + """ + A collection of named expressions and the schema they are bound to + + This is equivalent to the Substrait ExtendedExpression message + """ + + cdef: + CBoundExpressions c_bound_exprs + + def __init__(self): + msg = 'BoundExpressions is an abstract class thus cannot be initialized.' + raise TypeError(msg) + + cdef void init(self, CBoundExpressions bound_expressions): + self.c_bound_exprs = bound_expressions + + @property + def schema(self): + """ + The common schema that all expressions are bound to + """ + return pyarrow_wrap_schema(self.c_bound_exprs.schema) + + @property + def expressions(self): + """ + A dict from expression name to expression + """ + expr_dict = {} + for named_expr in self.c_bound_exprs.named_expressions: + name = frombytes(named_expr.name) + expr = Expression.wrap(named_expr.expression) + expr_dict[name] = expr + return expr_dict + + @staticmethod + cdef wrap(const CBoundExpressions& bound_expressions): + cdef BoundExpressions self = BoundExpressions.__new__(BoundExpressions) + self.init(bound_expressions) + return self + + +def deserialize_expressions(buf): + """ + Deserialize an ExtendedExpression Substrait message into a BoundExpressions object + + Parameters + ---------- + buf : Buffer or bytes + The message to deserialize + + Returns + ------- + BoundExpressions + The deserialized expressions, their names, and the bound schema + """ + cdef: + shared_ptr[CBuffer] c_buffer + CResult[CBoundExpressions] c_res_bound_exprs + CBoundExpressions c_bound_exprs + + if isinstance(buf, bytes): + c_buffer = pyarrow_unwrap_buffer(py_buffer(buf)) + elif isinstance(buf, Buffer): + c_buffer = pyarrow_unwrap_buffer(buf) + else: + raise TypeError( + f"Expected 'pyarrow.Buffer' or bytes, got '{type(buf)}'") + + with nogil: + c_res_bound_exprs = DeserializeExpressions(deref(c_buffer)) + c_bound_exprs = GetResultValue(c_res_bound_exprs) + + return BoundExpressions.wrap(c_bound_exprs) + + +def get_supported_functions(): + """ + Get a list of Substrait functions that the underlying + engine currently supports. 
+ + Returns + ------- + list[str] + A list of function ids encoded as '{uri}#{name}' + """ + + cdef: + ExtensionIdRegistry* c_id_registry + std_vector[c_string] c_ids + + c_id_registry = default_extension_id_registry() + c_ids = c_id_registry.GetSupportedSubstraitFunctions() + + functions_list = [] + for c_id in c_ids: + functions_list.append(frombytes(c_id)) + return functions_list diff --git a/parrot/lib/python3.10/site-packages/pyarrow/benchmark.py b/parrot/lib/python3.10/site-packages/pyarrow/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..25ee1141f08d1f4ac19ab7ade92eafbf786d685a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/benchmark.py @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# flake8: noqa + + +from pyarrow.lib import benchmark_PandasObjectIsNull diff --git a/parrot/lib/python3.10/site-packages/pyarrow/builder.pxi b/parrot/lib/python3.10/site-packages/pyarrow/builder.pxi new file mode 100644 index 0000000000000000000000000000000000000000..2af39e2c589e683a1f8491ddb3909d483609ceff --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/builder.pxi @@ -0,0 +1,148 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +cdef class StringBuilder(_Weakrefable): + """ + Builder class for UTF8 strings. + + This class exposes facilities for incrementally adding string values and + building the null bitmap for a pyarrow.Array (type='string'). + """ + cdef: + unique_ptr[CStringBuilder] builder + + def __cinit__(self, MemoryPool memory_pool=None): + cdef CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + self.builder.reset(new CStringBuilder(pool)) + + def append(self, value): + """ + Append a single value to the builder. + + The value can either be a string/bytes object or a null value + (np.nan or None). + + Parameters + ---------- + value : string/bytes or np.nan/None + The value to append to the string array builder. 
+ """ + if value is None or value is np.nan: + self.builder.get().AppendNull() + elif isinstance(value, (bytes, str)): + self.builder.get().Append(tobytes(value)) + else: + raise TypeError('StringBuilder only accepts string objects') + + def append_values(self, values): + """ + Append all the values from an iterable. + + Parameters + ---------- + values : iterable of string/bytes or np.nan/None values + The values to append to the string array builder. + """ + for value in values: + self.append(value) + + def finish(self): + """ + Return result of builder as an Array object; also resets the builder. + + Returns + ------- + array : pyarrow.Array + """ + cdef shared_ptr[CArray] out + with nogil: + self.builder.get().Finish(&out) + return pyarrow_wrap_array(out) + + @property + def null_count(self): + return self.builder.get().null_count() + + def __len__(self): + return self.builder.get().length() + + +cdef class StringViewBuilder(_Weakrefable): + """ + Builder class for UTF8 string views. + + This class exposes facilities for incrementally adding string values and + building the null bitmap for a pyarrow.Array (type='string_view'). + """ + cdef: + unique_ptr[CStringViewBuilder] builder + + def __cinit__(self, MemoryPool memory_pool=None): + cdef CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + self.builder.reset(new CStringViewBuilder(pool)) + + def append(self, value): + """ + Append a single value to the builder. + + The value can either be a string/bytes object or a null value + (np.nan or None). + + Parameters + ---------- + value : string/bytes or np.nan/None + The value to append to the string array builder. + """ + if value is None or value is np.nan: + self.builder.get().AppendNull() + elif isinstance(value, (bytes, str)): + self.builder.get().Append(tobytes(value)) + else: + raise TypeError('StringViewBuilder only accepts string objects') + + def append_values(self, values): + """ + Append all the values from an iterable. + + Parameters + ---------- + values : iterable of string/bytes or np.nan/None values + The values to append to the string array builder. + """ + for value in values: + self.append(value) + + def finish(self): + """ + Return result of builder as an Array object; also resets the builder. + + Returns + ------- + array : pyarrow.Array + """ + cdef shared_ptr[CArray] out + with nogil: + self.builder.get().Finish(&out) + return pyarrow_wrap_array(out) + + @property + def null_count(self): + return self.builder.get().null_count() + + def __len__(self): + return self.builder.get().length() diff --git a/parrot/lib/python3.10/site-packages/pyarrow/cffi.py b/parrot/lib/python3.10/site-packages/pyarrow/cffi.py new file mode 100644 index 0000000000000000000000000000000000000000..1da1a916914049513b89c68bd60f08ba32b67edb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/cffi.py @@ -0,0 +1,81 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import + +import cffi + +c_source = """ + struct ArrowSchema { + // Array type description + const char* format; + const char* name; + const char* metadata; + int64_t flags; + int64_t n_children; + struct ArrowSchema** children; + struct ArrowSchema* dictionary; + + // Release callback + void (*release)(struct ArrowSchema*); + // Opaque producer-specific data + void* private_data; + }; + + struct ArrowArray { + // Array data description + int64_t length; + int64_t null_count; + int64_t offset; + int64_t n_buffers; + int64_t n_children; + const void** buffers; + struct ArrowArray** children; + struct ArrowArray* dictionary; + + // Release callback + void (*release)(struct ArrowArray*); + // Opaque producer-specific data + void* private_data; + }; + + struct ArrowArrayStream { + int (*get_schema)(struct ArrowArrayStream*, struct ArrowSchema* out); + int (*get_next)(struct ArrowArrayStream*, struct ArrowArray* out); + + const char* (*get_last_error)(struct ArrowArrayStream*); + + // Release callback + void (*release)(struct ArrowArrayStream*); + // Opaque producer-specific data + void* private_data; + }; + + typedef int32_t ArrowDeviceType; + + struct ArrowDeviceArray { + struct ArrowArray array; + int64_t device_id; + ArrowDeviceType device_type; + void* sync_event; + int64_t reserved[3]; + }; + """ + +# TODO use out-of-line mode for faster import and avoid C parsing +ffi = cffi.FFI() +ffi.cdef(c_source) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/config.pxi b/parrot/lib/python3.10/site-packages/pyarrow/config.pxi new file mode 100644 index 0000000000000000000000000000000000000000..cf751b810cad1503356b69f4c2abeed92fb8d19d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/config.pxi @@ -0,0 +1,95 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
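The struct declarations in cffi.py above mirror the Arrow C Data Interface. A minimal round-trip sketch of how they are typically used follows; it relies on the underscore-prefixed _export_to_c/_import_from_c hooks, which are internal pyarrow entry points and may change between releases. Illustrative only, not part of the vendored files:

import pyarrow as pa
from pyarrow.cffi import ffi

# Allocate C-level struct holders and pass their addresses as integers.
c_schema = ffi.new("struct ArrowSchema*")
c_array = ffi.new("struct ArrowArray*")
schema_ptr = int(ffi.cast("uintptr_t", c_schema))
array_ptr = int(ffi.cast("uintptr_t", c_array))

arr = pa.array([1, 2, 3], type=pa.int64())
arr._export_to_c(array_ptr, schema_ptr)                        # producer side
roundtripped = pa.Array._import_from_c(array_ptr, schema_ptr)  # consumer side
assert roundtripped.equals(arr)
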
+ +from pyarrow.includes.libarrow cimport GetBuildInfo + +from collections import namedtuple +import os + + +VersionInfo = namedtuple('VersionInfo', ('major', 'minor', 'patch')) + +BuildInfo = namedtuple( + 'BuildInfo', + ('version', 'version_info', 'so_version', 'full_so_version', + 'compiler_id', 'compiler_version', 'compiler_flags', + 'git_id', 'git_description', 'package_kind', 'build_type')) + +RuntimeInfo = namedtuple('RuntimeInfo', + ('simd_level', 'detected_simd_level')) + +cdef _build_info(): + cdef: + const CBuildInfo* c_info + + c_info = &GetBuildInfo() + + return BuildInfo(version=frombytes(c_info.version_string), + version_info=VersionInfo(c_info.version_major, + c_info.version_minor, + c_info.version_patch), + so_version=frombytes(c_info.so_version), + full_so_version=frombytes(c_info.full_so_version), + compiler_id=frombytes(c_info.compiler_id), + compiler_version=frombytes(c_info.compiler_version), + compiler_flags=frombytes(c_info.compiler_flags), + git_id=frombytes(c_info.git_id), + git_description=frombytes(c_info.git_description), + package_kind=frombytes(c_info.package_kind), + build_type=frombytes(c_info.build_type).lower(), + ) + + +cpp_build_info = _build_info() +cpp_version = cpp_build_info.version +cpp_version_info = cpp_build_info.version_info + + +def runtime_info(): + """ + Get runtime information. + + Returns + ------- + info : pyarrow.RuntimeInfo + """ + cdef: + CRuntimeInfo c_info + + c_info = GetRuntimeInfo() + + return RuntimeInfo( + simd_level=frombytes(c_info.simd_level), + detected_simd_level=frombytes(c_info.detected_simd_level)) + + +def set_timezone_db_path(path): + """ + Configure the path to text timezone database on Windows. + + Parameters + ---------- + path : str + Path to text timezone database. + """ + cdef: + CGlobalOptions options + + if path is not None: + options.timezone_db_path = tobytes(path) + + check_status(Initialize(options)) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/conftest.py b/parrot/lib/python3.10/site-packages/pyarrow/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..29c850c142da1cc18042ebe5029abf2c587e6ca8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/conftest.py @@ -0,0 +1,375 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
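The config.pxi helpers above expose the Arrow C++ build and runtime information as plain namedtuples. A small illustration of reading them through the top-level pyarrow namespace (illustrative only, not part of the vendored files):

import pyarrow as pa

build = pa.cpp_build_info            # BuildInfo namedtuple from config.pxi
print(build.version, build.build_type, build.compiler_id)
print(pa.cpp_version_info.major)     # VersionInfo(major, minor, patch)

rt = pa.runtime_info()               # RuntimeInfo namedtuple
print(rt.simd_level, rt.detected_simd_level)
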
+ +import pytest + +import os +import pyarrow as pa +from pyarrow import Codec +from pyarrow import fs +from pyarrow.lib import is_threading_enabled +from pyarrow.tests.util import windows_has_tzdata +import sys + +import numpy as np + +groups = [ + 'acero', + 'azure', + 'brotli', + 'bz2', + 'cython', + 'dataset', + 'hypothesis', + 'fastparquet', + 'flight', + 'gandiva', + 'gcs', + 'gdb', + 'gzip', + 'hdfs', + 'large_memory', + 'lz4', + 'memory_leak', + 'nopandas', + 'orc', + 'pandas', + 'parquet', + 'parquet_encryption', + 'processes', + 'requires_testing_data', + 's3', + 'slow', + 'snappy', + 'sockets', + 'substrait', + 'threading', + 'timezone_data', + 'zstd', +] + +defaults = { + 'acero': False, + 'azure': False, + 'brotli': Codec.is_available('brotli'), + 'bz2': Codec.is_available('bz2'), + 'cython': False, + 'dataset': False, + 'fastparquet': False, + 'flight': False, + 'gandiva': False, + 'gcs': False, + 'gdb': True, + 'gzip': Codec.is_available('gzip'), + 'hdfs': False, + 'hypothesis': False, + 'large_memory': False, + 'lz4': Codec.is_available('lz4'), + 'memory_leak': False, + 'nopandas': False, + 'orc': False, + 'pandas': False, + 'parquet': False, + 'parquet_encryption': False, + 'processes': True, + 'requires_testing_data': True, + 's3': False, + 'slow': False, + 'snappy': Codec.is_available('snappy'), + 'sockets': True, + 'substrait': False, + 'threading': is_threading_enabled(), + 'timezone_data': True, + 'zstd': Codec.is_available('zstd'), +} + +if sys.platform == "emscripten": + # Emscripten doesn't support subprocess, + # multiprocessing, gdb or socket based + # networking + defaults['gdb'] = False + defaults['processes'] = False + defaults['sockets'] = False + +if sys.platform == "win32": + defaults['timezone_data'] = windows_has_tzdata() +elif sys.platform == "emscripten": + defaults['timezone_data'] = os.path.exists("/usr/share/zoneinfo") + +try: + import cython # noqa + defaults['cython'] = True +except ImportError: + pass + +try: + import fastparquet # noqa + defaults['fastparquet'] = True +except ImportError: + pass + +try: + import pyarrow.gandiva # noqa + defaults['gandiva'] = True +except ImportError: + pass + +try: + import pyarrow.acero # noqa + defaults['acero'] = True +except ImportError: + pass + +try: + import pyarrow.dataset # noqa + defaults['dataset'] = True +except ImportError: + pass + +try: + import pyarrow.orc # noqa + if sys.platform == "win32": + defaults['orc'] = True + else: + # orc tests on non-Windows platforms only work + # if timezone data exists, so skip them if + # not. 
+ defaults['orc'] = defaults['timezone_data'] +except ImportError: + pass + +try: + import pandas # noqa + defaults['pandas'] = True +except ImportError: + defaults['nopandas'] = True + +try: + import pyarrow.parquet # noqa + defaults['parquet'] = True +except ImportError: + pass + +try: + import pyarrow.parquet.encryption # noqa + defaults['parquet_encryption'] = True +except ImportError: + pass + +try: + import pyarrow.flight # noqa + defaults['flight'] = True +except ImportError: + pass + +try: + from pyarrow.fs import AzureFileSystem # noqa + defaults['azure'] = True +except ImportError: + pass + +try: + from pyarrow.fs import GcsFileSystem # noqa + defaults['gcs'] = True +except ImportError: + pass + +try: + from pyarrow.fs import S3FileSystem # noqa + defaults['s3'] = True +except ImportError: + pass + +try: + from pyarrow.fs import HadoopFileSystem # noqa + defaults['hdfs'] = True +except ImportError: + pass + +try: + import pyarrow.substrait # noqa + defaults['substrait'] = True +except ImportError: + pass + + +# Doctest should ignore files for the modules that are not built +def pytest_ignore_collect(path, config): + if config.option.doctestmodules: + # don't try to run doctests on the /tests directory + if "/pyarrow/tests/" in str(path): + return True + + doctest_groups = [ + 'dataset', + 'orc', + 'parquet', + 'flight', + 'substrait', + ] + + # handle cuda, flight, etc + for group in doctest_groups: + if 'pyarrow/{}'.format(group) in str(path): + if not defaults[group]: + return True + + if 'pyarrow/parquet/encryption' in str(path): + if not defaults['parquet_encryption']: + return True + + if 'pyarrow/cuda' in str(path): + try: + import pyarrow.cuda # noqa + return False + except ImportError: + return True + + if 'pyarrow/fs' in str(path): + try: + from pyarrow.fs import S3FileSystem # noqa + return False + except ImportError: + return True + + if getattr(config.option, "doctest_cython", False): + if "/pyarrow/tests/" in str(path): + return True + if "/pyarrow/_parquet_encryption" in str(path): + return True + + return False + + +# Save output files from doctest examples into temp dir +@pytest.fixture(autouse=True) +def _docdir(request): + + # Trigger ONLY for the doctests + doctest_m = request.config.option.doctestmodules + doctest_c = getattr(request.config.option, "doctest_cython", False) + + if doctest_m or doctest_c: + + # Get the fixture dynamically by its name. + tmpdir = request.getfixturevalue('tmpdir') + + # Chdir only for the duration of the test. + with tmpdir.as_cwd(): + yield + + else: + yield + + +# Define doctest_namespace for fs module docstring import +@pytest.fixture(autouse=True) +def add_fs(doctest_namespace, request, tmp_path): + + # Trigger ONLY for the doctests + doctest_m = request.config.option.doctestmodules + doctest_c = getattr(request.config.option, "doctest_cython", False) + + if doctest_m or doctest_c: + # fs import + doctest_namespace["fs"] = fs + + # Creation of an object and file with data + local = fs.LocalFileSystem() + path = tmp_path / 'pyarrow-fs-example.dat' + with local.open_output_stream(str(path)) as stream: + stream.write(b'data') + doctest_namespace["local"] = local + doctest_namespace["local_path"] = str(tmp_path) + doctest_namespace["path"] = str(path) + yield + + +# Define udf fixture for test_udf.py and test_substrait.py +@pytest.fixture(scope="session") +def unary_func_fixture(): + """ + Register a unary scalar function. 
+ """ + from pyarrow import compute as pc + + def unary_function(ctx, x): + return pc.call_function("add", [x, 1], + memory_pool=ctx.memory_pool) + func_name = "y=x+1" + unary_doc = {"summary": "add function", + "description": "test add function"} + pc.register_scalar_function(unary_function, + func_name, + unary_doc, + {"array": pa.int64()}, + pa.int64()) + return unary_function, func_name + + +@pytest.fixture(scope="session") +def unary_agg_func_fixture(): + """ + Register a unary aggregate function (mean) + """ + from pyarrow import compute as pc + + def func(ctx, x): + return pa.scalar(np.nanmean(x)) + + func_name = "mean_udf" + func_doc = {"summary": "y=avg(x)", + "description": "find mean of x"} + + pc.register_aggregate_function(func, + func_name, + func_doc, + { + "x": pa.float64(), + }, + pa.float64() + ) + return func, func_name + + +@pytest.fixture(scope="session") +def varargs_agg_func_fixture(): + """ + Register a unary aggregate function + """ + from pyarrow import compute as pc + + def func(ctx, *args): + sum = 0.0 + for arg in args: + sum += np.nanmean(arg) + return pa.scalar(sum) + + func_name = "sum_mean" + func_doc = {"summary": "Varargs aggregate", + "description": "Varargs aggregate"} + + pc.register_aggregate_function(func, + func_name, + func_doc, + { + "x": pa.int64(), + "y": pa.float64() + }, + pa.float64() + ) + return func, func_name diff --git a/parrot/lib/python3.10/site-packages/pyarrow/device.pxi b/parrot/lib/python3.10/site-packages/pyarrow/device.pxi new file mode 100644 index 0000000000000000000000000000000000000000..6e6034752085a2491def23c9322db95f115cfca0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/device.pxi @@ -0,0 +1,162 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ +# cython: embedsignature = True + + +cpdef enum DeviceAllocationType: + CPU = CDeviceAllocationType_kCPU + CUDA = CDeviceAllocationType_kCUDA + CUDA_HOST = CDeviceAllocationType_kCUDA_HOST + OPENCL = CDeviceAllocationType_kOPENCL + VULKAN = CDeviceAllocationType_kVULKAN + METAL = CDeviceAllocationType_kMETAL + VPI = CDeviceAllocationType_kVPI + ROCM = CDeviceAllocationType_kROCM + ROCM_HOST = CDeviceAllocationType_kROCM_HOST + EXT_DEV = CDeviceAllocationType_kEXT_DEV + CUDA_MANAGED = CDeviceAllocationType_kCUDA_MANAGED + ONEAPI = CDeviceAllocationType_kONEAPI + WEBGPU = CDeviceAllocationType_kWEBGPU + HEXAGON = CDeviceAllocationType_kHEXAGON + + +cdef object _wrap_device_allocation_type(CDeviceAllocationType device_type): + return DeviceAllocationType( device_type) + + +cdef class Device(_Weakrefable): + """ + Abstract interface for hardware devices + + This object represents a device with access to some memory spaces. 
+    When handling a Buffer or raw memory address, it allows deciding in which
+    context the raw memory address should be interpreted
+    (e.g. CPU-accessible memory, or embedded memory on some particular GPU).
+    """
+
+    def __init__(self):
+        raise TypeError("Do not call Device's constructor directly, "
+                        "use the device attribute of the MemoryManager instead.")
+
+    cdef void init(self, const shared_ptr[CDevice]& device):
+        self.device = device
+
+    @staticmethod
+    cdef wrap(const shared_ptr[CDevice]& device):
+        cdef Device self = Device.__new__(Device)
+        self.init(device)
+        return self
+
+    def __eq__(self, other):
+        if not isinstance(other, Device):
+            return False
+        return self.device.get().Equals(deref((<Device>other).device.get()))
+
+    def __repr__(self):
+        return "<pyarrow.Device: {}>".format(frombytes(self.device.get().ToString()))
+
+    @property
+    def type_name(self):
+        """
+        A shorthand for this device's type.
+        """
+        return frombytes(self.device.get().type_name())
+
+    @property
+    def device_id(self):
+        """
+        A device ID to identify this device if there are multiple of this type.
+
+        If there is no "device_id" equivalent (such as for the main CPU device on
+        non-numa systems) returns -1.
+        """
+        return self.device.get().device_id()
+
+    @property
+    def is_cpu(self):
+        """
+        Whether this device is the main CPU device.
+
+        This shorthand method is very useful when deciding whether a memory address
+        is CPU-accessible.
+        """
+        return self.device.get().is_cpu()
+
+    @property
+    def device_type(self):
+        """
+        Return the DeviceAllocationType of this device.
+        """
+        return _wrap_device_allocation_type(self.device.get().device_type())
+
+
+cdef class MemoryManager(_Weakrefable):
+    """
+    An object that provides memory management primitives.
+
+    A MemoryManager is always tied to a particular Device instance.
+    It can also have additional parameters (such as a MemoryPool to
+    allocate CPU memory).
+
+    """
+
+    def __init__(self):
+        raise TypeError("Do not call MemoryManager's constructor directly, "
+                        "use pyarrow.default_cpu_memory_manager() instead.")
+
+    cdef void init(self, const shared_ptr[CMemoryManager]& mm):
+        self.memory_manager = mm
+
+    @staticmethod
+    cdef wrap(const shared_ptr[CMemoryManager]& mm):
+        cdef MemoryManager self = MemoryManager.__new__(MemoryManager)
+        self.init(mm)
+        return self
+
+    def __repr__(self):
+        return "<pyarrow.MemoryManager device: {}>".format(
+            frombytes(self.memory_manager.get().device().get().ToString())
+        )
+
+    @property
+    def device(self):
+        """
+        The device this MemoryManager is tied to.
+        """
+        return Device.wrap(self.memory_manager.get().device())
+
+    @property
+    def is_cpu(self):
+        """
+        Whether this MemoryManager is tied to the main CPU device.
+
+        This shorthand method is very useful when deciding whether a memory
+        address is CPU-accessible.
+        """
+        return self.memory_manager.get().is_cpu()
+
+
+def default_cpu_memory_manager():
+    """
+    Return the default CPU MemoryManager instance.
+
+    The returned singleton instance uses the default MemoryPool.
+    """
+    return MemoryManager.wrap(c_default_cpu_memory_manager())
diff --git a/parrot/lib/python3.10/site-packages/pyarrow/flight.py b/parrot/lib/python3.10/site-packages/pyarrow/flight.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1836907c6744161c86f32e873316923c60b4226
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/pyarrow/flight.py
@@ -0,0 +1,69 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +try: + from pyarrow._flight import ( # noqa:F401 + connect, + Action, + ActionType, + BasicAuth, + CallInfo, + CertKeyPair, + ClientAuthHandler, + ClientMiddleware, + ClientMiddlewareFactory, + DescriptorType, + FlightCallOptions, + FlightCancelledError, + FlightClient, + FlightDataStream, + FlightDescriptor, + FlightEndpoint, + FlightError, + FlightInfo, + FlightInternalError, + FlightMetadataReader, + FlightMetadataWriter, + FlightMethod, + FlightServerBase, + FlightServerError, + FlightStreamChunk, + FlightStreamReader, + FlightStreamWriter, + FlightTimedOutError, + FlightUnauthenticatedError, + FlightUnauthorizedError, + FlightUnavailableError, + FlightWriteSizeExceededError, + GeneratorStream, + Location, + MetadataRecordBatchReader, + MetadataRecordBatchWriter, + RecordBatchStream, + Result, + SchemaResult, + ServerAuthHandler, + ServerCallContext, + ServerMiddleware, + ServerMiddlewareFactory, + Ticket, + TracingServerMiddlewareFactory, + ) +except ImportError as exc: + raise ImportError( + f"The pyarrow installation is not built with support for 'flight' ({str(exc)})" + ) from None diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/__init__.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/common.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/common.pxd new file mode 100644 index 0000000000000000000000000000000000000000..044dd0333f323367dcba32a8fe013eccd0986e08 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/common.pxd @@ -0,0 +1,175 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
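Given those re-exports, a minimal Flight exchange looks roughly like the following sketch (assumes a Flight-enabled build; EchoServer is an illustrative name, not part of the API):

    import pyarrow as pa
    import pyarrow.flight as flight

    class EchoServer(flight.FlightServerBase):
        def do_get(self, context, ticket):
            # Serve a fixed table for any ticket.
            return flight.RecordBatchStream(pa.table({"x": [1, 2, 3]}))

    server = EchoServer("grpc://127.0.0.1:0")  # port 0 picks a free port
    client = flight.connect(f"grpc://127.0.0.1:{server.port}")
    print(client.do_get(flight.Ticket(b"")).read_all())
    server.shutdown()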
+
+# distutils: language = c++
+
+from libc.stdint cimport *
+from libcpp cimport bool as c_bool, nullptr
+from libcpp.functional cimport function
+from libcpp.memory cimport shared_ptr, unique_ptr, make_shared
+from libcpp.string cimport string as c_string
+from libcpp.utility cimport pair
+from libcpp.vector cimport vector
+from libcpp.unordered_map cimport unordered_map
+from libcpp.unordered_set cimport unordered_set
+
+from cpython cimport PyObject
+from cpython.datetime cimport PyDateTime_DateTime
+cimport cpython
+
+
+cdef extern from * namespace "std" nogil:
+    cdef shared_ptr[T] static_pointer_cast[T, U](shared_ptr[U])
+
+
+cdef extern from "<optional>" namespace "std" nogil:
+    cdef cppclass optional[T]:
+        ctypedef T value_type
+        optional()
+        optional(nullopt_t)
+        optional(optional&) except +
+        optional(T&) except +
+        c_bool has_value()
+        T& value()
+        T& value_or[U](U& default_value)
+        void swap(optional&)
+        void reset()
+        T& emplace(...)
+        T& operator*()
+        # T* operator->() # Not Supported
+        optional& operator=(optional&)
+        optional& operator=[U](U&)
+
+
+# vendored from the cymove project https://github.com/ozars/cymove
+cdef extern from * namespace "cymove" nogil:
+    """
+    #include <type_traits>
+    #include <utility>
+    namespace cymove {
+    template <typename T>
+    inline typename std::remove_reference<T>::type&& cymove(T& t) {
+        return std::move(t);
+    }
+    template <typename T>
+    inline typename std::remove_reference<T>::type&& cymove(T&& t) {
+        return std::move(t);
+    }
+    } // namespace cymove
+    """
+    cdef T move" cymove::cymove"[T](T)
+
+cdef extern from * namespace "arrow::py" nogil:
+    """
+    #include <memory>
+    #include <utility>
+
+    namespace arrow {
+    namespace py {
+    template <typename T>
+    std::shared_ptr<T> to_shared(std::unique_ptr<T>& t) {
+        return std::move(t);
+    }
+    template <typename T>
+    std::shared_ptr<T> to_shared(std::unique_ptr<T>&& t) {
+        return std::move(t);
+    }
+    } // namespace py
+    } // namespace arrow
+    """
+    cdef shared_ptr[T] to_shared" arrow::py::to_shared"[T](unique_ptr[T])
+
+cdef extern from "arrow/python/platform.h":
+    pass
+
+cdef extern from "<Python.h>":
+    void Py_XDECREF(PyObject* o)
+    Py_ssize_t Py_REFCNT(PyObject* o)
+
+cdef extern from "numpy/halffloat.h":
+    ctypedef uint16_t npy_half
+
+cdef extern from "arrow/api.h" namespace "arrow" nogil:
+    # We can later add more of the common status factory methods as needed
+    cdef CStatus CStatus_OK "arrow::Status::OK"()
+
+    cdef CStatus CStatus_Invalid "arrow::Status::Invalid"()
+    cdef CStatus CStatus_NotImplemented \
+        "arrow::Status::NotImplemented"(const c_string& msg)
+    cdef CStatus CStatus_UnknownError \
+        "arrow::Status::UnknownError"(const c_string& msg)
+
+    cdef cppclass CStatus "arrow::Status":
+        CStatus()
+
+        c_string ToString()
+        c_string message()
+        shared_ptr[CStatusDetail] detail()
+
+        c_bool ok()
+        c_bool IsIOError()
+        c_bool IsOutOfMemory()
+        c_bool IsInvalid()
+        c_bool IsKeyError()
+        c_bool IsNotImplemented()
+        c_bool IsTypeError()
+        c_bool IsCapacityError()
+        c_bool IsIndexError()
+        c_bool IsSerializationError()
+        c_bool IsCancelled()
+
+        void Warn()
+
+    cdef cppclass CStatusDetail "arrow::StatusDetail":
+        c_string ToString()
+
+
+cdef extern from "arrow/result.h" namespace "arrow" nogil:
+    cdef cppclass CResult "arrow::Result"[T]:
+        CResult()
+        CResult(CStatus)
+        CResult(T)
+        c_bool ok()
+        CStatus status()
+        CStatus Value(T*)
+        T operator*()
+
+
+cdef extern from "arrow/util/future.h" namespace "arrow" nogil:
+    cdef cppclass CFuture "arrow::Future"[T]:
+        CFuture()
+
+
+cdef extern from "arrow/python/async.h" namespace "arrow::py" nogil:
+    # BindFuture's third argument is really a C++ callable with
+    # the signature `object(T*)`, but Cython does not allow declaring that.
+    # We use an ellipsis as a workaround.
+    # Another possibility is to type-erase the argument by making it
+    # `object(void*)`, but it would lose compile-time C++ type safety.
+    void BindFuture[T](CFuture[T], object cb, ...)
+
+
+cdef extern from "arrow/python/common.h" namespace "arrow::py" nogil:
+    T GetResultValue[T](CResult[T]) except *
+    cdef function[F] BindFunction[F](void* unbound, object bound, ...)
+
+
+cdef inline object PyObject_to_object(PyObject* o):
+    # Cast to "object" increments reference count
+    cdef object result = <object> o
+    cpython.Py_DECREF(result)
+    return result
diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..0d871f411b11b27063fc7c90cc5ff08b7a91576d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd
@@ -0,0 +1,3078 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
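The declarations that follow mirror the C++ API one-to-one; CKeyValueMetadata, for example, backs the schema and field metadata visible from Python. A small illustrative sketch, assuming pyarrow is importable:

    import pyarrow as pa

    schema = pa.schema([pa.field("x", pa.int64())],
                       metadata={"origin": "sensor-a"})
    print(schema.metadata)  # stored as bytes: {b'origin': b'sensor-a'}

    # with_metadata() round-trips through arrow::KeyValueMetadata.
    schema = schema.with_metadata({"origin": "sensor-b"})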
+ +# distutils: language = c++ + +from pyarrow.includes.common cimport * + + +cdef extern from "arrow/util/key_value_metadata.h" namespace "arrow" nogil: + cdef cppclass CKeyValueMetadata" arrow::KeyValueMetadata": + CKeyValueMetadata() + CKeyValueMetadata(const unordered_map[c_string, c_string]&) + CKeyValueMetadata(const vector[c_string]& keys, + const vector[c_string]& values) + + void reserve(int64_t n) + int64_t size() const + c_string key(int64_t i) const + c_string value(int64_t i) const + int FindKey(const c_string& key) const + + shared_ptr[CKeyValueMetadata] Copy() const + c_bool Equals(const CKeyValueMetadata& other) + void Append(const c_string& key, const c_string& value) + void ToUnorderedMap(unordered_map[c_string, c_string]*) const + c_string ToString() const + + CResult[c_string] Get(const c_string& key) const + CStatus Delete(const c_string& key) + CStatus Set(const c_string& key, const c_string& value) + c_bool Contains(const c_string& key) const + + +cdef extern from "arrow/util/decimal.h" namespace "arrow" nogil: + cdef cppclass CDecimal128" arrow::Decimal128": + c_string ToString(int32_t scale) const + + +cdef extern from "arrow/util/decimal.h" namespace "arrow" nogil: + cdef cppclass CDecimal256" arrow::Decimal256": + c_string ToString(int32_t scale) const + + +cdef extern from "arrow/config.h" namespace "arrow" nogil: + cdef cppclass CBuildInfo" arrow::BuildInfo": + int version + int version_major + int version_minor + int version_patch + c_string version_string + c_string so_version + c_string full_so_version + c_string compiler_id + c_string compiler_version + c_string compiler_flags + c_string git_id + c_string git_description + c_string package_kind + c_string build_type + + const CBuildInfo& GetBuildInfo() + + cdef cppclass CRuntimeInfo" arrow::RuntimeInfo": + c_string simd_level + c_string detected_simd_level + + CRuntimeInfo GetRuntimeInfo() + + cdef cppclass CGlobalOptions" arrow::GlobalOptions": + optional[c_string] timezone_db_path + + CStatus Initialize(const CGlobalOptions& options) + + +cdef extern from "arrow/util/future.h" namespace "arrow" nogil: + cdef cppclass CFuture_Void" arrow::Future<>": + CStatus status() + + +cdef extern from "arrow/api.h" namespace "arrow" nogil: + cdef enum Type" arrow::Type::type": + _Type_NA" arrow::Type::NA" + + _Type_BOOL" arrow::Type::BOOL" + + _Type_UINT8" arrow::Type::UINT8" + _Type_INT8" arrow::Type::INT8" + _Type_UINT16" arrow::Type::UINT16" + _Type_INT16" arrow::Type::INT16" + _Type_UINT32" arrow::Type::UINT32" + _Type_INT32" arrow::Type::INT32" + _Type_UINT64" arrow::Type::UINT64" + _Type_INT64" arrow::Type::INT64" + + _Type_HALF_FLOAT" arrow::Type::HALF_FLOAT" + _Type_FLOAT" arrow::Type::FLOAT" + _Type_DOUBLE" arrow::Type::DOUBLE" + + _Type_DECIMAL128" arrow::Type::DECIMAL128" + _Type_DECIMAL256" arrow::Type::DECIMAL256" + + _Type_DATE32" arrow::Type::DATE32" + _Type_DATE64" arrow::Type::DATE64" + _Type_TIMESTAMP" arrow::Type::TIMESTAMP" + _Type_TIME32" arrow::Type::TIME32" + _Type_TIME64" arrow::Type::TIME64" + _Type_DURATION" arrow::Type::DURATION" + _Type_INTERVAL_MONTH_DAY_NANO" arrow::Type::INTERVAL_MONTH_DAY_NANO" + + _Type_BINARY" arrow::Type::BINARY" + _Type_STRING" arrow::Type::STRING" + _Type_LARGE_BINARY" arrow::Type::LARGE_BINARY" + _Type_LARGE_STRING" arrow::Type::LARGE_STRING" + _Type_FIXED_SIZE_BINARY" arrow::Type::FIXED_SIZE_BINARY" + _Type_BINARY_VIEW" arrow::Type::BINARY_VIEW" + _Type_STRING_VIEW" arrow::Type::STRING_VIEW" + + _Type_LIST" arrow::Type::LIST" + _Type_LARGE_LIST" 
arrow::Type::LARGE_LIST" + _Type_FIXED_SIZE_LIST" arrow::Type::FIXED_SIZE_LIST" + _Type_LIST_VIEW" arrow::Type::LIST_VIEW" + _Type_LARGE_LIST_VIEW" arrow::Type::LARGE_LIST_VIEW" + _Type_STRUCT" arrow::Type::STRUCT" + _Type_SPARSE_UNION" arrow::Type::SPARSE_UNION" + _Type_DENSE_UNION" arrow::Type::DENSE_UNION" + _Type_DICTIONARY" arrow::Type::DICTIONARY" + _Type_RUN_END_ENCODED" arrow::Type::RUN_END_ENCODED" + _Type_MAP" arrow::Type::MAP" + + _Type_EXTENSION" arrow::Type::EXTENSION" + + cdef enum UnionMode" arrow::UnionMode::type": + _UnionMode_SPARSE" arrow::UnionMode::SPARSE" + _UnionMode_DENSE" arrow::UnionMode::DENSE" + + cdef enum TimeUnit" arrow::TimeUnit::type": + TimeUnit_SECOND" arrow::TimeUnit::SECOND" + TimeUnit_MILLI" arrow::TimeUnit::MILLI" + TimeUnit_MICRO" arrow::TimeUnit::MICRO" + TimeUnit_NANO" arrow::TimeUnit::NANO" + + cdef cppclass CBufferSpec" arrow::DataTypeLayout::BufferSpec": + pass + + cdef cppclass CDataTypeLayout" arrow::DataTypeLayout": + vector[CBufferSpec] buffers + c_bool has_dictionary + + cdef cppclass CDataType" arrow::DataType": + Type id() + + c_bool Equals(const CDataType& other, c_bool check_metadata) + c_bool Equals(const shared_ptr[CDataType]& other, c_bool check_metadata) + + shared_ptr[CField] field(int i) + const vector[shared_ptr[CField]] fields() + int num_fields() + CDataTypeLayout layout() + c_string ToString() + + c_bool is_primitive(Type type) + c_bool is_numeric(Type type) + + cdef cppclass CArrayData" arrow::ArrayData": + shared_ptr[CDataType] type + int64_t length + int64_t null_count + int64_t offset + vector[shared_ptr[CBuffer]] buffers + vector[shared_ptr[CArrayData]] child_data + shared_ptr[CArrayData] dictionary + + @staticmethod + shared_ptr[CArrayData] Make(const shared_ptr[CDataType]& type, + int64_t length, + vector[shared_ptr[CBuffer]]& buffers, + int64_t null_count, + int64_t offset) + + @staticmethod + shared_ptr[CArrayData] MakeWithChildren" Make"( + const shared_ptr[CDataType]& type, + int64_t length, + vector[shared_ptr[CBuffer]]& buffers, + vector[shared_ptr[CArrayData]]& child_data, + int64_t null_count, + int64_t offset) + + @staticmethod + shared_ptr[CArrayData] MakeWithChildrenAndDictionary" Make"( + const shared_ptr[CDataType]& type, + int64_t length, + vector[shared_ptr[CBuffer]]& buffers, + vector[shared_ptr[CArrayData]]& child_data, + shared_ptr[CArrayData]& dictionary, + int64_t null_count, + int64_t offset) + + cdef cppclass CArray" arrow::Array": + shared_ptr[CDataType] type() + + int64_t length() + int64_t null_count() + int64_t offset() + Type type_id() + + int num_fields() + + CResult[shared_ptr[CScalar]] GetScalar(int64_t i) const + + c_string Diff(const CArray& other) + c_bool Equals(const CArray& arr) + c_bool IsNull(int i) + + shared_ptr[CArrayData] data() + + shared_ptr[CArray] Slice(int64_t offset) + shared_ptr[CArray] Slice(int64_t offset, int64_t length) + + CStatus Validate() const + CStatus ValidateFull() const + CResult[shared_ptr[CArray]] View(const shared_ptr[CDataType]& type) + CDeviceAllocationType device_type() + + shared_ptr[CArray] MakeArray(const shared_ptr[CArrayData]& data) + CResult[shared_ptr[CArray]] MakeArrayOfNull( + const shared_ptr[CDataType]& type, int64_t length, CMemoryPool* pool) + + CResult[shared_ptr[CArray]] MakeArrayFromScalar( + const CScalar& scalar, int64_t length, CMemoryPool* pool) + + CStatus DebugPrint(const CArray& arr, int indent) + + cdef cppclass CFixedWidthType" arrow::FixedWidthType"(CDataType): + int bit_width() + int byte_width() + + cdef cppclass CNullArray" 
arrow::NullArray"(CArray): + CNullArray(int64_t length) + + cdef cppclass CDictionaryArray" arrow::DictionaryArray"(CArray): + CDictionaryArray(const shared_ptr[CDataType]& type, + const shared_ptr[CArray]& indices, + const shared_ptr[CArray]& dictionary) + CDictionaryArray(const shared_ptr[CArrayData]& data) + + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const shared_ptr[CDataType]& type, + const shared_ptr[CArray]& indices, + const shared_ptr[CArray]& dictionary) + + shared_ptr[CArray] indices() + shared_ptr[CArray] dictionary() + + cdef cppclass CDate32Type" arrow::Date32Type"(CFixedWidthType): + pass + + cdef cppclass CDate64Type" arrow::Date64Type"(CFixedWidthType): + pass + + cdef cppclass CTimestampType" arrow::TimestampType"(CFixedWidthType): + CTimestampType(TimeUnit unit) + TimeUnit unit() + const c_string& timezone() + + cdef cppclass CTime32Type" arrow::Time32Type"(CFixedWidthType): + TimeUnit unit() + + cdef cppclass CTime64Type" arrow::Time64Type"(CFixedWidthType): + TimeUnit unit() + + shared_ptr[CDataType] ctime32" arrow::time32"(TimeUnit unit) + shared_ptr[CDataType] ctime64" arrow::time64"(TimeUnit unit) + + cdef cppclass CDurationType" arrow::DurationType"(CFixedWidthType): + TimeUnit unit() + + shared_ptr[CDataType] cduration" arrow::duration"(TimeUnit unit) + + cdef cppclass CDictionaryType" arrow::DictionaryType"(CFixedWidthType): + CDictionaryType(const shared_ptr[CDataType]& index_type, + const shared_ptr[CDataType]& value_type, + c_bool ordered) + + shared_ptr[CDataType] index_type() + shared_ptr[CDataType] value_type() + c_bool ordered() + + shared_ptr[CDataType] ctimestamp" arrow::timestamp"(TimeUnit unit) + shared_ptr[CDataType] ctimestamp" arrow::timestamp"( + TimeUnit unit, const c_string& timezone) + + cdef cppclass CMemoryPool" arrow::MemoryPool": + int64_t bytes_allocated() + int64_t max_memory() + c_string backend_name() + void ReleaseUnused() + + cdef cppclass CLoggingMemoryPool" arrow::LoggingMemoryPool"(CMemoryPool): + CLoggingMemoryPool(CMemoryPool*) + + cdef cppclass CProxyMemoryPool" arrow::ProxyMemoryPool"(CMemoryPool): + CProxyMemoryPool(CMemoryPool*) + + ctypedef enum CDeviceAllocationType "arrow::DeviceAllocationType": + CDeviceAllocationType_kCPU "arrow::DeviceAllocationType::kCPU" + CDeviceAllocationType_kCUDA "arrow::DeviceAllocationType::kCUDA" + CDeviceAllocationType_kCUDA_HOST "arrow::DeviceAllocationType::kCUDA_HOST" + CDeviceAllocationType_kOPENCL "arrow::DeviceAllocationType::kOPENCL" + CDeviceAllocationType_kVULKAN "arrow::DeviceAllocationType::kVULKAN" + CDeviceAllocationType_kMETAL "arrow::DeviceAllocationType::kMETAL" + CDeviceAllocationType_kVPI "arrow::DeviceAllocationType::kVPI" + CDeviceAllocationType_kROCM "arrow::DeviceAllocationType::kROCM" + CDeviceAllocationType_kROCM_HOST "arrow::DeviceAllocationType::kROCM_HOST" + CDeviceAllocationType_kEXT_DEV "arrow::DeviceAllocationType::kEXT_DEV" + CDeviceAllocationType_kCUDA_MANAGED "arrow::DeviceAllocationType::kCUDA_MANAGED" + CDeviceAllocationType_kONEAPI "arrow::DeviceAllocationType::kONEAPI" + CDeviceAllocationType_kWEBGPU "arrow::DeviceAllocationType::kWEBGPU" + CDeviceAllocationType_kHEXAGON "arrow::DeviceAllocationType::kHEXAGON" + + cdef cppclass CDevice" arrow::Device": + const char* type_name() + c_string ToString() + c_bool Equals(const CDevice& other) + int64_t device_id() + c_bool is_cpu() const + shared_ptr[CMemoryManager] default_memory_manager() + CDeviceAllocationType device_type() + + cdef cppclass CMemoryManager" arrow::MemoryManager": + const 
shared_ptr[CDevice] device() + c_bool is_cpu() const + + shared_ptr[CMemoryManager] c_default_cpu_memory_manager \ + " arrow::default_cpu_memory_manager"() + + cdef cppclass CBuffer" arrow::Buffer": + CBuffer(const uint8_t* data, int64_t size) + const uint8_t* data() + uint8_t* mutable_data() + uintptr_t address() + uintptr_t mutable_address() + int64_t size() + shared_ptr[CBuffer] parent() + c_bool is_cpu() const + c_bool is_mutable() const + c_string ToHexString() + c_bool Equals(const CBuffer& other) + shared_ptr[CDevice] device() + const shared_ptr[CMemoryManager] memory_manager() + CDeviceAllocationType device_type() + + CResult[shared_ptr[CBuffer]] SliceBufferSafe( + const shared_ptr[CBuffer]& buffer, int64_t offset) + CResult[shared_ptr[CBuffer]] SliceBufferSafe( + const shared_ptr[CBuffer]& buffer, int64_t offset, int64_t length) + + cdef cppclass CMutableBuffer" arrow::MutableBuffer"(CBuffer): + CMutableBuffer(const uint8_t* data, int64_t size) + + cdef cppclass CResizableBuffer" arrow::ResizableBuffer"(CMutableBuffer): + CStatus Resize(const int64_t new_size, c_bool shrink_to_fit) + CStatus Reserve(const int64_t new_size) + + CResult[unique_ptr[CBuffer]] AllocateBuffer(const int64_t size, + CMemoryPool* pool) + + CResult[unique_ptr[CResizableBuffer]] AllocateResizableBuffer( + const int64_t size, CMemoryPool* pool) + + cdef cppclass CSyncEvent" arrow::Device::SyncEvent": + pass + + cdef cppclass CDevice" arrow::Device": + pass + + cdef CMemoryPool* c_default_memory_pool" arrow::default_memory_pool"() + cdef CMemoryPool* c_system_memory_pool" arrow::system_memory_pool"() + cdef CStatus c_jemalloc_memory_pool" arrow::jemalloc_memory_pool"( + CMemoryPool** out) + cdef CStatus c_mimalloc_memory_pool" arrow::mimalloc_memory_pool"( + CMemoryPool** out) + cdef vector[c_string] c_supported_memory_backends \ + " arrow::SupportedMemoryBackendNames"() + + CStatus c_jemalloc_set_decay_ms" arrow::jemalloc_set_decay_ms"(int ms) + + cdef cppclass CListType" arrow::ListType"(CDataType): + CListType(const shared_ptr[CDataType]& value_type) + CListType(const shared_ptr[CField]& field) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + + cdef cppclass CLargeListType" arrow::LargeListType"(CDataType): + CLargeListType(const shared_ptr[CDataType]& value_type) + CLargeListType(const shared_ptr[CField]& field) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + + cdef cppclass CListViewType" arrow::ListViewType"(CDataType): + CListViewType(const shared_ptr[CDataType]& value_type) + CListViewType(const shared_ptr[CField]& field) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + + cdef cppclass CLargeListViewType" arrow::LargeListViewType"(CDataType): + CLargeListViewType(const shared_ptr[CDataType]& value_type) + CLargeListViewType(const shared_ptr[CField]& field) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + + cdef cppclass CMapType" arrow::MapType"(CDataType): + CMapType(const shared_ptr[CField]& key_field, + const shared_ptr[CField]& item_field, c_bool keys_sorted) + shared_ptr[CDataType] key_type() + shared_ptr[CField] key_field() + shared_ptr[CDataType] item_type() + shared_ptr[CField] item_field() + c_bool keys_sorted() + + cdef cppclass CFixedSizeListType" arrow::FixedSizeListType"(CDataType): + CFixedSizeListType(const shared_ptr[CDataType]& value_type, + int32_t list_size) + CFixedSizeListType(const shared_ptr[CField]& field, int32_t list_size) + shared_ptr[CDataType] value_type() + 
shared_ptr[CField] value_field() + int32_t list_size() + + cdef cppclass CStringType" arrow::StringType"(CDataType): + pass + + cdef cppclass CFixedSizeBinaryType \ + " arrow::FixedSizeBinaryType"(CFixedWidthType): + CFixedSizeBinaryType(int byte_width) + int byte_width() + int bit_width() + + cdef cppclass CDecimal128Type \ + " arrow::Decimal128Type"(CFixedSizeBinaryType): + CDecimal128Type(int precision, int scale) + int precision() + int scale() + + cdef cppclass CDecimal256Type \ + " arrow::Decimal256Type"(CFixedSizeBinaryType): + CDecimal256Type(int precision, int scale) + int precision() + int scale() + + cdef cppclass CRunEndEncodedType " arrow::RunEndEncodedType"(CDataType): + CRunEndEncodedType(const shared_ptr[CDataType]& run_end_type, + const shared_ptr[CDataType]& value_type) + const shared_ptr[CDataType]& run_end_type() + const shared_ptr[CDataType]& value_type() + + cdef cppclass CField" arrow::Field": + cppclass CMergeOptions "MergeOptions": + CMergeOptions() + c_bool promote_nullability + + @staticmethod + CMergeOptions Defaults() + + @staticmethod + CMergeOptions Permissive() + + const c_string& name() + shared_ptr[CDataType] type() + c_bool nullable() + + c_string ToString() + c_bool Equals(const CField& other, c_bool check_metadata) + + shared_ptr[const CKeyValueMetadata] metadata() + + CField(const c_string& name, const shared_ptr[CDataType]& type, + c_bool nullable) + + CField(const c_string& name, const shared_ptr[CDataType]& type, + c_bool nullable, const shared_ptr[CKeyValueMetadata]& metadata) + + # Removed const in Cython so don't have to cast to get code to generate + shared_ptr[CField] AddMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + shared_ptr[CField] WithMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + shared_ptr[CField] RemoveMetadata() + shared_ptr[CField] WithType(const shared_ptr[CDataType]& type) + shared_ptr[CField] WithName(const c_string& name) + shared_ptr[CField] WithNullable(c_bool nullable) + vector[shared_ptr[CField]] Flatten() + + cdef cppclass CFieldRef" arrow::FieldRef": + CFieldRef() + CFieldRef(c_string name) + CFieldRef(int index) + CFieldRef(vector[CFieldRef]) + + @staticmethod + CResult[CFieldRef] FromDotPath(c_string& dot_path) + const c_string* name() const + + cdef cppclass CFieldRefHash" arrow::FieldRef::Hash": + pass + + cdef cppclass CStructType" arrow::StructType"(CDataType): + CStructType(const vector[shared_ptr[CField]]& fields) + + shared_ptr[CField] GetFieldByName(const c_string& name) + vector[shared_ptr[CField]] GetAllFieldsByName(const c_string& name) + int GetFieldIndex(const c_string& name) + vector[int] GetAllFieldIndices(const c_string& name) + + cdef cppclass CUnionType" arrow::UnionType"(CDataType): + UnionMode mode() + const vector[int8_t]& type_codes() + const vector[int]& child_ids() + + cdef shared_ptr[CDataType] CMakeSparseUnionType" arrow::sparse_union"( + vector[shared_ptr[CField]] fields, + vector[int8_t] type_codes) + + cdef shared_ptr[CDataType] CMakeDenseUnionType" arrow::dense_union"( + vector[shared_ptr[CField]] fields, + vector[int8_t] type_codes) + + cdef shared_ptr[CDataType] CMakeRunEndEncodedType" arrow::run_end_encoded"( + shared_ptr[CDataType] run_end_type, + shared_ptr[CDataType] value_type) + + cdef shared_ptr[CDataType] CMakeListViewType" arrow::list_view"( + shared_ptr[CField] value_type) + + cdef shared_ptr[CDataType] CMakeLargeListViewType" arrow::large_list_view"( + shared_ptr[CField] value_type) + + cdef cppclass CSchema" arrow::Schema": + CSchema(const 
vector[shared_ptr[CField]]& fields) + CSchema(const vector[shared_ptr[CField]]& fields, + const shared_ptr[const CKeyValueMetadata]& metadata) + + # Does not actually exist, but gets Cython to not complain + CSchema(const vector[shared_ptr[CField]]& fields, + const shared_ptr[CKeyValueMetadata]& metadata) + + c_bool Equals(const CSchema& other, c_bool check_metadata) + + shared_ptr[CField] field(int i) + shared_ptr[const CKeyValueMetadata] metadata() + shared_ptr[CField] GetFieldByName(const c_string& name) + vector[shared_ptr[CField]] GetAllFieldsByName(const c_string& name) + int GetFieldIndex(const c_string& name) + vector[int] GetAllFieldIndices(const c_string& name) + const vector[shared_ptr[CField]] fields() + int num_fields() + c_string ToString() + + CResult[shared_ptr[CSchema]] AddField(int i, + const shared_ptr[CField]& field) + CResult[shared_ptr[CSchema]] RemoveField(int i) + CResult[shared_ptr[CSchema]] SetField(int i, + const shared_ptr[CField]& field) + + # Removed const in Cython so don't have to cast to get code to generate + shared_ptr[CSchema] AddMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + shared_ptr[CSchema] WithMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + shared_ptr[CSchema] RemoveMetadata() + + CResult[shared_ptr[CSchema]] UnifySchemas( + const vector[shared_ptr[CSchema]]& schemas, + CField.CMergeOptions field_merge_options) + + cdef cppclass PrettyPrintOptions: + PrettyPrintOptions() + PrettyPrintOptions(int indent_arg) + PrettyPrintOptions(int indent_arg, int window_arg) + int indent + int indent_size + int window + int container_window + c_string null_rep + c_bool skip_new_lines + c_bool truncate_metadata + c_bool show_field_metadata + c_bool show_schema_metadata + + @staticmethod + PrettyPrintOptions Defaults() + + CStatus PrettyPrint(const CArray& schema, + const PrettyPrintOptions& options, + c_string* result) + CStatus PrettyPrint(const CChunkedArray& schema, + const PrettyPrintOptions& options, + c_string* result) + CStatus PrettyPrint(const CSchema& schema, + const PrettyPrintOptions& options, + c_string* result) + + cdef cppclass CBooleanArray" arrow::BooleanArray"(CArray): + c_bool Value(int i) + int64_t false_count() + int64_t true_count() + + cdef cppclass CUInt8Array" arrow::UInt8Array"(CArray): + uint8_t Value(int i) + + cdef cppclass CInt8Array" arrow::Int8Array"(CArray): + int8_t Value(int i) + + cdef cppclass CUInt16Array" arrow::UInt16Array"(CArray): + uint16_t Value(int i) + + cdef cppclass CInt16Array" arrow::Int16Array"(CArray): + int16_t Value(int i) + + cdef cppclass CUInt32Array" arrow::UInt32Array"(CArray): + uint32_t Value(int i) + + cdef cppclass CInt32Array" arrow::Int32Array"(CArray): + int32_t Value(int i) + + cdef cppclass CUInt64Array" arrow::UInt64Array"(CArray): + uint64_t Value(int i) + + cdef cppclass CInt64Array" arrow::Int64Array"(CArray): + int64_t Value(int i) + + cdef cppclass CDate32Array" arrow::Date32Array"(CArray): + int32_t Value(int i) + + cdef cppclass CDate64Array" arrow::Date64Array"(CArray): + int64_t Value(int i) + + cdef cppclass CTime32Array" arrow::Time32Array"(CArray): + int32_t Value(int i) + + cdef cppclass CTime64Array" arrow::Time64Array"(CArray): + int64_t Value(int i) + + cdef cppclass CTimestampArray" arrow::TimestampArray"(CArray): + int64_t Value(int i) + + cdef cppclass CDurationArray" arrow::DurationArray"(CArray): + int64_t Value(int i) + + cdef cppclass CMonthDayNanoIntervalArray \ + "arrow::MonthDayNanoIntervalArray"(CArray): + pass + + cdef cppclass 
CHalfFloatArray" arrow::HalfFloatArray"(CArray): + uint16_t Value(int i) + + cdef cppclass CFloatArray" arrow::FloatArray"(CArray): + float Value(int i) + + cdef cppclass CDoubleArray" arrow::DoubleArray"(CArray): + double Value(int i) + + cdef cppclass CFixedSizeBinaryArray" arrow::FixedSizeBinaryArray"(CArray): + const uint8_t* GetValue(int i) + + cdef cppclass CDecimal128Array" arrow::Decimal128Array"( + CFixedSizeBinaryArray + ): + c_string FormatValue(int i) + + cdef cppclass CDecimal256Array" arrow::Decimal256Array"( + CFixedSizeBinaryArray + ): + c_string FormatValue(int i) + + cdef cppclass CListArray" arrow::ListArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const CArray& offsets, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const CArray& offsets, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + const int32_t* raw_value_offsets() + int32_t value_offset(int i) + int32_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CArray] offsets() + shared_ptr[CDataType] value_type() + + cdef cppclass CLargeListArray" arrow::LargeListArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const CArray& offsets, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const CArray& offsets, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap + ) + + int64_t value_offset(int i) + int64_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CArray] offsets() + shared_ptr[CDataType] value_type() + + cdef cppclass CFixedSizeListArray" arrow::FixedSizeListArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const shared_ptr[CArray]& values, + int32_t list_size, + shared_ptr[CBuffer] null_bitmap) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + const shared_ptr[CArray]& values, + shared_ptr[CDataType], + shared_ptr[CBuffer] null_bitmap) + + int64_t value_offset(int i) + int64_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CDataType] value_type() + + cdef cppclass CListViewArray" arrow::ListViewArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const CArray& offsets, + const CArray& sizes, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const CArray& offsets, + const CArray& sizes, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + CResult[shared_ptr[CArray]] Flatten( + CMemoryPool* pool + ) + + const int32_t* raw_value_offsets() + const int32_t* raw_value_sizes() + int32_t value_offset(int i) + int32_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CArray] offsets() + shared_ptr[CArray] sizes() + shared_ptr[CDataType] value_type() + + cdef cppclass CLargeListViewArray" arrow::LargeListViewArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const CArray& offsets, + const CArray& sizes, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const 
CArray& offsets, + const CArray& sizes, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + CResult[shared_ptr[CArray]] Flatten( + CMemoryPool* pool + ) + + int64_t value_offset(int i) + int64_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CArray] offsets() + shared_ptr[CArray] sizes() + shared_ptr[CDataType] value_type() + + cdef cppclass CMapArray" arrow::MapArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const shared_ptr[CArray]& offsets, + const shared_ptr[CArray]& keys, + const shared_ptr[CArray]& items, + CMemoryPool* pool, + const shared_ptr[CBuffer] null_bitmap, + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const shared_ptr[CArray]& offsets, + const shared_ptr[CArray]& keys, + const shared_ptr[CArray]& items, + CMemoryPool* pool, + const shared_ptr[CBuffer] null_bitmap, + ) + + shared_ptr[CArray] keys() + shared_ptr[CArray] items() + CMapType* map_type() + int64_t value_offset(int i) + int64_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CDataType] value_type() + + cdef cppclass CUnionArray" arrow::UnionArray"(CArray): + shared_ptr[CBuffer] type_codes() + int8_t* raw_type_codes() + int child_id(int64_t index) + shared_ptr[CArray] field(int pos) + const CArray* UnsafeField(int pos) + UnionMode mode() + + cdef cppclass CSparseUnionArray" arrow::SparseUnionArray"(CUnionArray): + @staticmethod + CResult[shared_ptr[CArray]] Make( + const CArray& type_codes, + const vector[shared_ptr[CArray]]& children, + const vector[c_string]& field_names, + const vector[int8_t]& type_codes) + + cdef cppclass CDenseUnionArray" arrow::DenseUnionArray"(CUnionArray): + @staticmethod + CResult[shared_ptr[CArray]] Make( + const CArray& type_codes, + const CArray& value_offsets, + const vector[shared_ptr[CArray]]& children, + const vector[c_string]& field_names, + const vector[int8_t]& type_codes) + + int32_t value_offset(int i) + shared_ptr[CBuffer] value_offsets() + + cdef cppclass CBinaryArray" arrow::BinaryArray"(CArray): + const uint8_t* GetValue(int i, int32_t* length) + shared_ptr[CBuffer] value_data() + int32_t value_offset(int64_t i) + int32_t value_length(int64_t i) + int32_t total_values_length() + + cdef cppclass CLargeBinaryArray" arrow::LargeBinaryArray"(CArray): + const uint8_t* GetValue(int i, int64_t* length) + shared_ptr[CBuffer] value_data() + int64_t value_offset(int64_t i) + int64_t value_length(int64_t i) + int64_t total_values_length() + + cdef cppclass CStringArray" arrow::StringArray"(CBinaryArray): + CStringArray(int64_t length, shared_ptr[CBuffer] value_offsets, + shared_ptr[CBuffer] data, + shared_ptr[CBuffer] null_bitmap, + int64_t null_count, + int64_t offset) + + c_string GetString(int i) + + cdef cppclass CLargeStringArray" arrow::LargeStringArray" \ + (CLargeBinaryArray): + CLargeStringArray(int64_t length, shared_ptr[CBuffer] value_offsets, + shared_ptr[CBuffer] data, + shared_ptr[CBuffer] null_bitmap, + int64_t null_count, + int64_t offset) + + c_string GetString(int i) + + cdef cppclass CStructArray" arrow::StructArray"(CArray): + CStructArray(shared_ptr[CDataType]& type, int64_t length, + vector[shared_ptr[CArray]]& children, + shared_ptr[CBuffer] null_bitmap=nullptr, + int64_t null_count=-1, + int64_t offset=0) + + # XXX Cython crashes if default argument values are declared here + # https://github.com/cython/cython/issues/2167 + @staticmethod + CResult[shared_ptr[CArray]] MakeFromFieldNames "Make"( + 
vector[shared_ptr[CArray]] children, + vector[c_string] field_names, + shared_ptr[CBuffer] null_bitmap, + int64_t null_count, + int64_t offset) + + @staticmethod + CResult[shared_ptr[CArray]] MakeFromFields "Make"( + vector[shared_ptr[CArray]] children, + vector[shared_ptr[CField]] fields, + shared_ptr[CBuffer] null_bitmap, + int64_t null_count, + int64_t offset) + + shared_ptr[CArray] field(int pos) + shared_ptr[CArray] GetFieldByName(const c_string& name) const + CResult[shared_ptr[CArray]] GetFlattenedField(int index, CMemoryPool* pool) const + + CResult[vector[shared_ptr[CArray]]] Flatten(CMemoryPool* pool) + + cdef cppclass CRunEndEncodedArray" arrow::RunEndEncodedArray"(CArray): + @staticmethod + CResult[shared_ptr[CRunEndEncodedArray]] Make( + const shared_ptr[CDataType]& type, + int64_t logical_length, + const shared_ptr[CArray]& run_ends, + const shared_ptr[CArray]& values, + int64_t logical_offset) + + @staticmethod + CResult[shared_ptr[CRunEndEncodedArray]] MakeFromArrays "Make"( + int64_t logical_length, + const shared_ptr[CArray]& run_ends, + const shared_ptr[CArray]& values, + int64_t logical_offset) + + shared_ptr[CArray]& run_ends() + shared_ptr[CArray]& values() + + int64_t FindPhysicalOffset() + int64_t FindPhysicalLength() + + cdef cppclass CChunkedArray" arrow::ChunkedArray": + CChunkedArray(const vector[shared_ptr[CArray]]& arrays) + CChunkedArray(const vector[shared_ptr[CArray]]& arrays, + const shared_ptr[CDataType]& type) + + @staticmethod + CResult[shared_ptr[CChunkedArray]] Make(vector[shared_ptr[CArray]] chunks, + shared_ptr[CDataType] type) + int64_t length() + int64_t null_count() + int num_chunks() + c_bool Equals(const CChunkedArray& other) + + shared_ptr[CArray] chunk(int i) + shared_ptr[CDataType] type() + CResult[shared_ptr[CScalar]] GetScalar(int64_t index) const + shared_ptr[CChunkedArray] Slice(int64_t offset, int64_t length) const + shared_ptr[CChunkedArray] Slice(int64_t offset) const + + CResult[vector[shared_ptr[CChunkedArray]]] Flatten(CMemoryPool* pool) + + CStatus Validate() const + CStatus ValidateFull() const + + cdef cppclass CRecordBatch" arrow::RecordBatch": + @staticmethod + shared_ptr[CRecordBatch] Make( + const shared_ptr[CSchema]& schema, int64_t num_rows, + const vector[shared_ptr[CArray]]& columns) + + CResult[shared_ptr[CStructArray]] ToStructArray() const + + @staticmethod + CResult[shared_ptr[CRecordBatch]] FromStructArray( + const shared_ptr[CArray]& array) + + c_bool Equals(const CRecordBatch& other, c_bool check_metadata) + + shared_ptr[CSchema] schema() + shared_ptr[CArray] column(int i) + const c_string& column_name(int i) + + CResult[shared_ptr[CRecordBatch]] AddColumn( + int i, shared_ptr[CField] field, shared_ptr[CArray] column) + CResult[shared_ptr[CRecordBatch]] RemoveColumn(int i) + CResult[shared_ptr[CRecordBatch]] SetColumn( + int i, shared_ptr[CField] field, shared_ptr[CArray] column) + + const vector[shared_ptr[CArray]]& columns() + + CResult[shared_ptr[CRecordBatch]] RenameColumns(const vector[c_string]&) + CResult[shared_ptr[CRecordBatch]] SelectColumns(const vector[int]&) + + int num_columns() + int64_t num_rows() + + CDeviceAllocationType device_type() + + CStatus Validate() const + CStatus ValidateFull() const + + shared_ptr[CRecordBatch] ReplaceSchemaMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + + shared_ptr[CRecordBatch] Slice(int64_t offset) + shared_ptr[CRecordBatch] Slice(int64_t offset, int64_t length) + + CResult[shared_ptr[CTensor]] ToTensor(c_bool null_to_nan, c_bool row_major, + 
CMemoryPool* pool) const + + cdef cppclass CRecordBatchWithMetadata" arrow::RecordBatchWithMetadata": + shared_ptr[CRecordBatch] batch + # The struct in C++ does not actually have these two `const` qualifiers, but + # adding `const` gets Cython to not complain + const shared_ptr[const CKeyValueMetadata] custom_metadata + + cdef cppclass CTable" arrow::Table": + CTable(const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CChunkedArray]]& columns) + + @staticmethod + shared_ptr[CTable] Make( + const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CChunkedArray]]& columns) + + @staticmethod + shared_ptr[CTable] MakeWithRows "Make"( + const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CChunkedArray]]& columns, + int64_t num_rows) + + @staticmethod + shared_ptr[CTable] MakeFromArrays" Make"( + const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CArray]]& arrays) + + @staticmethod + CResult[shared_ptr[CTable]] FromRecordBatchReader( + CRecordBatchReader *reader) + + @staticmethod + CResult[shared_ptr[CTable]] FromRecordBatches( + const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CRecordBatch]]& batches) + + int num_columns() + int64_t num_rows() + + c_bool Equals(const CTable& other, c_bool check_metadata) + + shared_ptr[CSchema] schema() + shared_ptr[CChunkedArray] column(int i) + shared_ptr[CField] field(int i) + + CResult[shared_ptr[CTable]] AddColumn( + int i, shared_ptr[CField] field, shared_ptr[CChunkedArray] column) + CResult[shared_ptr[CTable]] RemoveColumn(int i) + CResult[shared_ptr[CTable]] SetColumn( + int i, shared_ptr[CField] field, shared_ptr[CChunkedArray] column) + + vector[c_string] ColumnNames() + CResult[shared_ptr[CTable]] RenameColumns(const vector[c_string]&) + CResult[shared_ptr[CTable]] SelectColumns(const vector[int]&) + + CResult[shared_ptr[CTable]] Flatten(CMemoryPool* pool) + + CResult[shared_ptr[CTable]] CombineChunks(CMemoryPool* pool) + + CStatus Validate() const + CStatus ValidateFull() const + + shared_ptr[CTable] ReplaceSchemaMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + + shared_ptr[CTable] Slice(int64_t offset) + shared_ptr[CTable] Slice(int64_t offset, int64_t length) + + cdef cppclass CRecordBatchReader" arrow::RecordBatchReader": + shared_ptr[CSchema] schema() + CStatus Close() + CResult[CRecordBatchWithMetadata] ReadNext() + CStatus ReadNext(shared_ptr[CRecordBatch]* batch) + CResult[shared_ptr[CTable]] ToTable() + + cdef cppclass TableBatchReader(CRecordBatchReader): + TableBatchReader(const CTable& table) + TableBatchReader(shared_ptr[CTable] table) + void set_chunksize(int64_t chunksize) + + cdef cppclass CTensor" arrow::Tensor": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + + const vector[int64_t]& shape() + const vector[int64_t]& strides() + int64_t size() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + c_bool is_contiguous() + Type type_id() + c_bool Equals(const CTensor& other) + + cdef cppclass CSparseIndex" arrow::SparseIndex": + pass + + cdef cppclass CSparseCOOIndex" arrow::SparseCOOIndex": + c_bool is_canonical() + + cdef cppclass CSparseCOOTensor" arrow::SparseCOOTensor": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + CResult[shared_ptr[CTensor]] ToTensor() + + shared_ptr[CSparseIndex] sparse_index() + + const vector[int64_t]& shape() + int64_t size() + int64_t non_zero_length() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + 
Type type_id() + c_bool Equals(const CSparseCOOTensor& other) + + cdef cppclass CSparseCSRMatrix" arrow::SparseCSRMatrix": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + CResult[shared_ptr[CTensor]] ToTensor() + + const vector[int64_t]& shape() + int64_t size() + int64_t non_zero_length() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + Type type_id() + c_bool Equals(const CSparseCSRMatrix& other) + + cdef cppclass CSparseCSCMatrix" arrow::SparseCSCMatrix": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + CResult[shared_ptr[CTensor]] ToTensor() + + const vector[int64_t]& shape() + int64_t size() + int64_t non_zero_length() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + Type type_id() + c_bool Equals(const CSparseCSCMatrix& other) + + cdef cppclass CSparseCSFTensor" arrow::SparseCSFTensor": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + CResult[shared_ptr[CTensor]] ToTensor() + + const vector[int64_t]& shape() + int64_t size() + int64_t non_zero_length() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + Type type_id() + c_bool Equals(const CSparseCSFTensor& other) + + cdef cppclass CScalar" arrow::Scalar": + CScalar(shared_ptr[CDataType]) + + shared_ptr[CDataType] type + c_bool is_valid + + c_string ToString() const + c_bool Equals(const CScalar& other) const + CStatus Validate() const + CStatus ValidateFull() const + + cdef cppclass CScalarHash" arrow::Scalar::Hash": + size_t operator()(const shared_ptr[CScalar]& scalar) const + + cdef cppclass CNullScalar" arrow::NullScalar"(CScalar): + CNullScalar() + + cdef cppclass CBooleanScalar" arrow::BooleanScalar"(CScalar): + CBooleanScalar(c_bool value) + c_bool value + + cdef cppclass CInt8Scalar" arrow::Int8Scalar"(CScalar): + int8_t value + + cdef cppclass CUInt8Scalar" arrow::UInt8Scalar"(CScalar): + uint8_t value + + cdef cppclass CInt16Scalar" arrow::Int16Scalar"(CScalar): + int16_t value + + cdef cppclass CUInt16Scalar" arrow::UInt16Scalar"(CScalar): + uint16_t value + + cdef cppclass CInt32Scalar" arrow::Int32Scalar"(CScalar): + int32_t value + + cdef cppclass CUInt32Scalar" arrow::UInt32Scalar"(CScalar): + uint32_t value + + cdef cppclass CInt64Scalar" arrow::Int64Scalar"(CScalar): + int64_t value + + cdef cppclass CUInt64Scalar" arrow::UInt64Scalar"(CScalar): + uint64_t value + + cdef cppclass CHalfFloatScalar" arrow::HalfFloatScalar"(CScalar): + npy_half value + + cdef cppclass CFloatScalar" arrow::FloatScalar"(CScalar): + float value + + cdef cppclass CDoubleScalar" arrow::DoubleScalar"(CScalar): + double value + + cdef cppclass CDecimal128Scalar" arrow::Decimal128Scalar"(CScalar): + CDecimal128 value + + cdef cppclass CDecimal256Scalar" arrow::Decimal256Scalar"(CScalar): + CDecimal256 value + + cdef cppclass CDate32Scalar" arrow::Date32Scalar"(CScalar): + int32_t value + + cdef cppclass CDate64Scalar" arrow::Date64Scalar"(CScalar): + int64_t value + + cdef cppclass CTime32Scalar" arrow::Time32Scalar"(CScalar): + int32_t value + + cdef cppclass CTime64Scalar" arrow::Time64Scalar"(CScalar): + int64_t value + + cdef cppclass CTimestampScalar" arrow::TimestampScalar"(CScalar): + int64_t value + + cdef cppclass CDurationScalar" arrow::DurationScalar"(CScalar): + int64_t value + + cdef cppclass CMonthDayNanoIntervalScalar \ + "arrow::MonthDayNanoIntervalScalar"(CScalar): + pass + + cdef cppclass 
CBaseBinaryScalar" arrow::BaseBinaryScalar"(CScalar): + shared_ptr[CBuffer] value + + cdef cppclass CBaseListScalar" arrow::BaseListScalar"(CScalar): + shared_ptr[CArray] value + + cdef cppclass CListScalar" arrow::ListScalar"(CBaseListScalar): + pass + + cdef cppclass CListViewScalar" arrow::ListViewScalar"(CBaseListScalar): + pass + + cdef cppclass CLargeListViewScalar" arrow::LargeListViewScalar"(CBaseListScalar): + pass + + cdef cppclass CMapScalar" arrow::MapScalar"(CListScalar): + pass + + cdef cppclass CStructScalar" arrow::StructScalar"(CScalar): + vector[shared_ptr[CScalar]] value + CResult[shared_ptr[CScalar]] field(CFieldRef ref) const + + cdef cppclass CDictionaryScalarIndexAndDictionary \ + "arrow::DictionaryScalar::ValueType": + shared_ptr[CScalar] index + shared_ptr[CArray] dictionary + + cdef cppclass CDictionaryScalar" arrow::DictionaryScalar"(CScalar): + CDictionaryScalar(CDictionaryScalarIndexAndDictionary value, + shared_ptr[CDataType], c_bool is_valid) + CDictionaryScalarIndexAndDictionary value + + CResult[shared_ptr[CScalar]] GetEncodedValue() + + cdef cppclass CUnionScalar" arrow::UnionScalar"(CScalar): + int8_t type_code + + cdef cppclass CDenseUnionScalar" arrow::DenseUnionScalar"(CUnionScalar): + shared_ptr[CScalar] value + + cdef cppclass CSparseUnionScalar" arrow::SparseUnionScalar"(CUnionScalar): + vector[shared_ptr[CScalar]] value + int child_id + + cdef cppclass CRunEndEncodedScalar" arrow::RunEndEncodedScalar"(CScalar): + shared_ptr[CScalar] value + + cdef cppclass CExtensionScalar" arrow::ExtensionScalar"(CScalar): + CExtensionScalar(shared_ptr[CScalar] storage, + shared_ptr[CDataType], c_bool is_valid) + shared_ptr[CScalar] value + + shared_ptr[CScalar] MakeScalar[Value](Value value) + + cdef cppclass CConcatenateTablesOptions" arrow::ConcatenateTablesOptions": + c_bool unify_schemas + CField.CMergeOptions field_merge_options + + @staticmethod + CConcatenateTablesOptions Defaults() + + CResult[shared_ptr[CTable]] ConcatenateTables( + const vector[shared_ptr[CTable]]& tables, + CConcatenateTablesOptions options, + CMemoryPool* memory_pool) + + cdef cppclass CDictionaryUnifier" arrow::DictionaryUnifier": + @staticmethod + CResult[shared_ptr[CChunkedArray]] UnifyChunkedArray( + shared_ptr[CChunkedArray] array, CMemoryPool* pool) + + @staticmethod + CResult[shared_ptr[CTable]] UnifyTable( + const CTable& table, CMemoryPool* pool) + + shared_ptr[CScalar] MakeNullScalar(shared_ptr[CDataType] type) + + +cdef extern from "arrow/c/dlpack_abi.h" nogil: + ctypedef enum DLDeviceType: + kDLCPU = 1 + + ctypedef struct DLDevice: + DLDeviceType device_type + int32_t device_id + + ctypedef struct DLManagedTensor: + void (*deleter)(DLManagedTensor*) + + +cdef extern from "arrow/c/dlpack.h" namespace "arrow::dlpack" nogil: + CResult[DLManagedTensor*] ExportToDLPack" arrow::dlpack::ExportArray"( + const shared_ptr[CArray]& arr) + + CResult[DLDevice] ExportDevice(const shared_ptr[CArray]& arr) + + +cdef extern from "arrow/builder.h" namespace "arrow" nogil: + + cdef cppclass CArrayBuilder" arrow::ArrayBuilder": + CArrayBuilder(shared_ptr[CDataType], CMemoryPool* pool) + + int64_t length() + int64_t null_count() + CStatus AppendNull() + CStatus Finish(shared_ptr[CArray]* out) + CStatus Reserve(int64_t additional_capacity) + + cdef cppclass CBooleanBuilder" arrow::BooleanBuilder"(CArrayBuilder): + CBooleanBuilder(CMemoryPool* pool) + CStatus Append(const c_bool val) + CStatus Append(const uint8_t val) + + cdef cppclass CInt8Builder" arrow::Int8Builder"(CArrayBuilder): + 
CInt8Builder(CMemoryPool* pool) + CStatus Append(const int8_t value) + + cdef cppclass CInt16Builder" arrow::Int16Builder"(CArrayBuilder): + CInt16Builder(CMemoryPool* pool) + CStatus Append(const int16_t value) + + cdef cppclass CInt32Builder" arrow::Int32Builder"(CArrayBuilder): + CInt32Builder(CMemoryPool* pool) + CStatus Append(const int32_t value) + + cdef cppclass CInt64Builder" arrow::Int64Builder"(CArrayBuilder): + CInt64Builder(CMemoryPool* pool) + CStatus Append(const int64_t value) + + cdef cppclass CUInt8Builder" arrow::UInt8Builder"(CArrayBuilder): + CUInt8Builder(CMemoryPool* pool) + CStatus Append(const uint8_t value) + + cdef cppclass CUInt16Builder" arrow::UInt16Builder"(CArrayBuilder): + CUInt16Builder(CMemoryPool* pool) + CStatus Append(const uint16_t value) + + cdef cppclass CUInt32Builder" arrow::UInt32Builder"(CArrayBuilder): + CUInt32Builder(CMemoryPool* pool) + CStatus Append(const uint32_t value) + + cdef cppclass CUInt64Builder" arrow::UInt64Builder"(CArrayBuilder): + CUInt64Builder(CMemoryPool* pool) + CStatus Append(const uint64_t value) + + cdef cppclass CHalfFloatBuilder" arrow::HalfFloatBuilder"(CArrayBuilder): + CHalfFloatBuilder(CMemoryPool* pool) + + cdef cppclass CFloatBuilder" arrow::FloatBuilder"(CArrayBuilder): + CFloatBuilder(CMemoryPool* pool) + CStatus Append(const float value) + + cdef cppclass CDoubleBuilder" arrow::DoubleBuilder"(CArrayBuilder): + CDoubleBuilder(CMemoryPool* pool) + CStatus Append(const double value) + + cdef cppclass CBinaryBuilder" arrow::BinaryBuilder"(CArrayBuilder): + CArrayBuilder(shared_ptr[CDataType], CMemoryPool* pool) + CStatus Append(const char* value, int32_t length) + + cdef cppclass CStringBuilder" arrow::StringBuilder"(CBinaryBuilder): + CStringBuilder(CMemoryPool* pool) + CStatus Append(const c_string& value) + + cdef cppclass CBinaryViewBuilder" arrow::BinaryViewBuilder"(CArrayBuilder): + CBinaryViewBuilder(shared_ptr[CDataType], CMemoryPool* pool) + CStatus Append(const char* value, int32_t length) + + cdef cppclass CStringViewBuilder" arrow::StringViewBuilder"(CBinaryViewBuilder): + CStringViewBuilder(CMemoryPool* pool) + CStatus Append(const c_string& value) + + cdef cppclass CTimestampBuilder "arrow::TimestampBuilder"(CArrayBuilder): + CTimestampBuilder(const shared_ptr[CDataType] typ, CMemoryPool* pool) + CStatus Append(const int64_t value) + + cdef cppclass CDate32Builder "arrow::Date32Builder"(CArrayBuilder): + CDate32Builder(CMemoryPool* pool) + CStatus Append(const int32_t value) + + cdef cppclass CDate64Builder "arrow::Date64Builder"(CArrayBuilder): + CDate64Builder(CMemoryPool* pool) + CStatus Append(const int64_t value) + + +# Use typedef to emulate syntax for std::function +ctypedef void CallbackTransform(object, const shared_ptr[CBuffer]& src, + shared_ptr[CBuffer]* dest) + +ctypedef CResult[shared_ptr[CInputStream]] StreamWrapFunc( + shared_ptr[CInputStream]) + + +cdef extern from "arrow/util/cancel.h" namespace "arrow" nogil: + cdef cppclass CStopToken "arrow::StopToken": + CStatus Poll() + c_bool IsStopRequested() + + cdef cppclass CStopSource "arrow::StopSource": + CStopToken token() + + CResult[CStopSource*] SetSignalStopSource() + void ResetSignalStopSource() + + CStatus RegisterCancellingSignalHandler(vector[int] signals) + void UnregisterCancellingSignalHandler() + + +cdef extern from "arrow/io/api.h" namespace "arrow::io" nogil: + cdef enum FileMode" arrow::io::FileMode::type": + FileMode_READ" arrow::io::FileMode::READ" + FileMode_WRITE" arrow::io::FileMode::WRITE" + FileMode_READWRITE" 
arrow::io::FileMode::READWRITE" + + cdef enum ObjectType" arrow::io::ObjectType::type": + ObjectType_FILE" arrow::io::ObjectType::FILE" + ObjectType_DIRECTORY" arrow::io::ObjectType::DIRECTORY" + + cdef cppclass CIOContext" arrow::io::IOContext": + CIOContext() + CIOContext(CStopToken) + CIOContext(CMemoryPool*) + CIOContext(CMemoryPool*, CStopToken) + + CIOContext c_default_io_context "arrow::io::default_io_context"() + int GetIOThreadPoolCapacity() + CStatus SetIOThreadPoolCapacity(int threads) + + cdef cppclass FileStatistics: + int64_t size + ObjectType kind + + cdef cppclass FileInterface: + CStatus Close() + CResult[int64_t] Tell() + FileMode mode() + c_bool closed() + + cdef cppclass Readable: + # put overload under a different name to avoid cython bug with multiple + # layers of inheritance + CResult[shared_ptr[CBuffer]] ReadBuffer" Read"(int64_t nbytes) + CResult[int64_t] Read(int64_t nbytes, uint8_t* out) + + cdef cppclass Seekable: + CStatus Seek(int64_t position) + + cdef cppclass Writable: + CStatus WriteBuffer" Write"(shared_ptr[CBuffer] data) + CStatus Write(const uint8_t* data, int64_t nbytes) + CStatus Flush() + + cdef cppclass CCacheOptions "arrow::io::CacheOptions": + int64_t hole_size_limit + int64_t range_size_limit + c_bool lazy + int64_t prefetch_limit + c_bool Equals "operator==" (CCacheOptions other) + + @staticmethod + CCacheOptions MakeFromNetworkMetrics(int64_t time_to_first_byte_millis, + int64_t transfer_bandwidth_mib_per_sec, + double ideal_bandwidth_utilization_frac, + int64_t max_ideal_request_size_mib) + + @staticmethod + CCacheOptions LazyDefaults() + + cdef cppclass COutputStream" arrow::io::OutputStream"(FileInterface, + Writable): + pass + + cdef cppclass CInputStream" arrow::io::InputStream"(FileInterface, + Readable): + CResult[shared_ptr[const CKeyValueMetadata]] ReadMetadata() + + cdef cppclass CRandomAccessFile" arrow::io::RandomAccessFile"(CInputStream, + Seekable): + CResult[int64_t] GetSize() + + @staticmethod + CResult[shared_ptr[CInputStream]] GetStream( + shared_ptr[CRandomAccessFile] file, + int64_t file_offset, + int64_t nbytes) + + CResult[int64_t] ReadAt(int64_t position, int64_t nbytes, + uint8_t* buffer) + CResult[shared_ptr[CBuffer]] ReadAt(int64_t position, int64_t nbytes) + c_bool supports_zero_copy() + + cdef cppclass WritableFile(COutputStream, Seekable): + CStatus WriteAt(int64_t position, const uint8_t* data, + int64_t nbytes) + + cdef cppclass ReadWriteFileInterface(CRandomAccessFile, + WritableFile): + pass + + cdef cppclass CIOFileSystem" arrow::io::FileSystem": + CStatus Stat(const c_string& path, FileStatistics* stat) + + cdef cppclass FileOutputStream(COutputStream): + @staticmethod + CResult[shared_ptr[COutputStream]] Open(const c_string& path) + + @staticmethod + CResult[shared_ptr[COutputStream]] OpenWithAppend" Open"( + const c_string& path, c_bool append) + + int file_descriptor() + + cdef cppclass ReadableFile(CRandomAccessFile): + @staticmethod + CResult[shared_ptr[ReadableFile]] Open(const c_string& path) + + @staticmethod + CResult[shared_ptr[ReadableFile]] Open(const c_string& path, + CMemoryPool* memory_pool) + + int file_descriptor() + + cdef cppclass CMemoryMappedFile \ + " arrow::io::MemoryMappedFile"(ReadWriteFileInterface): + + @staticmethod + CResult[shared_ptr[CMemoryMappedFile]] Create(const c_string& path, + int64_t size) + + @staticmethod + CResult[shared_ptr[CMemoryMappedFile]] Open(const c_string& path, + FileMode mode) + + CStatus Resize(int64_t size) + + int file_descriptor() + + cdef cppclass 
CCompressedInputStream \ + " arrow::io::CompressedInputStream"(CInputStream): + @staticmethod + CResult[shared_ptr[CCompressedInputStream]] Make( + CCodec* codec, shared_ptr[CInputStream] raw) + + cdef cppclass CCompressedOutputStream \ + " arrow::io::CompressedOutputStream"(COutputStream): + @staticmethod + CResult[shared_ptr[CCompressedOutputStream]] Make( + CCodec* codec, shared_ptr[COutputStream] raw) + + cdef cppclass CBufferedInputStream \ + " arrow::io::BufferedInputStream"(CInputStream): + + @staticmethod + CResult[shared_ptr[CBufferedInputStream]] Create( + int64_t buffer_size, CMemoryPool* pool, + shared_ptr[CInputStream] raw) + + CResult[shared_ptr[CInputStream]] Detach() + + cdef cppclass CBufferedOutputStream \ + " arrow::io::BufferedOutputStream"(COutputStream): + + @staticmethod + CResult[shared_ptr[CBufferedOutputStream]] Create( + int64_t buffer_size, CMemoryPool* pool, + shared_ptr[COutputStream] raw) + + CResult[shared_ptr[COutputStream]] Detach() + + cdef cppclass CTransformInputStreamVTable \ + "arrow::py::TransformInputStreamVTable": + CTransformInputStreamVTable() + function[CallbackTransform] transform + + shared_ptr[CInputStream] MakeTransformInputStream \ + "arrow::py::MakeTransformInputStream"( + shared_ptr[CInputStream] wrapped, CTransformInputStreamVTable vtable, + object method_arg) + + shared_ptr[function[StreamWrapFunc]] MakeStreamTransformFunc \ + "arrow::py::MakeStreamTransformFunc"( + CTransformInputStreamVTable vtable, + object method_arg) + + # ---------------------------------------------------------------------- + # HDFS + + CStatus HaveLibHdfs() + CStatus HaveLibHdfs3() + + cdef enum HdfsDriver" arrow::io::HdfsDriver": + HdfsDriver_LIBHDFS" arrow::io::HdfsDriver::LIBHDFS" + HdfsDriver_LIBHDFS3" arrow::io::HdfsDriver::LIBHDFS3" + + cdef cppclass HdfsConnectionConfig: + c_string host + int port + c_string user + c_string kerb_ticket + unordered_map[c_string, c_string] extra_conf + HdfsDriver driver + + cdef cppclass HdfsPathInfo: + ObjectType kind + c_string name + c_string owner + c_string group + int32_t last_modified_time + int32_t last_access_time + int64_t size + int16_t replication + int64_t block_size + int16_t permissions + + cdef cppclass HdfsReadableFile(CRandomAccessFile): + pass + + cdef cppclass HdfsOutputStream(COutputStream): + pass + + cdef cppclass CIOHadoopFileSystem \ + "arrow::io::HadoopFileSystem"(CIOFileSystem): + @staticmethod + CStatus Connect(const HdfsConnectionConfig* config, + shared_ptr[CIOHadoopFileSystem]* client) + + CStatus MakeDirectory(const c_string& path) + + CStatus Delete(const c_string& path, c_bool recursive) + + CStatus Disconnect() + + c_bool Exists(const c_string& path) + + CStatus Chmod(const c_string& path, int mode) + CStatus Chown(const c_string& path, const char* owner, + const char* group) + + CStatus GetCapacity(int64_t* nbytes) + CStatus GetUsed(int64_t* nbytes) + + CStatus ListDirectory(const c_string& path, + vector[HdfsPathInfo]* listing) + + CStatus GetPathInfo(const c_string& path, HdfsPathInfo* info) + + CStatus Rename(const c_string& src, const c_string& dst) + + CStatus OpenReadable(const c_string& path, + shared_ptr[HdfsReadableFile]* handle) + + CStatus OpenWritable(const c_string& path, c_bool append, + int32_t buffer_size, int16_t replication, + int64_t default_block_size, + shared_ptr[HdfsOutputStream]* handle) + + cdef cppclass CBufferReader \ + " arrow::io::BufferReader"(CRandomAccessFile): + CBufferReader(const shared_ptr[CBuffer]& buffer) + CBufferReader(const uint8_t* data, int64_t 
nbytes) + + cdef cppclass CBufferOutputStream \ + " arrow::io::BufferOutputStream"(COutputStream): + CBufferOutputStream(const shared_ptr[CResizableBuffer]& buffer) + + cdef cppclass CMockOutputStream \ + " arrow::io::MockOutputStream"(COutputStream): + CMockOutputStream() + int64_t GetExtentBytesWritten() + + cdef cppclass CFixedSizeBufferWriter \ + " arrow::io::FixedSizeBufferWriter"(WritableFile): + CFixedSizeBufferWriter(const shared_ptr[CBuffer]& buffer) + + void set_memcopy_threads(int num_threads) + void set_memcopy_blocksize(int64_t blocksize) + void set_memcopy_threshold(int64_t threshold) + + +cdef extern from "arrow/ipc/api.h" namespace "arrow::ipc" nogil: + cdef enum MessageType" arrow::ipc::MessageType": + MessageType_SCHEMA" arrow::ipc::MessageType::SCHEMA" + MessageType_RECORD_BATCH" arrow::ipc::MessageType::RECORD_BATCH" + MessageType_DICTIONARY_BATCH \ + " arrow::ipc::MessageType::DICTIONARY_BATCH" + + # TODO: use "cpdef enum class" to automatically get a Python wrapper? + # See + # https://github.com/cython/cython/commit/2c7c22f51405299a4e247f78edf52957d30cf71d#diff-61c1365c0f761a8137754bb3a73bfbf7 + ctypedef enum CMetadataVersion" arrow::ipc::MetadataVersion": + CMetadataVersion_V1" arrow::ipc::MetadataVersion::V1" + CMetadataVersion_V2" arrow::ipc::MetadataVersion::V2" + CMetadataVersion_V3" arrow::ipc::MetadataVersion::V3" + CMetadataVersion_V4" arrow::ipc::MetadataVersion::V4" + CMetadataVersion_V5" arrow::ipc::MetadataVersion::V5" + + cdef cppclass CIpcWriteOptions" arrow::ipc::IpcWriteOptions": + c_bool allow_64bit + int max_recursion_depth + int32_t alignment + c_bool write_legacy_ipc_format + CMemoryPool* memory_pool + CMetadataVersion metadata_version + shared_ptr[CCodec] codec + c_bool use_threads + c_bool emit_dictionary_deltas + c_bool unify_dictionaries + + CIpcWriteOptions() + CIpcWriteOptions(CIpcWriteOptions) + + @staticmethod + CIpcWriteOptions Defaults() + + cdef cppclass CIpcReadOptions" arrow::ipc::IpcReadOptions": + int max_recursion_depth + CMemoryPool* memory_pool + vector[int] included_fields + c_bool use_threads + c_bool ensure_native_endian + + @staticmethod + CIpcReadOptions Defaults() + + cdef cppclass CIpcWriteStats" arrow::ipc::WriteStats": + int64_t num_messages + int64_t num_record_batches + int64_t num_dictionary_batches + int64_t num_dictionary_deltas + int64_t num_replaced_dictionaries + + cdef cppclass CIpcReadStats" arrow::ipc::ReadStats": + int64_t num_messages + int64_t num_record_batches + int64_t num_dictionary_batches + int64_t num_dictionary_deltas + int64_t num_replaced_dictionaries + + cdef cppclass CDictionaryMemo" arrow::ipc::DictionaryMemo": + pass + + cdef cppclass CIpcPayload" arrow::ipc::IpcPayload": + MessageType type + shared_ptr[CBuffer] metadata + vector[shared_ptr[CBuffer]] body_buffers + int64_t body_length + + cdef cppclass CMessage" arrow::ipc::Message": + CResult[unique_ptr[CMessage]] Open(shared_ptr[CBuffer] metadata, + shared_ptr[CBuffer] body) + + shared_ptr[CBuffer] body() + + c_bool Equals(const CMessage& other) + + shared_ptr[CBuffer] metadata() + CMetadataVersion metadata_version() + MessageType type() + + CStatus SerializeTo(COutputStream* stream, + const CIpcWriteOptions& options, + int64_t* output_length) + + c_string FormatMessageType(MessageType type) + + cdef cppclass CMessageReader" arrow::ipc::MessageReader": + @staticmethod + unique_ptr[CMessageReader] Open(const shared_ptr[CInputStream]& stream) + + CResult[unique_ptr[CMessage]] ReadNextMessage() + + cdef cppclass CRecordBatchWriter" 
arrow::ipc::RecordBatchWriter":
+        CStatus Close()
+        CStatus WriteRecordBatch(const CRecordBatch& batch)
+        CStatus WriteRecordBatch(
+            const CRecordBatch& batch,
+            const shared_ptr[const CKeyValueMetadata]& metadata)
+        CStatus WriteTable(const CTable& table, int64_t max_chunksize)
+
+        CIpcWriteStats stats()
+
+    cdef cppclass CRecordBatchStreamReader \
+            " arrow::ipc::RecordBatchStreamReader"(CRecordBatchReader):
+        @staticmethod
+        CResult[shared_ptr[CRecordBatchReader]] Open(
+            const shared_ptr[CInputStream], const CIpcReadOptions&)
+
+        @staticmethod
+        CResult[shared_ptr[CRecordBatchReader]] Open2" Open"(
+            unique_ptr[CMessageReader] message_reader,
+            const CIpcReadOptions& options)
+
+        CIpcReadStats stats()
+
+    cdef cppclass CRecordBatchFileReader \
+            " arrow::ipc::RecordBatchFileReader":
+        @staticmethod
+        CResult[shared_ptr[CRecordBatchFileReader]] Open(
+            CRandomAccessFile* file,
+            const CIpcReadOptions& options)
+
+        @staticmethod
+        CResult[shared_ptr[CRecordBatchFileReader]] Open2" Open"(
+            CRandomAccessFile* file, int64_t footer_offset,
+            const CIpcReadOptions& options)
+
+        shared_ptr[CSchema] schema()
+
+        int num_record_batches()
+
+        CResult[shared_ptr[CRecordBatch]] ReadRecordBatch(int i)
+
+        CResult[CRecordBatchWithMetadata] ReadRecordBatchWithCustomMetadata(int i)
+
+        CIpcReadStats stats()
+
+    CResult[shared_ptr[CRecordBatchWriter]] MakeStreamWriter(
+        shared_ptr[COutputStream] sink, const shared_ptr[CSchema]& schema,
+        CIpcWriteOptions& options)
+
+    CResult[shared_ptr[CRecordBatchWriter]] MakeFileWriter(
+        shared_ptr[COutputStream] sink, const shared_ptr[CSchema]& schema,
+        CIpcWriteOptions& options)
+
+    CResult[unique_ptr[CMessage]] ReadMessage(CInputStream* stream,
+                                              CMemoryPool* pool)
+
+    CStatus GetRecordBatchSize(const CRecordBatch& batch, int64_t* size)
+    CStatus GetTensorSize(const CTensor& tensor, int64_t* size)
+
+    CStatus WriteTensor(const CTensor& tensor, COutputStream* dst,
+                        int32_t* metadata_length,
+                        int64_t* body_length)
+
+    CResult[shared_ptr[CTensor]] ReadTensor(CInputStream* stream)
+
+    CResult[shared_ptr[CRecordBatch]] ReadRecordBatch(
+        const CMessage& message, const shared_ptr[CSchema]& schema,
+        CDictionaryMemo* dictionary_memo,
+        const CIpcReadOptions& options)
+
+    CResult[shared_ptr[CBuffer]] SerializeSchema(
+        const CSchema& schema, CMemoryPool* pool)
+
+    CResult[shared_ptr[CBuffer]] SerializeRecordBatch(
+        const CRecordBatch& batch, const CIpcWriteOptions& options)
+
+    CResult[shared_ptr[CSchema]] ReadSchema(const CMessage& message,
+                                            CDictionaryMemo* dictionary_memo)
+
+    CResult[shared_ptr[CSchema]] ReadSchema(CInputStream* stream,
+                                            CDictionaryMemo* dictionary_memo)
+
+    CResult[shared_ptr[CRecordBatch]] ReadRecordBatch(
+        const shared_ptr[CSchema]& schema,
+        CDictionaryMemo* dictionary_memo,
+        const CIpcReadOptions& options,
+        CInputStream* stream)
+
+    CStatus AlignStream(CInputStream* stream, int64_t alignment)
+    CStatus AlignStream(COutputStream* stream, int64_t alignment)
+
+    cdef CStatus GetRecordBatchPayload \
+        " arrow::ipc::GetRecordBatchPayload"(
+            const CRecordBatch& batch,
+            const CIpcWriteOptions& options,
+            CIpcPayload* out)
+
+
+cdef extern from "arrow/util/value_parsing.h" namespace "arrow" nogil:
+    cdef cppclass CTimestampParser" arrow::TimestampParser":
+        const char* kind() const
+        const char* format() const
+
+        @staticmethod
+        shared_ptr[CTimestampParser] MakeStrptime(c_string format)
+
+        @staticmethod
+        shared_ptr[CTimestampParser] MakeISO8601()
+
+
+cdef extern from "arrow/csv/api.h" namespace "arrow::csv" nogil:
+
+    cdef cppclass 
CCSVInvalidRow" arrow::csv::InvalidRow": + int32_t expected_columns + int32_t actual_columns + int64_t number + c_string text + + ctypedef enum CInvalidRowResult" arrow::csv::InvalidRowResult": + CInvalidRowResult_Error" arrow::csv::InvalidRowResult::Error" + CInvalidRowResult_Skip" arrow::csv::InvalidRowResult::Skip" + + ctypedef CInvalidRowResult CInvalidRowHandler(const CCSVInvalidRow&) + + +cdef extern from "arrow/csv/api.h" namespace "arrow::csv" nogil: + + ctypedef enum CQuotingStyle "arrow::csv::QuotingStyle": + CQuotingStyle_Needed "arrow::csv::QuotingStyle::Needed" + CQuotingStyle_AllValid "arrow::csv::QuotingStyle::AllValid" + CQuotingStyle_None "arrow::csv::QuotingStyle::None" + + cdef cppclass CCSVParseOptions" arrow::csv::ParseOptions": + unsigned char delimiter + c_bool quoting + unsigned char quote_char + c_bool double_quote + c_bool escaping + unsigned char escape_char + c_bool newlines_in_values + c_bool ignore_empty_lines + function[CInvalidRowHandler] invalid_row_handler + + CCSVParseOptions() + CCSVParseOptions(CCSVParseOptions) + + @staticmethod + CCSVParseOptions Defaults() + + CStatus Validate() + + cdef cppclass CCSVConvertOptions" arrow::csv::ConvertOptions": + c_bool check_utf8 + unordered_map[c_string, shared_ptr[CDataType]] column_types + vector[c_string] null_values + vector[c_string] true_values + vector[c_string] false_values + c_bool strings_can_be_null + c_bool quoted_strings_can_be_null + vector[shared_ptr[CTimestampParser]] timestamp_parsers + + c_bool auto_dict_encode + int32_t auto_dict_max_cardinality + unsigned char decimal_point + + vector[c_string] include_columns + c_bool include_missing_columns + + CCSVConvertOptions() + CCSVConvertOptions(CCSVConvertOptions) + + @staticmethod + CCSVConvertOptions Defaults() + + CStatus Validate() + + cdef cppclass CCSVReadOptions" arrow::csv::ReadOptions": + c_bool use_threads + int32_t block_size + int32_t skip_rows + int32_t skip_rows_after_names + vector[c_string] column_names + c_bool autogenerate_column_names + + CCSVReadOptions() + CCSVReadOptions(CCSVReadOptions) + + @staticmethod + CCSVReadOptions Defaults() + + CStatus Validate() + + cdef cppclass CCSVWriteOptions" arrow::csv::WriteOptions": + c_bool include_header + int32_t batch_size + unsigned char delimiter + CQuotingStyle quoting_style + CIOContext io_context + + CCSVWriteOptions() + CCSVWriteOptions(CCSVWriteOptions) + + @staticmethod + CCSVWriteOptions Defaults() + + CStatus Validate() + + cdef cppclass CCSVReader" arrow::csv::TableReader": + @staticmethod + CResult[shared_ptr[CCSVReader]] Make( + CIOContext, shared_ptr[CInputStream], + CCSVReadOptions, CCSVParseOptions, CCSVConvertOptions) + + CResult[shared_ptr[CTable]] Read() + + cdef cppclass CCSVStreamingReader" arrow::csv::StreamingReader"( + CRecordBatchReader): + @staticmethod + CResult[shared_ptr[CCSVStreamingReader]] Make( + CIOContext, shared_ptr[CInputStream], + CCSVReadOptions, CCSVParseOptions, CCSVConvertOptions) + + cdef CStatus WriteCSV(CTable&, CCSVWriteOptions& options, COutputStream*) + cdef CStatus WriteCSV( + CRecordBatch&, CCSVWriteOptions& options, COutputStream*) + cdef CResult[shared_ptr[CRecordBatchWriter]] MakeCSVWriter( + shared_ptr[COutputStream], shared_ptr[CSchema], + CCSVWriteOptions& options) + + +cdef extern from "arrow/json/options.h" nogil: + + ctypedef enum CUnexpectedFieldBehavior \ + "arrow::json::UnexpectedFieldBehavior": + CUnexpectedFieldBehavior_Ignore \ + "arrow::json::UnexpectedFieldBehavior::Ignore" + CUnexpectedFieldBehavior_Error \ + 
"arrow::json::UnexpectedFieldBehavior::Error" + CUnexpectedFieldBehavior_InferType \ + "arrow::json::UnexpectedFieldBehavior::InferType" + + cdef cppclass CJSONReadOptions" arrow::json::ReadOptions": + c_bool use_threads + int32_t block_size + + @staticmethod + CJSONReadOptions Defaults() + + cdef cppclass CJSONParseOptions" arrow::json::ParseOptions": + shared_ptr[CSchema] explicit_schema + c_bool newlines_in_values + CUnexpectedFieldBehavior unexpected_field_behavior + + @staticmethod + CJSONParseOptions Defaults() + + +cdef extern from "arrow/json/reader.h" namespace "arrow::json" nogil: + + cdef cppclass CJSONReader" arrow::json::TableReader": + @staticmethod + CResult[shared_ptr[CJSONReader]] Make( + CMemoryPool*, shared_ptr[CInputStream], + CJSONReadOptions, CJSONParseOptions) + + CResult[shared_ptr[CTable]] Read() + + +cdef extern from "arrow/util/thread_pool.h" namespace "arrow::internal" nogil: + + cdef cppclass CExecutor "arrow::internal::Executor": + pass + + cdef cppclass CThreadPool "arrow::internal::ThreadPool"(CExecutor): + @staticmethod + CResult[shared_ptr[CThreadPool]] Make(int threads) + + CThreadPool* GetCpuThreadPool() + + +cdef extern from "arrow/compute/api.h" namespace "arrow::compute" nogil: + + cdef cppclass CExecBatch "arrow::compute::ExecBatch": + vector[CDatum] values + int64_t length + + cdef cppclass CExecContext" arrow::compute::ExecContext": + CExecContext() + CExecContext(CMemoryPool* pool) + CExecContext(CMemoryPool* pool, CExecutor* exc) + + CMemoryPool* memory_pool() const + CExecutor* executor() + + cdef cppclass CKernelSignature" arrow::compute::KernelSignature": + c_string ToString() const + + cdef cppclass CKernel" arrow::compute::Kernel": + shared_ptr[CKernelSignature] signature + + cdef cppclass CArrayKernel" arrow::compute::ArrayKernel"(CKernel): + pass + + cdef cppclass CScalarKernel" arrow::compute::ScalarKernel"(CArrayKernel): + pass + + cdef cppclass CVectorKernel" arrow::compute::VectorKernel"(CArrayKernel): + pass + + cdef cppclass CScalarAggregateKernel \ + " arrow::compute::ScalarAggregateKernel"(CKernel): + pass + + cdef cppclass CHashAggregateKernel \ + " arrow::compute::HashAggregateKernel"(CKernel): + pass + + cdef cppclass CArity" arrow::compute::Arity": + int num_args + c_bool is_varargs + + CArity() + + CArity(int num_args, c_bool is_varargs) + + cdef enum FunctionKind" arrow::compute::Function::Kind": + FunctionKind_SCALAR" arrow::compute::Function::SCALAR" + FunctionKind_VECTOR" arrow::compute::Function::VECTOR" + FunctionKind_SCALAR_AGGREGATE \ + " arrow::compute::Function::SCALAR_AGGREGATE" + FunctionKind_HASH_AGGREGATE \ + " arrow::compute::Function::HASH_AGGREGATE" + FunctionKind_META \ + " arrow::compute::Function::META" + + cdef cppclass CFunctionDoc" arrow::compute::FunctionDoc": + c_string summary + c_string description + vector[c_string] arg_names + c_string options_class + c_bool options_required + + cdef cppclass CFunctionOptionsType" arrow::compute::FunctionOptionsType": + const char* type_name() const + + cdef cppclass CFunctionOptions" arrow::compute::FunctionOptions": + const CFunctionOptionsType* options_type() const + const char* type_name() const + c_bool Equals(const CFunctionOptions& other) const + c_string ToString() const + unique_ptr[CFunctionOptions] Copy() const + CResult[shared_ptr[CBuffer]] Serialize() const + + @staticmethod + CResult[unique_ptr[CFunctionOptions]] Deserialize( + const c_string& type_name, const CBuffer& buffer) + + cdef cppclass CFunction" arrow::compute::Function": + const c_string& 
name() const + FunctionKind kind() const + const CArity& arity() const + const CFunctionDoc& doc() const + int num_kernels() const + CResult[CDatum] Execute(const vector[CDatum]& args, + const CFunctionOptions* options, + CExecContext* ctx) const + CResult[CDatum] Execute(const CExecBatch& args, + const CFunctionOptions* options, + CExecContext* ctx) const + + cdef cppclass CScalarFunction" arrow::compute::ScalarFunction"(CFunction): + vector[const CScalarKernel*] kernels() const + + cdef cppclass CVectorFunction" arrow::compute::VectorFunction"(CFunction): + vector[const CVectorKernel*] kernels() const + + cdef cppclass CScalarAggregateFunction \ + " arrow::compute::ScalarAggregateFunction"(CFunction): + vector[const CScalarAggregateKernel*] kernels() const + + cdef cppclass CHashAggregateFunction \ + " arrow::compute::HashAggregateFunction"(CFunction): + vector[const CHashAggregateKernel*] kernels() const + + cdef cppclass CMetaFunction" arrow::compute::MetaFunction"(CFunction): + pass + + cdef cppclass CFunctionRegistry" arrow::compute::FunctionRegistry": + CResult[shared_ptr[CFunction]] GetFunction( + const c_string& name) const + vector[c_string] GetFunctionNames() const + int num_functions() const + + CFunctionRegistry* GetFunctionRegistry() + + cdef cppclass CElementWiseAggregateOptions \ + "arrow::compute::ElementWiseAggregateOptions"(CFunctionOptions): + CElementWiseAggregateOptions(c_bool skip_nulls) + c_bool skip_nulls + + ctypedef enum CRoundMode \ + "arrow::compute::RoundMode": + CRoundMode_DOWN \ + "arrow::compute::RoundMode::DOWN" + CRoundMode_UP \ + "arrow::compute::RoundMode::UP" + CRoundMode_TOWARDS_ZERO \ + "arrow::compute::RoundMode::TOWARDS_ZERO" + CRoundMode_TOWARDS_INFINITY \ + "arrow::compute::RoundMode::TOWARDS_INFINITY" + CRoundMode_HALF_DOWN \ + "arrow::compute::RoundMode::HALF_DOWN" + CRoundMode_HALF_UP \ + "arrow::compute::RoundMode::HALF_UP" + CRoundMode_HALF_TOWARDS_ZERO \ + "arrow::compute::RoundMode::HALF_TOWARDS_ZERO" + CRoundMode_HALF_TOWARDS_INFINITY \ + "arrow::compute::RoundMode::HALF_TOWARDS_INFINITY" + CRoundMode_HALF_TO_EVEN \ + "arrow::compute::RoundMode::HALF_TO_EVEN" + CRoundMode_HALF_TO_ODD \ + "arrow::compute::RoundMode::HALF_TO_ODD" + + cdef cppclass CRoundOptions \ + "arrow::compute::RoundOptions"(CFunctionOptions): + CRoundOptions(int64_t ndigits, CRoundMode round_mode) + int64_t ndigits + CRoundMode round_mode + + cdef cppclass CRoundBinaryOptions \ + "arrow::compute::RoundBinaryOptions"(CFunctionOptions): + CRoundBinaryOptions(CRoundMode round_mode) + CRoundMode round_mode + + ctypedef enum CCalendarUnit \ + "arrow::compute::CalendarUnit": + CCalendarUnit_NANOSECOND \ + "arrow::compute::CalendarUnit::NANOSECOND" + CCalendarUnit_MICROSECOND \ + "arrow::compute::CalendarUnit::MICROSECOND" + CCalendarUnit_MILLISECOND \ + "arrow::compute::CalendarUnit::MILLISECOND" + CCalendarUnit_SECOND \ + "arrow::compute::CalendarUnit::SECOND" + CCalendarUnit_MINUTE \ + "arrow::compute::CalendarUnit::MINUTE" + CCalendarUnit_HOUR \ + "arrow::compute::CalendarUnit::HOUR" + CCalendarUnit_DAY \ + "arrow::compute::CalendarUnit::DAY" + CCalendarUnit_WEEK \ + "arrow::compute::CalendarUnit::WEEK" + CCalendarUnit_MONTH \ + "arrow::compute::CalendarUnit::MONTH" + CCalendarUnit_QUARTER \ + "arrow::compute::CalendarUnit::QUARTER" + CCalendarUnit_YEAR \ + "arrow::compute::CalendarUnit::YEAR" + + cdef cppclass CRoundTemporalOptions \ + "arrow::compute::RoundTemporalOptions"(CFunctionOptions): + CRoundTemporalOptions(int multiple, CCalendarUnit unit, + c_bool 
week_starts_monday, + c_bool ceil_is_strictly_greater, + c_bool calendar_based_origin) + int multiple + CCalendarUnit unit + c_bool week_starts_monday + c_bool ceil_is_strictly_greater + c_bool calendar_based_origin + + cdef cppclass CRoundToMultipleOptions \ + "arrow::compute::RoundToMultipleOptions"(CFunctionOptions): + CRoundToMultipleOptions(shared_ptr[CScalar] multiple, CRoundMode round_mode) + shared_ptr[CScalar] multiple + CRoundMode round_mode + + cdef enum CJoinNullHandlingBehavior \ + "arrow::compute::JoinOptions::NullHandlingBehavior": + CJoinNullHandlingBehavior_EMIT_NULL \ + "arrow::compute::JoinOptions::EMIT_NULL" + CJoinNullHandlingBehavior_SKIP \ + "arrow::compute::JoinOptions::SKIP" + CJoinNullHandlingBehavior_REPLACE \ + "arrow::compute::JoinOptions::REPLACE" + + cdef cppclass CJoinOptions \ + "arrow::compute::JoinOptions"(CFunctionOptions): + CJoinOptions(CJoinNullHandlingBehavior null_handling, + c_string null_replacement) + CJoinNullHandlingBehavior null_handling + c_string null_replacement + + cdef cppclass CMatchSubstringOptions \ + "arrow::compute::MatchSubstringOptions"(CFunctionOptions): + CMatchSubstringOptions(c_string pattern, c_bool ignore_case) + c_string pattern + c_bool ignore_case + + cdef cppclass CTrimOptions \ + "arrow::compute::TrimOptions"(CFunctionOptions): + CTrimOptions(c_string characters) + c_string characters + + cdef cppclass CPadOptions \ + "arrow::compute::PadOptions"(CFunctionOptions): + CPadOptions(int64_t width, c_string padding, c_bool lean_left_on_odd_padding) + int64_t width + c_string padding + c_bool lean_left_on_odd_padding + + cdef cppclass CSliceOptions \ + "arrow::compute::SliceOptions"(CFunctionOptions): + CSliceOptions(int64_t start, int64_t stop, int64_t step) + int64_t start + int64_t stop + int64_t step + + cdef cppclass CListSliceOptions \ + "arrow::compute::ListSliceOptions"(CFunctionOptions): + CListSliceOptions(int64_t start, optional[int64_t] stop, + int64_t step, + optional[c_bool] return_fixed_size_list) + int64_t start + optional[int64_t] stop + int64_t step + optional[c_bool] return_fixed_size_list + + cdef cppclass CSplitOptions \ + "arrow::compute::SplitOptions"(CFunctionOptions): + CSplitOptions(int64_t max_splits, c_bool reverse) + int64_t max_splits + c_bool reverse + + cdef cppclass CSplitPatternOptions \ + "arrow::compute::SplitPatternOptions"(CFunctionOptions): + CSplitPatternOptions(c_string pattern, int64_t max_splits, + c_bool reverse) + int64_t max_splits + c_bool reverse + c_string pattern + + cdef cppclass CReplaceSliceOptions \ + "arrow::compute::ReplaceSliceOptions"(CFunctionOptions): + CReplaceSliceOptions(int64_t start, int64_t stop, c_string replacement) + int64_t start + int64_t stop + c_string replacement + + cdef cppclass CReplaceSubstringOptions \ + "arrow::compute::ReplaceSubstringOptions"(CFunctionOptions): + CReplaceSubstringOptions(c_string pattern, c_string replacement, + int64_t max_replacements) + c_string pattern + c_string replacement + int64_t max_replacements + + cdef cppclass CExtractRegexOptions \ + "arrow::compute::ExtractRegexOptions"(CFunctionOptions): + CExtractRegexOptions(c_string pattern) + c_string pattern + + cdef cppclass CCastOptions" arrow::compute::CastOptions"(CFunctionOptions): + CCastOptions() + CCastOptions(c_bool safe) + CCastOptions(CCastOptions options) + + @staticmethod + CCastOptions Safe() + + @staticmethod + CCastOptions Unsafe() + shared_ptr[CDataType] to_type + c_bool allow_int_overflow + c_bool allow_time_truncate + c_bool allow_time_overflow + c_bool 
allow_decimal_truncate + c_bool allow_float_truncate + c_bool allow_invalid_utf8 + + cdef enum CFilterNullSelectionBehavior \ + "arrow::compute::FilterOptions::NullSelectionBehavior": + CFilterNullSelectionBehavior_DROP \ + "arrow::compute::FilterOptions::DROP" + CFilterNullSelectionBehavior_EMIT_NULL \ + "arrow::compute::FilterOptions::EMIT_NULL" + + cdef cppclass CFilterOptions \ + " arrow::compute::FilterOptions"(CFunctionOptions): + CFilterOptions() + CFilterOptions(CFilterNullSelectionBehavior null_selection_behavior) + CFilterNullSelectionBehavior null_selection_behavior + + cdef enum CDictionaryEncodeNullEncodingBehavior \ + "arrow::compute::DictionaryEncodeOptions::NullEncodingBehavior": + CDictionaryEncodeNullEncodingBehavior_ENCODE \ + "arrow::compute::DictionaryEncodeOptions::ENCODE" + CDictionaryEncodeNullEncodingBehavior_MASK \ + "arrow::compute::DictionaryEncodeOptions::MASK" + + cdef cppclass CDictionaryEncodeOptions \ + "arrow::compute::DictionaryEncodeOptions"(CFunctionOptions): + CDictionaryEncodeOptions( + CDictionaryEncodeNullEncodingBehavior null_encoding) + CDictionaryEncodeNullEncodingBehavior null_encoding + + cdef cppclass CRunEndEncodeOptions \ + "arrow::compute::RunEndEncodeOptions"(CFunctionOptions): + CRunEndEncodeOptions() + CRunEndEncodeOptions(shared_ptr[CDataType] run_end_type) + shared_ptr[CDataType] run_end_type + + cdef cppclass CTakeOptions \ + " arrow::compute::TakeOptions"(CFunctionOptions): + CTakeOptions(c_bool boundscheck) + c_bool boundscheck + + cdef cppclass CStrptimeOptions \ + "arrow::compute::StrptimeOptions"(CFunctionOptions): + CStrptimeOptions(c_string format, TimeUnit unit, c_bool raise_error) + c_string format + TimeUnit unit + c_bool raise_error + + cdef cppclass CStrftimeOptions \ + "arrow::compute::StrftimeOptions"(CFunctionOptions): + CStrftimeOptions(c_string format, c_string locale) + c_string format + c_string locale + + cdef cppclass CDayOfWeekOptions \ + "arrow::compute::DayOfWeekOptions"(CFunctionOptions): + CDayOfWeekOptions(c_bool count_from_zero, uint32_t week_start) + c_bool count_from_zero + uint32_t week_start + + cdef enum CAssumeTimezoneAmbiguous \ + "arrow::compute::AssumeTimezoneOptions::Ambiguous": + CAssumeTimezoneAmbiguous_AMBIGUOUS_RAISE \ + "arrow::compute::AssumeTimezoneOptions::AMBIGUOUS_RAISE" + CAssumeTimezoneAmbiguous_AMBIGUOUS_EARLIEST \ + "arrow::compute::AssumeTimezoneOptions::AMBIGUOUS_EARLIEST" + CAssumeTimezoneAmbiguous_AMBIGUOUS_LATEST \ + "arrow::compute::AssumeTimezoneOptions::AMBIGUOUS_LATEST" + + cdef enum CAssumeTimezoneNonexistent \ + "arrow::compute::AssumeTimezoneOptions::Nonexistent": + CAssumeTimezoneNonexistent_NONEXISTENT_RAISE \ + "arrow::compute::AssumeTimezoneOptions::NONEXISTENT_RAISE" + CAssumeTimezoneNonexistent_NONEXISTENT_EARLIEST \ + "arrow::compute::AssumeTimezoneOptions::NONEXISTENT_EARLIEST" + CAssumeTimezoneNonexistent_NONEXISTENT_LATEST \ + "arrow::compute::AssumeTimezoneOptions::NONEXISTENT_LATEST" + + cdef cppclass CAssumeTimezoneOptions \ + "arrow::compute::AssumeTimezoneOptions"(CFunctionOptions): + CAssumeTimezoneOptions(c_string timezone, + CAssumeTimezoneAmbiguous ambiguous, + CAssumeTimezoneNonexistent nonexistent) + c_string timezone + CAssumeTimezoneAmbiguous ambiguous + CAssumeTimezoneNonexistent nonexistent + + cdef cppclass CWeekOptions \ + "arrow::compute::WeekOptions"(CFunctionOptions): + CWeekOptions(c_bool week_starts_monday, c_bool count_from_zero, + c_bool first_week_is_fully_in_year) + c_bool week_starts_monday + c_bool count_from_zero + c_bool 
first_week_is_fully_in_year + + cdef cppclass CNullOptions \ + "arrow::compute::NullOptions"(CFunctionOptions): + CNullOptions(c_bool nan_is_null) + c_bool nan_is_null + + cdef cppclass CVarianceOptions \ + "arrow::compute::VarianceOptions"(CFunctionOptions): + CVarianceOptions(int ddof, c_bool skip_nulls, uint32_t min_count) + int ddof + c_bool skip_nulls + uint32_t min_count + + cdef cppclass CScalarAggregateOptions \ + "arrow::compute::ScalarAggregateOptions"(CFunctionOptions): + CScalarAggregateOptions(c_bool skip_nulls, uint32_t min_count) + c_bool skip_nulls + uint32_t min_count + + cdef enum CCountMode "arrow::compute::CountOptions::CountMode": + CCountMode_ONLY_VALID "arrow::compute::CountOptions::ONLY_VALID" + CCountMode_ONLY_NULL "arrow::compute::CountOptions::ONLY_NULL" + CCountMode_ALL "arrow::compute::CountOptions::ALL" + + cdef cppclass CCountOptions \ + "arrow::compute::CountOptions"(CFunctionOptions): + CCountOptions(CCountMode mode) + CCountMode mode + + cdef cppclass CModeOptions \ + "arrow::compute::ModeOptions"(CFunctionOptions): + CModeOptions(int64_t n, c_bool skip_nulls, uint32_t min_count) + int64_t n + c_bool skip_nulls + uint32_t min_count + + cdef cppclass CIndexOptions \ + "arrow::compute::IndexOptions"(CFunctionOptions): + CIndexOptions(shared_ptr[CScalar] value) + shared_ptr[CScalar] value + + cdef cppclass CAggregate "arrow::compute::Aggregate": + c_string function + shared_ptr[CFunctionOptions] options + vector[CFieldRef] target + c_string name + + cdef enum CMapLookupOccurrence \ + "arrow::compute::MapLookupOptions::Occurrence": + CMapLookupOccurrence_ALL "arrow::compute::MapLookupOptions::ALL" + CMapLookupOccurrence_FIRST "arrow::compute::MapLookupOptions::FIRST" + CMapLookupOccurrence_LAST "arrow::compute::MapLookupOptions::LAST" + + cdef cppclass CMapLookupOptions \ + "arrow::compute::MapLookupOptions"(CFunctionOptions): + CMapLookupOptions(shared_ptr[CScalar] query_key, + CMapLookupOccurrence occurrence) + CMapLookupOccurrence occurrence + shared_ptr[CScalar] query_key + + cdef cppclass CMakeStructOptions \ + "arrow::compute::MakeStructOptions"(CFunctionOptions): + CMakeStructOptions(vector[c_string] n, + vector[c_bool] r, + vector[shared_ptr[const CKeyValueMetadata]] m) + CMakeStructOptions(vector[c_string] n) + vector[c_string] field_names + vector[c_bool] field_nullability + vector[shared_ptr[const CKeyValueMetadata]] field_metadata + + cdef cppclass CStructFieldOptions \ + "arrow::compute::StructFieldOptions"(CFunctionOptions): + CStructFieldOptions(vector[int] indices) + CStructFieldOptions(CFieldRef field_ref) + vector[int] indices + CFieldRef field_ref + + ctypedef enum CSortOrder" arrow::compute::SortOrder": + CSortOrder_Ascending \ + "arrow::compute::SortOrder::Ascending" + CSortOrder_Descending \ + "arrow::compute::SortOrder::Descending" + + ctypedef enum CNullPlacement" arrow::compute::NullPlacement": + CNullPlacement_AtStart \ + "arrow::compute::NullPlacement::AtStart" + CNullPlacement_AtEnd \ + "arrow::compute::NullPlacement::AtEnd" + + cdef cppclass CPartitionNthOptions \ + "arrow::compute::PartitionNthOptions"(CFunctionOptions): + CPartitionNthOptions(int64_t pivot, CNullPlacement) + int64_t pivot + CNullPlacement null_placement + + cdef cppclass CCumulativeOptions \ + "arrow::compute::CumulativeOptions"(CFunctionOptions): + CCumulativeOptions(c_bool skip_nulls) + CCumulativeOptions(shared_ptr[CScalar] start, c_bool skip_nulls) + optional[shared_ptr[CScalar]] start + c_bool skip_nulls + + cdef cppclass CPairwiseOptions \ + 
"arrow::compute::PairwiseOptions"(CFunctionOptions): + CPairwiseOptions(int64_t period) + int64_t period + + cdef cppclass CListFlattenOptions\ + "arrow::compute::ListFlattenOptions"(CFunctionOptions): + CListFlattenOptions(c_bool recursive) + c_bool recursive + + cdef cppclass CArraySortOptions \ + "arrow::compute::ArraySortOptions"(CFunctionOptions): + CArraySortOptions(CSortOrder, CNullPlacement) + CSortOrder order + CNullPlacement null_placement + + cdef cppclass CSortKey" arrow::compute::SortKey": + CSortKey(CFieldRef target, CSortOrder order) + CFieldRef target + CSortOrder order + + cdef cppclass COrdering" arrow::compute::Ordering": + COrdering(vector[CSortKey] sort_keys, CNullPlacement null_placement) + + cdef cppclass CSortOptions \ + "arrow::compute::SortOptions"(CFunctionOptions): + CSortOptions(vector[CSortKey] sort_keys, CNullPlacement) + vector[CSortKey] sort_keys + CNullPlacement null_placement + + cdef cppclass CSelectKOptions \ + "arrow::compute::SelectKOptions"(CFunctionOptions): + CSelectKOptions(int64_t k, vector[CSortKey] sort_keys) + int64_t k + vector[CSortKey] sort_keys + + cdef enum CQuantileInterp \ + "arrow::compute::QuantileOptions::Interpolation": + CQuantileInterp_LINEAR "arrow::compute::QuantileOptions::LINEAR" + CQuantileInterp_LOWER "arrow::compute::QuantileOptions::LOWER" + CQuantileInterp_HIGHER "arrow::compute::QuantileOptions::HIGHER" + CQuantileInterp_NEAREST "arrow::compute::QuantileOptions::NEAREST" + CQuantileInterp_MIDPOINT "arrow::compute::QuantileOptions::MIDPOINT" + + cdef cppclass CQuantileOptions \ + "arrow::compute::QuantileOptions"(CFunctionOptions): + CQuantileOptions(vector[double] q, CQuantileInterp interpolation, + c_bool skip_nulls, uint32_t min_count) + vector[double] q + CQuantileInterp interpolation + c_bool skip_nulls + uint32_t min_count + + cdef cppclass CTDigestOptions \ + "arrow::compute::TDigestOptions"(CFunctionOptions): + CTDigestOptions(vector[double] q, + uint32_t delta, uint32_t buffer_size, + c_bool skip_nulls, uint32_t min_count) + vector[double] q + uint32_t delta + uint32_t buffer_size + c_bool skip_nulls + uint32_t min_count + + cdef enum CUtf8NormalizeForm \ + "arrow::compute::Utf8NormalizeOptions::Form": + CUtf8NormalizeForm_NFC "arrow::compute::Utf8NormalizeOptions::NFC" + CUtf8NormalizeForm_NFKC "arrow::compute::Utf8NormalizeOptions::NFKC" + CUtf8NormalizeForm_NFD "arrow::compute::Utf8NormalizeOptions::NFD" + CUtf8NormalizeForm_NFKD "arrow::compute::Utf8NormalizeOptions::NFKD" + + cdef cppclass CUtf8NormalizeOptions \ + "arrow::compute::Utf8NormalizeOptions"(CFunctionOptions): + CUtf8NormalizeOptions(CUtf8NormalizeForm form) + CUtf8NormalizeForm form + + cdef cppclass CSetLookupOptions \ + "arrow::compute::SetLookupOptions"(CFunctionOptions): + CSetLookupOptions(CDatum value_set, c_bool skip_nulls) + CDatum value_set + c_bool skip_nulls + + cdef cppclass CRandomOptions \ + "arrow::compute::RandomOptions"(CFunctionOptions): + CRandomOptions(CRandomOptions) + + @staticmethod + CRandomOptions FromSystemRandom() + + @staticmethod + CRandomOptions FromSeed(uint64_t seed) + + cdef enum CRankOptionsTiebreaker \ + "arrow::compute::RankOptions::Tiebreaker": + CRankOptionsTiebreaker_Min "arrow::compute::RankOptions::Min" + CRankOptionsTiebreaker_Max "arrow::compute::RankOptions::Max" + CRankOptionsTiebreaker_First "arrow::compute::RankOptions::First" + CRankOptionsTiebreaker_Dense "arrow::compute::RankOptions::Dense" + + cdef cppclass CRankOptions \ + "arrow::compute::RankOptions"(CFunctionOptions): + 
CRankOptions(vector[CSortKey] sort_keys, CNullPlacement,
+                     CRankOptionsTiebreaker tiebreaker)
+        vector[CSortKey] sort_keys
+        CNullPlacement null_placement
+        CRankOptionsTiebreaker tiebreaker
+
+    cdef enum DatumType" arrow::Datum::type":
+        DatumType_NONE" arrow::Datum::NONE"
+        DatumType_SCALAR" arrow::Datum::SCALAR"
+        DatumType_ARRAY" arrow::Datum::ARRAY"
+        DatumType_CHUNKED_ARRAY" arrow::Datum::CHUNKED_ARRAY"
+        DatumType_RECORD_BATCH" arrow::Datum::RECORD_BATCH"
+        DatumType_TABLE" arrow::Datum::TABLE"
+        DatumType_COLLECTION" arrow::Datum::COLLECTION"
+
+    cdef cppclass CDatum" arrow::Datum":
+        CDatum()
+        CDatum(const shared_ptr[CArray]& value)
+        CDatum(const shared_ptr[CChunkedArray]& value)
+        CDatum(const shared_ptr[CScalar]& value)
+        CDatum(const shared_ptr[CRecordBatch]& value)
+        CDatum(const shared_ptr[CTable]& value)
+
+        DatumType kind() const
+        c_string ToString() const
+
+        const shared_ptr[CArrayData]& array() const
+        const shared_ptr[CChunkedArray]& chunked_array() const
+        const shared_ptr[CRecordBatch]& record_batch() const
+        const shared_ptr[CTable]& table() const
+        const shared_ptr[CScalar]& scalar() const
+
+    cdef c_string ToString(DatumType kind)
+
+
+cdef extern from * namespace "arrow::compute":
+    # inlined from compute/function_internal.h to avoid exposing
+    # implementation details
+    """
+    #include "arrow/compute/function.h"
+    namespace arrow {
+    namespace compute {
+    namespace internal {
+    Result<std::unique_ptr<FunctionOptions>> DeserializeFunctionOptions(
+        const Buffer& buffer);
+    }  // namespace internal
+    }  // namespace compute
+    }  // namespace arrow
+    """
+    CResult[unique_ptr[CFunctionOptions]] DeserializeFunctionOptions \
+        " arrow::compute::internal::DeserializeFunctionOptions"(
+            const CBuffer& buffer)
+
+
+cdef extern from * namespace "arrow::compute":
+    # inlined from expression_internal.h to avoid
+    # proliferation of #include <unordered_map>
+    """
+    #include <unordered_map>
+
+    #include "arrow/type.h"
+    #include "arrow/datum.h"
+
+    namespace arrow {
+    namespace compute {
+    struct KnownFieldValues {
+      std::unordered_map<FieldRef, Datum, FieldRef::Hash> map;
+    };
+    }  // namespace compute
+    }  // namespace arrow
+    """
+    cdef struct CKnownFieldValues "arrow::compute::KnownFieldValues":
+        unordered_map[CFieldRef, CDatum, CFieldRefHash] map
+
+cdef extern from "arrow/compute/expression.h" \
+        namespace "arrow::compute" nogil:
+
+    cdef cppclass CExpression "arrow::compute::Expression":
+        c_bool Equals(const CExpression& other) const
+        c_string ToString() const
+        CResult[CExpression] Bind(const CSchema&)
+        const CFieldRef* field_ref() const
+
+    cdef CExpression CMakeScalarExpression \
+        "arrow::compute::literal"(shared_ptr[CScalar] value)
+
+    cdef CExpression CMakeFieldExpression \
+        "arrow::compute::field_ref"(CFieldRef)
+
+    cdef CExpression CMakeFieldExpressionByIndex \
+        "arrow::compute::field_ref"(int idx)
+
+    cdef CExpression CMakeCallExpression \
+        "arrow::compute::call"(c_string function,
+                               vector[CExpression] arguments,
+                               shared_ptr[CFunctionOptions] options)
+
+    cdef CResult[shared_ptr[CBuffer]] CSerializeExpression \
+        "arrow::compute::Serialize"(const CExpression&)
+
+    cdef CResult[CExpression] CDeserializeExpression \
+        "arrow::compute::Deserialize"(shared_ptr[CBuffer])
+
+    cdef CResult[CKnownFieldValues] \
+        CExtractKnownFieldValues "arrow::compute::ExtractKnownFieldValues"(
+            const CExpression& partition_expression)
+
+
+cdef extern from "arrow/extension_type.h" namespace "arrow":
+    cdef cppclass CExtensionTypeRegistry" arrow::ExtensionTypeRegistry":
+        @staticmethod
+        shared_ptr[CExtensionTypeRegistry] GetGlobalRegistry()
+
+    cdef cppclass 
CExtensionType" arrow::ExtensionType"(CDataType): + c_string extension_name() + shared_ptr[CDataType] storage_type() + int byte_width() + int bit_width() + + @staticmethod + shared_ptr[CArray] WrapArray(shared_ptr[CDataType] ext_type, + shared_ptr[CArray] storage) + + @staticmethod + shared_ptr[CChunkedArray] WrapArray(shared_ptr[CDataType] ext_type, + shared_ptr[CChunkedArray] storage) + + cdef cppclass CExtensionArray" arrow::ExtensionArray"(CArray): + CExtensionArray(shared_ptr[CDataType], shared_ptr[CArray] storage) + + shared_ptr[CArray] storage() + + +cdef extern from "arrow/extension/fixed_shape_tensor.h" namespace "arrow::extension" nogil: + cdef cppclass CFixedShapeTensorType \ + " arrow::extension::FixedShapeTensorType"(CExtensionType): + + CResult[shared_ptr[CTensor]] MakeTensor(const shared_ptr[CExtensionScalar]& scalar) const + + @staticmethod + CResult[shared_ptr[CDataType]] Make(const shared_ptr[CDataType]& value_type, + const vector[int64_t]& shape, + const vector[int64_t]& permutation, + const vector[c_string]& dim_names) + + const shared_ptr[CDataType] value_type() + const vector[int64_t] shape() + const vector[int64_t] permutation() + const vector[c_string] dim_names() + + cdef cppclass CFixedShapeTensorArray \ + " arrow::extension::FixedShapeTensorArray"(CExtensionArray): + const CResult[shared_ptr[CTensor]] ToTensor() const + +cdef extern from "arrow/util/compression.h" namespace "arrow" nogil: + cdef enum CCompressionType" arrow::Compression::type": + CCompressionType_UNCOMPRESSED" arrow::Compression::UNCOMPRESSED" + CCompressionType_SNAPPY" arrow::Compression::SNAPPY" + CCompressionType_GZIP" arrow::Compression::GZIP" + CCompressionType_BROTLI" arrow::Compression::BROTLI" + CCompressionType_ZSTD" arrow::Compression::ZSTD" + CCompressionType_LZ4" arrow::Compression::LZ4" + CCompressionType_LZ4_FRAME" arrow::Compression::LZ4_FRAME" + CCompressionType_BZ2" arrow::Compression::BZ2" + + cdef cppclass CCodec" arrow::util::Codec": + @staticmethod + CResult[unique_ptr[CCodec]] Create(CCompressionType codec) + + @staticmethod + CResult[unique_ptr[CCodec]] CreateWithLevel" Create"( + CCompressionType codec, + int compression_level) + + @staticmethod + c_bool SupportsCompressionLevel(CCompressionType codec) + + @staticmethod + CResult[int] MinimumCompressionLevel(CCompressionType codec) + + @staticmethod + CResult[int] MaximumCompressionLevel(CCompressionType codec) + + @staticmethod + CResult[int] DefaultCompressionLevel(CCompressionType codec) + + @staticmethod + c_bool IsAvailable(CCompressionType codec) + + CResult[int64_t] Decompress(int64_t input_len, const uint8_t* input, + int64_t output_len, + uint8_t* output_buffer) + CResult[int64_t] Compress(int64_t input_len, const uint8_t* input, + int64_t output_buffer_len, + uint8_t* output_buffer) + c_string name() const + int compression_level() const + int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) + + +cdef extern from "arrow/util/io_util.h" namespace "arrow::internal" nogil: + int ErrnoFromStatus(CStatus status) + int WinErrorFromStatus(CStatus status) + int SignalFromStatus(CStatus status) + + CStatus SendSignal(int signum) + CStatus SendSignalToThread(int signum, uint64_t thread_id) + + +cdef extern from "arrow/util/iterator.h" namespace "arrow" nogil: + cdef cppclass CIterator" arrow::Iterator"[T]: + CResult[T] Next() + CStatus Visit[Visitor](Visitor&& visitor) + cppclass RangeIterator: + CResult[T] operator*() + RangeIterator& operator++() + c_bool operator!=(RangeIterator) const + RangeIterator 
begin() + RangeIterator end() + CIterator[T] MakeVectorIterator[T](vector[T] v) + +cdef extern from "arrow/util/thread_pool.h" namespace "arrow" nogil: + int GetCpuThreadPoolCapacity() + CStatus SetCpuThreadPoolCapacity(int threads) + +cdef extern from "arrow/array/concatenate.h" namespace "arrow" nogil: + CResult[shared_ptr[CArray]] Concatenate( + const vector[shared_ptr[CArray]]& arrays, + CMemoryPool* pool) + +cdef extern from "arrow/c/abi.h": + cdef struct ArrowSchema: + void (*release)(ArrowSchema*) noexcept nogil + + cdef struct ArrowArray: + void (*release)(ArrowArray*) noexcept nogil + + cdef struct ArrowArrayStream: + void (*release)(ArrowArrayStream*) noexcept nogil + + ctypedef int32_t ArrowDeviceType + cdef ArrowDeviceType ARROW_DEVICE_CUDA + + cdef struct ArrowDeviceArray: + ArrowArray array + int64_t device_id + int32_t device_type + +cdef extern from "arrow/c/bridge.h" namespace "arrow" nogil: + CStatus ExportType(CDataType&, ArrowSchema* out) + CResult[shared_ptr[CDataType]] ImportType(ArrowSchema*) + + CStatus ExportField(CField&, ArrowSchema* out) + CResult[shared_ptr[CField]] ImportField(ArrowSchema*) + + CStatus ExportSchema(CSchema&, ArrowSchema* out) + CResult[shared_ptr[CSchema]] ImportSchema(ArrowSchema*) + + CStatus ExportArray(CArray&, ArrowArray* out) + CStatus ExportArray(CArray&, ArrowArray* out, ArrowSchema* out_schema) + CResult[shared_ptr[CArray]] ImportArray(ArrowArray*, + shared_ptr[CDataType]) + CResult[shared_ptr[CArray]] ImportArray(ArrowArray*, ArrowSchema*) + + CStatus ExportRecordBatch(CRecordBatch&, ArrowArray* out) + CStatus ExportRecordBatch(CRecordBatch&, ArrowArray* out, + ArrowSchema* out_schema) + CResult[shared_ptr[CRecordBatch]] ImportRecordBatch(ArrowArray*, + shared_ptr[CSchema]) + CResult[shared_ptr[CRecordBatch]] ImportRecordBatch(ArrowArray*, + ArrowSchema*) + + CStatus ExportRecordBatchReader(shared_ptr[CRecordBatchReader], + ArrowArrayStream*) + CResult[shared_ptr[CRecordBatchReader]] ImportRecordBatchReader( + ArrowArrayStream*) + + CStatus ExportChunkedArray(shared_ptr[CChunkedArray], ArrowArrayStream*) + CResult[shared_ptr[CChunkedArray]] ImportChunkedArray(ArrowArrayStream*) + + CStatus ExportDeviceArray(const CArray&, shared_ptr[CSyncEvent], + ArrowDeviceArray* out, ArrowSchema*) + CResult[shared_ptr[CArray]] ImportDeviceArray( + ArrowDeviceArray*, shared_ptr[CDataType]) + CResult[shared_ptr[CArray]] ImportDeviceArray( + ArrowDeviceArray*, ArrowSchema*) + + CStatus ExportDeviceRecordBatch(const CRecordBatch&, shared_ptr[CSyncEvent], + ArrowDeviceArray* out, ArrowSchema*) + CResult[shared_ptr[CRecordBatch]] ImportDeviceRecordBatch( + ArrowDeviceArray*, shared_ptr[CSchema]) + CResult[shared_ptr[CRecordBatch]] ImportDeviceRecordBatch( + ArrowDeviceArray*, ArrowSchema*) + + +cdef extern from "arrow/util/byte_size.h" namespace "arrow::util" nogil: + CResult[int64_t] ReferencedBufferSize(const CArray& array_data) + CResult[int64_t] ReferencedBufferSize(const CRecordBatch& record_batch) + CResult[int64_t] ReferencedBufferSize(const CChunkedArray& chunked_array) + CResult[int64_t] ReferencedBufferSize(const CTable& table) + int64_t TotalBufferSize(const CArray& array) + int64_t TotalBufferSize(const CChunkedArray& array) + int64_t TotalBufferSize(const CRecordBatch& record_batch) + int64_t TotalBufferSize(const CTable& table) + +ctypedef PyObject* CallbackUdf(object user_function, const CUdfContext& context, object inputs) + + +cdef extern from "arrow/api.h" namespace "arrow" nogil: + + cdef cppclass CRecordBatchIterator 
"arrow::RecordBatchIterator"( + CIterator[shared_ptr[CRecordBatch]]): + pass + + +cdef extern from "arrow/python/udf.h" namespace "arrow::py" nogil: + cdef cppclass CUdfContext" arrow::py::UdfContext": + CMemoryPool *pool + int64_t batch_length + + cdef cppclass CUdfOptions" arrow::py::UdfOptions": + c_string func_name + CArity arity + CFunctionDoc func_doc + vector[shared_ptr[CDataType]] input_types + shared_ptr[CDataType] output_type + + CStatus RegisterScalarFunction(PyObject* function, + function[CallbackUdf] wrapper, const CUdfOptions& options, + CFunctionRegistry* registry) + + CStatus RegisterTabularFunction(PyObject* function, + function[CallbackUdf] wrapper, const CUdfOptions& options, + CFunctionRegistry* registry) + + CStatus RegisterAggregateFunction(PyObject* function, + function[CallbackUdf] wrapper, const CUdfOptions& options, + CFunctionRegistry* registry) + + CStatus RegisterVectorFunction(PyObject* function, + function[CallbackUdf] wrapper, const CUdfOptions& options, + CFunctionRegistry* registry) + + CResult[shared_ptr[CRecordBatchReader]] CallTabularFunction( + const c_string& func_name, const vector[CDatum]& args, CFunctionRegistry* registry) + +cdef extern from "arrow/compute/cast.h" namespace "arrow::compute": + CResult[CDatum] Cast(const CDatum& value, const CCastOptions& options) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd new file mode 100644 index 0000000000000000000000000000000000000000..dc9babee190e110583d9d2cc24b71567203290af --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd @@ -0,0 +1,118 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + + +cdef extern from "arrow/acero/options.h" namespace "arrow::acero" nogil: + cdef enum CJoinType "arrow::acero::JoinType": + CJoinType_LEFT_SEMI "arrow::acero::JoinType::LEFT_SEMI" + CJoinType_RIGHT_SEMI "arrow::acero::JoinType::RIGHT_SEMI" + CJoinType_LEFT_ANTI "arrow::acero::JoinType::LEFT_ANTI" + CJoinType_RIGHT_ANTI "arrow::acero::JoinType::RIGHT_ANTI" + CJoinType_INNER "arrow::acero::JoinType::INNER" + CJoinType_LEFT_OUTER "arrow::acero::JoinType::LEFT_OUTER" + CJoinType_RIGHT_OUTER "arrow::acero::JoinType::RIGHT_OUTER" + CJoinType_FULL_OUTER "arrow::acero::JoinType::FULL_OUTER" + + cdef cppclass CExecNodeOptions "arrow::acero::ExecNodeOptions": + pass + + cdef cppclass CSourceNodeOptions "arrow::acero::SourceNodeOptions"(CExecNodeOptions): + pass + + cdef cppclass CTableSourceNodeOptions "arrow::acero::TableSourceNodeOptions"(CExecNodeOptions): + CTableSourceNodeOptions(shared_ptr[CTable] table) + CTableSourceNodeOptions(shared_ptr[CTable] table, int64_t max_batch_size) + + cdef cppclass CSinkNodeOptions "arrow::acero::SinkNodeOptions"(CExecNodeOptions): + pass + + cdef cppclass CFilterNodeOptions "arrow::acero::FilterNodeOptions"(CExecNodeOptions): + CFilterNodeOptions(CExpression) + + cdef cppclass CProjectNodeOptions "arrow::acero::ProjectNodeOptions"(CExecNodeOptions): + CProjectNodeOptions(vector[CExpression] expressions) + CProjectNodeOptions(vector[CExpression] expressions, + vector[c_string] names) + + cdef cppclass CAggregateNodeOptions "arrow::acero::AggregateNodeOptions"(CExecNodeOptions): + CAggregateNodeOptions(vector[CAggregate] aggregates, vector[CFieldRef] names) + + cdef cppclass COrderByNodeOptions "arrow::acero::OrderByNodeOptions"(CExecNodeOptions): + COrderByNodeOptions(COrdering ordering) + + cdef cppclass CHashJoinNodeOptions "arrow::acero::HashJoinNodeOptions"(CExecNodeOptions): + CHashJoinNodeOptions(CJoinType, vector[CFieldRef] in_left_keys, + vector[CFieldRef] in_right_keys) + CHashJoinNodeOptions(CJoinType, vector[CFieldRef] in_left_keys, + vector[CFieldRef] in_right_keys, + CExpression filter, + c_string output_suffix_for_left, + c_string output_suffix_for_right) + CHashJoinNodeOptions(CJoinType join_type, + vector[CFieldRef] left_keys, + vector[CFieldRef] right_keys, + vector[CFieldRef] left_output, + vector[CFieldRef] right_output, + CExpression filter, + c_string output_suffix_for_left, + c_string output_suffix_for_right) + + cdef struct CAsofJoinKeys "arrow::acero::AsofJoinNodeOptions::Keys": + CFieldRef on_key + vector[CFieldRef] by_key + + cdef cppclass CAsofJoinNodeOptions "arrow::acero::AsofJoinNodeOptions"(CExecNodeOptions): + CAsofJoinNodeOptions(vector[CAsofJoinKeys] keys, int64_t tolerance) + + +cdef extern from "arrow/acero/exec_plan.h" namespace "arrow::acero" nogil: + cdef cppclass CDeclaration "arrow::acero::Declaration": + cppclass Input: + Input(CExecNode*) + Input(CDeclaration) + + c_string label + vector[Input] inputs + + CDeclaration() + CDeclaration(c_string factory_name, CExecNodeOptions options) + CDeclaration(c_string factory_name, vector[Input] inputs, shared_ptr[CExecNodeOptions] options) + + @staticmethod + CDeclaration Sequence(vector[CDeclaration] decls) + + cdef cppclass CExecNode "arrow::acero::ExecNode": + const vector[CExecNode*]& inputs() const + const shared_ptr[CSchema]& output_schema() const + + CResult[shared_ptr[CTable]] DeclarationToTable( + CDeclaration declaration, c_bool use_threads + ) + 
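+    # Usage sketch (editorial addition, not upstream): these entry points
+    # surface in Python through the pyarrow.acero wrappers; assuming those
+    # wrappers, a linear plan is declared and executed roughly like
+    #
+    #     from pyarrow.acero import (Declaration, TableSourceNodeOptions,
+    #                                FilterNodeOptions)
+    #     decl = Declaration.from_sequence([
+    #         Declaration("table_source", TableSourceNodeOptions(table)),
+    #         Declaration("filter", FilterNodeOptions(expr)),
+    #     ])
+    #     result = decl.to_table(use_threads=True)  # -> DeclarationToTable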
CResult[shared_ptr[CTable]] DeclarationToTable( + CDeclaration declaration, c_bool use_threads, + CMemoryPool* memory_pool, CFunctionRegistry* function_registry + ) + CResult[unique_ptr[CRecordBatchReader]] DeclarationToReader( + CDeclaration declaration, c_bool use_threads + ) + + CResult[c_string] DeclarationToString(const CDeclaration& declaration) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd new file mode 100644 index 0000000000000000000000000000000000000000..3ac943cf941d8591483d4be2e2bceaac3e051292 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd @@ -0,0 +1,107 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.libarrow cimport * + +cdef extern from "arrow/gpu/cuda_api.h" namespace "arrow::cuda" nogil: + + cdef cppclass CCudaDeviceManager" arrow::cuda::CudaDeviceManager": + @staticmethod + CResult[CCudaDeviceManager*] Instance() + CResult[shared_ptr[CCudaContext]] GetContext(int gpu_number) + CResult[shared_ptr[CCudaContext]] GetSharedContext(int gpu_number, + void* handle) + CStatus AllocateHost(int device_number, int64_t nbytes, + shared_ptr[CCudaHostBuffer]* buffer) + int num_devices() const + + cdef cppclass CCudaContext" arrow::cuda::CudaContext": + CResult[shared_ptr[CCudaBuffer]] Allocate(int64_t nbytes) + CResult[shared_ptr[CCudaBuffer]] View(uint8_t* data, int64_t nbytes) + CResult[shared_ptr[CCudaBuffer]] OpenIpcBuffer( + const CCudaIpcMemHandle& ipc_handle) + CStatus Synchronize() + int64_t bytes_allocated() const + const void* handle() const + int device_number() const + CResult[uintptr_t] GetDeviceAddress(uintptr_t addr) + + cdef cppclass CCudaIpcMemHandle" arrow::cuda::CudaIpcMemHandle": + @staticmethod + CResult[shared_ptr[CCudaIpcMemHandle]] FromBuffer( + const void* opaque_handle) + CResult[shared_ptr[CBuffer]] Serialize(CMemoryPool* pool) const + + cdef cppclass CCudaBuffer" arrow::cuda::CudaBuffer"(CBuffer): + CCudaBuffer(uint8_t* data, int64_t size, + const shared_ptr[CCudaContext]& context, + c_bool own_data=false, c_bool is_ipc=false) + CCudaBuffer(const shared_ptr[CCudaBuffer]& parent, + const int64_t offset, const int64_t size) + + @staticmethod + CResult[shared_ptr[CCudaBuffer]] FromBuffer(shared_ptr[CBuffer] buf) + + CStatus CopyToHost(const int64_t position, const int64_t nbytes, + void* out) const + CStatus CopyFromHost(const int64_t position, const void* data, + int64_t nbytes) + CStatus CopyFromDevice(const int64_t position, const void* data, + int64_t nbytes) + CStatus CopyFromAnotherDevice(const shared_ptr[CCudaContext]& src_ctx, + const int64_t position, const void* data, + int64_t nbytes) + 
CResult[shared_ptr[CCudaIpcMemHandle]] ExportForIpc() + shared_ptr[CCudaContext] context() const + + cdef cppclass \ + CCudaHostBuffer" arrow::cuda::CudaHostBuffer"(CMutableBuffer): + pass + + cdef cppclass \ + CCudaBufferReader" arrow::cuda::CudaBufferReader"(CBufferReader): + CCudaBufferReader(const shared_ptr[CBuffer]& buffer) + CResult[int64_t] Read(int64_t nbytes, void* buffer) + CResult[shared_ptr[CBuffer]] Read(int64_t nbytes) + + cdef cppclass \ + CCudaBufferWriter" arrow::cuda::CudaBufferWriter"(WritableFile): + CCudaBufferWriter(const shared_ptr[CCudaBuffer]& buffer) + CStatus Close() + CStatus Write(const void* data, int64_t nbytes) + CStatus WriteAt(int64_t position, const void* data, int64_t nbytes) + CStatus SetBufferSize(const int64_t buffer_size) + int64_t buffer_size() + int64_t num_bytes_buffered() const + + CResult[shared_ptr[CCudaHostBuffer]] AllocateCudaHostBuffer( + int device_number, const int64_t size) + + # Cuda prefix is added to avoid picking up arrow::cuda functions + # from arrow namespace. + CResult[shared_ptr[CCudaBuffer]] \ + CudaSerializeRecordBatch" arrow::cuda::SerializeRecordBatch"\ + (const CRecordBatch& batch, + CCudaContext* ctx) + CResult[shared_ptr[CRecordBatch]] \ + CudaReadRecordBatch" arrow::cuda::ReadRecordBatch"\ + (const shared_ptr[CSchema]& schema, + CDictionaryMemo* dictionary_memo, + const shared_ptr[CCudaBuffer]& buffer, + CMemoryPool* pool) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd new file mode 100644 index 0000000000000000000000000000000000000000..fe96705a54b2ff3eb9e2ec4da998566a58767a81 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd @@ -0,0 +1,413 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
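+# Added commentary, not upstream: the `ctypedef CStatus
+# cb_writer_finish_internal(CFileWriter*)` alias declared just below is the
+# usual Cython stand-in for a std::function parameter; the
+# function[cb_writer_finish_internal] fields of
+# CFileSystemDatasetWriteOptions further down consume it.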
+ +# distutils: language = c++ + +from libcpp.unordered_map cimport unordered_map +from libcpp cimport bool as c_bool + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_acero cimport * +from pyarrow.includes.libarrow_fs cimport * + + +cdef extern from "arrow/dataset/plan.h" namespace "arrow::dataset::internal" nogil: + + cdef void Initialize() + + +ctypedef CStatus cb_writer_finish_internal(CFileWriter*) +ctypedef void cb_writer_finish(dict, CFileWriter*) + +cdef extern from "arrow/dataset/api.h" namespace "arrow::dataset" nogil: + + cdef enum ExistingDataBehavior" arrow::dataset::ExistingDataBehavior": + ExistingDataBehavior_DELETE_MATCHING" \ + arrow::dataset::ExistingDataBehavior::kDeleteMatchingPartitions" + ExistingDataBehavior_OVERWRITE_OR_IGNORE" \ + arrow::dataset::ExistingDataBehavior::kOverwriteOrIgnore" + ExistingDataBehavior_ERROR" \ + arrow::dataset::ExistingDataBehavior::kError" + + cdef cppclass CScanOptions "arrow::dataset::ScanOptions": + shared_ptr[CSchema] dataset_schema + shared_ptr[CSchema] projected_schema + c_bool use_threads + CExpression filter + + cdef cppclass CScanNodeOptions "arrow::dataset::ScanNodeOptions"(CExecNodeOptions): + CScanNodeOptions(shared_ptr[CDataset] dataset, shared_ptr[CScanOptions] scan_options) + + shared_ptr[CScanOptions] scan_options + + cdef cppclass CFragmentScanOptions "arrow::dataset::FragmentScanOptions": + c_string type_name() const + + ctypedef CIterator[shared_ptr[CScanTask]] CScanTaskIterator \ + "arrow::dataset::ScanTaskIterator" + + cdef cppclass CScanTask" arrow::dataset::ScanTask": + CResult[CRecordBatchIterator] Execute() + + cdef cppclass CFragment "arrow::dataset::Fragment": + CResult[shared_ptr[CSchema]] ReadPhysicalSchema() + CResult[CScanTaskIterator] Scan(shared_ptr[CScanOptions] options) + c_bool splittable() const + c_string type_name() const + const CExpression& partition_expression() const + + ctypedef vector[shared_ptr[CFragment]] CFragmentVector \ + "arrow::dataset::FragmentVector" + + ctypedef CIterator[shared_ptr[CFragment]] CFragmentIterator \ + "arrow::dataset::FragmentIterator" + + cdef cppclass CInMemoryFragment "arrow::dataset::InMemoryFragment"( + CFragment): + CInMemoryFragment(vector[shared_ptr[CRecordBatch]] record_batches, + CExpression partition_expression) + + cdef cppclass CTaggedRecordBatch "arrow::dataset::TaggedRecordBatch": + shared_ptr[CRecordBatch] record_batch + shared_ptr[CFragment] fragment + + ctypedef CIterator[CTaggedRecordBatch] CTaggedRecordBatchIterator \ + "arrow::dataset::TaggedRecordBatchIterator" + + cdef cppclass CScanner "arrow::dataset::Scanner": + CScanner(shared_ptr[CDataset], shared_ptr[CScanOptions]) + CScanner(shared_ptr[CFragment], shared_ptr[CScanOptions]) + CResult[CScanTaskIterator] Scan() + CResult[CTaggedRecordBatchIterator] ScanBatches() + CResult[shared_ptr[CTable]] ToTable() + CResult[shared_ptr[CTable]] TakeRows(const CArray& indices) + CResult[shared_ptr[CTable]] Head(int64_t num_rows) + CResult[int64_t] CountRows() + CResult[CFragmentIterator] GetFragments() + CResult[shared_ptr[CRecordBatchReader]] ToRecordBatchReader() + const shared_ptr[CScanOptions]& options() + + cdef cppclass CScannerBuilder "arrow::dataset::ScannerBuilder": + CScannerBuilder(shared_ptr[CDataset], + shared_ptr[CScanOptions] scan_options) + CScannerBuilder(shared_ptr[CSchema], shared_ptr[CFragment], + shared_ptr[CScanOptions] scan_options) + + @staticmethod + shared_ptr[CScannerBuilder] FromRecordBatchReader( + 
shared_ptr[CRecordBatchReader] reader) + CStatus ProjectColumns "Project"(const vector[c_string]& columns) + CStatus Project(vector[CExpression]& exprs, vector[c_string]& columns) + CStatus Filter(CExpression filter) + CStatus UseThreads(c_bool use_threads) + CStatus Pool(CMemoryPool* pool) + CStatus BatchSize(int64_t batch_size) + CStatus BatchReadahead(int32_t batch_readahead) + CStatus FragmentReadahead(int32_t fragment_readahead) + CStatus FragmentScanOptions( + shared_ptr[CFragmentScanOptions] fragment_scan_options) + CResult[shared_ptr[CScanOptions]] GetScanOptions() + CResult[shared_ptr[CScanner]] Finish() + shared_ptr[CSchema] schema() const + + ctypedef vector[shared_ptr[CDataset]] CDatasetVector \ + "arrow::dataset::DatasetVector" + + cdef cppclass CDataset "arrow::dataset::Dataset": + const shared_ptr[CSchema] & schema() + CResult[CFragmentIterator] GetFragments() + CResult[CFragmentIterator] GetFragments(CExpression predicate) + const CExpression & partition_expression() + c_string type_name() + + CResult[shared_ptr[CDataset]] ReplaceSchema(shared_ptr[CSchema]) + + CResult[shared_ptr[CScannerBuilder]] NewScan() + + cdef cppclass CInMemoryDataset "arrow::dataset::InMemoryDataset"( + CDataset): + CInMemoryDataset(shared_ptr[CRecordBatchReader]) + CInMemoryDataset(shared_ptr[CTable]) + + cdef cppclass CUnionDataset "arrow::dataset::UnionDataset"( + CDataset): + @staticmethod + CResult[shared_ptr[CUnionDataset]] Make(shared_ptr[CSchema] schema, + CDatasetVector children) + + const CDatasetVector& children() const + + cdef cppclass CInspectOptions "arrow::dataset::InspectOptions": + int fragments + + cdef cppclass CFinishOptions "arrow::dataset::FinishOptions": + shared_ptr[CSchema] schema + CInspectOptions inspect_options + c_bool validate_fragments + + cdef cppclass CDatasetFactory "arrow::dataset::DatasetFactory": + CResult[vector[shared_ptr[CSchema]]] InspectSchemas(CInspectOptions) + CResult[shared_ptr[CSchema]] Inspect(CInspectOptions) + CResult[shared_ptr[CDataset]] FinishWithSchema "Finish"( + const shared_ptr[CSchema]& schema) + CResult[shared_ptr[CDataset]] Finish() + const CExpression& root_partition() + CStatus SetRootPartition(CExpression partition) + + cdef cppclass CUnionDatasetFactory "arrow::dataset::UnionDatasetFactory": + @staticmethod + CResult[shared_ptr[CDatasetFactory]] Make( + vector[shared_ptr[CDatasetFactory]] factories) + + cdef cppclass CFileSource "arrow::dataset::FileSource": + const c_string& path() const + const shared_ptr[CFileSystem]& filesystem() const + const shared_ptr[CBuffer]& buffer() const + const int64_t size() const + # HACK: Cython can't handle all the overloads so don't declare them. + # This means invalid construction of CFileSource won't be caught in + # the C++ generation phase (though it will still be caught when + # the generated C++ is compiled). + CFileSource(...) 
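+    # Illustrative sketch (not part of the vendored header): `(...)` is
+    # Cython's generic escape hatch for overload sets it cannot express; a
+    # hypothetical class with many constructors can be declared the same way:
+    #
+    #     cdef cppclass CManyCtors "arrow::ManyCtors":
+    #         CManyCtors(...)  # accepts any argument list; validated only
+    #                          # when the generated C++ is compiled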
+ + cdef cppclass CFileWriteOptions \ + "arrow::dataset::FileWriteOptions": + const shared_ptr[CFileFormat]& format() const + c_string type_name() const + + cdef cppclass CFileWriter \ + "arrow::dataset::FileWriter": + const shared_ptr[CFileFormat]& format() const + const shared_ptr[CSchema]& schema() const + const shared_ptr[CFileWriteOptions]& options() const + const CFileLocator& destination() const + CResult[int64_t] GetBytesWritten() + + cdef cppclass CFileFormat "arrow::dataset::FileFormat": + shared_ptr[CFragmentScanOptions] default_fragment_scan_options + c_string type_name() const + CResult[shared_ptr[CSchema]] Inspect(const CFileSource&) const + CResult[shared_ptr[CFileFragment]] MakeFragment( + CFileSource source, + CExpression partition_expression, + shared_ptr[CSchema] physical_schema) + shared_ptr[CFileWriteOptions] DefaultWriteOptions() + + cdef cppclass CFileFragment "arrow::dataset::FileFragment"( + CFragment): + const CFileSource& source() const + const shared_ptr[CFileFormat]& format() const + + cdef cppclass CFileSystemDatasetWriteOptions \ + "arrow::dataset::FileSystemDatasetWriteOptions": + shared_ptr[CFileWriteOptions] file_write_options + shared_ptr[CFileSystem] filesystem + c_string base_dir + shared_ptr[CPartitioning] partitioning + int max_partitions + c_string basename_template + function[cb_writer_finish_internal] writer_pre_finish + function[cb_writer_finish_internal] writer_post_finish + ExistingDataBehavior existing_data_behavior + c_bool create_dir + uint32_t max_open_files + uint64_t max_rows_per_file + uint64_t min_rows_per_group + uint64_t max_rows_per_group + + cdef cppclass CFileSystemDataset \ + "arrow::dataset::FileSystemDataset"(CDataset): + @staticmethod + CResult[shared_ptr[CDataset]] Make( + shared_ptr[CSchema] schema, + CExpression source_partition, + shared_ptr[CFileFormat] format, + shared_ptr[CFileSystem] filesystem, + vector[shared_ptr[CFileFragment]] fragments) + + @staticmethod + CStatus Write( + const CFileSystemDatasetWriteOptions& write_options, + shared_ptr[CScanner] scanner) + + c_string type() + vector[c_string] files() + const shared_ptr[CFileFormat]& format() const + const shared_ptr[CFileSystem]& filesystem() const + const shared_ptr[CPartitioning]& partitioning() const + + cdef cppclass CIpcFileWriteOptions \ + "arrow::dataset::IpcFileWriteOptions"(CFileWriteOptions): + shared_ptr[CIpcWriteOptions] options + + cdef cppclass CIpcFileFormat "arrow::dataset::IpcFileFormat"( + CFileFormat): + pass + + cdef cppclass COrcFileFormat "arrow::dataset::OrcFileFormat"( + CFileFormat): + pass + + cdef cppclass CCsvFileWriteOptions \ + "arrow::dataset::CsvFileWriteOptions"(CFileWriteOptions): + shared_ptr[CCSVWriteOptions] write_options + CMemoryPool* pool + + cdef cppclass CCsvFileFormat "arrow::dataset::CsvFileFormat"( + CFileFormat): + CCSVParseOptions parse_options + + cdef cppclass CCsvFragmentScanOptions \ + "arrow::dataset::CsvFragmentScanOptions"(CFragmentScanOptions): + CCSVConvertOptions convert_options + CCSVReadOptions read_options + function[StreamWrapFunc] stream_transform_func + + cdef cppclass CJsonFileFormat "arrow::dataset::JsonFileFormat"(CFileFormat): + pass + + cdef cppclass CJsonFragmentScanOptions "arrow::dataset::JsonFragmentScanOptions"(CFragmentScanOptions): + CJSONParseOptions parse_options + CJSONReadOptions read_options + + cdef cppclass CPartitioning "arrow::dataset::Partitioning": + c_string type_name() const + CResult[CExpression] Parse(const c_string & path) const + const shared_ptr[CSchema] & schema() + 
c_bool Equals(const CPartitioning& other) const + + cdef cppclass CSegmentEncoding" arrow::dataset::SegmentEncoding": + bint operator==(CSegmentEncoding) + + CSegmentEncoding CSegmentEncoding_None\ + " arrow::dataset::SegmentEncoding::None" + CSegmentEncoding CSegmentEncoding_Uri\ + " arrow::dataset::SegmentEncoding::Uri" + + cdef cppclass CKeyValuePartitioningOptions \ + "arrow::dataset::KeyValuePartitioningOptions": + CSegmentEncoding segment_encoding + + cdef cppclass CHivePartitioningOptions \ + "arrow::dataset::HivePartitioningOptions": + CSegmentEncoding segment_encoding + c_string null_fallback + + cdef cppclass CPartitioningFactoryOptions \ + "arrow::dataset::PartitioningFactoryOptions": + c_bool infer_dictionary + shared_ptr[CSchema] schema + CSegmentEncoding segment_encoding + + cdef cppclass CHivePartitioningFactoryOptions \ + "arrow::dataset::HivePartitioningFactoryOptions": + c_bool infer_dictionary + c_string null_fallback + shared_ptr[CSchema] schema + CSegmentEncoding segment_encoding + + cdef cppclass CPartitioningFactory "arrow::dataset::PartitioningFactory": + c_string type_name() const + + cdef cppclass CKeyValuePartitioning \ + "arrow::dataset::KeyValuePartitioning"(CPartitioning): + CKeyValuePartitioning(shared_ptr[CSchema] schema, + vector[shared_ptr[CArray]] dictionaries, + CKeyValuePartitioningOptions options) + + vector[shared_ptr[CArray]] dictionaries() const + CSegmentEncoding segment_encoding() + + cdef cppclass CDirectoryPartitioning \ + "arrow::dataset::DirectoryPartitioning"(CPartitioning): + CDirectoryPartitioning(shared_ptr[CSchema] schema, + vector[shared_ptr[CArray]] dictionaries) + + @staticmethod + shared_ptr[CPartitioningFactory] MakeFactory( + vector[c_string] field_names, CPartitioningFactoryOptions) + + vector[shared_ptr[CArray]] dictionaries() const + + cdef cppclass CHivePartitioning \ + "arrow::dataset::HivePartitioning"(CPartitioning): + CHivePartitioning(shared_ptr[CSchema] schema, + vector[shared_ptr[CArray]] dictionaries, + CHivePartitioningOptions options) + + @staticmethod + shared_ptr[CPartitioningFactory] MakeFactory( + CHivePartitioningFactoryOptions) + + vector[shared_ptr[CArray]] dictionaries() const + c_string null_fallback() const + + cdef cppclass CFilenamePartitioning \ + "arrow::dataset::FilenamePartitioning"(CPartitioning): + CFilenamePartitioning(shared_ptr[CSchema] schema, + vector[shared_ptr[CArray]] dictionaries) + + @staticmethod + shared_ptr[CPartitioningFactory] MakeFactory( + vector[c_string] field_names, CPartitioningFactoryOptions) + + vector[shared_ptr[CArray]] dictionaries() const + + cdef cppclass CPartitioningOrFactory \ + "arrow::dataset::PartitioningOrFactory": + CPartitioningOrFactory(shared_ptr[CPartitioning]) + CPartitioningOrFactory(shared_ptr[CPartitioningFactory]) + CPartitioningOrFactory & operator = (shared_ptr[CPartitioning]) + CPartitioningOrFactory & operator = ( + shared_ptr[CPartitioningFactory]) + shared_ptr[CPartitioning] partitioning() const + shared_ptr[CPartitioningFactory] factory() const + + cdef cppclass CFileSystemFactoryOptions \ + "arrow::dataset::FileSystemFactoryOptions": + CPartitioningOrFactory partitioning + c_string partition_base_dir + c_bool exclude_invalid_files + vector[c_string] selector_ignore_prefixes + + cdef cppclass CFileSystemDatasetFactory \ + "arrow::dataset::FileSystemDatasetFactory"( + CDatasetFactory): + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromPaths "Make"( + shared_ptr[CFileSystem] filesystem, + vector[c_string] paths, + shared_ptr[CFileFormat] 
format, + CFileSystemFactoryOptions options + ) + + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromSelector "Make"( + shared_ptr[CFileSystem] filesystem, + CFileSelector, + shared_ptr[CFileFormat] format, + CFileSystemFactoryOptions options + ) + + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromFileInfos "Make"( + shared_ptr[CFileSystem] filesystem, + vector[CFileInfo] files, + shared_ptr[CFileFormat] format, + CFileSystemFactoryOptions options + ) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e5389b3135faf472e9b1f7d85fee9dea5fd9f2b0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd @@ -0,0 +1,105 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow.includes.libparquet_encryption cimport * + +from pyarrow._parquet cimport * + + +cdef extern from "arrow/dataset/parquet_encryption_config.h" namespace "arrow::dataset" nogil: + cdef cppclass CParquetEncryptionConfig "arrow::dataset::ParquetEncryptionConfig": + shared_ptr[CCryptoFactory] crypto_factory + shared_ptr[CKmsConnectionConfig] kms_connection_config + shared_ptr[CEncryptionConfiguration] encryption_config + + cdef cppclass CParquetDecryptionConfig "arrow::dataset::ParquetDecryptionConfig": + shared_ptr[CCryptoFactory] crypto_factory + shared_ptr[CKmsConnectionConfig] kms_connection_config + shared_ptr[CDecryptionConfiguration] decryption_config + + +cdef extern from "arrow/dataset/api.h" namespace "arrow::dataset" nogil: + + cdef cppclass CParquetFileWriter \ + "arrow::dataset::ParquetFileWriter"(CFileWriter): + const shared_ptr[FileWriter]& parquet_writer() const + + cdef cppclass CParquetFileWriteOptions \ + "arrow::dataset::ParquetFileWriteOptions"(CFileWriteOptions): + shared_ptr[WriterProperties] writer_properties + shared_ptr[ArrowWriterProperties] arrow_writer_properties + shared_ptr[CParquetEncryptionConfig] parquet_encryption_config + + cdef cppclass CParquetFileFragment "arrow::dataset::ParquetFileFragment"( + CFileFragment): + const vector[int]& row_groups() const + shared_ptr[CFileMetaData] metadata() const + CResult[vector[shared_ptr[CFragment]]] SplitByRowGroup( + CExpression predicate) + CResult[shared_ptr[CFragment]] SubsetWithFilter "Subset"( + CExpression predicate) + CResult[shared_ptr[CFragment]] SubsetWithIds "Subset"( + vector[int] row_group_ids) + CStatus EnsureCompleteMetadata() + + cdef cppclass CParquetFileFormatReaderOptions \ + "arrow::dataset::ParquetFileFormat::ReaderOptions": + unordered_set[c_string] 
dict_columns + TimeUnit coerce_int96_timestamp_unit + + cdef cppclass CParquetFileFormat "arrow::dataset::ParquetFileFormat"( + CFileFormat): + CParquetFileFormatReaderOptions reader_options + CResult[shared_ptr[CFileFragment]] MakeFragment( + CFileSource source, + CExpression partition_expression, + shared_ptr[CSchema] physical_schema, + vector[int] row_groups) + + cdef cppclass CParquetFragmentScanOptions \ + "arrow::dataset::ParquetFragmentScanOptions"(CFragmentScanOptions): + shared_ptr[CReaderProperties] reader_properties + shared_ptr[ArrowReaderProperties] arrow_reader_properties + shared_ptr[CParquetDecryptionConfig] parquet_decryption_config + + cdef cppclass CParquetFactoryOptions \ + "arrow::dataset::ParquetFactoryOptions": + CPartitioningOrFactory partitioning + c_string partition_base_dir + c_bool validate_column_chunk_paths + + cdef cppclass CParquetDatasetFactory \ + "arrow::dataset::ParquetDatasetFactory"(CDatasetFactory): + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromMetaDataPath "Make"( + const c_string& metadata_path, + shared_ptr[CFileSystem] filesystem, + shared_ptr[CParquetFileFormat] format, + CParquetFactoryOptions options + ) + + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromMetaDataSource "Make"( + const CFileSource& metadata_path, + const c_string& base_path, + shared_ptr[CFileSystem] filesystem, + shared_ptr[CParquetFileFormat] format, + CParquetFactoryOptions options + ) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd new file mode 100644 index 0000000000000000000000000000000000000000..722e947bfeca238af0bd6ee002e7f3f9f1063ed6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd @@ -0,0 +1,50 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
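+# Added commentary, not upstream: unlike the neighboring headers, which
+# `cimport *`, the feather declarations below import only the handful of
+# libarrow names they actually use.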
+ +# distutils: language = c++ + +from pyarrow.includes.libarrow cimport (CCompressionType, CStatus, CTable, + COutputStream, CResult, shared_ptr, + vector, CRandomAccessFile, CSchema, + c_string, CIpcReadOptions) + + +cdef extern from "arrow/ipc/api.h" namespace "arrow::ipc" nogil: + int kFeatherV1Version" arrow::ipc::feather::kFeatherV1Version" + int kFeatherV2Version" arrow::ipc::feather::kFeatherV2Version" + + cdef cppclass CFeatherProperties" arrow::ipc::feather::WriteProperties": + int version + int chunksize + CCompressionType compression + int compression_level + + CStatus WriteFeather" arrow::ipc::feather::WriteTable" \ + (const CTable& table, COutputStream* out, + CFeatherProperties properties) + + cdef cppclass CFeatherReader" arrow::ipc::feather::Reader": + @staticmethod + CResult[shared_ptr[CFeatherReader]] Open( + const shared_ptr[CRandomAccessFile]& file, + const CIpcReadOptions& options) + int version() + shared_ptr[CSchema] schema() + + CStatus Read(shared_ptr[CTable]* out) + CStatus Read(const vector[int] indices, shared_ptr[CTable]* out) + CStatus Read(const vector[c_string] names, shared_ptr[CTable]* out) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c4cf5830c41286fde3af95c894b909e0aa34a72d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd @@ -0,0 +1,622 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
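+# Added commentary, not upstream: one idiom recurs throughout this header:
+# `cdef cppclass CName" arrow::flight::Name"` gives Cython the verbatim C++
+# name to emit, while the C-prefixed alias keeps the declaration from
+# colliding with the Python-level wrapper classes of the same name.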
+ +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + + +cdef extern from "arrow/flight/api.h" namespace "arrow" nogil: + cdef char* CTracingServerMiddlewareName\ + " arrow::flight::TracingServerMiddleware::kMiddlewareName" + + cdef cppclass CActionType" arrow::flight::ActionType": + c_string type + c_string description + bint operator==(CActionType) + CResult[c_string] SerializeToString() + + @staticmethod + CResult[CActionType] Deserialize(const c_string& serialized) + + cdef cppclass CAction" arrow::flight::Action": + c_string type + shared_ptr[CBuffer] body + bint operator==(CAction) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CAction] Deserialize(const c_string& serialized) + + cdef cppclass CFlightResult" arrow::flight::Result": + CFlightResult() + CFlightResult(CFlightResult) + shared_ptr[CBuffer] body + bint operator==(CFlightResult) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CFlightResult] Deserialize(const c_string& serialized) + + cdef cppclass CBasicAuth" arrow::flight::BasicAuth": + CBasicAuth() + CBasicAuth(CBuffer) + CBasicAuth(CBasicAuth) + c_string username + c_string password + bint operator==(CBasicAuth) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CBasicAuth] Deserialize(const c_string& serialized) + + cdef cppclass CResultStream" arrow::flight::ResultStream": + CResult[unique_ptr[CFlightResult]] Next() + + cdef cppclass CDescriptorType \ + " arrow::flight::FlightDescriptor::DescriptorType": + bint operator==(CDescriptorType) + + CDescriptorType CDescriptorTypeUnknown\ + " arrow::flight::FlightDescriptor::UNKNOWN" + CDescriptorType CDescriptorTypePath\ + " arrow::flight::FlightDescriptor::PATH" + CDescriptorType CDescriptorTypeCmd\ + " arrow::flight::FlightDescriptor::CMD" + + cdef cppclass CFlightDescriptor" arrow::flight::FlightDescriptor": + CDescriptorType type + c_string cmd + vector[c_string] path + bint operator==(CFlightDescriptor) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CFlightDescriptor] Deserialize(const c_string& serialized) + + cdef cppclass CTicket" arrow::flight::Ticket": + CTicket() + c_string ticket + bint operator==(CTicket) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CTicket] Deserialize(const c_string& serialized) + + cdef cppclass CCriteria" arrow::flight::Criteria": + CCriteria() + c_string expression + bint operator==(CCriteria) + CResult[c_string] SerializeToString() + + @staticmethod + CResult[CCriteria] Deserialize(const c_string& serialized) + + cdef cppclass CLocation" arrow::flight::Location": + CLocation() + c_string ToString() + c_bool Equals(const CLocation& other) + + @staticmethod + CResult[CLocation] Parse(const c_string& uri_string) + + @staticmethod + CResult[CLocation] ForGrpcTcp(const c_string& host, int port) + + @staticmethod + CResult[CLocation] ForGrpcTls(const c_string& host, int port) + + @staticmethod + CResult[CLocation] ForGrpcUnix(const c_string& path) + + cdef cppclass CFlightEndpoint" arrow::flight::FlightEndpoint": + CFlightEndpoint() + + CTicket ticket + vector[CLocation] locations + + bint operator==(CFlightEndpoint) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CFlightEndpoint] Deserialize(const c_string& serialized) + + cdef cppclass CFlightInfo" arrow::flight::FlightInfo": + 
CFlightInfo(CFlightInfo info) + int64_t total_records() + int64_t total_bytes() + CResult[shared_ptr[CSchema]] GetSchema(CDictionaryMemo* memo) + CFlightDescriptor& descriptor() + const vector[CFlightEndpoint]& endpoints() + CResult[c_string] SerializeToString() + c_string ToString() + bint operator==(CFlightInfo) + + @staticmethod + CResult[unique_ptr[CFlightInfo]] Deserialize( + const c_string& serialized) + + cdef cppclass CSchemaResult" arrow::flight::SchemaResult": + CSchemaResult() + CSchemaResult(CSchemaResult result) + CResult[shared_ptr[CSchema]] GetSchema(CDictionaryMemo* memo) + bint operator==(CSchemaResult) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CSchemaResult] Deserialize(const c_string& serialized) + + cdef cppclass CFlightListing" arrow::flight::FlightListing": + CResult[unique_ptr[CFlightInfo]] Next() + + cdef cppclass CSimpleFlightListing" arrow::flight::SimpleFlightListing": + # This doesn't work with Cython >= 3 + # CSimpleFlightListing(vector[CFlightInfo]&& info) + CSimpleFlightListing(const vector[CFlightInfo]& info) + + cdef cppclass CFlightPayload" arrow::flight::FlightPayload": + shared_ptr[CBuffer] descriptor + shared_ptr[CBuffer] app_metadata + CIpcPayload ipc_message + + cdef cppclass CFlightDataStream" arrow::flight::FlightDataStream": + shared_ptr[CSchema] schema() + CResult[CFlightPayload] Next() + + cdef cppclass CFlightStreamChunk" arrow::flight::FlightStreamChunk": + CFlightStreamChunk() + shared_ptr[CRecordBatch] data + shared_ptr[CBuffer] app_metadata + + cdef cppclass CMetadataRecordBatchReader \ + " arrow::flight::MetadataRecordBatchReader": + CResult[shared_ptr[CSchema]] GetSchema() + CResult[CFlightStreamChunk] Next() + CResult[shared_ptr[CTable]] ToTable() + + CResult[shared_ptr[CRecordBatchReader]] MakeRecordBatchReader\ + " arrow::flight::MakeRecordBatchReader"( + shared_ptr[CMetadataRecordBatchReader]) + + cdef cppclass CMetadataRecordBatchWriter \ + " arrow::flight::MetadataRecordBatchWriter"(CRecordBatchWriter): + CStatus Begin(shared_ptr[CSchema] schema, + const CIpcWriteOptions& options) + CStatus WriteMetadata(shared_ptr[CBuffer] app_metadata) + CStatus WriteWithMetadata(const CRecordBatch& batch, + shared_ptr[CBuffer] app_metadata) + + cdef cppclass CFlightStreamReader \ + " arrow::flight::FlightStreamReader"(CMetadataRecordBatchReader): + void Cancel() + CResult[shared_ptr[CTable]] ToTableWithStopToken" ToTable"\ + (const CStopToken& stop_token) + + cdef cppclass CFlightMessageReader \ + " arrow::flight::FlightMessageReader"(CMetadataRecordBatchReader): + CFlightDescriptor& descriptor() + + cdef cppclass CFlightMessageWriter \ + " arrow::flight::FlightMessageWriter"(CMetadataRecordBatchWriter): + pass + + cdef cppclass CFlightStreamWriter \ + " arrow::flight::FlightStreamWriter"(CMetadataRecordBatchWriter): + CStatus DoneWriting() + + cdef cppclass CRecordBatchStream \ + " arrow::flight::RecordBatchStream"(CFlightDataStream): + CRecordBatchStream(shared_ptr[CRecordBatchReader]& reader, + const CIpcWriteOptions& options) + + cdef cppclass CFlightMetadataReader" arrow::flight::FlightMetadataReader": + CStatus ReadMetadata(shared_ptr[CBuffer]* out) + + cdef cppclass CFlightMetadataWriter" arrow::flight::FlightMetadataWriter": + CStatus WriteMetadata(const CBuffer& message) + + cdef cppclass CServerAuthReader" arrow::flight::ServerAuthReader": + CStatus Read(c_string* token) + + cdef cppclass CServerAuthSender" arrow::flight::ServerAuthSender": + CStatus Write(c_string& token) + + cdef cppclass 
CClientAuthReader" arrow::flight::ClientAuthReader": + CStatus Read(c_string* token) + + cdef cppclass CClientAuthSender" arrow::flight::ClientAuthSender": + CStatus Write(c_string& token) + + cdef cppclass CServerAuthHandler" arrow::flight::ServerAuthHandler": + pass + + cdef cppclass CClientAuthHandler" arrow::flight::ClientAuthHandler": + pass + + cdef cppclass CServerCallContext" arrow::flight::ServerCallContext": + c_string& peer_identity() + c_string& peer() + c_bool is_cancelled() + void AddHeader(const c_string& key, const c_string& value) + void AddTrailer(const c_string& key, const c_string& value) + CServerMiddleware* GetMiddleware(const c_string& key) + + cdef cppclass CTimeoutDuration" arrow::flight::TimeoutDuration": + CTimeoutDuration(double) + + cdef cppclass CFlightCallOptions" arrow::flight::FlightCallOptions": + CFlightCallOptions() + CTimeoutDuration timeout + CIpcWriteOptions write_options + CIpcReadOptions read_options + vector[pair[c_string, c_string]] headers + CStopToken stop_token + + cdef cppclass CCertKeyPair" arrow::flight::CertKeyPair": + CCertKeyPair() + c_string pem_cert + c_string pem_key + + cdef cppclass CFlightMethod" arrow::flight::FlightMethod": + bint operator==(CFlightMethod) + + CFlightMethod CFlightMethodInvalid\ + " arrow::flight::FlightMethod::Invalid" + CFlightMethod CFlightMethodHandshake\ + " arrow::flight::FlightMethod::Handshake" + CFlightMethod CFlightMethodListFlights\ + " arrow::flight::FlightMethod::ListFlights" + CFlightMethod CFlightMethodGetFlightInfo\ + " arrow::flight::FlightMethod::GetFlightInfo" + CFlightMethod CFlightMethodGetSchema\ + " arrow::flight::FlightMethod::GetSchema" + CFlightMethod CFlightMethodDoGet\ + " arrow::flight::FlightMethod::DoGet" + CFlightMethod CFlightMethodDoPut\ + " arrow::flight::FlightMethod::DoPut" + CFlightMethod CFlightMethodDoAction\ + " arrow::flight::FlightMethod::DoAction" + CFlightMethod CFlightMethodListActions\ + " arrow::flight::FlightMethod::ListActions" + CFlightMethod CFlightMethodDoExchange\ + " arrow::flight::FlightMethod::DoExchange" + + cdef cppclass CCallInfo" arrow::flight::CallInfo": + CFlightMethod method + + # This is really std::unordered_multimap, but Cython has no + # bindings for it, so treat it as an opaque class and bind the + # methods we need + cdef cppclass CCallHeaders" arrow::flight::CallHeaders": + cppclass const_iterator: + pair[c_string, c_string] operator*() + # For Cython < 3 + const_iterator operator++() + # For Cython >= 3 + const_iterator operator++(int) + bint operator==(const_iterator) + bint operator!=(const_iterator) + const_iterator cbegin() + const_iterator cend() + + cdef cppclass CAddCallHeaders" arrow::flight::AddCallHeaders": + void AddHeader(const c_string& key, const c_string& value) + + cdef cppclass CServerMiddleware" arrow::flight::ServerMiddleware": + c_string name() + + cdef cppclass CServerMiddlewareFactory\ + " arrow::flight::ServerMiddlewareFactory": + pass + + cdef cppclass CClientMiddleware" arrow::flight::ClientMiddleware": + pass + + cdef cppclass CClientMiddlewareFactory\ + " arrow::flight::ClientMiddlewareFactory": + pass + + cpdef cppclass CTracingServerMiddlewareTraceKey\ + " arrow::flight::TracingServerMiddleware::TraceKey": + CTracingServerMiddlewareTraceKey() + c_string key + c_string value + + cdef cppclass CTracingServerMiddleware\ + " arrow::flight::TracingServerMiddleware"(CServerMiddleware): + vector[CTracingServerMiddlewareTraceKey] GetTraceContext() + + cdef shared_ptr[CServerMiddlewareFactory] \ + 
MakeTracingServerMiddlewareFactory\ + " arrow::flight::MakeTracingServerMiddlewareFactory"() + + cdef cppclass CFlightServerOptions" arrow::flight::FlightServerOptions": + CFlightServerOptions(const CLocation& location) + CLocation location + unique_ptr[CServerAuthHandler] auth_handler + vector[CCertKeyPair] tls_certificates + c_bool verify_client + c_string root_certificates + vector[pair[c_string, shared_ptr[CServerMiddlewareFactory]]] middleware + + cdef cppclass CFlightClientOptions" arrow::flight::FlightClientOptions": + c_string tls_root_certs + c_string cert_chain + c_string private_key + c_string override_hostname + vector[shared_ptr[CClientMiddlewareFactory]] middleware + int64_t write_size_limit_bytes + vector[pair[c_string, CIntStringVariant]] generic_options + c_bool disable_server_verification + + @staticmethod + CFlightClientOptions Defaults() + + cdef cppclass CDoPutResult" arrow::flight::FlightClient::DoPutResult": + unique_ptr[CFlightStreamWriter] writer + unique_ptr[CFlightMetadataReader] reader + + cdef cppclass CDoExchangeResult" arrow::flight::FlightClient::DoExchangeResult": + unique_ptr[CFlightStreamWriter] writer + unique_ptr[CFlightStreamReader] reader + + cdef cppclass CFlightClient" arrow::flight::FlightClient": + @staticmethod + CResult[unique_ptr[CFlightClient]] Connect(const CLocation& location, + const CFlightClientOptions& options) + + c_bool supports_async() + CStatus CheckAsyncSupport() + + CStatus Authenticate(CFlightCallOptions& options, + unique_ptr[CClientAuthHandler] auth_handler) + + CResult[pair[c_string, c_string]] AuthenticateBasicToken( + CFlightCallOptions& options, + const c_string& username, + const c_string& password) + + CResult[unique_ptr[CResultStream]] DoAction(CFlightCallOptions& options, CAction& action) + CResult[vector[CActionType]] ListActions(CFlightCallOptions& options) + + CResult[unique_ptr[CFlightListing]] ListFlights(CFlightCallOptions& options, CCriteria criteria) + CResult[unique_ptr[CFlightInfo]] GetFlightInfo(CFlightCallOptions& options, + CFlightDescriptor& descriptor) + CFuture[CFlightInfo] GetFlightInfoAsync(CFlightCallOptions& options, + CFlightDescriptor& descriptor) + CResult[unique_ptr[CSchemaResult]] GetSchema(CFlightCallOptions& options, + CFlightDescriptor& descriptor) + CResult[unique_ptr[CFlightStreamReader]] DoGet(CFlightCallOptions& options, CTicket& ticket) + CResult[CDoPutResult] DoPut(CFlightCallOptions& options, + CFlightDescriptor& descriptor, + shared_ptr[CSchema]& schema) + CResult[CDoExchangeResult] DoExchange(CFlightCallOptions& options, + CFlightDescriptor& descriptor) + CStatus Close() + + cdef cppclass CFlightStatusCode" arrow::flight::FlightStatusCode": + bint operator==(CFlightStatusCode) + + CFlightStatusCode CFlightStatusInternal \ + " arrow::flight::FlightStatusCode::Internal" + CFlightStatusCode CFlightStatusTimedOut \ + " arrow::flight::FlightStatusCode::TimedOut" + CFlightStatusCode CFlightStatusCancelled \ + " arrow::flight::FlightStatusCode::Cancelled" + CFlightStatusCode CFlightStatusUnauthenticated \ + " arrow::flight::FlightStatusCode::Unauthenticated" + CFlightStatusCode CFlightStatusUnauthorized \ + " arrow::flight::FlightStatusCode::Unauthorized" + CFlightStatusCode CFlightStatusUnavailable \ + " arrow::flight::FlightStatusCode::Unavailable" + CFlightStatusCode CFlightStatusFailed \ + " arrow::flight::FlightStatusCode::Failed" + + cdef cppclass FlightStatusDetail" arrow::flight::FlightStatusDetail": + CFlightStatusCode code() + c_string extra_info() + + @staticmethod + 
shared_ptr[FlightStatusDetail] UnwrapStatus(const CStatus& status) + + cdef cppclass FlightWriteSizeStatusDetail\ + " arrow::flight::FlightWriteSizeStatusDetail": + int64_t limit() + int64_t actual() + + @staticmethod + shared_ptr[FlightWriteSizeStatusDetail] UnwrapStatus( + const CStatus& status) + + cdef CStatus MakeFlightError" arrow::flight::MakeFlightError" \ + (CFlightStatusCode code, const c_string& message) + + cdef CStatus MakeFlightError" arrow::flight::MakeFlightError" \ + (CFlightStatusCode code, + const c_string& message, + const c_string& extra_info) + +# Callbacks for implementing Flight servers +# Use typedef to emulate syntax for std::function +ctypedef CStatus cb_list_flights(object, const CServerCallContext&, + const CCriteria*, + unique_ptr[CFlightListing]*) +ctypedef CStatus cb_get_flight_info(object, const CServerCallContext&, + const CFlightDescriptor&, + unique_ptr[CFlightInfo]*) +ctypedef CStatus cb_get_schema(object, const CServerCallContext&, + const CFlightDescriptor&, + unique_ptr[CSchemaResult]*) +ctypedef CStatus cb_do_put(object, const CServerCallContext&, + unique_ptr[CFlightMessageReader], + unique_ptr[CFlightMetadataWriter]) +ctypedef CStatus cb_do_get(object, const CServerCallContext&, + const CTicket&, + unique_ptr[CFlightDataStream]*) +ctypedef CStatus cb_do_exchange(object, const CServerCallContext&, + unique_ptr[CFlightMessageReader], + unique_ptr[CFlightMessageWriter]) +ctypedef CStatus cb_do_action(object, const CServerCallContext&, + const CAction&, + unique_ptr[CResultStream]*) +ctypedef CStatus cb_list_actions(object, const CServerCallContext&, + vector[CActionType]*) +ctypedef CStatus cb_result_next(object, unique_ptr[CFlightResult]*) +ctypedef CStatus cb_data_stream_next(object, CFlightPayload*) +ctypedef CStatus cb_server_authenticate(object, CServerAuthSender*, + CServerAuthReader*) +ctypedef CStatus cb_is_valid(object, const c_string&, c_string*) +ctypedef CStatus cb_client_authenticate(object, CClientAuthSender*, + CClientAuthReader*) +ctypedef CStatus cb_get_token(object, c_string*) + +ctypedef CStatus cb_middleware_sending_headers(object, CAddCallHeaders*) +ctypedef CStatus cb_middleware_call_completed(object, const CStatus&) +ctypedef CStatus cb_client_middleware_received_headers( + object, const CCallHeaders&) +ctypedef CStatus cb_server_middleware_start_call( + object, + const CCallInfo&, + const CCallHeaders&, + shared_ptr[CServerMiddleware]*) +ctypedef CStatus cb_client_middleware_start_call( + object, + const CCallInfo&, + unique_ptr[CClientMiddleware]*) + +cdef extern from "arrow/python/flight.h" namespace "arrow::py::flight" nogil: + cdef char* CPyServerMiddlewareName\ + " arrow::py::flight::kPyServerMiddlewareName" + + cdef cppclass PyFlightServerVtable: + PyFlightServerVtable() + function[cb_list_flights] list_flights + function[cb_get_flight_info] get_flight_info + function[cb_get_schema] get_schema + function[cb_do_put] do_put + function[cb_do_get] do_get + function[cb_do_exchange] do_exchange + function[cb_do_action] do_action + function[cb_list_actions] list_actions + + cdef cppclass PyServerAuthHandlerVtable: + PyServerAuthHandlerVtable() + function[cb_server_authenticate] authenticate + function[cb_is_valid] is_valid + + cdef cppclass PyClientAuthHandlerVtable: + PyClientAuthHandlerVtable() + function[cb_client_authenticate] authenticate + function[cb_get_token] get_token + + cdef cppclass PyFlightServer: + PyFlightServer(object server, PyFlightServerVtable vtable) + + CStatus Init(CFlightServerOptions& options) + int 
port() + CStatus ServeWithSignals() except * + CStatus Shutdown() + CStatus Wait() + + cdef cppclass PyServerAuthHandler\ + " arrow::py::flight::PyServerAuthHandler"(CServerAuthHandler): + PyServerAuthHandler(object handler, PyServerAuthHandlerVtable vtable) + + cdef cppclass PyClientAuthHandler\ + " arrow::py::flight::PyClientAuthHandler"(CClientAuthHandler): + PyClientAuthHandler(object handler, PyClientAuthHandlerVtable vtable) + + cdef cppclass CPyFlightResultStream\ + " arrow::py::flight::PyFlightResultStream"(CResultStream): + CPyFlightResultStream(object generator, + function[cb_result_next] callback) + + cdef cppclass CPyFlightDataStream\ + " arrow::py::flight::PyFlightDataStream"(CFlightDataStream): + CPyFlightDataStream(object data_source, + unique_ptr[CFlightDataStream] stream) + + cdef cppclass CPyGeneratorFlightDataStream\ + " arrow::py::flight::PyGeneratorFlightDataStream"\ + (CFlightDataStream): + CPyGeneratorFlightDataStream(object generator, + shared_ptr[CSchema] schema, + function[cb_data_stream_next] callback, + const CIpcWriteOptions& options) + + cdef cppclass PyServerMiddlewareVtable\ + " arrow::py::flight::PyServerMiddleware::Vtable": + PyServerMiddlewareVtable() + function[cb_middleware_sending_headers] sending_headers + function[cb_middleware_call_completed] call_completed + + cdef cppclass PyClientMiddlewareVtable\ + " arrow::py::flight::PyClientMiddleware::Vtable": + PyClientMiddlewareVtable() + function[cb_middleware_sending_headers] sending_headers + function[cb_client_middleware_received_headers] received_headers + function[cb_middleware_call_completed] call_completed + + cdef cppclass CPyServerMiddleware\ + " arrow::py::flight::PyServerMiddleware"(CServerMiddleware): + CPyServerMiddleware(object middleware, PyServerMiddlewareVtable vtable) + void* py_object() + + cdef cppclass CPyServerMiddlewareFactory\ + " arrow::py::flight::PyServerMiddlewareFactory"\ + (CServerMiddlewareFactory): + CPyServerMiddlewareFactory( + object factory, + function[cb_server_middleware_start_call] start_call) + + cdef cppclass CPyClientMiddleware\ + " arrow::py::flight::PyClientMiddleware"(CClientMiddleware): + CPyClientMiddleware(object middleware, PyClientMiddlewareVtable vtable) + + cdef cppclass CPyClientMiddlewareFactory\ + " arrow::py::flight::PyClientMiddlewareFactory"\ + (CClientMiddlewareFactory): + CPyClientMiddlewareFactory( + object factory, + function[cb_client_middleware_start_call] start_call) + + cdef CStatus CreateFlightInfo" arrow::py::flight::CreateFlightInfo"( + shared_ptr[CSchema] schema, + CFlightDescriptor& descriptor, + vector[CFlightEndpoint] endpoints, + int64_t total_records, + int64_t total_bytes, + unique_ptr[CFlightInfo]* out) + + cdef CStatus CreateSchemaResult" arrow::py::flight::CreateSchemaResult"( + shared_ptr[CSchema] schema, + unique_ptr[CSchemaResult]* out) + + +cdef extern from "" namespace "std" nogil: + cdef cppclass CIntStringVariant" std::variant": + CIntStringVariant() + CIntStringVariant(int) + CIntStringVariant(c_string) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd new file mode 100644 index 0000000000000000000000000000000000000000..cc260b80c77796b1d859c4a205a5e405e4f1d17a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd @@ -0,0 +1,357 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport CTimePoint + +cdef extern from "arrow/filesystem/api.h" namespace "arrow::fs" nogil: + + ctypedef enum CFileType "arrow::fs::FileType": + CFileType_NotFound "arrow::fs::FileType::NotFound" + CFileType_Unknown "arrow::fs::FileType::Unknown" + CFileType_File "arrow::fs::FileType::File" + CFileType_Directory "arrow::fs::FileType::Directory" + + cdef cppclass CFileInfo "arrow::fs::FileInfo": + CFileInfo() + CFileInfo(CFileInfo) + CFileInfo& operator=(CFileInfo) + CFileInfo(const CFileInfo&) + CFileInfo& operator=(const CFileInfo&) + + CFileType type() + void set_type(CFileType type) + c_string path() + void set_path(const c_string& path) + c_string base_name() + int64_t size() + void set_size(int64_t size) + c_string extension() + CTimePoint mtime() + void set_mtime(CTimePoint mtime) + + cdef cppclass CFileSelector "arrow::fs::FileSelector": + CFileSelector() + c_string base_dir + c_bool allow_not_found + c_bool recursive + + cdef cppclass CFileLocator "arrow::fs::FileLocator": + shared_ptr[CFileSystem] filesystem + c_string path + + cdef cppclass CFileSystem "arrow::fs::FileSystem": + shared_ptr[CFileSystem] shared_from_this() + c_string type_name() const + CResult[c_string] NormalizePath(c_string path) + CResult[c_string] MakeUri(c_string path) + CResult[CFileInfo] GetFileInfo(const c_string& path) + CResult[vector[CFileInfo]] GetFileInfo( + const vector[c_string]& paths) + CResult[vector[CFileInfo]] GetFileInfo(const CFileSelector& select) + CStatus CreateDir(const c_string& path, c_bool recursive) + CStatus DeleteDir(const c_string& path) + CStatus DeleteDirContents(const c_string& path, c_bool missing_dir_ok) + CStatus DeleteRootDirContents() + CStatus DeleteFile(const c_string& path) + CStatus DeleteFiles(const vector[c_string]& paths) + CStatus Move(const c_string& src, const c_string& dest) + CStatus CopyFile(const c_string& src, const c_string& dest) + CResult[shared_ptr[CInputStream]] OpenInputStream( + const c_string& path) + CResult[shared_ptr[CRandomAccessFile]] OpenInputFile( + const c_string& path) + CResult[shared_ptr[COutputStream]] OpenOutputStream( + const c_string& path, const shared_ptr[const CKeyValueMetadata]&) + CResult[shared_ptr[COutputStream]] OpenAppendStream( + const c_string& path, const shared_ptr[const CKeyValueMetadata]&) + c_bool Equals(const CFileSystem& other) + c_bool Equals(shared_ptr[CFileSystem] other) + + CResult[shared_ptr[CFileSystem]] CFileSystemFromUri \ + "arrow::fs::FileSystemFromUri"(const c_string& uri) + CResult[shared_ptr[CFileSystem]] CFileSystemFromUri \ + "arrow::fs::FileSystemFromUri"(const c_string& uri, c_string* out_path) + CResult[shared_ptr[CFileSystem]] CFileSystemFromUriOrPath \ + 
"arrow::fs::FileSystemFromUriOrPath"(const c_string& uri, + c_string* out_path) + + cdef cppclass CFileSystemGlobalOptions \ + "arrow::fs::FileSystemGlobalOptions": + c_string tls_ca_file_path + c_string tls_ca_dir_path + + CStatus CFileSystemsInitialize "arrow::fs::Initialize" \ + (const CFileSystemGlobalOptions& options) + + cdef cppclass CSubTreeFileSystem \ + "arrow::fs::SubTreeFileSystem"(CFileSystem): + CSubTreeFileSystem(const c_string& base_path, + shared_ptr[CFileSystem] base_fs) + c_string base_path() + shared_ptr[CFileSystem] base_fs() + + ctypedef enum CS3LogLevel "arrow::fs::S3LogLevel": + CS3LogLevel_Off "arrow::fs::S3LogLevel::Off" + CS3LogLevel_Fatal "arrow::fs::S3LogLevel::Fatal" + CS3LogLevel_Error "arrow::fs::S3LogLevel::Error" + CS3LogLevel_Warn "arrow::fs::S3LogLevel::Warn" + CS3LogLevel_Info "arrow::fs::S3LogLevel::Info" + CS3LogLevel_Debug "arrow::fs::S3LogLevel::Debug" + CS3LogLevel_Trace "arrow::fs::S3LogLevel::Trace" + + cdef struct CS3GlobalOptions "arrow::fs::S3GlobalOptions": + CS3LogLevel log_level + int num_event_loop_threads + + cdef cppclass CS3ProxyOptions "arrow::fs::S3ProxyOptions": + c_string scheme + c_string host + int port + c_string username + c_string password + c_bool Equals(const CS3ProxyOptions& other) + + @staticmethod + CResult[CS3ProxyOptions] FromUriString "FromUri"( + const c_string& uri_string) + + ctypedef enum CS3CredentialsKind "arrow::fs::S3CredentialsKind": + CS3CredentialsKind_Anonymous "arrow::fs::S3CredentialsKind::Anonymous" + CS3CredentialsKind_Default "arrow::fs::S3CredentialsKind::Default" + CS3CredentialsKind_Explicit "arrow::fs::S3CredentialsKind::Explicit" + CS3CredentialsKind_Role "arrow::fs::S3CredentialsKind::Role" + CS3CredentialsKind_WebIdentity \ + "arrow::fs::S3CredentialsKind::WebIdentity" + + cdef cppclass CS3RetryStrategy "arrow::fs::S3RetryStrategy": + @staticmethod + shared_ptr[CS3RetryStrategy] GetAwsDefaultRetryStrategy(int64_t max_attempts) + + @staticmethod + shared_ptr[CS3RetryStrategy] GetAwsStandardRetryStrategy(int64_t max_attempts) + + cdef cppclass CS3Options "arrow::fs::S3Options": + c_string region + double connect_timeout + double request_timeout + c_string endpoint_override + c_string scheme + c_bool background_writes + c_bool allow_bucket_creation + c_bool allow_bucket_deletion + c_bool check_directory_existence_before_creation + c_bool force_virtual_addressing + shared_ptr[const CKeyValueMetadata] default_metadata + c_string role_arn + c_string session_name + c_string external_id + int load_frequency + CS3ProxyOptions proxy_options + CS3CredentialsKind credentials_kind + shared_ptr[CS3RetryStrategy] retry_strategy + void ConfigureDefaultCredentials() + void ConfigureAccessKey(const c_string& access_key, + const c_string& secret_key, + const c_string& session_token) + c_string GetAccessKey() + c_string GetSecretKey() + c_string GetSessionToken() + c_bool Equals(const CS3Options& other) + + @staticmethod + CS3Options Defaults() + + @staticmethod + CS3Options Anonymous() + + @staticmethod + CS3Options FromAccessKey(const c_string& access_key, + const c_string& secret_key, + const c_string& session_token) + + @staticmethod + CS3Options FromAssumeRole(const c_string& role_arn, + const c_string& session_name, + const c_string& external_id, + const int load_frequency) + + cdef cppclass CS3FileSystem "arrow::fs::S3FileSystem"(CFileSystem): + @staticmethod + CResult[shared_ptr[CS3FileSystem]] Make(const CS3Options& options) + CS3Options options() + c_string region() + + cdef CStatus CInitializeS3 
"arrow::fs::InitializeS3"( + const CS3GlobalOptions& options) + cdef CStatus CEnsureS3Initialized "arrow::fs::EnsureS3Initialized"() + cdef CStatus CFinalizeS3 "arrow::fs::FinalizeS3"() + cdef CStatus CEnsureS3Finalized "arrow::fs::EnsureS3Finalized"() + + cdef CResult[c_string] ResolveS3BucketRegion(const c_string& bucket) + + cdef cppclass CGcsCredentials "arrow::fs::GcsCredentials": + c_bool anonymous() + CTimePoint expiration() + c_string access_token() + c_string target_service_account() + + cdef cppclass CGcsOptions "arrow::fs::GcsOptions": + CGcsCredentials credentials + c_string endpoint_override + c_string scheme + c_string default_bucket_location + optional[c_string] project_id + optional[double] retry_limit_seconds + shared_ptr[const CKeyValueMetadata] default_metadata + c_bool Equals(const CS3Options& other) + + @staticmethod + CGcsOptions Defaults() + + @staticmethod + CGcsOptions Anonymous() + + @staticmethod + CGcsOptions FromAccessToken(const c_string& access_token, + CTimePoint expiration) + + @staticmethod + CGcsOptions FromImpersonatedServiceAccount(const CGcsCredentials& base_credentials, + c_string& target_service_account) + + cdef cppclass CGcsFileSystem "arrow::fs::GcsFileSystem": + @staticmethod + CResult[shared_ptr[CGcsFileSystem]] Make(const CGcsOptions& options) + CGcsOptions options() + + cdef cppclass CAzureOptions "arrow::fs::AzureOptions": + c_string account_name + c_string blob_storage_authority + c_string dfs_storage_authority + c_string blob_storage_scheme + c_string dfs_storage_scheme + + c_bool Equals(const CAzureOptions& other) + CStatus ConfigureDefaultCredential() + CStatus ConfigureAccountKeyCredential(c_string account_key) + + cdef cppclass CAzureFileSystem "arrow::fs::AzureFileSystem": + @staticmethod + CResult[shared_ptr[CAzureFileSystem]] Make(const CAzureOptions& options) + CAzureOptions options() + + cdef cppclass CHdfsOptions "arrow::fs::HdfsOptions": + HdfsConnectionConfig connection_config + int32_t buffer_size + int16_t replication + int64_t default_block_size + + @staticmethod + CResult[CHdfsOptions] FromUriString "FromUri"( + const c_string& uri_string) + void ConfigureEndPoint(c_string host, int port) + void ConfigureDriver(c_bool use_hdfs3) + void ConfigureReplication(int16_t replication) + void ConfigureUser(c_string user_name) + void ConfigureBufferSize(int32_t buffer_size) + void ConfigureBlockSize(int64_t default_block_size) + void ConfigureKerberosTicketCachePath(c_string path) + void ConfigureExtraConf(c_string key, c_string value) + + cdef cppclass CHadoopFileSystem "arrow::fs::HadoopFileSystem"(CFileSystem): + @staticmethod + CResult[shared_ptr[CHadoopFileSystem]] Make( + const CHdfsOptions& options) + CHdfsOptions options() + + cdef cppclass CMockFileSystem "arrow::fs::internal::MockFileSystem"( + CFileSystem): + CMockFileSystem(CTimePoint current_time) + + CStatus CCopyFiles "arrow::fs::CopyFiles"( + const vector[CFileLocator]& sources, + const vector[CFileLocator]& destinations, + const CIOContext& io_context, + int64_t chunk_size, c_bool use_threads) + CStatus CCopyFilesWithSelector "arrow::fs::CopyFiles"( + const shared_ptr[CFileSystem]& source_fs, + const CFileSelector& source_sel, + const shared_ptr[CFileSystem]& destination_fs, + const c_string& destination_base_dir, + const CIOContext& io_context, + int64_t chunk_size, c_bool use_threads) + + +# Callbacks for implementing Python filesystems +# Use typedef to emulate syntax for std::function +ctypedef void CallbackGetTypeName(object, c_string*) +ctypedef c_bool 
CallbackEquals(object, const CFileSystem&) + +ctypedef void CallbackGetFileInfo(object, const c_string&, CFileInfo*) +ctypedef void CallbackGetFileInfoVector(object, const vector[c_string]&, + vector[CFileInfo]*) +ctypedef void CallbackGetFileInfoSelector(object, const CFileSelector&, + vector[CFileInfo]*) +ctypedef void CallbackCreateDir(object, const c_string&, c_bool) +ctypedef void CallbackDeleteDir(object, const c_string&) +ctypedef void CallbackDeleteDirContents(object, const c_string&, c_bool) +ctypedef void CallbackDeleteRootDirContents(object) +ctypedef void CallbackDeleteFile(object, const c_string&) +ctypedef void CallbackMove(object, const c_string&, const c_string&) +ctypedef void CallbackCopyFile(object, const c_string&, const c_string&) + +ctypedef void CallbackOpenInputStream(object, const c_string&, + shared_ptr[CInputStream]*) +ctypedef void CallbackOpenInputFile(object, const c_string&, + shared_ptr[CRandomAccessFile]*) +ctypedef void CallbackOpenOutputStream( + object, const c_string&, const shared_ptr[const CKeyValueMetadata]&, + shared_ptr[COutputStream]*) +ctypedef void CallbackNormalizePath(object, const c_string&, c_string*) + +cdef extern from "arrow/python/filesystem.h" namespace "arrow::py::fs" nogil: + + cdef cppclass CPyFileSystemVtable "arrow::py::fs::PyFileSystemVtable": + PyFileSystemVtable() + function[CallbackGetTypeName] get_type_name + function[CallbackEquals] equals + function[CallbackGetFileInfo] get_file_info + function[CallbackGetFileInfoVector] get_file_info_vector + function[CallbackGetFileInfoSelector] get_file_info_selector + function[CallbackCreateDir] create_dir + function[CallbackDeleteDir] delete_dir + function[CallbackDeleteDirContents] delete_dir_contents + function[CallbackDeleteRootDirContents] delete_root_dir_contents + function[CallbackDeleteFile] delete_file + function[CallbackMove] move + function[CallbackCopyFile] copy_file + function[CallbackOpenInputStream] open_input_stream + function[CallbackOpenInputFile] open_input_file + function[CallbackOpenOutputStream] open_output_stream + function[CallbackOpenOutputStream] open_append_stream + function[CallbackNormalizePath] normalize_path + + cdef cppclass CPyFileSystem "arrow::py::fs::PyFileSystem": + @staticmethod + shared_ptr[CPyFileSystem] Make(object handler, + CPyFileSystemVtable vtable) + + PyObject* handler() diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9fcc97aaf0a9c4277086a9713104f5ac6817641f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd @@ -0,0 +1,322 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
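# Illustrative sketch, not part of the vendored sources: the declarations
# above are the Cython bindings behind the public pyarrow.fs module. A
# minimal tour of that surface; the paths and the S3 region are placeholder
# assumptions, and S3FileSystem needs a pyarrow build with S3 enabled.

import pyarrow.fs as fs

# FileSystemFromUri, declared above, resolves a URI into a concrete
# filesystem plus the path inside it.
local, path = fs.FileSystem.from_uri("file:///tmp")

# CFileSelector drives recursive listings; each result is a FileInfo
# (CFileInfo above) carrying path, type, size and mtime.
for info in local.get_file_info(fs.FileSelector(path, recursive=True)):
    print(info.path, info.type, info.size)

# CSubTreeFileSystem rebases every operation under a prefix.
subtree = fs.SubTreeFileSystem(path, local)

# CS3Options surfaces as S3FileSystem keyword arguments.
s3 = fs.S3FileSystem(anonymous=True, region="us-east-1")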
+ +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + + +ctypedef CInvalidRowResult PyInvalidRowCallback(object, + const CCSVInvalidRow&) + + +cdef extern from "arrow/python/csv.h" namespace "arrow::py::csv": + + function[CInvalidRowHandler] MakeInvalidRowHandler( + function[PyInvalidRowCallback], object handler) + + +cdef extern from "arrow/python/api.h" namespace "arrow::py": + # Requires GIL + CResult[shared_ptr[CDataType]] InferArrowType( + object obj, object mask, c_bool pandas_null_sentinels) + + +cdef extern from "arrow/python/api.h" namespace "arrow::py::internal": + object NewMonthDayNanoTupleType() + CResult[PyObject*] MonthDayNanoIntervalArrayToPyList( + const CMonthDayNanoIntervalArray& array) + CResult[PyObject*] MonthDayNanoIntervalScalarToPyObject( + const CMonthDayNanoIntervalScalar& scalar) + + +cdef extern from "arrow/python/arrow_to_pandas.h" namespace "arrow::py::MapConversionType": + cdef enum MapConversionType "arrow::py::MapConversionType": + DEFAULT, + LOSSY, + STRICT_ + + +cdef extern from "arrow/python/api.h" namespace "arrow::py" nogil: + shared_ptr[CDataType] GetPrimitiveType(Type type) + + object PyHalf_FromHalf(npy_half value) + + cdef cppclass PyConversionOptions: + PyConversionOptions() + + shared_ptr[CDataType] type + int64_t size + CMemoryPool* pool + c_bool from_pandas + c_bool ignore_timezone + c_bool strict + + # TODO Some functions below are not actually "nogil" + + CResult[shared_ptr[CChunkedArray]] ConvertPySequence( + object obj, object mask, const PyConversionOptions& options, + CMemoryPool* pool) + + CResult[shared_ptr[CDataType]] NumPyDtypeToArrow(object dtype) + + CStatus NdarrayToArrow(CMemoryPool* pool, object ao, object mo, + c_bool from_pandas, + const shared_ptr[CDataType]& type, + shared_ptr[CChunkedArray]* out) + + CStatus NdarrayToArrow(CMemoryPool* pool, object ao, object mo, + c_bool from_pandas, + const shared_ptr[CDataType]& type, + const CCastOptions& cast_options, + shared_ptr[CChunkedArray]* out) + + CStatus NdarrayToTensor(CMemoryPool* pool, object ao, + const vector[c_string]& dim_names, + shared_ptr[CTensor]* out) + + CStatus TensorToNdarray(const shared_ptr[CTensor]& tensor, object base, + PyObject** out) + + CStatus SparseCOOTensorToNdarray( + const shared_ptr[CSparseCOOTensor]& sparse_tensor, object base, + PyObject** out_data, PyObject** out_coords) + + CStatus SparseCSRMatrixToNdarray( + const shared_ptr[CSparseCSRMatrix]& sparse_tensor, object base, + PyObject** out_data, PyObject** out_indptr, PyObject** out_indices) + + CStatus SparseCSCMatrixToNdarray( + const shared_ptr[CSparseCSCMatrix]& sparse_tensor, object base, + PyObject** out_data, PyObject** out_indptr, PyObject** out_indices) + + CStatus SparseCSFTensorToNdarray( + const shared_ptr[CSparseCSFTensor]& sparse_tensor, object base, + PyObject** out_data, PyObject** out_indptr, PyObject** out_indices) + + CStatus NdarraysToSparseCOOTensor(CMemoryPool* pool, object data_ao, + object coords_ao, + const vector[int64_t]& shape, + const vector[c_string]& dim_names, + shared_ptr[CSparseCOOTensor]* out) + + CStatus NdarraysToSparseCSRMatrix(CMemoryPool* pool, object data_ao, + object indptr_ao, object indices_ao, + const vector[int64_t]& shape, + const vector[c_string]& dim_names, + shared_ptr[CSparseCSRMatrix]* out) + + CStatus NdarraysToSparseCSCMatrix(CMemoryPool* pool, object data_ao, + object indptr_ao, object indices_ao, + const vector[int64_t]& shape, + const vector[c_string]& dim_names, + 
shared_ptr[CSparseCSCMatrix]* out) + + CStatus NdarraysToSparseCSFTensor(CMemoryPool* pool, object data_ao, + object indptr_ao, object indices_ao, + const vector[int64_t]& shape, + const vector[int64_t]& axis_order, + const vector[c_string]& dim_names, + shared_ptr[CSparseCSFTensor]* out) + + CStatus TensorToSparseCOOTensor(shared_ptr[CTensor], + shared_ptr[CSparseCOOTensor]* out) + + CStatus TensorToSparseCSRMatrix(shared_ptr[CTensor], + shared_ptr[CSparseCSRMatrix]* out) + + CStatus TensorToSparseCSCMatrix(shared_ptr[CTensor], + shared_ptr[CSparseCSCMatrix]* out) + + CStatus TensorToSparseCSFTensor(shared_ptr[CTensor], + shared_ptr[CSparseCSFTensor]* out) + + CStatus ConvertArrayToPandas(const PandasOptions& options, + shared_ptr[CArray] arr, + object py_ref, PyObject** out) + + CStatus ConvertChunkedArrayToPandas(const PandasOptions& options, + shared_ptr[CChunkedArray] arr, + object py_ref, PyObject** out) + + CStatus ConvertTableToPandas(const PandasOptions& options, + shared_ptr[CTable] table, + PyObject** out) + + void c_set_default_memory_pool \ + " arrow::py::set_default_memory_pool"(CMemoryPool* pool)\ + + CMemoryPool* c_get_memory_pool \ + " arrow::py::get_memory_pool"() + + cdef cppclass PyBuffer(CBuffer): + @staticmethod + CResult[shared_ptr[CBuffer]] FromPyObject(object obj) + + cdef cppclass PyForeignBuffer(CBuffer): + @staticmethod + CStatus Make(const uint8_t* data, int64_t size, object base, + shared_ptr[CBuffer]* out) + + cdef cppclass PyReadableFile(CRandomAccessFile): + PyReadableFile(object fo) + + cdef cppclass PyOutputStream(COutputStream): + PyOutputStream(object fo) + + cdef cppclass PandasOptions: + CMemoryPool* pool + c_bool strings_to_categorical + c_bool zero_copy_only + c_bool integer_object_nulls + c_bool date_as_object + c_bool timestamp_as_object + c_bool use_threads + c_bool coerce_temporal_nanoseconds + c_bool ignore_timezone + c_bool deduplicate_objects + c_bool safe_cast + c_bool split_blocks + c_bool self_destruct + MapConversionType maps_as_pydicts + c_bool decode_dictionaries + unordered_set[c_string] categorical_columns + unordered_set[c_string] extension_columns + c_bool to_numpy + + cdef cppclass CSerializedPyObject" arrow::py::SerializedPyObject": + shared_ptr[CRecordBatch] batch + vector[shared_ptr[CTensor]] tensors + + CStatus WriteTo(COutputStream* dst) + CStatus GetComponents(CMemoryPool* pool, PyObject** dst) + + CStatus SerializeObject(object context, object sequence, + CSerializedPyObject* out) + + CStatus DeserializeObject(object context, + const CSerializedPyObject& obj, + PyObject* base, PyObject** out) + + CStatus ReadSerializedObject(CRandomAccessFile* src, + CSerializedPyObject* out) + + cdef cppclass SparseTensorCounts: + SparseTensorCounts() + int coo + int csr + int csc + int csf + int ndim_csf + int num_total_tensors() const + int num_total_buffers() const + + CStatus GetSerializedFromComponents( + int num_tensors, + const SparseTensorCounts& num_sparse_tensors, + int num_ndarrays, + int num_buffers, + object buffers, + CSerializedPyObject* out) + + +cdef extern from "arrow/python/api.h" namespace "arrow::py::internal" nogil: + cdef cppclass CTimePoint "arrow::py::internal::TimePoint": + pass + + CTimePoint PyDateTime_to_TimePoint(PyDateTime_DateTime* pydatetime) + int64_t TimePoint_to_ns(CTimePoint val) + CTimePoint TimePoint_from_s(double val) + CTimePoint TimePoint_from_ns(int64_t val) + + CResult[c_string] TzinfoToString(PyObject* pytzinfo) + CResult[PyObject*] StringToTzinfo(c_string) + + +cdef extern from 
"arrow/python/init.h": + int arrow_init_numpy() except -1 + + +cdef extern from "arrow/python/pyarrow.h" namespace "arrow::py": + int import_pyarrow() except -1 + + +cdef extern from "arrow/python/common.h" namespace "arrow::py": + c_bool IsPyError(const CStatus& status) + void RestorePyError(const CStatus& status) except * + + +cdef extern from "arrow/python/common.h" namespace "arrow::py" nogil: + cdef cppclass SharedPtrNoGIL[T](shared_ptr[T]): + # This looks like the only way to satisfy both Cython 2 and Cython 3 + SharedPtrNoGIL& operator=(...) + cdef cppclass UniquePtrNoGIL[T, DELETER=*](unique_ptr[T, DELETER]): + UniquePtrNoGIL& operator=(...) + + +cdef extern from "arrow/python/inference.h" namespace "arrow::py": + c_bool IsPyBool(object o) + c_bool IsPyInt(object o) + c_bool IsPyFloat(object o) + + +cdef extern from "arrow/python/ipc.h" namespace "arrow::py": + cdef cppclass CPyRecordBatchReader" arrow::py::PyRecordBatchReader" \ + (CRecordBatchReader): + @staticmethod + CResult[shared_ptr[CRecordBatchReader]] Make(shared_ptr[CSchema], + object) + + +cdef extern from "arrow/python/ipc.h" namespace "arrow::py" nogil: + cdef cppclass CCastingRecordBatchReader" arrow::py::CastingRecordBatchReader" \ + (CRecordBatchReader): + @staticmethod + CResult[shared_ptr[CRecordBatchReader]] Make(shared_ptr[CRecordBatchReader], + shared_ptr[CSchema]) + + +cdef extern from "arrow/python/extension_type.h" namespace "arrow::py": + cdef cppclass CPyExtensionType \ + " arrow::py::PyExtensionType"(CExtensionType): + @staticmethod + CStatus FromClass(const shared_ptr[CDataType] storage_type, + const c_string extension_name, object typ, + shared_ptr[CExtensionType]* out) + + @staticmethod + CStatus FromInstance(shared_ptr[CDataType] storage_type, + object inst, shared_ptr[CExtensionType]* out) + + object GetInstance() + CStatus SetInstance(object) + + c_string PyExtensionName() + CStatus RegisterPyExtensionType(shared_ptr[CDataType]) + CStatus UnregisterPyExtensionType(c_string type_name) + + +cdef extern from "arrow/python/benchmark.h" namespace "arrow::py::benchmark": + void Benchmark_PandasObjectIsNull(object lst) except * + + +cdef extern from "arrow/python/gdb.h" namespace "arrow::gdb" nogil: + void GdbTestSession "arrow::gdb::TestSession"() + +cdef extern from "arrow/python/helpers.h" namespace "arrow::py::internal": + c_bool IsThreadingEnabled() diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c41f4c05d3a77ca4a3ba163b27d9df9f9f234767 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd @@ -0,0 +1,77 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from libcpp.vector cimport vector as std_vector + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_acero cimport * + +ctypedef CResult[CDeclaration] CNamedTableProvider(const std_vector[c_string]&, const CSchema&) + +cdef extern from "arrow/engine/substrait/options.h" namespace "arrow::engine" nogil: + cdef enum ConversionStrictness \ + "arrow::engine::ConversionStrictness": + EXACT_ROUNDTRIP \ + "arrow::engine::ConversionStrictness::EXACT_ROUNDTRIP" + PRESERVE_STRUCTURE \ + "arrow::engine::ConversionStrictness::PRESERVE_STRUCTURE" + BEST_EFFORT \ + "arrow::engine::ConversionStrictness::BEST_EFFORT" + + cdef cppclass CConversionOptions \ + "arrow::engine::ConversionOptions": + CConversionOptions() + ConversionStrictness strictness + function[CNamedTableProvider] named_table_provider + c_bool allow_arrow_extensions + +cdef extern from "arrow/engine/substrait/extension_set.h" \ + namespace "arrow::engine" nogil: + + cdef cppclass ExtensionIdRegistry: + std_vector[c_string] GetSupportedSubstraitFunctions() + + ExtensionIdRegistry* default_extension_id_registry() + +cdef extern from "arrow/engine/substrait/relation.h" namespace "arrow::engine" nogil: + + cdef cppclass CNamedExpression "arrow::engine::NamedExpression": + CExpression expression + c_string name + + cdef cppclass CBoundExpressions "arrow::engine::BoundExpressions": + std_vector[CNamedExpression] named_expressions + shared_ptr[CSchema] schema + +cdef extern from "arrow/engine/substrait/serde.h" namespace "arrow::engine" nogil: + + CResult[shared_ptr[CBuffer]] SerializeExpressions( + const CBoundExpressions& bound_expressions, const CConversionOptions& conversion_options) + + CResult[CBoundExpressions] DeserializeExpressions( + const CBuffer& serialized_expressions) + +cdef extern from "arrow/engine/substrait/util.h" namespace "arrow::engine" nogil: + CResult[shared_ptr[CRecordBatchReader]] ExecuteSerializedPlan( + const CBuffer& substrait_buffer, const ExtensionIdRegistry* registry, + CFunctionRegistry* func_registry, const CConversionOptions& conversion_options, + c_bool use_threads) + + CResult[shared_ptr[CBuffer]] SerializeJsonPlan(const c_string& substrait_json) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd new file mode 100644 index 0000000000000000000000000000000000000000..7d76576bef2b9513f53c20c55ec92bb01c8b2766 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd @@ -0,0 +1,298 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
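# Illustrative sketch, not part of the vendored sources: the Substrait
# declarations above are exposed as pyarrow.substrait. The table contents
# below are placeholder assumptions, and the plan bytes are deliberately
# elided; a real plan comes from a Substrait producer.

import pyarrow as pa
import pyarrow.substrait as substrait

# GetSupportedSubstraitFunctions, via the default ExtensionIdRegistry.
print(len(substrait.get_supported_functions()))

# ExecuteSerializedPlan surfaces as run_query; named tables referenced by
# the plan are resolved through a callback matching CNamedTableProvider.
def table_provider(names, schema):
    return pa.table({"x": [1, 2, 3]})

# plan_bytes = ...  # serialized Substrait plan protobuf, elided here
# reader = substrait.run_query(pa.py_buffer(plan_bytes),
#                              table_provider=table_provider)
# table = reader.read_all()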
+ +# distutils: language = c++ + +from libcpp.string cimport string as c_string +from libcpp.unordered_set cimport unordered_set as c_unordered_set +from libc.stdint cimport int64_t, int32_t, uint8_t, uintptr_t + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + +cdef extern from "gandiva/node.h" namespace "gandiva" nogil: + + cdef cppclass CNode" gandiva::Node": + c_string ToString() + shared_ptr[CDataType] return_type() + + cdef cppclass CGandivaExpression" gandiva::Expression": + c_string ToString() + shared_ptr[CNode] root() + shared_ptr[CField] result() + + ctypedef vector[shared_ptr[CNode]] CNodeVector" gandiva::NodeVector" + + ctypedef vector[shared_ptr[CGandivaExpression]] \ + CExpressionVector" gandiva::ExpressionVector" + +cdef extern from "gandiva/selection_vector.h" namespace "gandiva" nogil: + + cdef cppclass CSelectionVector" gandiva::SelectionVector": + + shared_ptr[CArray] ToArray() + + enum CSelectionVector_Mode" gandiva::SelectionVector::Mode": + CSelectionVector_Mode_NONE" gandiva::SelectionVector::Mode::MODE_NONE" + CSelectionVector_Mode_UINT16" \ + gandiva::SelectionVector::Mode::MODE_UINT16" + CSelectionVector_Mode_UINT32" \ + gandiva::SelectionVector::Mode::MODE_UINT32" + CSelectionVector_Mode_UINT64" \ + gandiva::SelectionVector::Mode::MODE_UINT64" + + cdef CStatus SelectionVector_MakeInt16\ + "gandiva::SelectionVector::MakeInt16"( + int64_t max_slots, CMemoryPool* pool, + shared_ptr[CSelectionVector]* selection_vector) + + cdef CStatus SelectionVector_MakeInt32\ + "gandiva::SelectionVector::MakeInt32"( + int64_t max_slots, CMemoryPool* pool, + shared_ptr[CSelectionVector]* selection_vector) + + cdef CStatus SelectionVector_MakeInt64\ + "gandiva::SelectionVector::MakeInt64"( + int64_t max_slots, CMemoryPool* pool, + shared_ptr[CSelectionVector]* selection_vector) + +cdef inline CSelectionVector_Mode _ensure_selection_mode(str name) except *: + uppercase = name.upper() + if uppercase == 'NONE': + return CSelectionVector_Mode_NONE + elif uppercase == 'UINT16': + return CSelectionVector_Mode_UINT16 + elif uppercase == 'UINT32': + return CSelectionVector_Mode_UINT32 + elif uppercase == 'UINT64': + return CSelectionVector_Mode_UINT64 + else: + raise ValueError('Invalid value for Selection Mode: {!r}'.format(name)) + +cdef inline str _selection_mode_name(CSelectionVector_Mode ctype): + if ctype == CSelectionVector_Mode_NONE: + return 'NONE' + elif ctype == CSelectionVector_Mode_UINT16: + return 'UINT16' + elif ctype == CSelectionVector_Mode_UINT32: + return 'UINT32' + elif ctype == CSelectionVector_Mode_UINT64: + return 'UINT64' + else: + raise RuntimeError('Unexpected CSelectionVector_Mode value') + +cdef extern from "gandiva/condition.h" namespace "gandiva" nogil: + + cdef cppclass CCondition" gandiva::Condition": + c_string ToString() + shared_ptr[CNode] root() + shared_ptr[CField] result() + +cdef extern from "gandiva/arrow.h" namespace "gandiva" nogil: + + ctypedef vector[shared_ptr[CArray]] CArrayVector" gandiva::ArrayVector" + + +cdef extern from "gandiva/tree_expr_builder.h" namespace "gandiva" nogil: + + cdef shared_ptr[CNode] TreeExprBuilder_MakeBoolLiteral \ + "gandiva::TreeExprBuilder::MakeLiteral"(c_bool value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt8Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(uint8_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt16Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(uint16_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt32Literal \ + 
"gandiva::TreeExprBuilder::MakeLiteral"(uint32_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt64Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(uint64_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInt8Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(int8_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInt16Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(int16_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInt32Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(int32_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInt64Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(int64_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeFloatLiteral \ + "gandiva::TreeExprBuilder::MakeLiteral"(float value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeDoubleLiteral \ + "gandiva::TreeExprBuilder::MakeLiteral"(double value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeStringLiteral \ + "gandiva::TreeExprBuilder::MakeStringLiteral"(const c_string& value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeBinaryLiteral \ + "gandiva::TreeExprBuilder::MakeBinaryLiteral"(const c_string& value) + + cdef shared_ptr[CGandivaExpression] TreeExprBuilder_MakeExpression\ + "gandiva::TreeExprBuilder::MakeExpression"( + shared_ptr[CNode] root_node, shared_ptr[CField] result_field) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeFunction \ + "gandiva::TreeExprBuilder::MakeFunction"( + const c_string& name, const CNodeVector& children, + shared_ptr[CDataType] return_type) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeField \ + "gandiva::TreeExprBuilder::MakeField"(shared_ptr[CField] field) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeIf \ + "gandiva::TreeExprBuilder::MakeIf"( + shared_ptr[CNode] condition, shared_ptr[CNode] this_node, + shared_ptr[CNode] else_node, shared_ptr[CDataType] return_type) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeAnd \ + "gandiva::TreeExprBuilder::MakeAnd"(const CNodeVector& children) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeOr \ + "gandiva::TreeExprBuilder::MakeOr"(const CNodeVector& children) + + cdef shared_ptr[CCondition] TreeExprBuilder_MakeCondition \ + "gandiva::TreeExprBuilder::MakeCondition"( + shared_ptr[CNode] condition) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionInt32 \ + "gandiva::TreeExprBuilder::MakeInExpressionInt32"( + shared_ptr[CNode] node, const c_unordered_set[int32_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionInt64 \ + "gandiva::TreeExprBuilder::MakeInExpressionInt64"( + shared_ptr[CNode] node, const c_unordered_set[int64_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTime32 \ + "gandiva::TreeExprBuilder::MakeInExpressionTime32"( + shared_ptr[CNode] node, const c_unordered_set[int32_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTime64 \ + "gandiva::TreeExprBuilder::MakeInExpressionTime64"( + shared_ptr[CNode] node, const c_unordered_set[int64_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionDate32 \ + "gandiva::TreeExprBuilder::MakeInExpressionDate32"( + shared_ptr[CNode] node, const c_unordered_set[int32_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionDate64 \ + "gandiva::TreeExprBuilder::MakeInExpressionDate64"( + shared_ptr[CNode] node, const c_unordered_set[int64_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTimeStamp \ + "gandiva::TreeExprBuilder::MakeInExpressionTimeStamp"( + shared_ptr[CNode] node, const c_unordered_set[int64_t]& 
values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionString \ + "gandiva::TreeExprBuilder::MakeInExpressionString"( + shared_ptr[CNode] node, const c_unordered_set[c_string]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionBinary \ + "gandiva::TreeExprBuilder::MakeInExpressionBinary"( + shared_ptr[CNode] node, const c_unordered_set[c_string]& values) + +cdef extern from "gandiva/projector.h" namespace "gandiva" nogil: + + cdef cppclass CProjector" gandiva::Projector": + + CStatus Evaluate( + const CRecordBatch& batch, CMemoryPool* pool, + const CArrayVector* output) + + CStatus Evaluate( + const CRecordBatch& batch, + const CSelectionVector* selection, + CMemoryPool* pool, + const CArrayVector* output) + + c_string DumpIR() + + cdef CStatus Projector_Make \ + "gandiva::Projector::Make"( + shared_ptr[CSchema] schema, const CExpressionVector& children, + shared_ptr[CProjector]* projector) + + cdef CStatus Projector_Make \ + "gandiva::Projector::Make"( + shared_ptr[CSchema] schema, const CExpressionVector& children, + CSelectionVector_Mode mode, + shared_ptr[CConfiguration] configuration, + shared_ptr[CProjector]* projector) + +cdef extern from "gandiva/filter.h" namespace "gandiva" nogil: + + cdef cppclass CFilter" gandiva::Filter": + + CStatus Evaluate( + const CRecordBatch& batch, + shared_ptr[CSelectionVector] out_selection) + + c_string DumpIR() + + cdef CStatus Filter_Make \ + "gandiva::Filter::Make"( + shared_ptr[CSchema] schema, shared_ptr[CCondition] condition, + shared_ptr[CConfiguration] configuration, + shared_ptr[CFilter]* filter) + +cdef extern from "gandiva/function_signature.h" namespace "gandiva" nogil: + + cdef cppclass CFunctionSignature" gandiva::FunctionSignature": + + CFunctionSignature(const c_string& base_name, + vector[shared_ptr[CDataType]] param_types, + shared_ptr[CDataType] ret_type) + + shared_ptr[CDataType] ret_type() const + + const c_string& base_name() const + + vector[shared_ptr[CDataType]] param_types() const + + c_string ToString() const + +cdef extern from "gandiva/expression_registry.h" namespace "gandiva" nogil: + + cdef vector[shared_ptr[CFunctionSignature]] \ + GetRegisteredFunctionSignatures() + +cdef extern from "gandiva/configuration.h" namespace "gandiva" nogil: + + cdef cppclass CConfiguration" gandiva::Configuration": + + CConfiguration() + + CConfiguration(bint optimize, bint dump_ir) + + void set_optimize(bint optimize) + + void set_dump_ir(bint dump_ir) + + cdef cppclass CConfigurationBuilder \ + " gandiva::ConfigurationBuilder": + @staticmethod + shared_ptr[CConfiguration] DefaultConfiguration() + + CConfigurationBuilder() + + shared_ptr[CConfiguration] build() diff --git a/parrot/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd b/parrot/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd new file mode 100644 index 0000000000000000000000000000000000000000..2b40414ce538319dc66d5a2e7a58fc28cb93770e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd @@ -0,0 +1,130 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
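# Illustrative sketch, not part of the vendored sources: the Gandiva
# declarations above back pyarrow.gandiva. A filter keeping rows where
# x > 3.0, following the TreeExprBuilder/Filter pattern declared above;
# requires a pyarrow build with Gandiva enabled.

import pyarrow as pa
import pyarrow.gandiva as gandiva

table = pa.table({"x": [1.0, 4.0, 5.0]})
builder = gandiva.TreeExprBuilder()
node_x = builder.make_field(table.schema.field("x"))
three = builder.make_literal(3.0, pa.float64())
x_gt_three = builder.make_function("greater_than", [node_x, three],
                                   pa.bool_())
condition = builder.make_condition(x_gt_three)
filt = gandiva.make_filter(table.schema, condition)

# Evaluate returns a SelectionVector (CSelectionVector above) holding the
# indices of the matching rows.
selection = filt.evaluate(table.to_batches()[0], pa.default_memory_pool())
print(selection.to_array())  # -> [1, 2]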
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow._parquet cimport (ParquetCipher, + CFileEncryptionProperties, + CFileDecryptionProperties, + ParquetCipher_AES_GCM_V1, + ParquetCipher_AES_GCM_CTR_V1) + + +cdef extern from "parquet/encryption/kms_client.h" \ + namespace "parquet::encryption" nogil: + cdef cppclass CKmsClient" parquet::encryption::KmsClient": + c_string WrapKey(const c_string& key_bytes, + const c_string& master_key_identifier) except + + c_string UnwrapKey(const c_string& wrapped_key, + const c_string& master_key_identifier) except + + + cdef cppclass CKeyAccessToken" parquet::encryption::KeyAccessToken": + CKeyAccessToken(const c_string value) + void Refresh(const c_string& new_value) + const c_string& value() const + + cdef cppclass CKmsConnectionConfig \ + " parquet::encryption::KmsConnectionConfig": + CKmsConnectionConfig() + c_string kms_instance_id + c_string kms_instance_url + shared_ptr[CKeyAccessToken] refreshable_key_access_token + unordered_map[c_string, c_string] custom_kms_conf + +# Callbacks for implementing Python kms clients +# Use typedef to emulate syntax for std::function +ctypedef void CallbackWrapKey( + object, const c_string&, const c_string&, c_string*) +ctypedef void CallbackUnwrapKey( + object, const c_string&, const c_string&, c_string*) + +cdef extern from "parquet/encryption/kms_client_factory.h" \ + namespace "parquet::encryption" nogil: + cdef cppclass CKmsClientFactory" parquet::encryption::KmsClientFactory": + shared_ptr[CKmsClient] CreateKmsClient( + const CKmsConnectionConfig& kms_connection_config) except + + +# Callbacks for implementing Python kms client factories +# Use typedef to emulate syntax for std::function +ctypedef void CallbackCreateKmsClient( + object, + const CKmsConnectionConfig&, shared_ptr[CKmsClient]*) + +cdef extern from "parquet/encryption/crypto_factory.h" \ + namespace "parquet::encryption" nogil: + cdef cppclass CEncryptionConfiguration\ + " parquet::encryption::EncryptionConfiguration": + CEncryptionConfiguration(const c_string& footer_key) except + + c_string footer_key + c_string column_keys + ParquetCipher encryption_algorithm + c_bool plaintext_footer + c_bool double_wrapping + double cache_lifetime_seconds + c_bool internal_key_material + int32_t data_key_length_bits + + cdef cppclass CDecryptionConfiguration\ + " parquet::encryption::DecryptionConfiguration": + CDecryptionConfiguration() except + + double cache_lifetime_seconds + + cdef cppclass CCryptoFactory" parquet::encryption::CryptoFactory": + void RegisterKmsClientFactory( + shared_ptr[CKmsClientFactory] kms_client_factory) except + + shared_ptr[CFileEncryptionProperties] GetFileEncryptionProperties( + const CKmsConnectionConfig& kms_connection_config, + const CEncryptionConfiguration& encryption_config) except +* + shared_ptr[CFileDecryptionProperties] GetFileDecryptionProperties( + const CKmsConnectionConfig& kms_connection_config, + const CDecryptionConfiguration& decryption_config) except +* + void RemoveCacheEntriesForToken(const c_string& access_token) except + + void RemoveCacheEntriesForAllTokens() 
except + + +cdef extern from "arrow/python/parquet_encryption.h" \ + namespace "arrow::py::parquet::encryption" nogil: + cdef cppclass CPyKmsClientVtable \ + " arrow::py::parquet::encryption::PyKmsClientVtable": + CPyKmsClientVtable() + function[CallbackWrapKey] wrap_key + function[CallbackUnwrapKey] unwrap_key + + cdef cppclass CPyKmsClient\ + " arrow::py::parquet::encryption::PyKmsClient"(CKmsClient): + CPyKmsClient(object handler, CPyKmsClientVtable vtable) + + cdef cppclass CPyKmsClientFactoryVtable\ + " arrow::py::parquet::encryption::PyKmsClientFactoryVtable": + CPyKmsClientFactoryVtable() + function[CallbackCreateKmsClient] create_kms_client + + cdef cppclass CPyKmsClientFactory\ + " arrow::py::parquet::encryption::PyKmsClientFactory"( + CKmsClientFactory): + CPyKmsClientFactory(object handler, CPyKmsClientFactoryVtable vtable) + + cdef cppclass CPyCryptoFactory\ + " arrow::py::parquet::encryption::PyCryptoFactory"(CCryptoFactory): + CResult[shared_ptr[CFileEncryptionProperties]] \ + SafeGetFileEncryptionProperties( + const CKmsConnectionConfig& kms_connection_config, + const CEncryptionConfiguration& encryption_config) + CResult[shared_ptr[CFileDecryptionProperties]] \ + SafeGetFileDecryptionProperties( + const CKmsConnectionConfig& kms_connection_config, + const CDecryptionConfiguration& decryption_config) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/interchange/__init__.py b/parrot/lib/python3.10/site-packages/pyarrow/interchange/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ebe59b499c214dd82954bff84824cfea574b415 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/interchange/__init__.py @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
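# Illustrative sketch, not part of the vendored sources: the declarations
# above back pyarrow.parquet.encryption. The toy KMS below merely
# base64-"wraps" keys; it is an assumption for the sketch and must not be
# used for real data.

import base64
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.parquet.encryption as pe

class ToyKmsClient(pe.KmsClient):
    # Mirrors CKmsClient::WrapKey / UnwrapKey declared above.
    def __init__(self, config):
        super().__init__()

    def wrap_key(self, key_bytes, master_key_identifier):
        return base64.b64encode(key_bytes)

    def unwrap_key(self, wrapped_key, master_key_identifier):
        return base64.b64decode(wrapped_key)

crypto_factory = pe.CryptoFactory(lambda config: ToyKmsClient(config))
kms_config = pe.KmsConnectionConfig()
enc_config = pe.EncryptionConfiguration(
    footer_key="footer_key_id",
    column_keys={"col_key_id": ["secret_column"]},
)

table = pa.table({"secret_column": [1, 2, 3]})
props = crypto_factory.file_encryption_properties(kms_config, enc_config)
with pq.ParquetWriter("/tmp/encrypted.parquet", table.schema,
                      encryption_properties=props) as writer:
    writer.write_table(table)

dec_props = crypto_factory.file_decryption_properties(
    kms_config, pe.DecryptionConfiguration())
roundtrip = pq.ParquetFile("/tmp/encrypted.parquet",
                           decryption_properties=dec_props).read()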
+ +# flake8: noqa + +from .from_dataframe import from_dataframe diff --git a/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0da6e32bd1f681930d9baedc77292f5cd0adcde4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/buffer.cpython-310.pyc b/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/buffer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ddf81daeff4f6fd5cae8af85778efede0eacc7c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/buffer.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/column.cpython-310.pyc b/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/column.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e6fd5935343179e736317871555dd88ec0967f3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/column.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc b/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fc1f521f559c81e3d02228599a0905d8558d93f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/from_dataframe.cpython-310.pyc b/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/from_dataframe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d532b929aceedee861c58d3156447a13d2a1887 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/from_dataframe.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/interchange/buffer.py b/parrot/lib/python3.10/site-packages/pyarrow/interchange/buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..1f537798130b9a77bc50e1040ea8046557974894 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/interchange/buffer.py @@ -0,0 +1,107 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
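# Illustrative sketch, not part of the vendored sources: from_dataframe
# accepts any object implementing __dataframe__ (recent pandas does) and
# rebuilds it as a pyarrow.Table, zero-copy where the protocol allows.

import pandas as pd
import pyarrow.interchange as pai

df = pd.DataFrame({"n": [1.0, 2.0, None], "s": ["a", "b", "c"]})
table = pai.from_dataframe(df)  # pyarrow.Table with columns n and s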
+ +from __future__ import annotations +import enum + +import pyarrow as pa + + +class DlpackDeviceType(enum.IntEnum): + """Integer enum for device type codes matching DLPack.""" + + CPU = 1 + CUDA = 2 + CPU_PINNED = 3 + OPENCL = 4 + VULKAN = 7 + METAL = 8 + VPI = 9 + ROCM = 10 + + +class _PyArrowBuffer: + """ + Data in the buffer is guaranteed to be contiguous in memory. + + Note that there is no dtype attribute present, a buffer can be thought of + as simply a block of memory. However, if the column that the buffer is + attached to has a dtype that's supported by DLPack and ``__dlpack__`` is + implemented, then that dtype information will be contained in the return + value from ``__dlpack__``. + + This distinction is useful to support both data exchange via DLPack on a + buffer and (b) dtypes like variable-length strings which do not have a + fixed number of bytes per element. + """ + + def __init__(self, x: pa.Buffer, allow_copy: bool = True) -> None: + """ + Handle PyArrow Buffers. + """ + self._x = x + + @property + def bufsize(self) -> int: + """ + Buffer size in bytes. + """ + return self._x.size + + @property + def ptr(self) -> int: + """ + Pointer to start of the buffer as an integer. + """ + return self._x.address + + def __dlpack__(self): + """ + Produce DLPack capsule (see array API standard). + + Raises: + - TypeError : if the buffer contains unsupported dtypes. + - NotImplementedError : if DLPack support is not implemented + + Useful to have to connect to array libraries. Support optional because + it's not completely trivial to implement for a Python-only library. + """ + raise NotImplementedError("__dlpack__") + + def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: + """ + Device type and device ID for where the data in the buffer resides. + Uses device type codes matching DLPack. + Note: must be implemented even if ``__dlpack__`` is not. + """ + if self._x.is_cpu: + return (DlpackDeviceType.CPU, None) + else: + raise NotImplementedError("__dlpack_device__") + + def __repr__(self) -> str: + return ( + "PyArrowBuffer(" + + str( + { + "bufsize": self.bufsize, + "ptr": self.ptr, + "device": self.__dlpack_device__()[0].name, + } + ) + + ")" + ) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/interchange/column.py b/parrot/lib/python3.10/site-packages/pyarrow/interchange/column.py new file mode 100644 index 0000000000000000000000000000000000000000..e609e469b0ffa6a880f530757b72ed15d859571a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/interchange/column.py @@ -0,0 +1,529 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
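# Illustrative sketch, not part of the vendored sources: _PyArrowBuffer is
# a private adapter, instantiated here directly only to make the buffer
# surface of the protocol concrete.

import pyarrow as pa
from pyarrow.interchange.buffer import DlpackDeviceType, _PyArrowBuffer

buf = _PyArrowBuffer(pa.py_buffer(b"hello"))
assert buf.bufsize == 5  # byte size of the wrapped pa.Buffer
assert buf.ptr != 0      # raw address of the first byte
assert buf.__dlpack_device__() == (DlpackDeviceType.CPU, None)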
+ +from __future__ import annotations + +import enum +from typing import ( + Any, + Dict, + Iterable, + Optional, + Tuple, +) + +import sys +if sys.version_info >= (3, 8): + from typing import TypedDict +else: + from typing_extensions import TypedDict + +import pyarrow as pa +import pyarrow.compute as pc +from pyarrow.interchange.buffer import _PyArrowBuffer + + +class DtypeKind(enum.IntEnum): + """ + Integer enum for data types. + + Attributes + ---------- + INT : int + Matches to signed integer data type. + UINT : int + Matches to unsigned integer data type. + FLOAT : int + Matches to floating point data type. + BOOL : int + Matches to boolean data type. + STRING : int + Matches to string data type (UTF-8 encoded). + DATETIME : int + Matches to datetime data type. + CATEGORICAL : int + Matches to categorical data type. + """ + + INT = 0 + UINT = 1 + FLOAT = 2 + BOOL = 20 + STRING = 21 # UTF-8 + DATETIME = 22 + CATEGORICAL = 23 + + +Dtype = Tuple[DtypeKind, int, str, str] # see Column.dtype + + +_PYARROW_KINDS = { + pa.int8(): (DtypeKind.INT, "c"), + pa.int16(): (DtypeKind.INT, "s"), + pa.int32(): (DtypeKind.INT, "i"), + pa.int64(): (DtypeKind.INT, "l"), + pa.uint8(): (DtypeKind.UINT, "C"), + pa.uint16(): (DtypeKind.UINT, "S"), + pa.uint32(): (DtypeKind.UINT, "I"), + pa.uint64(): (DtypeKind.UINT, "L"), + pa.float16(): (DtypeKind.FLOAT, "e"), + pa.float32(): (DtypeKind.FLOAT, "f"), + pa.float64(): (DtypeKind.FLOAT, "g"), + pa.bool_(): (DtypeKind.BOOL, "b"), + pa.string(): (DtypeKind.STRING, "u"), + pa.large_string(): (DtypeKind.STRING, "U"), +} + + +class ColumnNullType(enum.IntEnum): + """ + Integer enum for null type representation. + + Attributes + ---------- + NON_NULLABLE : int + Non-nullable column. + USE_NAN : int + Use explicit float NaN value. + USE_SENTINEL : int + Sentinel value besides NaN. + USE_BITMASK : int + The bit is set/unset representing a null on a certain position. + USE_BYTEMASK : int + The byte is set/unset representing a null on a certain position. + """ + + NON_NULLABLE = 0 + USE_NAN = 1 + USE_SENTINEL = 2 + USE_BITMASK = 3 + USE_BYTEMASK = 4 + + +class ColumnBuffers(TypedDict): + # first element is a buffer containing the column data; + # second element is the data buffer's associated dtype + data: Tuple[_PyArrowBuffer, Dtype] + + # first element is a buffer containing mask values indicating missing data; + # second element is the mask value buffer's associated dtype. + # None if the null representation is not a bit or byte mask + validity: Optional[Tuple[_PyArrowBuffer, Dtype]] + + # first element is a buffer containing the offset values for + # variable-size binary data (e.g., variable-length strings); + # second element is the offsets buffer's associated dtype. + # None if the data buffer does not have an associated offsets buffer + offsets: Optional[Tuple[_PyArrowBuffer, Dtype]] + + +class CategoricalDescription(TypedDict): + # whether the ordering of dictionary indices is semantically meaningful + is_ordered: bool + # whether a dictionary-style mapping of categorical values to other objects + # exists + is_dictionary: bool + # Python-level only (e.g. ``{int: str}``). + # None if not a dictionary-style categorical. 
+ categories: Optional[_PyArrowColumn] + + +class Endianness: + """Enum indicating the byte-order of a data-type.""" + + LITTLE = "<" + BIG = ">" + NATIVE = "=" + NA = "|" + + +class NoBufferPresent(Exception): + """Exception to signal that there is no requested buffer.""" + + +class _PyArrowColumn: + """ + A column object, with only the methods and properties required by the + interchange protocol defined. + + A column can contain one or more chunks. Each chunk can contain up to three + buffers - a data buffer, a mask buffer (depending on null representation), + and an offsets buffer (if variable-size binary; e.g., variable-length + strings). + + TBD: Arrow has a separate "null" dtype, and has no separate mask concept. + Instead, it seems to use "children" for both columns with a bit mask, + and for nested dtypes. Unclear whether this is elegant or confusing. + This design requires checking the null representation explicitly. + + The Arrow design requires checking: + 1. the ARROW_FLAG_NULLABLE (for sentinel values) + 2. if a column has two children, combined with one of those children + having a null dtype. + + Making the mask concept explicit seems useful. One null dtype would + not be enough to cover both bit and byte masks, so that would mean + even more checking if we did it the Arrow way. + + TBD: there's also the "chunk" concept here, which is implicit in Arrow as + multiple buffers per array (= column here). Semantically it may make + sense to have both: chunks were meant for example for lazy evaluation + of data which doesn't fit in memory, while multiple buffers per column + could also come from doing a selection operation on a single + contiguous buffer. + + Given these concepts, one would expect chunks to be all of the same + size (say a 10,000 row dataframe could have 10 chunks of 1,000 rows), + while multiple buffers could have data-dependent lengths. Not an issue + in pandas if one column is backed by a single NumPy array, but in + Arrow it seems possible. + Are multiple chunks *and* multiple buffers per column necessary for + the purposes of this interchange protocol, or must producers either + reuse the chunk concept for this or copy the data? + + Note: this Column object can only be produced by ``__dataframe__``, so + doesn't need its own version or ``__column__`` protocol. + """ + + def __init__( + self, column: pa.Array | pa.ChunkedArray, allow_copy: bool = True + ) -> None: + """ + Handles PyArrow Arrays and ChunkedArrays. + """ + # Store the column as a private attribute + if isinstance(column, pa.ChunkedArray): + if column.num_chunks == 1: + column = column.chunk(0) + else: + if not allow_copy: + raise RuntimeError( + "Chunks will be combined and a copy is required which " + "is forbidden by allow_copy=False" + ) + column = column.combine_chunks() + + self._allow_copy = allow_copy + + if pa.types.is_boolean(column.type): + if not allow_copy: + raise RuntimeError( + "Boolean column will be casted to uint8 and a copy " + "is required which is forbidden by allow_copy=False" + ) + self._dtype = self._dtype_from_arrowdtype(column.type, 8) + self._col = pc.cast(column, pa.uint8()) + else: + self._col = column + dtype = self._col.type + try: + bit_width = dtype.bit_width + except ValueError: + # in case of a variable-length strings, considered as array + # of bytes (8 bits) + bit_width = 8 + self._dtype = self._dtype_from_arrowdtype(dtype, bit_width) + + def size(self) -> int: + """ + Size of the column, in elements. 
+ + Corresponds to DataFrame.num_rows() if column is a single chunk; + equal to size of this current chunk otherwise. + + Is a method rather than a property because it may cause a (potentially + expensive) computation for some dataframe implementations. + """ + return len(self._col) + + @property + def offset(self) -> int: + """ + Offset of first element. + + May be > 0 if using chunks; for example for a column with N chunks of + equal size M (only the last chunk may be shorter), + ``offset = n * M``, ``n = 0 .. N-1``. + """ + return self._col.offset + + @property + def dtype(self) -> Tuple[DtypeKind, int, str, str]: + """ + Dtype description as a tuple ``(kind, bit-width, format string, + endianness)``. + + Bit-width : the number of bits as an integer + Format string : data type description format string in Apache Arrow C + Data Interface format. + Endianness : current only native endianness (``=``) is supported + + Notes: + - Kind specifiers are aligned with DLPack where possible (hence the + jump to 20, leave enough room for future extension) + - Masks must be specified as boolean with either bit width 1 (for + bit masks) or 8 (for byte masks). + - Dtype width in bits was preferred over bytes + - Endianness isn't too useful, but included now in case in the + future we need to support non-native endianness + - Went with Apache Arrow format strings over NumPy format strings + because they're more complete from a dataframe perspective + - Format strings are mostly useful for datetime specification, and + for categoricals. + - For categoricals, the format string describes the type of the + categorical in the data buffer. In case of a separate encoding of + the categorical (e.g. an integer to string mapping), this can + be derived from ``self.describe_categorical``. + - Data types not included: complex, Arrow-style null, binary, + decimal, and nested (list, struct, map, union) dtypes. + """ + return self._dtype + + def _dtype_from_arrowdtype( + self, dtype: pa.DataType, bit_width: int + ) -> Tuple[DtypeKind, int, str, str]: + """ + See `self.dtype` for details. + """ + # Note: 'c' (complex) not handled yet (not in array spec v1). + # 'b', 'B' (bytes), 'S', 'a', (old-style string) 'V' (void) + # not handled datetime and timedelta both map to datetime + # (is timedelta handled?) + + if pa.types.is_timestamp(dtype): + kind = DtypeKind.DATETIME + ts = dtype.unit[0] + tz = dtype.tz if dtype.tz else "" + f_string = "ts{ts}:{tz}".format(ts=ts, tz=tz) + return kind, bit_width, f_string, Endianness.NATIVE + elif pa.types.is_dictionary(dtype): + kind = DtypeKind.CATEGORICAL + arr = self._col + indices_dtype = arr.indices.type + _, f_string = _PYARROW_KINDS.get(indices_dtype) + return kind, bit_width, f_string, Endianness.NATIVE + else: + kind, f_string = _PYARROW_KINDS.get(dtype, (None, None)) + if kind is None: + raise ValueError( + f"Data type {dtype} not supported by interchange protocol") + + return kind, bit_width, f_string, Endianness.NATIVE + + @property + def describe_categorical(self) -> CategoricalDescription: + """ + If the dtype is categorical, there are two options: + - There are only values in the data buffer. + - There is a separate non-categorical Column encoding categorical + values. + + Raises TypeError if the dtype is not categorical + + Returns the dictionary with description on how to interpret the + data buffer: + - "is_ordered" : bool, whether the ordering of dictionary indices + is semantically meaningful. 
+ - "is_dictionary" : bool, whether a mapping of + categorical values to other objects exists + - "categories" : Column representing the (implicit) mapping of + indices to category values (e.g. an array of + cat1, cat2, ...). None if not a dictionary-style + categorical. + + TBD: are there any other in-memory representations that are needed? + """ + arr = self._col + if not pa.types.is_dictionary(arr.type): + raise TypeError( + "describe_categorical only works on a column with " + "categorical dtype!" + ) + + return { + "is_ordered": self._col.type.ordered, + "is_dictionary": True, + "categories": _PyArrowColumn(arr.dictionary), + } + + @property + def describe_null(self) -> Tuple[ColumnNullType, Any]: + """ + Return the missing value (or "null") representation the column dtype + uses, as a tuple ``(kind, value)``. + + Value : if kind is "sentinel value", the actual value. If kind is a bit + mask or a byte mask, the value (0 or 1) indicating a missing value. + None otherwise. + """ + # In case of no missing values, we need to set ColumnNullType to + # non nullable as in the current __dataframe__ protocol bit/byte masks + # cannot be None + if self.null_count == 0: + return ColumnNullType.NON_NULLABLE, None + else: + return ColumnNullType.USE_BITMASK, 0 + + @property + def null_count(self) -> int: + """ + Number of null elements, if known. + + Note: Arrow uses -1 to indicate "unknown", but None seems cleaner. + """ + arrow_null_count = self._col.null_count + n = arrow_null_count if arrow_null_count != -1 else None + return n + + @property + def metadata(self) -> Dict[str, Any]: + """ + The metadata for the column. See `DataFrame.metadata` for more details. + """ + pass + + def num_chunks(self) -> int: + """ + Return the number of chunks the column consists of. + """ + return 1 + + def get_chunks( + self, n_chunks: Optional[int] = None + ) -> Iterable[_PyArrowColumn]: + """ + Return an iterator yielding the chunks. + + See `DataFrame.get_chunks` for details on ``n_chunks``. + """ + if n_chunks and n_chunks > 1: + chunk_size = self.size() // n_chunks + if self.size() % n_chunks != 0: + chunk_size += 1 + + array = self._col + i = 0 + for start in range(0, chunk_size * n_chunks, chunk_size): + yield _PyArrowColumn( + array.slice(start, chunk_size), self._allow_copy + ) + i += 1 + else: + yield self + + def get_buffers(self) -> ColumnBuffers: + """ + Return a dictionary containing the underlying buffers. + + The returned dictionary has the following contents: + + - "data": a two-element tuple whose first element is a buffer + containing the data and whose second element is the data + buffer's associated dtype. + - "validity": a two-element tuple whose first element is a buffer + containing mask values indicating missing data and + whose second element is the mask value buffer's + associated dtype. None if the null representation is + not a bit or byte mask. + - "offsets": a two-element tuple whose first element is a buffer + containing the offset values for variable-size binary + data (e.g., variable-length strings) and whose second + element is the offsets buffer's associated dtype. None + if the data buffer does not have an associated offsets + buffer. 
+ """ + buffers: ColumnBuffers = { + "data": self._get_data_buffer(), + "validity": None, + "offsets": None, + } + + try: + buffers["validity"] = self._get_validity_buffer() + except NoBufferPresent: + pass + + try: + buffers["offsets"] = self._get_offsets_buffer() + except NoBufferPresent: + pass + + return buffers + + def _get_data_buffer( + self, + ) -> Tuple[_PyArrowBuffer, Any]: # Any is for self.dtype tuple + """ + Return the buffer containing the data and the buffer's + associated dtype. + """ + array = self._col + dtype = self.dtype + + # In case of dictionary arrays, use indices + # to define a buffer, codes are transferred through + # describe_categorical() + if pa.types.is_dictionary(array.type): + array = array.indices + dtype = _PyArrowColumn(array).dtype + + n = len(array.buffers()) + if n == 2: + return _PyArrowBuffer(array.buffers()[1]), dtype + elif n == 3: + return _PyArrowBuffer(array.buffers()[2]), dtype + + def _get_validity_buffer(self) -> Tuple[_PyArrowBuffer, Any]: + """ + Return the buffer containing the mask values indicating missing data + and the buffer's associated dtype. + Raises NoBufferPresent if null representation is not a bit or byte + mask. + """ + # Define the dtype of the returned buffer + dtype = (DtypeKind.BOOL, 1, "b", Endianness.NATIVE) + array = self._col + buff = array.buffers()[0] + if buff: + return _PyArrowBuffer(buff), dtype + else: + raise NoBufferPresent( + "There are no missing values so " + "does not have a separate mask") + + def _get_offsets_buffer(self) -> Tuple[_PyArrowBuffer, Any]: + """ + Return the buffer containing the offset values for variable-size binary + data (e.g., variable-length strings) and the buffer's associated dtype. + Raises NoBufferPresent if the data buffer does not have an associated + offsets buffer. + """ + array = self._col + n = len(array.buffers()) + if n == 2: + raise NoBufferPresent( + "This column has a fixed-length dtype so " + "it does not have an offsets buffer" + ) + elif n == 3: + # Define the dtype of the returned buffer + dtype = self._col.type + if pa.types.is_large_string(dtype): + dtype = (DtypeKind.INT, 64, "l", Endianness.NATIVE) + else: + dtype = (DtypeKind.INT, 32, "i", Endianness.NATIVE) + return _PyArrowBuffer(array.buffers()[1]), dtype diff --git a/parrot/lib/python3.10/site-packages/pyarrow/interchange/dataframe.py b/parrot/lib/python3.10/site-packages/pyarrow/interchange/dataframe.py new file mode 100644 index 0000000000000000000000000000000000000000..59ba765c175ad471274a99bf857c8880a072e0b8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/interchange/dataframe.py @@ -0,0 +1,217 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
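+
+# Illustrative consumer-side sketch (assumes a ``pa.Table`` named ``table``;
+# the call sequence simply mirrors the methods defined in this file and in
+# ``column.py``, it is not an additional API):
+#
+#     idf = table.__dataframe__()             # -> _PyArrowDataFrame
+#     col = idf.get_column_by_name("x")       # -> _PyArrowColumn
+#     kind, bit_width, fmt, endianness = col.dtype
+#     buffers = col.get_buffers()             # keys: "data", "validity", "offsets"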
+ +from __future__ import annotations +from typing import ( + Any, + Iterable, + Optional, + Sequence, +) + +import pyarrow as pa + +from pyarrow.interchange.column import _PyArrowColumn + + +class _PyArrowDataFrame: + """ + A data frame class, with only the methods required by the interchange + protocol defined. + + A "data frame" represents an ordered collection of named columns. + A column's "name" must be a unique string. + Columns may be accessed by name or by position. + + This could be a public data frame class, or an object with the methods and + attributes defined on this DataFrame class could be returned from the + ``__dataframe__`` method of a public data frame class in a library adhering + to the dataframe interchange protocol specification. + """ + + def __init__( + self, df: pa.Table | pa.RecordBatch, + nan_as_null: bool = False, + allow_copy: bool = True + ) -> None: + """ + Constructor - an instance of this (private) class is returned from + `pa.Table.__dataframe__` or `pa.RecordBatch.__dataframe__`. + """ + self._df = df + # ``nan_as_null`` is a keyword intended for the consumer to tell the + # producer to overwrite null values in the data with ``NaN`` (or + # ``NaT``). + if nan_as_null is True: + raise RuntimeError( + "nan_as_null=True currently has no effect, " + "use the default nan_as_null=False" + ) + self._nan_as_null = nan_as_null + self._allow_copy = allow_copy + + def __dataframe__( + self, nan_as_null: bool = False, allow_copy: bool = True + ) -> _PyArrowDataFrame: + """ + Construct a new exchange object, potentially changing the parameters. + ``nan_as_null`` is a keyword intended for the consumer to tell the + producer to overwrite null values in the data with ``NaN``. + It is intended for cases where the consumer does not support the bit + mask or byte mask that is the producer's native representation. + ``allow_copy`` is a keyword that defines whether or not the library is + allowed to make a copy of the data. For example, copying data would be + necessary if a library supports strided buffers, given that this + protocol specifies contiguous buffers. + """ + return _PyArrowDataFrame(self._df, nan_as_null, allow_copy) + + @property + def metadata(self) -> dict[str, Any]: + """ + The metadata for the data frame, as a dictionary with string keys. The + contents of `metadata` may be anything, they are meant for a library + to store information that it needs to, e.g., roundtrip losslessly or + for two implementations to share data that is not (yet) part of the + interchange protocol specification. For avoiding collisions with other + entries, please add name the keys with the name of the library + followed by a period and the desired name, e.g, ``pandas.indexcol``. + """ + # The metadata for the data frame, as a dictionary with string keys. + # Add schema metadata here (pandas metadata or custom metadata) + if self._df.schema.metadata: + schema_metadata = {"pyarrow." + k.decode('utf8'): v.decode('utf8') + for k, v in self._df.schema.metadata.items()} + return schema_metadata + else: + return {} + + def num_columns(self) -> int: + """ + Return the number of columns in the DataFrame. + """ + return self._df.num_columns + + def num_rows(self) -> int: + """ + Return the number of rows in the DataFrame, if available. + """ + return self._df.num_rows + + def num_chunks(self) -> int: + """ + Return the number of chunks the DataFrame consists of. 
+ """ + if isinstance(self._df, pa.RecordBatch): + return 1 + else: + # pyarrow.Table can have columns with different number + # of chunks so we take the number of chunks that + # .to_batches() returns as it takes the min chunk size + # of all the columns (to_batches is a zero copy method) + batches = self._df.to_batches() + return len(batches) + + def column_names(self) -> Iterable[str]: + """ + Return an iterator yielding the column names. + """ + return self._df.schema.names + + def get_column(self, i: int) -> _PyArrowColumn: + """ + Return the column at the indicated position. + """ + return _PyArrowColumn(self._df.column(i), + allow_copy=self._allow_copy) + + def get_column_by_name(self, name: str) -> _PyArrowColumn: + """ + Return the column whose name is the indicated name. + """ + return _PyArrowColumn(self._df.column(name), + allow_copy=self._allow_copy) + + def get_columns(self) -> Iterable[_PyArrowColumn]: + """ + Return an iterator yielding the columns. + """ + return [ + _PyArrowColumn(col, allow_copy=self._allow_copy) + for col in self._df.columns + ] + + def select_columns(self, indices: Sequence[int]) -> _PyArrowDataFrame: + """ + Create a new DataFrame by selecting a subset of columns by index. + """ + return _PyArrowDataFrame( + self._df.select(list(indices)), self._nan_as_null, self._allow_copy + ) + + def select_columns_by_name( + self, names: Sequence[str] + ) -> _PyArrowDataFrame: + """ + Create a new DataFrame by selecting a subset of columns by name. + """ + return _PyArrowDataFrame( + self._df.select(list(names)), self._nan_as_null, self._allow_copy + ) + + def get_chunks( + self, n_chunks: Optional[int] = None + ) -> Iterable[_PyArrowDataFrame]: + """ + Return an iterator yielding the chunks. + + By default (None), yields the chunks that the data is stored as by the + producer. If given, ``n_chunks`` must be a multiple of + ``self.num_chunks()``, meaning the producer must subdivide each chunk + before yielding it. + + Note that the producer must ensure that all columns are chunked the + same way. + """ + # Subdivide chunks + if n_chunks and n_chunks > 1: + chunk_size = self.num_rows() // n_chunks + if self.num_rows() % n_chunks != 0: + chunk_size += 1 + if isinstance(self._df, pa.Table): + batches = self._df.to_batches(max_chunksize=chunk_size) + else: + batches = [] + for start in range(0, chunk_size * n_chunks, chunk_size): + batches.append(self._df.slice(start, chunk_size)) + # In case when the size of the chunk is such that the resulting + # list is one less chunk then n_chunks -> append an empty chunk + if len(batches) == n_chunks - 1: + batches.append(pa.record_batch([[]], schema=self._df.schema)) + # yields the chunks that the data is stored as + else: + if isinstance(self._df, pa.Table): + batches = self._df.to_batches() + else: + batches = [self._df] + + # Create an iterator of RecordBatches + iterator = [_PyArrowDataFrame(batch, + self._nan_as_null, + self._allow_copy) + for batch in batches] + return iterator diff --git a/parrot/lib/python3.10/site-packages/pyarrow/interchange/from_dataframe.py b/parrot/lib/python3.10/site-packages/pyarrow/interchange/from_dataframe.py new file mode 100644 index 0000000000000000000000000000000000000000..fcaec41e3dcdf982e19bd45ba4a1941fab5ec34e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/interchange/from_dataframe.py @@ -0,0 +1,614 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from __future__ import annotations + +from typing import ( + Any, + Tuple, +) + +from pyarrow.interchange.column import ( + DtypeKind, + ColumnBuffers, + ColumnNullType, +) + +import pyarrow as pa +import re + +import pyarrow.compute as pc +from pyarrow.interchange.column import Dtype + + +# A typing protocol could be added later to let Mypy validate code using +# `from_dataframe` better. +DataFrameObject = Any +ColumnObject = Any +BufferObject = Any + + +_PYARROW_DTYPES: dict[DtypeKind, dict[int, Any]] = { + DtypeKind.INT: {8: pa.int8(), + 16: pa.int16(), + 32: pa.int32(), + 64: pa.int64()}, + DtypeKind.UINT: {8: pa.uint8(), + 16: pa.uint16(), + 32: pa.uint32(), + 64: pa.uint64()}, + DtypeKind.FLOAT: {16: pa.float16(), + 32: pa.float32(), + 64: pa.float64()}, + DtypeKind.BOOL: {1: pa.bool_(), + 8: pa.uint8()}, + DtypeKind.STRING: {8: pa.string()}, +} + + +def from_dataframe(df: DataFrameObject, allow_copy=True) -> pa.Table: + """ + Build a ``pa.Table`` from any DataFrame supporting the interchange protocol. + + Parameters + ---------- + df : DataFrameObject + Object supporting the interchange protocol, i.e. `__dataframe__` + method. + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). + + Returns + ------- + pa.Table + + Examples + -------- + >>> import pyarrow + >>> from pyarrow.interchange import from_dataframe + + Convert a pandas dataframe to a pyarrow table: + + >>> import pandas as pd + >>> df = pd.DataFrame({ + ... "n_attendees": [100, 10, 1], + ... "country": ["Italy", "Spain", "Slovenia"], + ... }) + >>> df + n_attendees country + 0 100 Italy + 1 10 Spain + 2 1 Slovenia + >>> from_dataframe(df) + pyarrow.Table + n_attendees: int64 + country: large_string + ---- + n_attendees: [[100,10,1]] + country: [["Italy","Spain","Slovenia"]] + """ + if isinstance(df, pa.Table): + return df + elif isinstance(df, pa.RecordBatch): + return pa.Table.from_batches([df]) + + if not hasattr(df, "__dataframe__"): + raise ValueError("`df` does not support __dataframe__") + + return _from_dataframe(df.__dataframe__(allow_copy=allow_copy), + allow_copy=allow_copy) + + +def _from_dataframe(df: DataFrameObject, allow_copy=True): + """ + Build a ``pa.Table`` from the DataFrame interchange object. + + Parameters + ---------- + df : DataFrameObject + Object supporting the interchange protocol, i.e. `__dataframe__` + method. + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). 
+ + Returns + ------- + pa.Table + """ + batches = [] + for chunk in df.get_chunks(): + batch = protocol_df_chunk_to_pyarrow(chunk, allow_copy) + batches.append(batch) + + if not batches: + batch = protocol_df_chunk_to_pyarrow(df) + batches.append(batch) + + return pa.Table.from_batches(batches) + + +def protocol_df_chunk_to_pyarrow( + df: DataFrameObject, + allow_copy: bool = True +) -> pa.RecordBatch: + """ + Convert interchange protocol chunk to ``pa.RecordBatch``. + + Parameters + ---------- + df : DataFrameObject + Object supporting the interchange protocol, i.e. `__dataframe__` + method. + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). + + Returns + ------- + pa.RecordBatch + """ + # We need a dict of columns here, with each column being a pa.Array + columns: dict[str, pa.Array] = {} + for name in df.column_names(): + if not isinstance(name, str): + raise ValueError(f"Column {name} is not a string") + if name in columns: + raise ValueError(f"Column {name} is not unique") + col = df.get_column_by_name(name) + dtype = col.dtype[0] + if dtype in ( + DtypeKind.INT, + DtypeKind.UINT, + DtypeKind.FLOAT, + DtypeKind.STRING, + DtypeKind.DATETIME, + ): + columns[name] = column_to_array(col, allow_copy) + elif dtype == DtypeKind.BOOL: + columns[name] = bool_column_to_array(col, allow_copy) + elif dtype == DtypeKind.CATEGORICAL: + columns[name] = categorical_column_to_dictionary(col, allow_copy) + else: + raise NotImplementedError(f"Data type {dtype} not handled yet") + + return pa.RecordBatch.from_pydict(columns) + + +def column_to_array( + col: ColumnObject, + allow_copy: bool = True, +) -> pa.Array: + """ + Convert a column holding one of the primitive dtypes to a PyArrow array. + A primitive type is one of: int, uint, float, bool (1 bit). + + Parameters + ---------- + col : ColumnObject + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). + + Returns + ------- + pa.Array + """ + buffers = col.get_buffers() + data_type = col.dtype + data = buffers_to_array(buffers, data_type, + col.size(), + col.describe_null, + col.offset, + allow_copy) + return data + + +def bool_column_to_array( + col: ColumnObject, + allow_copy: bool = True, +) -> pa.Array: + """ + Convert a column holding boolean dtype to a PyArrow array. + + Parameters + ---------- + col : ColumnObject + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). + + Returns + ------- + pa.Array + """ + buffers = col.get_buffers() + size = buffers["data"][1][1] + + # If booleans are byte-packed a copy to bit-packed will be made + if size == 8 and not allow_copy: + raise RuntimeError( + "Boolean column will be casted from uint8 and a copy " + "is required which is forbidden by allow_copy=False" + ) + + data_type = col.dtype + data = buffers_to_array(buffers, data_type, + col.size(), + col.describe_null, + col.offset) + if size == 8: + data = pc.cast(data, pa.bool_()) + + return data + + +def categorical_column_to_dictionary( + col: ColumnObject, + allow_copy: bool = True, +) -> pa.DictionaryArray: + """ + Convert a column holding categorical data to a pa.DictionaryArray. 
+ + Parameters + ---------- + col : ColumnObject + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). + + Returns + ------- + pa.DictionaryArray + """ + if not allow_copy: + raise RuntimeError( + "Categorical column will be casted from uint8 and a copy " + "is required which is forbidden by allow_copy=False" + ) + + categorical = col.describe_categorical + + if not categorical["is_dictionary"]: + raise NotImplementedError( + "Non-dictionary categoricals not supported yet") + + # We need to first convert the dictionary column + cat_column = categorical["categories"] + dictionary = column_to_array(cat_column) + # Then we need to convert the indices + # Here we need to use the buffer data type! + buffers = col.get_buffers() + _, data_type = buffers["data"] + indices = buffers_to_array(buffers, data_type, + col.size(), + col.describe_null, + col.offset) + + # Constructing a pa.DictionaryArray + dict_array = pa.DictionaryArray.from_arrays(indices, dictionary) + + return dict_array + + +def parse_datetime_format_str(format_str): + """Parse datetime `format_str` to interpret the `data`.""" + + # timestamp 'ts{unit}:tz' + timestamp_meta = re.match(r"ts([smun]):(.*)", format_str) + if timestamp_meta: + unit, tz = timestamp_meta.group(1), timestamp_meta.group(2) + if unit != "s": + # the format string describes only a first letter of the unit, so + # add one extra letter to convert the unit to numpy-style: + # 'm' -> 'ms', 'u' -> 'us', 'n' -> 'ns' + unit += "s" + + return unit, tz + + raise NotImplementedError(f"DateTime kind is not supported: {format_str}") + + +def map_date_type(data_type): + """Map column date type to pyarrow date type. """ + kind, bit_width, f_string, _ = data_type + + if kind == DtypeKind.DATETIME: + unit, tz = parse_datetime_format_str(f_string) + return pa.timestamp(unit, tz=tz) + else: + pa_dtype = _PYARROW_DTYPES.get(kind, {}).get(bit_width, None) + + # Error if dtype is not supported + if pa_dtype: + return pa_dtype + else: + raise NotImplementedError( + f"Conversion for {data_type} is not yet supported.") + + +def buffers_to_array( + buffers: ColumnBuffers, + data_type: Tuple[DtypeKind, int, str, str], + length: int, + describe_null: ColumnNullType, + offset: int = 0, + allow_copy: bool = True, +) -> pa.Array: + """ + Build a PyArrow array from the passed buffer. + + Parameters + ---------- + buffer : ColumnBuffers + Dictionary containing tuples of underlying buffers and + their associated dtype. + data_type : Tuple[DtypeKind, int, str, str], + Dtype description of the column as a tuple ``(kind, bit-width, format string, + endianness)``. + length : int + The number of values in the array. + describe_null: ColumnNullType + Null representation the column dtype uses, + as a tuple ``(kind, value)`` + offset : int, default: 0 + Number of elements to offset from the start of the buffer. + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). + + Returns + ------- + pa.Array + + Notes + ----- + The returned array doesn't own the memory. The caller of this function + is responsible for keeping the memory owner object alive as long as + the returned PyArrow array is being used. 
+ """ + data_buff, _ = buffers["data"] + try: + validity_buff, validity_dtype = buffers["validity"] + except TypeError: + validity_buff = None + try: + offset_buff, offset_dtype = buffers["offsets"] + except TypeError: + offset_buff = None + + # Construct a pyarrow Buffer + data_pa_buffer = pa.foreign_buffer(data_buff.ptr, data_buff.bufsize, + base=data_buff) + + # Construct a validity pyarrow Buffer, if applicable + if validity_buff: + validity_pa_buff = validity_buffer_from_mask(validity_buff, + validity_dtype, + describe_null, + length, + offset, + allow_copy) + else: + validity_pa_buff = validity_buffer_nan_sentinel(data_pa_buffer, + data_type, + describe_null, + length, + offset, + allow_copy) + + # Construct a pyarrow Array from buffers + data_dtype = map_date_type(data_type) + + if offset_buff: + _, offset_bit_width, _, _ = offset_dtype + # If an offset buffer exists, construct an offset pyarrow Buffer + # and add it to the construction of an array + offset_pa_buffer = pa.foreign_buffer(offset_buff.ptr, + offset_buff.bufsize, + base=offset_buff) + + if data_type[2] == 'U': + string_type = pa.large_string() + else: + if offset_bit_width == 64: + string_type = pa.large_string() + else: + string_type = pa.string() + array = pa.Array.from_buffers( + string_type, + length, + [validity_pa_buff, offset_pa_buffer, data_pa_buffer], + offset=offset, + ) + else: + array = pa.Array.from_buffers( + data_dtype, + length, + [validity_pa_buff, data_pa_buffer], + offset=offset, + ) + + return array + + +def validity_buffer_from_mask( + validity_buff: BufferObject, + validity_dtype: Dtype, + describe_null: ColumnNullType, + length: int, + offset: int = 0, + allow_copy: bool = True, +) -> pa.Buffer: + """ + Build a PyArrow buffer from the passed mask buffer. + + Parameters + ---------- + validity_buff : BufferObject + Tuple of underlying validity buffer and associated dtype. + validity_dtype : Dtype + Dtype description as a tuple ``(kind, bit-width, format string, + endianness)``. + describe_null : ColumnNullType + Null representation the column dtype uses, + as a tuple ``(kind, value)`` + length : int + The number of values in the array. + offset : int, default: 0 + Number of elements to offset from the start of the buffer. + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). 
+ + Returns + ------- + pa.Buffer + """ + null_kind, sentinel_val = describe_null + validity_kind, _, _, _ = validity_dtype + assert validity_kind == DtypeKind.BOOL + + if null_kind == ColumnNullType.NON_NULLABLE: + # Sliced array can have a NON_NULLABLE ColumnNullType due + # to no missing values in that slice of an array though the bitmask + # exists and validity_buff must be set to None in this case + return None + + elif null_kind == ColumnNullType.USE_BYTEMASK or ( + null_kind == ColumnNullType.USE_BITMASK and sentinel_val == 1 + ): + buff = pa.foreign_buffer(validity_buff.ptr, + validity_buff.bufsize, + base=validity_buff) + + if null_kind == ColumnNullType.USE_BYTEMASK: + if not allow_copy: + raise RuntimeError( + "To create a bitmask a copy of the data is " + "required which is forbidden by allow_copy=False" + ) + mask = pa.Array.from_buffers(pa.int8(), length, + [None, buff], + offset=offset) + mask_bool = pc.cast(mask, pa.bool_()) + else: + mask_bool = pa.Array.from_buffers(pa.bool_(), length, + [None, buff], + offset=offset) + + if sentinel_val == 1: + mask_bool = pc.invert(mask_bool) + + return mask_bool.buffers()[1] + + elif null_kind == ColumnNullType.USE_BITMASK and sentinel_val == 0: + return pa.foreign_buffer(validity_buff.ptr, + validity_buff.bufsize, + base=validity_buff) + else: + raise NotImplementedError( + f"{describe_null} null representation is not yet supported.") + + +def validity_buffer_nan_sentinel( + data_pa_buffer: BufferObject, + data_type: Dtype, + describe_null: ColumnNullType, + length: int, + offset: int = 0, + allow_copy: bool = True, +) -> pa.Buffer: + """ + Build a PyArrow buffer from NaN or sentinel values. + + Parameters + ---------- + data_pa_buffer : pa.Buffer + PyArrow buffer for the column data. + data_type : Dtype + Dtype description as a tuple ``(kind, bit-width, format string, + endianness)``. + describe_null : ColumnNullType + Null representation the column dtype uses, + as a tuple ``(kind, value)`` + length : int + The number of values in the array. + offset : int, default: 0 + Number of elements to offset from the start of the buffer. + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). 
+ + Returns + ------- + pa.Buffer + """ + kind, bit_width, _, _ = data_type + data_dtype = map_date_type(data_type) + null_kind, sentinel_val = describe_null + + # Check for float NaN values + if null_kind == ColumnNullType.USE_NAN: + if not allow_copy: + raise RuntimeError( + "To create a bitmask a copy of the data is " + "required which is forbidden by allow_copy=False" + ) + + if kind == DtypeKind.FLOAT and bit_width == 16: + # 'pyarrow.compute.is_nan' kernel not yet implemented + # for float16 + raise NotImplementedError( + f"{data_type} with {null_kind} is not yet supported.") + else: + pyarrow_data = pa.Array.from_buffers( + data_dtype, + length, + [None, data_pa_buffer], + offset=offset, + ) + mask = pc.is_nan(pyarrow_data) + mask = pc.invert(mask) + return mask.buffers()[1] + + # Check for sentinel values + elif null_kind == ColumnNullType.USE_SENTINEL: + if not allow_copy: + raise RuntimeError( + "To create a bitmask a copy of the data is " + "required which is forbidden by allow_copy=False" + ) + + if kind == DtypeKind.DATETIME: + sentinel_dtype = pa.int64() + else: + sentinel_dtype = data_dtype + pyarrow_data = pa.Array.from_buffers(sentinel_dtype, + length, + [None, data_pa_buffer], + offset=offset) + sentinel_arr = pc.equal(pyarrow_data, sentinel_val) + mask_bool = pc.invert(sentinel_arr) + return mask_bool.buffers()[1] + + elif null_kind == ColumnNullType.NON_NULLABLE: + pass + else: + raise NotImplementedError( + f"{describe_null} null representation is not yet supported.") diff --git a/parrot/lib/python3.10/site-packages/pyarrow/ipc.pxi b/parrot/lib/python3.10/site-packages/pyarrow/ipc.pxi new file mode 100644 index 0000000000000000000000000000000000000000..e15b0ea40ed2e7de9d5a7f1776d26ff40909b4c4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/ipc.pxi @@ -0,0 +1,1403 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
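+
+# Round-trip sketch for the stream writer/reader classes defined below
+# (assumes a ``pa.Table`` named ``table``; ``pa.ipc.new_stream`` and
+# ``pa.ipc.open_stream`` are the public wrappers built on these classes):
+#
+#     sink = pa.BufferOutputStream()
+#     with pa.ipc.new_stream(sink, table.schema) as writer:
+#         writer.write_table(table)
+#     with pa.ipc.open_stream(sink.getvalue()) as reader:
+#         round_tripped = reader.read_all()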
+ +from cpython.pycapsule cimport PyCapsule_CheckExact, PyCapsule_GetPointer, PyCapsule_New + +from collections import namedtuple +import warnings +from cython import sizeof + +cpdef enum MetadataVersion: + V1 = CMetadataVersion_V1 + V2 = CMetadataVersion_V2 + V3 = CMetadataVersion_V3 + V4 = CMetadataVersion_V4 + V5 = CMetadataVersion_V5 + + +cdef object _wrap_metadata_version(CMetadataVersion version): + return MetadataVersion( version) + + +cdef CMetadataVersion _unwrap_metadata_version( + MetadataVersion version) except *: + if version == MetadataVersion.V1: + return CMetadataVersion_V1 + elif version == MetadataVersion.V2: + return CMetadataVersion_V2 + elif version == MetadataVersion.V3: + return CMetadataVersion_V3 + elif version == MetadataVersion.V4: + return CMetadataVersion_V4 + elif version == MetadataVersion.V5: + return CMetadataVersion_V5 + raise ValueError("Not a metadata version: " + repr(version)) + + +_WriteStats = namedtuple( + 'WriteStats', + ('num_messages', 'num_record_batches', 'num_dictionary_batches', + 'num_dictionary_deltas', 'num_replaced_dictionaries')) + + +class WriteStats(_WriteStats): + """IPC write statistics + + Parameters + ---------- + num_messages : int + Number of messages. + num_record_batches : int + Number of record batches. + num_dictionary_batches : int + Number of dictionary batches. + num_dictionary_deltas : int + Delta of dictionaries. + num_replaced_dictionaries : int + Number of replaced dictionaries. + """ + __slots__ = () + + +@staticmethod +cdef _wrap_write_stats(CIpcWriteStats c): + return WriteStats(c.num_messages, c.num_record_batches, + c.num_dictionary_batches, c.num_dictionary_deltas, + c.num_replaced_dictionaries) + + +_ReadStats = namedtuple( + 'ReadStats', + ('num_messages', 'num_record_batches', 'num_dictionary_batches', + 'num_dictionary_deltas', 'num_replaced_dictionaries')) + + +class ReadStats(_ReadStats): + """IPC read statistics + + Parameters + ---------- + num_messages : int + Number of messages. + num_record_batches : int + Number of record batches. + num_dictionary_batches : int + Number of dictionary batches. + num_dictionary_deltas : int + Delta of dictionaries. + num_replaced_dictionaries : int + Number of replaced dictionaries. + """ + __slots__ = () + + +@staticmethod +cdef _wrap_read_stats(CIpcReadStats c): + return ReadStats(c.num_messages, c.num_record_batches, + c.num_dictionary_batches, c.num_dictionary_deltas, + c.num_replaced_dictionaries) + + +cdef class IpcReadOptions(_Weakrefable): + """ + Serialization options for reading IPC format. + + Parameters + ---------- + ensure_native_endian : bool, default True + Whether to convert incoming data to platform-native endianness. + use_threads : bool + Whether to use the global CPU thread pool to parallelize any + computational tasks like decompression + included_fields : list + If empty (the default), return all deserialized fields. 
+ If non-empty, the values are the indices of fields to read on + the top-level schema + """ + __slots__ = () + + # cdef block is in lib.pxd + + def __init__(self, *, bint ensure_native_endian=True, + bint use_threads=True, list included_fields=None): + self.c_options = CIpcReadOptions.Defaults() + self.ensure_native_endian = ensure_native_endian + self.use_threads = use_threads + if included_fields is not None: + self.included_fields = included_fields + + @property + def ensure_native_endian(self): + return self.c_options.ensure_native_endian + + @ensure_native_endian.setter + def ensure_native_endian(self, bint value): + self.c_options.ensure_native_endian = value + + @property + def use_threads(self): + return self.c_options.use_threads + + @use_threads.setter + def use_threads(self, bint value): + self.c_options.use_threads = value + + @property + def included_fields(self): + return self.c_options.included_fields + + @included_fields.setter + def included_fields(self, list value not None): + self.c_options.included_fields = value + + +cdef class IpcWriteOptions(_Weakrefable): + """ + Serialization options for the IPC format. + + Parameters + ---------- + metadata_version : MetadataVersion, default MetadataVersion.V5 + The metadata version to write. V5 is the current and latest, + V4 is the pre-1.0 metadata version (with incompatible Union layout). + allow_64bit : bool, default False + If true, allow field lengths that don't fit in a signed 32-bit int. + use_legacy_format : bool, default False + Whether to use the pre-Arrow 0.15 IPC format. + compression : str, Codec, or None + compression codec to use for record batch buffers. + If None then batch buffers will be uncompressed. + Must be "lz4", "zstd" or None. + To specify a compression_level use `pyarrow.Codec` + use_threads : bool + Whether to use the global CPU thread pool to parallelize any + computational tasks like compression. + emit_dictionary_deltas : bool + Whether to emit dictionary deltas. Default is false for maximum + stream compatibility. + unify_dictionaries : bool + If true then calls to write_table will attempt to unify dictionaries + across all batches in the table. This can help avoid the need for + replacement dictionaries (which the file format does not support) + but requires computing the unified dictionary and then remapping + the indices arrays. + + This parameter is ignored when writing to the IPC stream format as + the IPC stream format can support replacement dictionaries. 
+ """ + __slots__ = () + + # cdef block is in lib.pxd + + def __init__(self, *, metadata_version=MetadataVersion.V5, + bint allow_64bit=False, use_legacy_format=False, + compression=None, bint use_threads=True, + bint emit_dictionary_deltas=False, + bint unify_dictionaries=False): + self.c_options = CIpcWriteOptions.Defaults() + self.allow_64bit = allow_64bit + self.use_legacy_format = use_legacy_format + self.metadata_version = metadata_version + if compression is not None: + self.compression = compression + self.use_threads = use_threads + self.emit_dictionary_deltas = emit_dictionary_deltas + self.unify_dictionaries = unify_dictionaries + + @property + def allow_64bit(self): + return self.c_options.allow_64bit + + @allow_64bit.setter + def allow_64bit(self, bint value): + self.c_options.allow_64bit = value + + @property + def use_legacy_format(self): + return self.c_options.write_legacy_ipc_format + + @use_legacy_format.setter + def use_legacy_format(self, bint value): + self.c_options.write_legacy_ipc_format = value + + @property + def metadata_version(self): + return _wrap_metadata_version(self.c_options.metadata_version) + + @metadata_version.setter + def metadata_version(self, value): + self.c_options.metadata_version = _unwrap_metadata_version(value) + + @property + def compression(self): + if self.c_options.codec == nullptr: + return None + else: + return frombytes(self.c_options.codec.get().name()) + + @compression.setter + def compression(self, value): + if value is None: + self.c_options.codec.reset() + elif isinstance(value, str): + codec_type = _ensure_compression(value) + if codec_type != CCompressionType_ZSTD and codec_type != CCompressionType_LZ4_FRAME: + raise ValueError("Compression type must be lz4, zstd or None") + self.c_options.codec = shared_ptr[CCodec](GetResultValue( + CCodec.Create(codec_type)).release()) + elif isinstance(value, Codec): + if value.name != "lz4" and value.name != "zstd": + raise ValueError("Compression type must be lz4, zstd or None") + self.c_options.codec = (value).wrapped + else: + raise TypeError( + "Property `compression` must be None, str, or pyarrow.Codec") + + @property + def use_threads(self): + return self.c_options.use_threads + + @use_threads.setter + def use_threads(self, bint value): + self.c_options.use_threads = value + + @property + def emit_dictionary_deltas(self): + return self.c_options.emit_dictionary_deltas + + @emit_dictionary_deltas.setter + def emit_dictionary_deltas(self, bint value): + self.c_options.emit_dictionary_deltas = value + + @property + def unify_dictionaries(self): + return self.c_options.unify_dictionaries + + @unify_dictionaries.setter + def unify_dictionaries(self, bint value): + self.c_options.unify_dictionaries = value + + +cdef class Message(_Weakrefable): + """ + Container for an Arrow IPC message with metadata and optional body + """ + + def __cinit__(self): + pass + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "`pyarrow.ipc.read_message` function instead." 
+ .format(self.__class__.__name__)) + + @property + def type(self): + return frombytes(FormatMessageType(self.message.get().type())) + + @property + def metadata(self): + return pyarrow_wrap_buffer(self.message.get().metadata()) + + @property + def metadata_version(self): + return _wrap_metadata_version(self.message.get().metadata_version()) + + @property + def body(self): + cdef shared_ptr[CBuffer] body = self.message.get().body() + if body.get() == NULL: + return None + else: + return pyarrow_wrap_buffer(body) + + def equals(self, Message other): + """ + Returns True if the message contents (metadata and body) are identical + + Parameters + ---------- + other : Message + + Returns + ------- + are_equal : bool + """ + cdef c_bool result + with nogil: + result = self.message.get().Equals(deref(other.message.get())) + return result + + def serialize_to(self, NativeFile sink, alignment=8, memory_pool=None): + """ + Write message to generic OutputStream + + Parameters + ---------- + sink : NativeFile + alignment : int, default 8 + Byte alignment for metadata and body + memory_pool : MemoryPool, default None + Uses default memory pool if not specified + """ + cdef: + int64_t output_length = 0 + COutputStream* out + CIpcWriteOptions options + + options.alignment = alignment + out = sink.get_output_stream().get() + with nogil: + check_status(self.message.get() + .SerializeTo(out, options, &output_length)) + + def serialize(self, alignment=8, memory_pool=None): + """ + Write message as encapsulated IPC message + + Parameters + ---------- + alignment : int, default 8 + Byte alignment for metadata and body + memory_pool : MemoryPool, default None + Uses default memory pool if not specified + + Returns + ------- + serialized : Buffer + """ + stream = BufferOutputStream(memory_pool) + self.serialize_to(stream, alignment=alignment, memory_pool=memory_pool) + return stream.getvalue() + + def __repr__(self): + if self.message == nullptr: + return """pyarrow.Message(uninitialized)""" + + metadata_len = self.metadata.size + body = self.body + body_len = 0 if body is None else body.size + + return """pyarrow.Message +type: {0} +metadata length: {1} +body length: {2}""".format(self.type, metadata_len, body_len) + + +cdef class MessageReader(_Weakrefable): + """ + Interface for reading Message objects from some source (like an + InputStream) + """ + cdef: + unique_ptr[CMessageReader] reader + + def __cinit__(self): + pass + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "`pyarrow.ipc.MessageReader.open_stream` function " + "instead.".format(self.__class__.__name__)) + + @staticmethod + def open_stream(source): + """ + Open stream from source, if you want to use memory map use + MemoryMappedFile as source. + + Parameters + ---------- + source : bytes/buffer-like, pyarrow.NativeFile, or file-like Python object + A readable source, like an InputStream + """ + cdef: + MessageReader result = MessageReader.__new__(MessageReader) + shared_ptr[CInputStream] in_stream + unique_ptr[CMessageReader] reader + + _get_input_stream(source, &in_stream) + with nogil: + reader = CMessageReader.Open(in_stream) + result.reader.reset(reader.release()) + + return result + + def __iter__(self): + return self + + def __next__(self): + return self.read_next_message() + + def read_next_message(self): + """ + Read next Message from the stream. 
+ + Raises + ------ + StopIteration + At end of stream + """ + cdef Message result = Message.__new__(Message) + + with nogil: + result.message = move(GetResultValue(self.reader.get() + .ReadNextMessage())) + + if result.message.get() == NULL: + raise StopIteration + + return result + +# ---------------------------------------------------------------------- +# File and stream readers and writers + +cdef class _CRecordBatchWriter(_Weakrefable): + """The base RecordBatchWriter wrapper. + + Provides common implementations of convenience methods. Should not + be instantiated directly by user code. + """ + + # cdef block is in lib.pxd + + def write(self, table_or_batch): + """ + Write RecordBatch or Table to stream. + + Parameters + ---------- + table_or_batch : {RecordBatch, Table} + """ + if isinstance(table_or_batch, RecordBatch): + self.write_batch(table_or_batch) + elif isinstance(table_or_batch, Table): + self.write_table(table_or_batch) + else: + raise ValueError(type(table_or_batch)) + + def write_batch(self, RecordBatch batch, custom_metadata=None): + """ + Write RecordBatch to stream. + + Parameters + ---------- + batch : RecordBatch + custom_metadata : mapping or KeyValueMetadata + Keys and values must be string-like / coercible to bytes + """ + metadata = ensure_metadata(custom_metadata, allow_none=True) + c_meta = pyarrow_unwrap_metadata(metadata) + + with nogil: + check_status(self.writer.get() + .WriteRecordBatch(deref(batch.batch), c_meta)) + + def write_table(self, Table table, max_chunksize=None): + """ + Write Table to stream in (contiguous) RecordBatch objects. + + Parameters + ---------- + table : Table + max_chunksize : int, default None + Maximum number of rows for RecordBatch chunks. Individual chunks may + be smaller depending on the chunk layout of individual columns. + """ + cdef: + # max_chunksize must be > 0 to have any impact + int64_t c_max_chunksize = -1 + + if max_chunksize is not None: + c_max_chunksize = max_chunksize + + with nogil: + check_status(self.writer.get().WriteTable(table.table[0], + c_max_chunksize)) + + def close(self): + """ + Close stream and write end-of-stream 0 marker. + """ + with nogil: + check_status(self.writer.get().Close()) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + @property + def stats(self): + """ + Current IPC write statistics. 
+ """ + if not self.writer: + raise ValueError("Operation on closed writer") + return _wrap_write_stats(self.writer.get().stats()) + + +cdef class _RecordBatchStreamWriter(_CRecordBatchWriter): + cdef: + CIpcWriteOptions options + bint closed + + def __cinit__(self): + pass + + def __dealloc__(self): + pass + + @property + def _use_legacy_format(self): + # For testing (see test_ipc.py) + return self.options.write_legacy_ipc_format + + @property + def _metadata_version(self): + # For testing (see test_ipc.py) + return _wrap_metadata_version(self.options.metadata_version) + + def _open(self, sink, Schema schema not None, + IpcWriteOptions options=IpcWriteOptions()): + cdef: + shared_ptr[COutputStream] c_sink + + self.options = options.c_options + get_writer(sink, &c_sink) + with nogil: + self.writer = GetResultValue( + MakeStreamWriter(c_sink, schema.sp_schema, + self.options)) + + +cdef _get_input_stream(object source, shared_ptr[CInputStream]* out): + try: + source = as_buffer(source) + except TypeError: + # Non-buffer-like + pass + + get_input_stream(source, True, out) + + +class _ReadPandasMixin: + + def read_pandas(self, **options): + """ + Read contents of stream to a pandas.DataFrame. + + Read all record batches as a pyarrow.Table then convert it to a + pandas.DataFrame using Table.to_pandas. + + Parameters + ---------- + **options + Arguments to forward to :meth:`Table.to_pandas`. + + Returns + ------- + df : pandas.DataFrame + """ + table = self.read_all() + return table.to_pandas(**options) + + +cdef class RecordBatchReader(_Weakrefable): + """Base class for reading stream of record batches. + + Record batch readers function as iterators of record batches that also + provide the schema (without the need to get any batches). + + Warnings + -------- + Do not call this class's constructor directly, use one of the + ``RecordBatchReader.from_*`` functions instead. + + Notes + ----- + To import and export using the Arrow C stream interface, use the + ``_import_from_c`` and ``_export_to_c`` methods. However, keep in mind this + interface is intended for expert users. + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([('x', pa.int64())]) + >>> def iter_record_batches(): + ... for i in range(2): + ... yield pa.RecordBatch.from_arrays([pa.array([1, 2, 3])], schema=schema) + >>> reader = pa.RecordBatchReader.from_batches(schema, iter_record_batches()) + >>> print(reader.schema) + x: int64 + >>> for batch in reader: + ... print(batch) + pyarrow.RecordBatch + x: int64 + ---- + x: [1,2,3] + pyarrow.RecordBatch + x: int64 + ---- + x: [1,2,3] + """ + + # cdef block is in lib.pxd + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, " + "use one of the RecordBatchReader.from_* functions instead." + .format(self.__class__.__name__)) + + def __iter__(self): + return self + + def __next__(self): + return self.read_next_batch() + + @property + def schema(self): + """ + Shared schema of the record batches in the stream. + + Returns + ------- + Schema + """ + cdef shared_ptr[CSchema] c_schema + + with nogil: + c_schema = self.reader.get().schema() + + return pyarrow_wrap_schema(c_schema) + + def read_next_batch(self): + """ + Read next RecordBatch from the stream. + + Raises + ------ + StopIteration: + At end of stream. 
+ + Returns + ------- + RecordBatch + """ + cdef shared_ptr[CRecordBatch] batch + + with nogil: + check_status(self.reader.get().ReadNext(&batch)) + + if batch.get() == NULL: + raise StopIteration + + return pyarrow_wrap_batch(batch) + + def read_next_batch_with_custom_metadata(self): + """ + Read next RecordBatch from the stream along with its custom metadata. + + Raises + ------ + StopIteration: + At end of stream. + + Returns + ------- + batch : RecordBatch + custom_metadata : KeyValueMetadata + """ + cdef: + CRecordBatchWithMetadata batch_with_metadata + + with nogil: + batch_with_metadata = GetResultValue(self.reader.get().ReadNext()) + + if batch_with_metadata.batch.get() == NULL: + raise StopIteration + + return _wrap_record_batch_with_metadata(batch_with_metadata) + + def iter_batches_with_custom_metadata(self): + """ + Iterate over record batches from the stream along with their custom + metadata. + + Yields + ------ + RecordBatchWithMetadata + """ + while True: + try: + yield self.read_next_batch_with_custom_metadata() + except StopIteration: + return + + def read_all(self): + """ + Read all record batches as a pyarrow.Table. + + Returns + ------- + Table + """ + cdef shared_ptr[CTable] table + with nogil: + check_status(self.reader.get().ToTable().Value(&table)) + return pyarrow_wrap_table(table) + + read_pandas = _ReadPandasMixin.read_pandas + + def close(self): + """ + Release any resources associated with the reader. + """ + with nogil: + check_status(self.reader.get().Close()) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + def cast(self, target_schema): + """ + Wrap this reader with one that casts each batch lazily as it is pulled. + Currently only a safe cast to target_schema is implemented. + + Parameters + ---------- + target_schema : Schema + Schema to cast to, the names and order of fields must match. + + Returns + ------- + RecordBatchReader + """ + cdef: + shared_ptr[CSchema] c_schema + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader out + + if self.schema.names != target_schema.names: + raise ValueError("Target schema's field names are not matching " + f"the table's field names: {self.schema.names}, " + f"{target_schema.names}") + + c_schema = pyarrow_unwrap_schema(target_schema) + c_reader = GetResultValue(CCastingRecordBatchReader.Make( + self.reader, c_schema)) + + out = RecordBatchReader.__new__(RecordBatchReader) + out.reader = c_reader + return out + + def _export_to_c(self, out_ptr): + """ + Export to a C ArrowArrayStream struct, given its pointer. + + Parameters + ---------- + out_ptr: int + The raw pointer to a C ArrowArrayStream struct. + + Be careful: if you don't pass the ArrowArrayStream struct to a + consumer, array memory will leak. This is a low-level function + intended for expert users. + """ + cdef: + void* c_ptr = _as_c_pointer(out_ptr) + with nogil: + check_status(ExportRecordBatchReader( + self.reader, c_ptr)) + + @staticmethod + def _import_from_c(in_ptr): + """ + Import RecordBatchReader from a C ArrowArrayStream struct, + given its pointer. + + Parameters + ---------- + in_ptr: int + The raw pointer to a C ArrowArrayStream struct. + + This is a low-level function intended for expert users. 
+ """ + cdef: + void* c_ptr = _as_c_pointer(in_ptr) + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader self + + with nogil: + c_reader = GetResultValue(ImportRecordBatchReader( + c_ptr)) + + self = RecordBatchReader.__new__(RecordBatchReader) + self.reader = c_reader + return self + + def __arrow_c_stream__(self, requested_schema=None): + """ + Export to a C ArrowArrayStream PyCapsule. + + Parameters + ---------- + requested_schema : PyCapsule, default None + The schema to which the stream should be casted, passed as a + PyCapsule containing a C ArrowSchema representation of the + requested schema. + + Returns + ------- + PyCapsule + A capsule containing a C ArrowArrayStream struct. + """ + cdef: + ArrowArrayStream* c_stream + + if requested_schema is not None: + out_schema = Schema._import_from_c_capsule(requested_schema) + if self.schema != out_schema: + return self.cast(out_schema).__arrow_c_stream__() + + stream_capsule = alloc_c_stream(&c_stream) + + with nogil: + check_status(ExportRecordBatchReader(self.reader, c_stream)) + + return stream_capsule + + @staticmethod + def _import_from_c_capsule(stream): + """ + Import RecordBatchReader from a C ArrowArrayStream PyCapsule. + + Parameters + ---------- + stream: PyCapsule + A capsule containing a C ArrowArrayStream PyCapsule. + + Returns + ------- + RecordBatchReader + """ + cdef: + ArrowArrayStream* c_stream + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader self + + c_stream = PyCapsule_GetPointer( + stream, 'arrow_array_stream' + ) + + with nogil: + c_reader = GetResultValue(ImportRecordBatchReader(c_stream)) + + self = RecordBatchReader.__new__(RecordBatchReader) + self.reader = c_reader + return self + + @staticmethod + def from_stream(data, schema=None): + """ + Create RecordBatchReader from a Arrow-compatible stream object. + + This accepts objects implementing the Arrow PyCapsule Protocol for + streams, i.e. objects that have a ``__arrow_c_stream__`` method. + + Parameters + ---------- + data : Arrow-compatible stream object + Any object that implements the Arrow PyCapsule Protocol for + streams. + schema : Schema, default None + The schema to which the stream should be casted, if supported + by the stream object. + + Returns + ------- + RecordBatchReader + """ + + if not hasattr(data, "__arrow_c_stream__"): + raise TypeError( + "Expected an object implementing the Arrow PyCapsule Protocol for " + "streams (i.e. having a `__arrow_c_stream__` method), " + f"got {type(data)!r}." + ) + + if schema is not None: + if not hasattr(schema, "__arrow_c_schema__"): + raise TypeError( + "Expected an object implementing the Arrow PyCapsule Protocol for " + "schema (i.e. having a `__arrow_c_schema__` method), " + f"got {type(schema)!r}." + ) + requested = schema.__arrow_c_schema__() + else: + requested = None + + capsule = data.__arrow_c_stream__(requested) + return RecordBatchReader._import_from_c_capsule(capsule) + + @staticmethod + def from_batches(Schema schema not None, batches): + """ + Create RecordBatchReader from an iterable of batches. + + Parameters + ---------- + schema : Schema + The shared schema of the record batches + batches : Iterable[RecordBatch] + The batches that this reader will return. 
+ + Returns + ------- + reader : RecordBatchReader + """ + cdef: + shared_ptr[CSchema] c_schema + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader self + + c_schema = pyarrow_unwrap_schema(schema) + c_reader = GetResultValue(CPyRecordBatchReader.Make( + c_schema, batches)) + + self = RecordBatchReader.__new__(RecordBatchReader) + self.reader = c_reader + return self + + +cdef class _RecordBatchStreamReader(RecordBatchReader): + cdef: + shared_ptr[CInputStream] in_stream + CIpcReadOptions options + CRecordBatchStreamReader* stream_reader + + def __cinit__(self): + pass + + def _open(self, source, IpcReadOptions options=IpcReadOptions(), + MemoryPool memory_pool=None): + self.options = options.c_options + self.options.memory_pool = maybe_unbox_memory_pool(memory_pool) + _get_input_stream(source, &self.in_stream) + with nogil: + self.reader = GetResultValue(CRecordBatchStreamReader.Open( + self.in_stream, self.options)) + self.stream_reader = self.reader.get() + + @property + def stats(self): + """ + Current IPC read statistics. + """ + if not self.reader: + raise ValueError("Operation on closed reader") + return _wrap_read_stats(self.stream_reader.stats()) + + +cdef class _RecordBatchFileWriter(_RecordBatchStreamWriter): + + def _open(self, sink, Schema schema not None, + IpcWriteOptions options=IpcWriteOptions()): + cdef: + shared_ptr[COutputStream] c_sink + + self.options = options.c_options + get_writer(sink, &c_sink) + with nogil: + self.writer = GetResultValue( + MakeFileWriter(c_sink, schema.sp_schema, self.options)) + +_RecordBatchWithMetadata = namedtuple( + 'RecordBatchWithMetadata', + ('batch', 'custom_metadata')) + + +class RecordBatchWithMetadata(_RecordBatchWithMetadata): + """RecordBatch with its custom metadata + + Parameters + ---------- + batch : RecordBatch + custom_metadata : KeyValueMetadata + """ + __slots__ = () + + +@staticmethod +cdef _wrap_record_batch_with_metadata(CRecordBatchWithMetadata c): + return RecordBatchWithMetadata(pyarrow_wrap_batch(c.batch), + pyarrow_wrap_metadata(c.custom_metadata)) + + +cdef class _RecordBatchFileReader(_Weakrefable): + cdef: + SharedPtrNoGIL[CRecordBatchFileReader] reader + shared_ptr[CRandomAccessFile] file + CIpcReadOptions options + + cdef readonly: + Schema schema + + def __cinit__(self): + pass + + def _open(self, source, footer_offset=None, + IpcReadOptions options=IpcReadOptions(), + MemoryPool memory_pool=None): + self.options = options.c_options + self.options.memory_pool = maybe_unbox_memory_pool(memory_pool) + try: + source = as_buffer(source) + except TypeError: + pass + + get_reader(source, False, &self.file) + + cdef int64_t offset = 0 + if footer_offset is not None: + offset = footer_offset + + with nogil: + if offset != 0: + self.reader = GetResultValue( + CRecordBatchFileReader.Open2(self.file.get(), offset, + self.options)) + + else: + self.reader = GetResultValue( + CRecordBatchFileReader.Open(self.file.get(), + self.options)) + + self.schema = pyarrow_wrap_schema(self.reader.get().schema()) + + @property + def num_record_batches(self): + """ + The number of record batches in the IPC file. + """ + return self.reader.get().num_record_batches() + + def get_batch(self, int i): + """ + Read the record batch with the given index. + + Parameters + ---------- + i : int + The index of the record batch in the IPC file. 
+
+        Returns
+        -------
+        batch : RecordBatch
+        """
+        cdef shared_ptr[CRecordBatch] batch
+
+        if i < 0 or i >= self.num_record_batches:
+            raise ValueError('Batch number {0} out of range'.format(i))
+
+        with nogil:
+            batch = GetResultValue(self.reader.get().ReadRecordBatch(i))
+
+        return pyarrow_wrap_batch(batch)
+
+    # TODO(wesm): ARROW-503: Function was renamed. Remove after a period of
+    # time has passed
+    get_record_batch = get_batch
+
+    def get_batch_with_custom_metadata(self, int i):
+        """
+        Read the record batch with the given index along with
+        its custom metadata.
+
+        Parameters
+        ----------
+        i : int
+            The index of the record batch in the IPC file.
+
+        Returns
+        -------
+        batch : RecordBatch
+        custom_metadata : KeyValueMetadata
+        """
+        cdef:
+            CRecordBatchWithMetadata batch_with_metadata
+
+        if i < 0 or i >= self.num_record_batches:
+            raise ValueError('Batch number {0} out of range'.format(i))
+
+        with nogil:
+            batch_with_metadata = GetResultValue(
+                self.reader.get().ReadRecordBatchWithCustomMetadata(i))
+
+        return _wrap_record_batch_with_metadata(batch_with_metadata)
+
+    def read_all(self):
+        """
+        Read all record batches as a pyarrow.Table
+        """
+        cdef:
+            vector[shared_ptr[CRecordBatch]] batches
+            shared_ptr[CTable] table
+            int i, nbatches
+
+        nbatches = self.num_record_batches
+
+        batches.resize(nbatches)
+        with nogil:
+            for i in range(nbatches):
+                batches[i] = GetResultValue(self.reader.get()
+                                            .ReadRecordBatch(i))
+            table = GetResultValue(
+                CTable.FromRecordBatches(self.schema.sp_schema, move(batches)))
+
+        return pyarrow_wrap_table(table)
+
+    read_pandas = _ReadPandasMixin.read_pandas
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        pass
+
+    @property
+    def stats(self):
+        """
+        Current IPC read statistics.
+        """
+        if not self.reader:
+            raise ValueError("Operation on closed reader")
+        return _wrap_read_stats(self.reader.get().stats())
+
+
+def get_tensor_size(Tensor tensor):
+    """
+    Return total size of serialized Tensor including metadata and padding.
+
+    Parameters
+    ----------
+    tensor : Tensor
+        The tensor for which we want to know the size.
+    """
+    cdef int64_t size
+    with nogil:
+        check_status(GetTensorSize(deref(tensor.tp), &size))
+    return size
+
+
+def get_record_batch_size(RecordBatch batch):
+    """
+    Return total size of serialized RecordBatch including metadata and padding.
+
+    Parameters
+    ----------
+    batch : RecordBatch
+        The record batch for which we want to know the size.
+    """
+    cdef int64_t size
+    with nogil:
+        check_status(GetRecordBatchSize(deref(batch.batch), &size))
+    return size
+
+
+def write_tensor(Tensor tensor, NativeFile dest):
+    """
+    Write pyarrow.Tensor to pyarrow.NativeFile object at its current position.
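+
+    The returned value is the serialized metadata length plus the body
+    length, i.e. how many bytes were written to the stream.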
+
+    Parameters
+    ----------
+    tensor : pyarrow.Tensor
+    dest : pyarrow.NativeFile
+
+    Returns
+    -------
+    bytes_written : int
+        Total number of bytes written to the file
+    """
+    cdef:
+        int32_t metadata_length
+        int64_t body_length
+
+    handle = dest.get_output_stream()
+
+    with nogil:
+        check_status(
+            WriteTensor(deref(tensor.tp), handle.get(),
+                        &metadata_length, &body_length))
+
+    return metadata_length + body_length
+
+
+cdef NativeFile as_native_file(source):
+    if not isinstance(source, NativeFile):
+        if hasattr(source, 'read'):
+            source = PythonFile(source)
+        else:
+            source = BufferReader(source)
+
+    if not isinstance(source, NativeFile):
+        raise ValueError('Unable to read message from object with type: {0}'
+                         .format(type(source)))
+    return source
+
+
+def read_tensor(source):
+    """Read pyarrow.Tensor from pyarrow.NativeFile object at its current
+    position. If the file source supports zero copy (e.g. a memory map), then
+    this operation does not allocate any memory. This function does not assume
+    that the stream is aligned.
+
+    Parameters
+    ----------
+    source : pyarrow.NativeFile
+
+    Returns
+    -------
+    tensor : Tensor
+
+    """
+    cdef:
+        shared_ptr[CTensor] sp_tensor
+        CInputStream* c_stream
+        NativeFile nf = as_native_file(source)
+
+    c_stream = nf.get_input_stream().get()
+    with nogil:
+        sp_tensor = GetResultValue(ReadTensor(c_stream))
+    return pyarrow_wrap_tensor(sp_tensor)
+
+
+def read_message(source):
+    """
+    Read length-prefixed message from file or buffer-like object.
+
+    Parameters
+    ----------
+    source : pyarrow.NativeFile, file-like object, or buffer-like object
+
+    Returns
+    -------
+    message : Message
+    """
+    cdef:
+        Message result = Message.__new__(Message)
+        CInputStream* c_stream
+
+    cdef NativeFile nf = as_native_file(source)
+    c_stream = nf.get_input_stream().get()
+
+    with nogil:
+        result.message = move(
+            GetResultValue(ReadMessage(c_stream, c_default_memory_pool())))
+
+    if result.message == nullptr:
+        raise EOFError("End of Arrow stream")
+
+    return result
+
+
+def read_schema(obj, DictionaryMemo dictionary_memo=None):
+    """
+    Read Schema from message or buffer
+
+    Parameters
+    ----------
+    obj : buffer or Message
+    dictionary_memo : DictionaryMemo, optional
+        Needed to be able to reconstruct dictionary-encoded fields
+        with read_record_batch
+
+    Returns
+    -------
+    schema : Schema
+    """
+    cdef:
+        shared_ptr[CSchema] result
+        shared_ptr[CRandomAccessFile] cpp_file
+        Message message
+        CDictionaryMemo temp_memo
+        CDictionaryMemo* arg_dict_memo
+
+    if dictionary_memo is not None:
+        arg_dict_memo = dictionary_memo.memo
+    else:
+        arg_dict_memo = &temp_memo
+
+    if isinstance(obj, Message):
+        message = obj
+        with nogil:
+            result = GetResultValue(ReadSchema(
+                deref(message.message.get()), arg_dict_memo))
+    else:
+        get_reader(obj, False, &cpp_file)
+        with nogil:
+            result = GetResultValue(ReadSchema(cpp_file.get(), arg_dict_memo))
+
+    return pyarrow_wrap_schema(result)
+
+
+def read_record_batch(obj, Schema schema,
+                      DictionaryMemo dictionary_memo=None):
+    """
+    Read RecordBatch from message, given a known schema. If reading data from
+    a complete IPC stream, use ipc.open_stream instead.
+
+    Parameters
+    ----------
+    obj : Message or Buffer-like
+    schema : Schema
+    dictionary_memo : DictionaryMemo, optional
+        If message contains dictionaries, must pass a populated
+        DictionaryMemo
+
+    Returns
+    -------
+    batch : RecordBatch
+    """
+    cdef:
+        shared_ptr[CRecordBatch] result
+        Message message
+        CDictionaryMemo temp_memo
+        CDictionaryMemo* arg_dict_memo
+
+    if isinstance(obj, Message):
+        message = obj
+    else:
+        message = read_message(obj)
+
+    if dictionary_memo is not None:
+        arg_dict_memo = dictionary_memo.memo
+    else:
+        arg_dict_memo = &temp_memo
+
+    with nogil:
+        result = GetResultValue(
+            ReadRecordBatch(deref(message.message.get()),
+                            schema.sp_schema,
+                            arg_dict_memo,
+                            CIpcReadOptions.Defaults()))
+
+    return pyarrow_wrap_batch(result)
diff --git a/parrot/lib/python3.10/site-packages/pyarrow/json.py b/parrot/lib/python3.10/site-packages/pyarrow/json.py
new file mode 100644
index 0000000000000000000000000000000000000000..a864f5d998a443e949d4ee24ae2df628cf81b8c5
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/pyarrow/json.py
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+from pyarrow._json import ReadOptions, ParseOptions, read_json  # noqa
diff --git a/parrot/lib/python3.10/site-packages/pyarrow/jvm.py b/parrot/lib/python3.10/site-packages/pyarrow/jvm.py
new file mode 100644
index 0000000000000000000000000000000000000000..161c5ff4d6d74512dfcd76ddac5a4c4781ad63c3
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/pyarrow/jvm.py
@@ -0,0 +1,335 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Functions to interact with Arrow memory allocated by Arrow Java.
+
+These functions convert the objects holding the metadata; the actual
+data is not copied at all.
+
+This will only work with a JVM running in the same process, such as one
+provided through jpype. Modules that talk to a remote JVM, like py4j, will
+not work, as the memory addresses they report are not reachable in the
+Python process.
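+
+A minimal usage sketch (illustrative only; it assumes a jpype-started JVM
+with the Arrow Java libraries on the class path, and ``root`` being an
+org.apache.arrow.vector.VectorSchemaRoot created on the JVM side; the jar
+name is a placeholder):
+
+    >>> import jpype                                      # doctest: +SKIP
+    >>> jpype.startJVM(classpath=["arrow-vector.jar"])    # doctest: +SKIP
+    >>> import pyarrow.jvm as pa_jvm                      # doctest: +SKIP
+    >>> batch = pa_jvm.record_batch(root)                 # doctest: +SKIP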
+""" + +import pyarrow as pa + + +class _JvmBufferNanny: + """ + An object that keeps a org.apache.arrow.memory.ArrowBuf's underlying + memory alive. + """ + ref_manager = None + + def __init__(self, jvm_buf): + ref_manager = jvm_buf.getReferenceManager() + # Will raise a java.lang.IllegalArgumentException if the buffer + # is already freed. It seems that exception cannot easily be + # caught... + ref_manager.retain() + self.ref_manager = ref_manager + + def __del__(self): + if self.ref_manager is not None: + self.ref_manager.release() + + +def jvm_buffer(jvm_buf): + """ + Construct an Arrow buffer from org.apache.arrow.memory.ArrowBuf + + Parameters + ---------- + + jvm_buf: org.apache.arrow.memory.ArrowBuf + Arrow Buffer representation on the JVM. + + Returns + ------- + pyarrow.Buffer + Python Buffer that references the JVM memory. + """ + nanny = _JvmBufferNanny(jvm_buf) + address = jvm_buf.memoryAddress() + size = jvm_buf.capacity() + return pa.foreign_buffer(address, size, base=nanny) + + +def _from_jvm_int_type(jvm_type): + """ + Convert a JVM int type to its Python equivalent. + + Parameters + ---------- + jvm_type : org.apache.arrow.vector.types.pojo.ArrowType$Int + + Returns + ------- + typ : pyarrow.DataType + """ + + bit_width = jvm_type.getBitWidth() + if jvm_type.getIsSigned(): + if bit_width == 8: + return pa.int8() + elif bit_width == 16: + return pa.int16() + elif bit_width == 32: + return pa.int32() + elif bit_width == 64: + return pa.int64() + else: + if bit_width == 8: + return pa.uint8() + elif bit_width == 16: + return pa.uint16() + elif bit_width == 32: + return pa.uint32() + elif bit_width == 64: + return pa.uint64() + + +def _from_jvm_float_type(jvm_type): + """ + Convert a JVM float type to its Python equivalent. + + Parameters + ---------- + jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$FloatingPoint + + Returns + ------- + typ: pyarrow.DataType + """ + precision = jvm_type.getPrecision().toString() + if precision == 'HALF': + return pa.float16() + elif precision == 'SINGLE': + return pa.float32() + elif precision == 'DOUBLE': + return pa.float64() + + +def _from_jvm_time_type(jvm_type): + """ + Convert a JVM time type to its Python equivalent. + + Parameters + ---------- + jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Time + + Returns + ------- + typ: pyarrow.DataType + """ + time_unit = jvm_type.getUnit().toString() + if time_unit == 'SECOND': + assert jvm_type.getBitWidth() == 32 + return pa.time32('s') + elif time_unit == 'MILLISECOND': + assert jvm_type.getBitWidth() == 32 + return pa.time32('ms') + elif time_unit == 'MICROSECOND': + assert jvm_type.getBitWidth() == 64 + return pa.time64('us') + elif time_unit == 'NANOSECOND': + assert jvm_type.getBitWidth() == 64 + return pa.time64('ns') + + +def _from_jvm_timestamp_type(jvm_type): + """ + Convert a JVM timestamp type to its Python equivalent. 
+
+    Parameters
+    ----------
+    jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Timestamp
+
+    Returns
+    -------
+    typ: pyarrow.DataType
+    """
+    time_unit = jvm_type.getUnit().toString()
+    timezone = jvm_type.getTimezone()
+    if timezone is not None:
+        timezone = str(timezone)
+    if time_unit == 'SECOND':
+        return pa.timestamp('s', tz=timezone)
+    elif time_unit == 'MILLISECOND':
+        return pa.timestamp('ms', tz=timezone)
+    elif time_unit == 'MICROSECOND':
+        return pa.timestamp('us', tz=timezone)
+    elif time_unit == 'NANOSECOND':
+        return pa.timestamp('ns', tz=timezone)
+
+
+def _from_jvm_date_type(jvm_type):
+    """
+    Convert a JVM date type to its Python equivalent.
+
+    Parameters
+    ----------
+    jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Date
+
+    Returns
+    -------
+    typ: pyarrow.DataType
+    """
+    day_unit = jvm_type.getUnit().toString()
+    if day_unit == 'DAY':
+        return pa.date32()
+    elif day_unit == 'MILLISECOND':
+        return pa.date64()
+
+
+def field(jvm_field):
+    """
+    Construct a Field from an org.apache.arrow.vector.types.pojo.Field
+    instance.
+
+    Parameters
+    ----------
+    jvm_field: org.apache.arrow.vector.types.pojo.Field
+
+    Returns
+    -------
+    pyarrow.Field
+    """
+    name = str(jvm_field.getName())
+    jvm_type = jvm_field.getType()
+
+    typ = None
+    if not jvm_type.isComplex():
+        type_str = jvm_type.getTypeID().toString()
+        if type_str == 'Null':
+            typ = pa.null()
+        elif type_str == 'Int':
+            typ = _from_jvm_int_type(jvm_type)
+        elif type_str == 'FloatingPoint':
+            typ = _from_jvm_float_type(jvm_type)
+        elif type_str == 'Utf8':
+            typ = pa.string()
+        elif type_str == 'Binary':
+            typ = pa.binary()
+        elif type_str == 'FixedSizeBinary':
+            typ = pa.binary(jvm_type.getByteWidth())
+        elif type_str == 'Bool':
+            typ = pa.bool_()
+        elif type_str == 'Time':
+            typ = _from_jvm_time_type(jvm_type)
+        elif type_str == 'Timestamp':
+            typ = _from_jvm_timestamp_type(jvm_type)
+        elif type_str == 'Date':
+            typ = _from_jvm_date_type(jvm_type)
+        elif type_str == 'Decimal':
+            typ = pa.decimal128(jvm_type.getPrecision(), jvm_type.getScale())
+        else:
+            raise NotImplementedError(
+                "Unsupported JVM type: {}".format(type_str))
+    else:
+        # TODO: The following JVM types are not implemented:
+        #       Struct, List, FixedSizeList, Union, Dictionary
+        raise NotImplementedError(
+            "JVM field conversion only implemented for primitive types.")
+
+    nullable = jvm_field.isNullable()
+    jvm_metadata = jvm_field.getMetadata()
+    if jvm_metadata.isEmpty():
+        metadata = None
+    else:
+        metadata = {str(entry.getKey()): str(entry.getValue())
+                    for entry in jvm_metadata.entrySet()}
+    return pa.field(name, typ, nullable, metadata)
+
+
+def schema(jvm_schema):
+    """
+    Construct a Schema from an org.apache.arrow.vector.types.pojo.Schema
+    instance.
+
+    Parameters
+    ----------
+    jvm_schema: org.apache.arrow.vector.types.pojo.Schema
+
+    Returns
+    -------
+    pyarrow.Schema
+    """
+    fields = jvm_schema.getFields()
+    fields = [field(f) for f in fields]
+    jvm_metadata = jvm_schema.getCustomMetadata()
+    if jvm_metadata.isEmpty():
+        metadata = None
+    else:
+        metadata = {str(entry.getKey()): str(entry.getValue())
+                    for entry in jvm_metadata.entrySet()}
+    return pa.schema(fields, metadata)
+
+
+def array(jvm_array):
+    """
+    Construct a (Python) Array from its JVM equivalent.
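+
+    The JVM buffers are wrapped rather than copied; a small keep-alive
+    object retains each buffer for the lifetime of the resulting array.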
+ + Parameters + ---------- + jvm_array : org.apache.arrow.vector.ValueVector + + Returns + ------- + array : Array + """ + if jvm_array.getField().getType().isComplex(): + minor_type_str = jvm_array.getMinorType().toString() + raise NotImplementedError( + "Cannot convert JVM Arrow array of type {}," + " complex types not yet implemented.".format(minor_type_str)) + dtype = field(jvm_array.getField()).type + buffers = [jvm_buffer(buf) + for buf in list(jvm_array.getBuffers(False))] + + # If JVM has an empty Vector, buffer list will be empty so create manually + if len(buffers) == 0: + return pa.array([], type=dtype) + + length = jvm_array.getValueCount() + null_count = jvm_array.getNullCount() + return pa.Array.from_buffers(dtype, length, buffers, null_count) + + +def record_batch(jvm_vector_schema_root): + """ + Construct a (Python) RecordBatch from a JVM VectorSchemaRoot + + Parameters + ---------- + jvm_vector_schema_root : org.apache.arrow.vector.VectorSchemaRoot + + Returns + ------- + record_batch: pyarrow.RecordBatch + """ + pa_schema = schema(jvm_vector_schema_root.getSchema()) + + arrays = [] + for name in pa_schema.names: + arrays.append(array(jvm_vector_schema_root.getVector(name))) + + return pa.RecordBatch.from_arrays( + arrays, + pa_schema.names, + metadata=pa_schema.metadata + ) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/lib.pyx b/parrot/lib/python3.10/site-packages/pyarrow/lib.pyx new file mode 100644 index 0000000000000000000000000000000000000000..c72841c299566cd4b3bddb5e895c4f88e0460595 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/lib.pyx @@ -0,0 +1,229 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile = False +# cython: nonecheck = True +# distutils: language = c++ + +import datetime +import decimal as _pydecimal +import numpy as np +import os +import sys + +from cython.operator cimport dereference as deref +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport * +from pyarrow.includes.common cimport PyObject_to_object +cimport pyarrow.includes.libarrow_python as libarrow_python +cimport cpython as cp + +# Initialize NumPy C API +arrow_init_numpy() +# Initialize PyArrow C++ API +# (used from some of our C++ code, see e.g. ARROW-5260) +import_pyarrow() + + +MonthDayNano = NewMonthDayNanoTupleType() + + +def cpu_count(): + """ + Return the number of threads to use in parallel operations. + + The number of threads is determined at startup by inspecting the + ``OMP_NUM_THREADS`` and ``OMP_THREAD_LIMIT`` environment variables. + If neither is present, it will default to the number of hardware threads + on the system. It can be modified at runtime by calling + :func:`set_cpu_count()`. + + See Also + -------- + set_cpu_count : Modify the size of this pool. 
+ io_thread_count : The analogous function for the I/O thread pool. + """ + return GetCpuThreadPoolCapacity() + + +def set_cpu_count(int count): + """ + Set the number of threads to use in parallel operations. + + Parameters + ---------- + count : int + The number of concurrent threads that should be used. + + See Also + -------- + cpu_count : Get the size of this pool. + set_io_thread_count : The analogous function for the I/O thread pool. + """ + if count < 1: + raise ValueError("CPU count must be strictly positive") + check_status(SetCpuThreadPoolCapacity(count)) + + +def is_threading_enabled() -> bool: + """ + Returns True if threading is enabled in libarrow. + + If it isn't enabled, then python shouldn't create any + threads either, because we're probably on a system where + threading doesn't work (e.g. Emscripten). + """ + return libarrow_python.IsThreadingEnabled() + + +Type_NA = _Type_NA +Type_BOOL = _Type_BOOL +Type_UINT8 = _Type_UINT8 +Type_INT8 = _Type_INT8 +Type_UINT16 = _Type_UINT16 +Type_INT16 = _Type_INT16 +Type_UINT32 = _Type_UINT32 +Type_INT32 = _Type_INT32 +Type_UINT64 = _Type_UINT64 +Type_INT64 = _Type_INT64 +Type_HALF_FLOAT = _Type_HALF_FLOAT +Type_FLOAT = _Type_FLOAT +Type_DOUBLE = _Type_DOUBLE +Type_DECIMAL128 = _Type_DECIMAL128 +Type_DECIMAL256 = _Type_DECIMAL256 +Type_DATE32 = _Type_DATE32 +Type_DATE64 = _Type_DATE64 +Type_TIMESTAMP = _Type_TIMESTAMP +Type_TIME32 = _Type_TIME32 +Type_TIME64 = _Type_TIME64 +Type_DURATION = _Type_DURATION +Type_INTERVAL_MONTH_DAY_NANO = _Type_INTERVAL_MONTH_DAY_NANO +Type_BINARY = _Type_BINARY +Type_STRING = _Type_STRING +Type_LARGE_BINARY = _Type_LARGE_BINARY +Type_LARGE_STRING = _Type_LARGE_STRING +Type_FIXED_SIZE_BINARY = _Type_FIXED_SIZE_BINARY +Type_BINARY_VIEW = _Type_BINARY_VIEW +Type_STRING_VIEW = _Type_STRING_VIEW +Type_LIST = _Type_LIST +Type_LARGE_LIST = _Type_LARGE_LIST +Type_LIST_VIEW = _Type_LIST_VIEW +Type_LARGE_LIST_VIEW = _Type_LARGE_LIST_VIEW +Type_MAP = _Type_MAP +Type_FIXED_SIZE_LIST = _Type_FIXED_SIZE_LIST +Type_STRUCT = _Type_STRUCT +Type_SPARSE_UNION = _Type_SPARSE_UNION +Type_DENSE_UNION = _Type_DENSE_UNION +Type_DICTIONARY = _Type_DICTIONARY +Type_RUN_END_ENCODED = _Type_RUN_END_ENCODED + +UnionMode_SPARSE = _UnionMode_SPARSE +UnionMode_DENSE = _UnionMode_DENSE + +__pc = None +__pac = None +__cuda_loaded = None + + +def _pc(): + global __pc + if __pc is None: + import pyarrow.compute as pc + __pc = pc + return __pc + + +def _pac(): + global __pac + if __pac is None: + import pyarrow.acero as pac + __pac = pac + return __pac + + +def _ensure_cuda_loaded(): + # Try importing the cuda module to ensure libarrow_cuda gets loaded + # to register the CUDA device for the C Data Interface import + global __cuda_loaded + if __cuda_loaded is None: + try: + import pyarrow.cuda # no-cython-lint + __cuda_loaded = True + except ImportError as exc: + __cuda_loaded = str(exc) + + if __cuda_loaded is not True: + raise ImportError( + "Trying to import data on a CUDA device, but PyArrow is not built with " + f"CUDA support.\n(importing 'pyarrow.cuda' resulted in \"{__cuda_loaded}\")." 
+ ) + + +def _gdb_test_session(): + GdbTestSession() + + +# Assorted compatibility helpers +include "compat.pxi" + +# Exception types and Status handling +include "error.pxi" + +# Configuration information +include "config.pxi" + +# pandas API shim +include "pandas-shim.pxi" + +# Memory pools and allocation +include "memory.pxi" + +# Device type and memory manager +include "device.pxi" + +# DataType, Field, Schema +include "types.pxi" + +# Array scalar values +include "scalar.pxi" + +# Array types +include "array.pxi" + +# Builders +include "builder.pxi" + +# Column, Table, Record Batch +include "table.pxi" + +# Tensors +include "tensor.pxi" + +# DLPack +include "_dlpack.pxi" + +# File IO +include "io.pxi" + +# IPC / Messaging +include "ipc.pxi" + +# Micro-benchmark routines +include "benchmark.pxi" + +# Public API +include "public-api.pxi" diff --git a/parrot/lib/python3.10/site-packages/pyarrow/libarrow_python_parquet_encryption.so b/parrot/lib/python3.10/site-packages/pyarrow/libarrow_python_parquet_encryption.so new file mode 100644 index 0000000000000000000000000000000000000000..33fd3da5d52814eefdec80d434fed9cd059e10b9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pyarrow/libarrow_python_parquet_encryption.so differ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/memory.pxi b/parrot/lib/python3.10/site-packages/pyarrow/memory.pxi new file mode 100644 index 0000000000000000000000000000000000000000..1ddcb01ccb6ab2ca84786e6e60a5f4c4ffbfc5bd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/memory.pxi @@ -0,0 +1,274 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ +# cython: embedsignature = True + + +cdef class MemoryPool(_Weakrefable): + """ + Base class for memory allocation. + + Besides tracking its number of allocated bytes, a memory pool also + takes care of the required 64-byte alignment for Arrow data. + """ + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, " + "use pyarrow.*_memory_pool instead." + .format(self.__class__.__name__)) + + cdef void init(self, CMemoryPool* pool): + self.pool = pool + + def release_unused(self): + """ + Attempt to return to the OS any memory being held onto by the pool. + + This function should not be called except potentially for + benchmarking or debugging as it could be expensive and detrimental to + performance. + + This is best effort and may not have any effect on some memory pools + or in some situations (e.g. fragmentation). + """ + cdef CMemoryPool* pool = c_get_memory_pool() + with nogil: + pool.ReleaseUnused() + + def bytes_allocated(self): + """ + Return the number of bytes that are currently allocated from this + memory pool. 
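+
+        Examples
+        --------
+        An illustrative check; the exact number depends on what the
+        process has already allocated:
+
+        >>> import pyarrow as pa
+        >>> pa.default_memory_pool().bytes_allocated() >= 0
+        True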
+ """ + return self.pool.bytes_allocated() + + def max_memory(self): + """ + Return the peak memory allocation in this memory pool. + This can be an approximate number in multi-threaded applications. + + None is returned if the pool implementation doesn't know how to + compute this number. + """ + ret = self.pool.max_memory() + return ret if ret >= 0 else None + + @property + def backend_name(self): + """ + The name of the backend used by this MemoryPool (e.g. "jemalloc"). + """ + return frombytes(self.pool.backend_name()) + + def __repr__(self): + name = f"pyarrow.{self.__class__.__name__}" + return (f"<{name} " + f"backend_name={self.backend_name} " + f"bytes_allocated={self.bytes_allocated()} " + f"max_memory={self.max_memory()}>") + +cdef CMemoryPool* maybe_unbox_memory_pool(MemoryPool memory_pool): + if memory_pool is None: + return c_get_memory_pool() + else: + return memory_pool.pool + + +cdef api object box_memory_pool(CMemoryPool *c_pool): + cdef MemoryPool pool = MemoryPool.__new__(MemoryPool) + pool.init(c_pool) + return pool + + +cdef class LoggingMemoryPool(MemoryPool): + cdef: + unique_ptr[CLoggingMemoryPool] logging_pool + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, " + "use pyarrow.logging_memory_pool instead." + .format(self.__class__.__name__)) + + +cdef class ProxyMemoryPool(MemoryPool): + """ + Memory pool implementation that tracks the number of bytes and + maximum memory allocated through its direct calls, while redirecting + to another memory pool. + """ + cdef: + unique_ptr[CProxyMemoryPool] proxy_pool + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, " + "use pyarrow.proxy_memory_pool instead." + .format(self.__class__.__name__)) + + +def default_memory_pool(): + """ + Return the process-global memory pool. + + Examples + -------- + >>> default_memory_pool() + + """ + cdef: + MemoryPool pool = MemoryPool.__new__(MemoryPool) + pool.init(c_get_memory_pool()) + return pool + + +def proxy_memory_pool(MemoryPool parent): + """ + Create and return a MemoryPool instance that redirects to the + *parent*, but with separate allocation statistics. + + Parameters + ---------- + parent : MemoryPool + The real memory pool that should be used for allocations. + """ + cdef ProxyMemoryPool out = ProxyMemoryPool.__new__(ProxyMemoryPool) + out.proxy_pool.reset(new CProxyMemoryPool(parent.pool)) + out.init(out.proxy_pool.get()) + return out + + +def logging_memory_pool(MemoryPool parent): + """ + Create and return a MemoryPool instance that redirects to the + *parent*, but also dumps allocation logs on stderr. + + Parameters + ---------- + parent : MemoryPool + The real memory pool that should be used for allocations. + """ + cdef LoggingMemoryPool out = LoggingMemoryPool.__new__( + LoggingMemoryPool, parent) + out.logging_pool.reset(new CLoggingMemoryPool(parent.pool)) + out.init(out.logging_pool.get()) + return out + + +def system_memory_pool(): + """ + Return a memory pool based on the C malloc heap. + """ + cdef: + MemoryPool pool = MemoryPool.__new__(MemoryPool) + pool.init(c_system_memory_pool()) + return pool + + +def jemalloc_memory_pool(): + """ + Return a memory pool based on the jemalloc heap. + + NotImplementedError is raised if jemalloc support is not enabled. 
+ """ + cdef: + CMemoryPool* c_pool + MemoryPool pool = MemoryPool.__new__(MemoryPool) + check_status(c_jemalloc_memory_pool(&c_pool)) + pool.init(c_pool) + return pool + + +def mimalloc_memory_pool(): + """ + Return a memory pool based on the mimalloc heap. + + NotImplementedError is raised if mimalloc support is not enabled. + """ + cdef: + CMemoryPool* c_pool + MemoryPool pool = MemoryPool.__new__(MemoryPool) + check_status(c_mimalloc_memory_pool(&c_pool)) + pool.init(c_pool) + return pool + + +def set_memory_pool(MemoryPool pool): + """ + Set the default memory pool. + + Parameters + ---------- + pool : MemoryPool + The memory pool that should be used by default. + """ + c_set_default_memory_pool(pool.pool) + + +cdef MemoryPool _default_memory_pool = default_memory_pool() +cdef LoggingMemoryPool _logging_memory_pool = logging_memory_pool( + _default_memory_pool) + + +def log_memory_allocations(enable=True): + """ + Enable or disable memory allocator logging for debugging purposes + + Parameters + ---------- + enable : bool, default True + Pass False to disable logging + """ + if enable: + set_memory_pool(_logging_memory_pool) + else: + set_memory_pool(_default_memory_pool) + + +def total_allocated_bytes(): + """ + Return the currently allocated bytes from the default memory pool. + Other memory pools may not be accounted for. + """ + cdef CMemoryPool* pool = c_get_memory_pool() + return pool.bytes_allocated() + + +def jemalloc_set_decay_ms(decay_ms): + """ + Set arenas.dirty_decay_ms and arenas.muzzy_decay_ms to indicated number of + milliseconds. A value of 0 (the default) results in dirty / muzzy memory + pages being released right away to the OS, while a higher value will result + in a time-based decay. See the jemalloc docs for more information + + It's best to set this at the start of your application. + + Parameters + ---------- + decay_ms : int + Number of milliseconds to set for jemalloc decay conf parameters. Note + that this change will only affect future memory arenas + """ + check_status(c_jemalloc_set_decay_ms(decay_ms)) + + +def supported_memory_backends(): + """ + Return a list of available memory pool backends + """ + cdef vector[c_string] backends = c_supported_memory_backends() + return [backend.decode() for backend in backends] diff --git a/parrot/lib/python3.10/site-packages/pyarrow/pandas-shim.pxi b/parrot/lib/python3.10/site-packages/pyarrow/pandas-shim.pxi new file mode 100644 index 0000000000000000000000000000000000000000..74f0d981b52f446c890b396b66d7b0a6e027db77 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/pandas-shim.pxi @@ -0,0 +1,261 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# pandas lazy-loading API shim that reduces API call and import overhead + +import warnings +from threading import Lock + + +cdef class _PandasAPIShim(object): + """ + Lazy pandas importer that isolates usages of pandas APIs and avoids + importing pandas until it's actually needed + """ + cdef: + bint _tried_importing_pandas + bint _have_pandas + + cdef readonly: + object _loose_version, _version + object _pd, _types_api, _compat_module + object _data_frame, _index, _series, _categorical_type + object _datetimetz_type, _extension_array, _extension_dtype + object _array_like_types, _is_extension_array_dtype, _lock + bint has_sparse + bint _pd024 + bint _is_v1, _is_ge_v21, _is_ge_v3 + + def __init__(self): + self._lock = Lock() + self._tried_importing_pandas = False + self._have_pandas = 0 + + cdef _import_pandas(self, bint raise_): + try: + import pandas as pd + import pyarrow.pandas_compat as pdcompat + except ImportError: + self._have_pandas = False + if raise_: + raise + else: + return + + from pyarrow.vendored.version import Version + + self._pd = pd + self._version = pd.__version__ + self._loose_version = Version(pd.__version__) + self._is_v1 = False + + if self._loose_version < Version('1.0.0'): + self._have_pandas = False + if raise_: + raise ImportError( + "pyarrow requires pandas 1.0.0 or above, pandas {} is " + "installed".format(self._version) + ) + else: + warnings.warn( + "pyarrow requires pandas 1.0.0 or above, pandas {} is " + "installed. Therefore, pandas-specific integration is not " + "used.".format(self._version), stacklevel=2) + return + + self._is_v1 = self._loose_version < Version('2.0.0') + self._is_ge_v21 = self._loose_version >= Version('2.1.0') + self._is_ge_v3 = self._loose_version >= Version('3.0.0.dev0') + + self._compat_module = pdcompat + self._data_frame = pd.DataFrame + self._index = pd.Index + self._categorical_type = pd.Categorical + self._series = pd.Series + self._extension_array = pd.api.extensions.ExtensionArray + self._array_like_types = ( + self._series, self._index, self._categorical_type, + self._extension_array) + self._extension_dtype = pd.api.extensions.ExtensionDtype + self._is_extension_array_dtype = ( + pd.api.types.is_extension_array_dtype) + self._types_api = pd.api.types + self._datetimetz_type = pd.api.types.DatetimeTZDtype + self._have_pandas = True + self.has_sparse = False + + cdef inline _check_import(self, bint raise_=True): + if not self._tried_importing_pandas: + with self._lock: + if not self._tried_importing_pandas: + try: + self._import_pandas(raise_) + finally: + self._tried_importing_pandas = True + return + + if not self._have_pandas and raise_: + self._import_pandas(raise_) + + def series(self, *args, **kwargs): + self._check_import() + return self._series(*args, **kwargs) + + def data_frame(self, *args, **kwargs): + self._check_import() + return self._data_frame(*args, **kwargs) + + cdef inline bint _have_pandas_internal(self): + if not self._tried_importing_pandas: + self._check_import(raise_=False) + return self._have_pandas + + @property + def have_pandas(self): + return self._have_pandas_internal() + + @property + def compat(self): + self._check_import() + return self._compat_module + + @property + def pd(self): + self._check_import() + return self._pd + + cpdef infer_dtype(self, obj): + self._check_import() + try: + return self._types_api.infer_dtype(obj, skipna=False) + except AttributeError: + return self._pd.lib.infer_dtype(obj) + + cpdef pandas_dtype(self, dtype): + self._check_import() + try: + return 
self._types_api.pandas_dtype(dtype) + except AttributeError: + return None + + @property + def loose_version(self): + self._check_import() + return self._loose_version + + @property + def version(self): + self._check_import() + return self._version + + def is_v1(self): + self._check_import() + return self._is_v1 + + def is_ge_v21(self): + self._check_import() + return self._is_ge_v21 + + def is_ge_v3(self): + self._check_import() + return self._is_ge_v3 + + @property + def categorical_type(self): + self._check_import() + return self._categorical_type + + @property + def datetimetz_type(self): + self._check_import() + return self._datetimetz_type + + @property + def extension_dtype(self): + self._check_import() + return self._extension_dtype + + cpdef is_array_like(self, obj): + self._check_import() + return isinstance(obj, self._array_like_types) + + cpdef is_categorical(self, obj): + if self._have_pandas_internal(): + return isinstance(obj, self._categorical_type) + else: + return False + + cpdef is_datetimetz(self, obj): + if self._have_pandas_internal(): + return isinstance(obj, self._datetimetz_type) + else: + return False + + cpdef is_extension_array_dtype(self, obj): + self._check_import() + if self._is_extension_array_dtype: + return self._is_extension_array_dtype(obj) + else: + return False + + cpdef is_sparse(self, obj): + if self._have_pandas_internal(): + return isinstance(obj.dtype, self.pd.SparseDtype) + else: + return False + + cpdef is_data_frame(self, obj): + if self._have_pandas_internal(): + return isinstance(obj, self._data_frame) + else: + return False + + cpdef is_series(self, obj): + if self._have_pandas_internal(): + return isinstance(obj, self._series) + else: + return False + + cpdef is_index(self, obj): + if self._have_pandas_internal(): + return isinstance(obj, self._index) + else: + return False + + cpdef get_values(self, obj): + """ + Get the underlying array values of a pandas Series or Index in the + format (np.ndarray or pandas ExtensionArray) as we need them. + + Assumes obj is a pandas Series or Index. + """ + self._check_import() + if isinstance(obj.dtype, (self.pd.api.types.IntervalDtype, + self.pd.api.types.PeriodDtype)): + return obj.array + return obj.values + + def get_rangeindex_attribute(self, level, name): + # public start/stop/step attributes added in pandas 0.25.0 + self._check_import() + if hasattr(level, name): + return getattr(level, name) + return getattr(level, '_' + name) + + +cdef _PandasAPIShim pandas_api = _PandasAPIShim() +_pandas_api = pandas_api diff --git a/parrot/lib/python3.10/site-packages/pyarrow/pandas_compat.py b/parrot/lib/python3.10/site-packages/pyarrow/pandas_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..fcccf564fc619a352a4fa8050a031ed1d54a6efe --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/pandas_compat.py @@ -0,0 +1,1207 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +import ast +from collections.abc import Sequence +from concurrent import futures +# import threading submodule upfront to avoid partially initialized +# module bug (ARROW-11983) +import concurrent.futures.thread # noqa +from copy import deepcopy +import decimal +from itertools import zip_longest +import json +import operator +import re +import warnings + +import numpy as np + +import pyarrow as pa +from pyarrow.lib import _pandas_api, frombytes, is_threading_enabled # noqa + + +_logical_type_map = {} + + +def get_logical_type_map(): + global _logical_type_map + + if not _logical_type_map: + _logical_type_map.update({ + pa.lib.Type_NA: 'empty', + pa.lib.Type_BOOL: 'bool', + pa.lib.Type_INT8: 'int8', + pa.lib.Type_INT16: 'int16', + pa.lib.Type_INT32: 'int32', + pa.lib.Type_INT64: 'int64', + pa.lib.Type_UINT8: 'uint8', + pa.lib.Type_UINT16: 'uint16', + pa.lib.Type_UINT32: 'uint32', + pa.lib.Type_UINT64: 'uint64', + pa.lib.Type_HALF_FLOAT: 'float16', + pa.lib.Type_FLOAT: 'float32', + pa.lib.Type_DOUBLE: 'float64', + pa.lib.Type_DATE32: 'date', + pa.lib.Type_DATE64: 'date', + pa.lib.Type_TIME32: 'time', + pa.lib.Type_TIME64: 'time', + pa.lib.Type_BINARY: 'bytes', + pa.lib.Type_FIXED_SIZE_BINARY: 'bytes', + pa.lib.Type_STRING: 'unicode', + }) + return _logical_type_map + + +def get_logical_type(arrow_type): + logical_type_map = get_logical_type_map() + + try: + return logical_type_map[arrow_type.id] + except KeyError: + if isinstance(arrow_type, pa.lib.DictionaryType): + return 'categorical' + elif isinstance(arrow_type, pa.lib.ListType): + return 'list[{}]'.format(get_logical_type(arrow_type.value_type)) + elif isinstance(arrow_type, pa.lib.TimestampType): + return 'datetimetz' if arrow_type.tz is not None else 'datetime' + elif isinstance(arrow_type, pa.lib.Decimal128Type): + return 'decimal' + return 'object' + + +_numpy_logical_type_map = { + np.bool_: 'bool', + np.int8: 'int8', + np.int16: 'int16', + np.int32: 'int32', + np.int64: 'int64', + np.uint8: 'uint8', + np.uint16: 'uint16', + np.uint32: 'uint32', + np.uint64: 'uint64', + np.float32: 'float32', + np.float64: 'float64', + 'datetime64[D]': 'date', + np.str_: 'string', + np.bytes_: 'bytes', +} + + +def get_logical_type_from_numpy(pandas_collection): + try: + return _numpy_logical_type_map[pandas_collection.dtype.type] + except KeyError: + if hasattr(pandas_collection.dtype, 'tz'): + return 'datetimetz' + # See https://github.com/pandas-dev/pandas/issues/24739 (infer_dtype will + # result in "datetime64" without unit, while pandas astype requires a unit) + if str(pandas_collection.dtype).startswith('datetime64'): + return str(pandas_collection.dtype) + result = _pandas_api.infer_dtype(pandas_collection) + if result == 'string': + return 'unicode' + return result + + +def get_extension_dtype_info(column): + dtype = column.dtype + if str(dtype) == 'category': + cats = getattr(column, 'cat', column) + assert cats is not None + metadata = { + 'num_categories': len(cats.categories), + 'ordered': cats.ordered, + } + physical_dtype = str(cats.codes.dtype) + elif hasattr(dtype, 'tz'): + metadata = {'timezone': 
pa.lib.tzinfo_to_string(dtype.tz)} + physical_dtype = 'datetime64[ns]' + else: + metadata = None + physical_dtype = str(dtype) + return physical_dtype, metadata + + +def get_column_metadata(column, name, arrow_type, field_name): + """Construct the metadata for a given column + + Parameters + ---------- + column : pandas.Series or pandas.Index + name : str + arrow_type : pyarrow.DataType + field_name : str + Equivalent to `name` when `column` is a `Series`, otherwise if `column` + is a pandas Index then `field_name` will not be the same as `name`. + This is the name of the field in the arrow Table's schema. + + Returns + ------- + dict + """ + logical_type = get_logical_type(arrow_type) + + string_dtype, extra_metadata = get_extension_dtype_info(column) + if logical_type == 'decimal': + extra_metadata = { + 'precision': arrow_type.precision, + 'scale': arrow_type.scale, + } + string_dtype = 'object' + + if name is not None and not isinstance(name, str): + raise TypeError( + 'Column name must be a string. Got column {} of type {}'.format( + name, type(name).__name__ + ) + ) + + assert field_name is None or isinstance(field_name, str), \ + str(type(field_name)) + return { + 'name': name, + 'field_name': 'None' if field_name is None else field_name, + 'pandas_type': logical_type, + 'numpy_type': string_dtype, + 'metadata': extra_metadata, + } + + +def construct_metadata(columns_to_convert, df, column_names, index_levels, + index_descriptors, preserve_index, types): + """Returns a dictionary containing enough metadata to reconstruct a pandas + DataFrame as an Arrow Table, including index columns. + + Parameters + ---------- + columns_to_convert : list[pd.Series] + df : pandas.DataFrame + index_levels : List[pd.Index] + index_descriptors : List[Dict] + preserve_index : bool + types : List[pyarrow.DataType] + + Returns + ------- + dict + """ + num_serialized_index_levels = len([descr for descr in index_descriptors + if not isinstance(descr, dict)]) + # Use ntypes instead of Python shorthand notation [:-len(x)] as [:-0] + # behaves differently to what we want. + ntypes = len(types) + df_types = types[:ntypes - num_serialized_index_levels] + index_types = types[ntypes - num_serialized_index_levels:] + + column_metadata = [] + for col, sanitized_name, arrow_type in zip(columns_to_convert, + column_names, df_types): + metadata = get_column_metadata(col, name=sanitized_name, + arrow_type=arrow_type, + field_name=sanitized_name) + column_metadata.append(metadata) + + index_column_metadata = [] + if preserve_index is not False: + non_str_index_names = [] + for level, arrow_type, descriptor in zip(index_levels, index_types, + index_descriptors): + if isinstance(descriptor, dict): + # The index is represented in a non-serialized fashion, + # e.g. 
RangeIndex + continue + + if level.name is not None and not isinstance(level.name, str): + non_str_index_names.append(level.name) + + metadata = get_column_metadata( + level, + name=_column_name_to_strings(level.name), + arrow_type=arrow_type, + field_name=descriptor, + ) + index_column_metadata.append(metadata) + + if len(non_str_index_names) > 0: + warnings.warn( + f"The DataFrame has non-str index name `{non_str_index_names}`" + " which will be converted to string" + " and not roundtrip correctly.", + UserWarning, stacklevel=4) + + column_indexes = [] + + levels = getattr(df.columns, 'levels', [df.columns]) + names = getattr(df.columns, 'names', [df.columns.name]) + for level, name in zip(levels, names): + metadata = _get_simple_index_descriptor(level, name) + column_indexes.append(metadata) + else: + index_descriptors = index_column_metadata = column_indexes = [] + + return { + b'pandas': json.dumps({ + 'index_columns': index_descriptors, + 'column_indexes': column_indexes, + 'columns': column_metadata + index_column_metadata, + 'creator': { + 'library': 'pyarrow', + 'version': pa.__version__ + }, + 'pandas_version': _pandas_api.version + }).encode('utf8') + } + + +def _get_simple_index_descriptor(level, name): + string_dtype, extra_metadata = get_extension_dtype_info(level) + pandas_type = get_logical_type_from_numpy(level) + if 'mixed' in pandas_type: + warnings.warn( + "The DataFrame has column names of mixed type. They will be " + "converted to strings and not roundtrip correctly.", + UserWarning, stacklevel=4) + if pandas_type == 'unicode': + assert not extra_metadata + extra_metadata = {'encoding': 'UTF-8'} + return { + 'name': name, + 'field_name': name, + 'pandas_type': pandas_type, + 'numpy_type': string_dtype, + 'metadata': extra_metadata, + } + + +def _column_name_to_strings(name): + """Convert a column name (or level) to either a string or a recursive + collection of strings. + + Parameters + ---------- + name : str or tuple + + Returns + ------- + value : str or tuple + + Examples + -------- + >>> name = 'foo' + >>> _column_name_to_strings(name) + 'foo' + >>> name = ('foo', 'bar') + >>> _column_name_to_strings(name) + "('foo', 'bar')" + >>> import pandas as pd + >>> name = (1, pd.Timestamp('2017-02-01 00:00:00')) + >>> _column_name_to_strings(name) + "('1', '2017-02-01 00:00:00')" + """ + if isinstance(name, str): + return name + elif isinstance(name, bytes): + # XXX: should we assume that bytes in Python 3 are UTF-8? + return name.decode('utf8') + elif isinstance(name, tuple): + return str(tuple(map(_column_name_to_strings, name))) + elif isinstance(name, Sequence): + raise TypeError("Unsupported type for MultiIndex level") + elif name is None: + return None + return str(name) + + +def _index_level_name(index, i, column_names): + """Return the name of an index level or a default name if `index.name` is + None or is already a column name. 
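+
+    For example, an unnamed index at position 0 falls back to
+    ``'__index_level_0__'``.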
+ + Parameters + ---------- + index : pandas.Index + i : int + + Returns + ------- + name : str + """ + if index.name is not None and index.name not in column_names: + return _column_name_to_strings(index.name) + else: + return '__index_level_{:d}__'.format(i) + + +def _get_columns_to_convert(df, schema, preserve_index, columns): + columns = _resolve_columns_of_interest(df, schema, columns) + + if not df.columns.is_unique: + raise ValueError( + 'Duplicate column names found: {}'.format(list(df.columns)) + ) + + if schema is not None: + return _get_columns_to_convert_given_schema(df, schema, preserve_index) + + column_names = [] + + index_levels = ( + _get_index_level_values(df.index) if preserve_index is not False + else [] + ) + + columns_to_convert = [] + convert_fields = [] + + for name in columns: + col = df[name] + name = _column_name_to_strings(name) + + if _pandas_api.is_sparse(col): + raise TypeError( + "Sparse pandas data (column {}) not supported.".format(name)) + + columns_to_convert.append(col) + convert_fields.append(None) + column_names.append(name) + + index_descriptors = [] + index_column_names = [] + for i, index_level in enumerate(index_levels): + name = _index_level_name(index_level, i, column_names) + if (isinstance(index_level, _pandas_api.pd.RangeIndex) and + preserve_index is None): + descr = _get_range_index_descriptor(index_level) + else: + columns_to_convert.append(index_level) + convert_fields.append(None) + descr = name + index_column_names.append(name) + index_descriptors.append(descr) + + all_names = column_names + index_column_names + + # all_names : all of the columns in the resulting table including the data + # columns and serialized index columns + # column_names : the names of the data columns + # index_column_names : the names of the serialized index columns + # index_descriptors : descriptions of each index to be used for + # reconstruction + # index_levels : the extracted index level values + # columns_to_convert : assembled raw data (both data columns and indexes) + # to be converted to Arrow format + # columns_fields : specified column to use for coercion / casting + # during serialization, if a Schema was provided + return (all_names, column_names, index_column_names, index_descriptors, + index_levels, columns_to_convert, convert_fields) + + +def _get_columns_to_convert_given_schema(df, schema, preserve_index): + """ + Specialized version of _get_columns_to_convert in case a Schema is + specified. + In that case, the Schema is used as the single point of truth for the + table structure (types, which columns are included, order of columns, ...). 
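+
+    For example, a schema listing only ``['b', 'a']`` selects those two
+    columns (or index levels) from the DataFrame, in that order; a name in
+    the schema that matches neither a column nor an index level raises
+    KeyError.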
+ """ + column_names = [] + columns_to_convert = [] + convert_fields = [] + index_descriptors = [] + index_column_names = [] + index_levels = [] + + for name in schema.names: + try: + col = df[name] + is_index = False + except KeyError: + try: + col = _get_index_level(df, name) + except (KeyError, IndexError): + # name not found as index level + raise KeyError( + "name '{}' present in the specified schema is not found " + "in the columns or index".format(name)) + if preserve_index is False: + raise ValueError( + "name '{}' present in the specified schema corresponds " + "to the index, but 'preserve_index=False' was " + "specified".format(name)) + elif (preserve_index is None and + isinstance(col, _pandas_api.pd.RangeIndex)): + raise ValueError( + "name '{}' is present in the schema, but it is a " + "RangeIndex which will not be converted as a column " + "in the Table, but saved as metadata-only not in " + "columns. Specify 'preserve_index=True' to force it " + "being added as a column, or remove it from the " + "specified schema".format(name)) + is_index = True + + name = _column_name_to_strings(name) + + if _pandas_api.is_sparse(col): + raise TypeError( + "Sparse pandas data (column {}) not supported.".format(name)) + + field = schema.field(name) + columns_to_convert.append(col) + convert_fields.append(field) + column_names.append(name) + + if is_index: + index_column_names.append(name) + index_descriptors.append(name) + index_levels.append(col) + + all_names = column_names + index_column_names + + return (all_names, column_names, index_column_names, index_descriptors, + index_levels, columns_to_convert, convert_fields) + + +def _get_index_level(df, name): + """ + Get the index level of a DataFrame given 'name' (column name in an arrow + Schema). + """ + key = name + if name not in df.index.names and _is_generated_index_name(name): + # we know we have an autogenerated name => extract number and get + # the index level positionally + key = int(name[len("__index_level_"):-2]) + return df.index.get_level_values(key) + + +def _level_name(name): + # preserve type when default serializable, otherwise str it + try: + json.dumps(name) + return name + except TypeError: + return str(name) + + +def _get_range_index_descriptor(level): + # public start/stop/step attributes added in pandas 0.25.0 + return { + 'kind': 'range', + 'name': _level_name(level.name), + 'start': _pandas_api.get_rangeindex_attribute(level, 'start'), + 'stop': _pandas_api.get_rangeindex_attribute(level, 'stop'), + 'step': _pandas_api.get_rangeindex_attribute(level, 'step') + } + + +def _get_index_level_values(index): + n = len(getattr(index, 'levels', [index])) + return [index.get_level_values(i) for i in range(n)] + + +def _resolve_columns_of_interest(df, schema, columns): + if schema is not None and columns is not None: + raise ValueError('Schema and columns arguments are mutually ' + 'exclusive, pass only one of them') + elif schema is not None: + columns = schema.names + elif columns is not None: + columns = [c for c in columns if c in df.columns] + else: + columns = df.columns + + return columns + + +def dataframe_to_types(df, preserve_index, columns=None): + (all_names, + column_names, + _, + index_descriptors, + index_columns, + columns_to_convert, + _) = _get_columns_to_convert(df, None, preserve_index, columns) + + types = [] + # If pandas knows type, skip conversion + for c in columns_to_convert: + values = c.values + if _pandas_api.is_categorical(values): + type_ = pa.array(c, from_pandas=True).type + elif 
_pandas_api.is_extension_array_dtype(values): + empty = c.head(0) if isinstance( + c, _pandas_api.pd.Series) else c[:0] + type_ = pa.array(empty, from_pandas=True).type + else: + values, type_ = get_datetimetz_type(values, c.dtype, None) + type_ = pa.lib._ndarray_to_arrow_type(values, type_) + if type_ is None: + type_ = pa.array(c, from_pandas=True).type + types.append(type_) + + metadata = construct_metadata( + columns_to_convert, df, column_names, index_columns, + index_descriptors, preserve_index, types + ) + + return all_names, types, metadata + + +def dataframe_to_arrays(df, schema, preserve_index, nthreads=1, columns=None, + safe=True): + (all_names, + column_names, + index_column_names, + index_descriptors, + index_columns, + columns_to_convert, + convert_fields) = _get_columns_to_convert(df, schema, preserve_index, + columns) + + # NOTE(wesm): If nthreads=None, then we use a heuristic to decide whether + # using a thread pool is worth it. Currently the heuristic is whether the + # nrows > 100 * ncols and ncols > 1. + if nthreads is None: + nrows, ncols = len(df), len(df.columns) + if nrows > ncols * 100 and ncols > 1: + nthreads = pa.cpu_count() + else: + nthreads = 1 + # if we don't have threading in libarrow, don't use threading here either + if not is_threading_enabled(): + nthreads = 1 + + def convert_column(col, field): + if field is None: + field_nullable = True + type_ = None + else: + field_nullable = field.nullable + type_ = field.type + + try: + result = pa.array(col, type=type_, from_pandas=True, safe=safe) + except (pa.ArrowInvalid, + pa.ArrowNotImplementedError, + pa.ArrowTypeError) as e: + e.args += ("Conversion failed for column {!s} with type {!s}" + .format(col.name, col.dtype),) + raise e + if not field_nullable and result.null_count > 0: + raise ValueError("Field {} was non-nullable but pandas column " + "had {} null values".format(str(field), + result.null_count)) + return result + + def _can_definitely_zero_copy(arr): + return (isinstance(arr, np.ndarray) and + arr.flags.contiguous and + issubclass(arr.dtype.type, np.integer)) + + if nthreads == 1: + arrays = [convert_column(c, f) + for c, f in zip(columns_to_convert, convert_fields)] + else: + arrays = [] + with futures.ThreadPoolExecutor(nthreads) as executor: + for c, f in zip(columns_to_convert, convert_fields): + if _can_definitely_zero_copy(c.values): + arrays.append(convert_column(c, f)) + else: + arrays.append(executor.submit(convert_column, c, f)) + + for i, maybe_fut in enumerate(arrays): + if isinstance(maybe_fut, futures.Future): + arrays[i] = maybe_fut.result() + + types = [x.type for x in arrays] + + if schema is None: + fields = [] + for name, type_ in zip(all_names, types): + name = name if name is not None else 'None' + fields.append(pa.field(name, type_)) + schema = pa.schema(fields) + + pandas_metadata = construct_metadata( + columns_to_convert, df, column_names, index_columns, + index_descriptors, preserve_index, types + ) + metadata = deepcopy(schema.metadata) if schema.metadata else dict() + metadata.update(pandas_metadata) + schema = schema.with_metadata(metadata) + + # If dataframe is empty but with RangeIndex -> + # remember the length of the indexes + n_rows = None + if len(arrays) == 0: + try: + kind = index_descriptors[0]["kind"] + if kind == "range": + start = index_descriptors[0]["start"] + stop = index_descriptors[0]["stop"] + step = index_descriptors[0]["step"] + n_rows = len(range(start, stop, step)) + except IndexError: + pass + + return arrays, schema, n_rows + + +def 
get_datetimetz_type(values, dtype, type_): + if values.dtype.type != np.datetime64: + return values, type_ + + if _pandas_api.is_datetimetz(dtype) and type_ is None: + # If no user type passed, construct a tz-aware timestamp type + tz = dtype.tz + unit = dtype.unit + type_ = pa.timestamp(unit, tz) + elif type_ is None: + # Trust the NumPy dtype + type_ = pa.from_numpy_dtype(values.dtype) + + return values, type_ + +# ---------------------------------------------------------------------- +# Converting pyarrow.Table efficiently to pandas.DataFrame + + +def _reconstruct_block(item, columns=None, extension_columns=None, return_block=True): + """ + Construct a pandas Block from the `item` dictionary coming from pyarrow's + serialization or returned by arrow::python::ConvertTableToPandas. + + This function takes care of converting dictionary types to pandas + categorical, Timestamp-with-timezones to the proper pandas Block, and + conversion to pandas ExtensionBlock + + Parameters + ---------- + item : dict + For basic types, this is a dictionary in the form of + {'block': np.ndarray of values, 'placement': pandas block placement}. + Additional keys are present for other types (dictionary, timezone, + object). + columns : + Column names of the table being constructed, used for extension types + extension_columns : dict + Dictionary of {column_name: pandas_dtype} that includes all columns + and corresponding dtypes that will be converted to a pandas + ExtensionBlock. + + Returns + ------- + pandas Block + + """ + import pandas.core.internals as _int + + block_arr = item.get('block', None) + placement = item['placement'] + if 'dictionary' in item: + arr = _pandas_api.categorical_type.from_codes( + block_arr, categories=item['dictionary'], + ordered=item['ordered']) + elif 'timezone' in item: + unit, _ = np.datetime_data(block_arr.dtype) + dtype = make_datetimetz(unit, item['timezone']) + if _pandas_api.is_ge_v21(): + arr = _pandas_api.pd.array( + block_arr.view("int64"), dtype=dtype, copy=False + ) + else: + arr = block_arr + if return_block: + block = _int.make_block(block_arr, placement=placement, + klass=_int.DatetimeTZBlock, + dtype=dtype) + return block + elif 'py_array' in item: + # create ExtensionBlock + arr = item['py_array'] + assert len(placement) == 1 + name = columns[placement[0]] + pandas_dtype = extension_columns[name] + if not hasattr(pandas_dtype, '__from_arrow__'): + raise ValueError("This column does not support to be converted " + "to a pandas ExtensionArray") + arr = pandas_dtype.__from_arrow__(arr) + else: + arr = block_arr + + if return_block: + return _int.make_block(arr, placement=placement) + else: + return arr, placement + + +def make_datetimetz(unit, tz): + if _pandas_api.is_v1(): + unit = 'ns' # ARROW-3789: Coerce date/timestamp types to datetime64[ns] + tz = pa.lib.string_to_tzinfo(tz) + return _pandas_api.datetimetz_type(unit, tz=tz) + + +def table_to_dataframe( + options, table, categories=None, ignore_metadata=False, types_mapper=None +): + all_columns = [] + column_indexes = [] + pandas_metadata = table.schema.pandas_metadata + + if not ignore_metadata and pandas_metadata is not None: + all_columns = pandas_metadata['columns'] + column_indexes = pandas_metadata.get('column_indexes', []) + index_descriptors = pandas_metadata['index_columns'] + table = _add_any_metadata(table, pandas_metadata) + table, index = _reconstruct_index(table, index_descriptors, + all_columns, types_mapper) + ext_columns_dtypes = _get_extension_dtypes( + table, all_columns, types_mapper) + 
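+    # When there is no usable pandas metadata, fall back to a plain RangeIndex
+    # and infer extension dtypes from the Arrow schema / types_mapper alone.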
else: + index = _pandas_api.pd.RangeIndex(table.num_rows) + ext_columns_dtypes = _get_extension_dtypes(table, [], types_mapper) + + _check_data_column_metadata_consistency(all_columns) + columns = _deserialize_column_index(table, all_columns, column_indexes) + + column_names = table.column_names + result = pa.lib.table_to_blocks(options, table, categories, + list(ext_columns_dtypes.keys())) + if _pandas_api.is_ge_v3(): + from pandas.api.internals import create_dataframe_from_blocks + + blocks = [ + _reconstruct_block( + item, column_names, ext_columns_dtypes, return_block=False) + for item in result + ] + df = create_dataframe_from_blocks(blocks, index=index, columns=columns) + return df + else: + from pandas.core.internals import BlockManager + from pandas import DataFrame + + blocks = [ + _reconstruct_block(item, column_names, ext_columns_dtypes) + for item in result + ] + axes = [columns, index] + mgr = BlockManager(blocks, axes) + if _pandas_api.is_ge_v21(): + df = DataFrame._from_mgr(mgr, mgr.axes) + else: + df = DataFrame(mgr) + return df + + +# Set of the string repr of all numpy dtypes that can be stored in a pandas +# dataframe (complex not included since not supported by Arrow) +_pandas_supported_numpy_types = { + "int8", "int16", "int32", "int64", + "uint8", "uint16", "uint32", "uint64", + "float16", "float32", "float64", + "object", "bool" +} + + +def _get_extension_dtypes(table, columns_metadata, types_mapper=None): + """ + Based on the stored column pandas metadata and the extension types + in the arrow schema, infer which columns should be converted to a + pandas extension dtype. + + The 'numpy_type' field in the column metadata stores the string + representation of the original pandas dtype (and, despite its name, + not the 'pandas_type' field). + Based on this string representation, a pandas/numpy dtype is constructed + and then we can check if this dtype supports conversion from arrow. 
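+
+    Extension dtypes are also inferred from pyarrow extension types present in
+    the schema, and entries returned by an explicit `types_mapper` take
+    precedence over both sources.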
+ + """ + ext_columns = {} + + # older pandas version that does not yet support extension dtypes + if _pandas_api.extension_dtype is None: + return ext_columns + + # infer the extension columns from the pandas metadata + for col_meta in columns_metadata: + try: + name = col_meta['field_name'] + except KeyError: + name = col_meta['name'] + dtype = col_meta['numpy_type'] + + if dtype not in _pandas_supported_numpy_types: + # pandas_dtype is expensive, so avoid doing this for types + # that are certainly numpy dtypes + pandas_dtype = _pandas_api.pandas_dtype(dtype) + if isinstance(pandas_dtype, _pandas_api.extension_dtype): + if hasattr(pandas_dtype, "__from_arrow__"): + ext_columns[name] = pandas_dtype + + # infer from extension type in the schema + for field in table.schema: + typ = field.type + if isinstance(typ, pa.BaseExtensionType): + try: + pandas_dtype = typ.to_pandas_dtype() + except NotImplementedError: + pass + else: + ext_columns[field.name] = pandas_dtype + + # use the specified mapping of built-in arrow types to pandas dtypes + if types_mapper: + for field in table.schema: + typ = field.type + pandas_dtype = types_mapper(typ) + if pandas_dtype is not None: + ext_columns[field.name] = pandas_dtype + + return ext_columns + + +def _check_data_column_metadata_consistency(all_columns): + # It can never be the case in a released version of pyarrow that + # c['name'] is None *and* 'field_name' is not a key in the column metadata, + # because the change to allow c['name'] to be None and the change to add + # 'field_name' are in the same release (0.8.0) + assert all( + (c['name'] is None and 'field_name' in c) or c['name'] is not None + for c in all_columns + ) + + +def _deserialize_column_index(block_table, all_columns, column_indexes): + if all_columns: + columns_name_dict = { + c.get('field_name', _column_name_to_strings(c['name'])): c['name'] + for c in all_columns + } + columns_values = [ + columns_name_dict.get(name, name) for name in block_table.column_names + ] + else: + columns_values = block_table.column_names + + # Construct the base index + if len(column_indexes) > 1: + # If we're passed multiple column indexes then evaluate with + # ast.literal_eval, since the column index values show up as a list of + # tuples + columns = _pandas_api.pd.MultiIndex.from_tuples( + list(map(ast.literal_eval, columns_values)), + names=[col_index['name'] for col_index in column_indexes], + ) + else: + columns = _pandas_api.pd.Index( + columns_values, name=column_indexes[0]["name"] if column_indexes else None + ) + + # if we're reconstructing the index + if len(column_indexes) > 0: + columns = _reconstruct_columns_from_metadata(columns, column_indexes) + + return columns + + +def _reconstruct_index(table, index_descriptors, all_columns, types_mapper=None): + # 0. 'field_name' is the name of the column in the arrow Table + # 1. 'name' is the user-facing name of the column, that is, it came from + # pandas + # 2. 'field_name' and 'name' differ for index columns + # 3. 
We fall back on c['name'] for backwards compatibility + field_name_to_metadata = { + c.get('field_name', c['name']): c + for c in all_columns + } + + # Build up a list of index columns and names while removing those columns + # from the original table + index_arrays = [] + index_names = [] + result_table = table + for descr in index_descriptors: + if isinstance(descr, str): + result_table, index_level, index_name = _extract_index_level( + table, result_table, descr, field_name_to_metadata, types_mapper) + if index_level is None: + # ARROW-1883: the serialized index column was not found + continue + elif descr['kind'] == 'range': + index_name = descr['name'] + index_level = _pandas_api.pd.RangeIndex(descr['start'], + descr['stop'], + step=descr['step'], + name=index_name) + if len(index_level) != len(table): + # Possibly the result of munged metadata + continue + else: + raise ValueError("Unrecognized index kind: {}" + .format(descr['kind'])) + index_arrays.append(index_level) + index_names.append(index_name) + + pd = _pandas_api.pd + + # Reconstruct the row index + if len(index_arrays) > 1: + index = pd.MultiIndex.from_arrays(index_arrays, names=index_names) + elif len(index_arrays) == 1: + index = index_arrays[0] + if not isinstance(index, pd.Index): + # Box anything that wasn't boxed above + index = pd.Index(index, name=index_names[0]) + else: + index = pd.RangeIndex(table.num_rows) + + return result_table, index + + +def _extract_index_level(table, result_table, field_name, + field_name_to_metadata, types_mapper=None): + logical_name = field_name_to_metadata[field_name]['name'] + index_name = _backwards_compatible_index_name(field_name, logical_name) + i = table.schema.get_field_index(field_name) + + if i == -1: + # The serialized index column was removed by the user + return result_table, None, None + + col = table.column(i) + index_level = col.to_pandas(types_mapper=types_mapper) + index_level.name = None + result_table = result_table.remove_column( + result_table.schema.get_field_index(field_name) + ) + return result_table, index_level, index_name + + +def _backwards_compatible_index_name(raw_name, logical_name): + """Compute the name of an index column that is compatible with older + versions of :mod:`pyarrow`. + + Parameters + ---------- + raw_name : str + logical_name : str + + Returns + ------- + result : str + + Notes + ----- + * Part of :func:`~pyarrow.pandas_compat.table_to_blockmanager` + """ + # Part of table_to_blockmanager + if raw_name == logical_name and _is_generated_index_name(raw_name): + return None + else: + return logical_name + + +def _is_generated_index_name(name): + pattern = r'^__index_level_\d+__$' + return re.match(pattern, name) is not None + + +_pandas_logical_type_map = { + 'date': 'datetime64[D]', + 'datetime': 'datetime64[ns]', + 'datetimetz': 'datetime64[ns]', + 'unicode': np.str_, + 'bytes': np.bytes_, + 'string': np.str_, + 'integer': np.int64, + 'floating': np.float64, + 'decimal': np.object_, + 'empty': np.object_, +} + + +def _pandas_type_to_numpy_type(pandas_type): + """Get the numpy dtype that corresponds to a pandas type. + + Parameters + ---------- + pandas_type : str + The result of a call to pandas.lib.infer_dtype. + + Returns + ------- + dtype : np.dtype + The dtype that corresponds to `pandas_type`. 
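+        For example, 'datetimetz' maps to datetime64[ns] and any 'mixed*'
+        inferred type falls back to numpy object dtype.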
+ """ + try: + return _pandas_logical_type_map[pandas_type] + except KeyError: + if 'mixed' in pandas_type: + # catching 'mixed', 'mixed-integer' and 'mixed-integer-float' + return np.object_ + return np.dtype(pandas_type) + + +def _reconstruct_columns_from_metadata(columns, column_indexes): + """Construct a pandas MultiIndex from `columns` and column index metadata + in `column_indexes`. + + Parameters + ---------- + columns : List[pd.Index] + The columns coming from a pyarrow.Table + column_indexes : List[Dict[str, str]] + The column index metadata deserialized from the JSON schema metadata + in a :class:`~pyarrow.Table`. + + Returns + ------- + result : MultiIndex + The index reconstructed using `column_indexes` metadata with levels of + the correct type. + + Notes + ----- + * Part of :func:`~pyarrow.pandas_compat.table_to_blockmanager` + """ + pd = _pandas_api.pd + # Get levels and labels, and provide sane defaults if the index has a + # single level to avoid if/else spaghetti. + levels = getattr(columns, 'levels', None) or [columns] + labels = getattr(columns, 'codes', None) or [None] + + # Convert each level to the dtype provided in the metadata + levels_dtypes = [ + (level, col_index.get('pandas_type', str(level.dtype)), + col_index.get('numpy_type', None)) + for level, col_index in zip_longest( + levels, column_indexes, fillvalue={} + ) + ] + + new_levels = [] + encoder = operator.methodcaller('encode', 'UTF-8') + + for level, pandas_dtype, numpy_dtype in levels_dtypes: + dtype = _pandas_type_to_numpy_type(pandas_dtype) + # Since our metadata is UTF-8 encoded, Python turns things that were + # bytes into unicode strings when json.loads-ing them. We need to + # convert them back to bytes to preserve metadata. + if dtype == np.bytes_: + level = level.map(encoder) + # ARROW-13756: if index is timezone aware DataTimeIndex + if pandas_dtype == "datetimetz": + tz = pa.lib.string_to_tzinfo( + column_indexes[0]['metadata']['timezone']) + level = pd.to_datetime(level, utc=True).tz_convert(tz) + if _pandas_api.is_ge_v3(): + # with pandas 3+, to_datetime returns a unit depending on the string + # data, so we restore it to the original unit from the metadata + level = level.as_unit(np.datetime_data(dtype)[0]) + # GH-41503: if the column index was decimal, restore to decimal + elif pandas_dtype == "decimal": + level = _pandas_api.pd.Index([decimal.Decimal(i) for i in level]) + elif level.dtype != dtype: + level = level.astype(dtype) + # ARROW-9096: if original DataFrame was upcast we keep that + if level.dtype != numpy_dtype and pandas_dtype != "datetimetz": + level = level.astype(numpy_dtype) + + new_levels.append(level) + + if len(new_levels) > 1: + return pd.MultiIndex(new_levels, labels, names=columns.names) + else: + return pd.Index(new_levels[0], dtype=new_levels[0].dtype, name=columns.name) + + +def _add_any_metadata(table, pandas_metadata): + modified_columns = {} + modified_fields = {} + + schema = table.schema + + index_columns = pandas_metadata['index_columns'] + # only take index columns into account if they are an actual table column + index_columns = [idx_col for idx_col in index_columns + if isinstance(idx_col, str)] + n_index_levels = len(index_columns) + n_columns = len(pandas_metadata['columns']) - n_index_levels + + # Add time zones + for i, col_meta in enumerate(pandas_metadata['columns']): + + raw_name = col_meta.get('field_name') + if not raw_name: + # deal with metadata written with arrow < 0.8 or fastparquet + raw_name = col_meta['name'] + if i >= n_columns: + # index 
columns + raw_name = index_columns[i - n_columns] + if raw_name is None: + raw_name = 'None' + + idx = schema.get_field_index(raw_name) + if idx != -1: + if col_meta['pandas_type'] == 'datetimetz': + col = table[idx] + if not isinstance(col.type, pa.lib.TimestampType): + continue + metadata = col_meta['metadata'] + if not metadata: + continue + metadata_tz = metadata.get('timezone') + if metadata_tz and metadata_tz != col.type.tz: + converted = col.to_pandas() + tz_aware_type = pa.timestamp('ns', tz=metadata_tz) + with_metadata = pa.Array.from_pandas(converted, + type=tz_aware_type) + + modified_fields[idx] = pa.field(schema[idx].name, + tz_aware_type) + modified_columns[idx] = with_metadata + + if len(modified_columns) > 0: + columns = [] + fields = [] + for i in range(len(table.schema)): + if i in modified_columns: + columns.append(modified_columns[i]) + fields.append(modified_fields[i]) + else: + columns.append(table[i]) + fields.append(table.schema[i]) + return pa.Table.from_arrays(columns, schema=pa.schema(fields)) + else: + return table + + +# ---------------------------------------------------------------------- +# Helper functions used in lib + + +def make_tz_aware(series, tz): + """ + Make a datetime64 Series timezone-aware for the given tz + """ + tz = pa.lib.string_to_tzinfo(tz) + series = (series.dt.tz_localize('utc') + .dt.tz_convert(tz)) + return series diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/init.cc b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/init.cc new file mode 100644 index 0000000000000000000000000000000000000000..dba293bbe23668d1db7b749f8a274db4008efce1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/init.cc @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Trigger the array import (inversion of NO_IMPORT_ARRAY) +#define NUMPY_IMPORT_ARRAY + +#include "arrow/python/init.h" +#include "arrow/python/numpy_interop.h" + +int arrow_init_numpy() { return arrow::py::import_numpy(); } diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/parquet_encryption.h b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/parquet_encryption.h new file mode 100644 index 0000000000000000000000000000000000000000..a1aaa30e260f5042c98f96bf081b4a49245ea656 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/parquet_encryption.h @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/python/common.h" +#include "arrow/python/visibility.h" +#include "arrow/util/macros.h" +#include "parquet/encryption/crypto_factory.h" +#include "parquet/encryption/kms_client.h" +#include "parquet/encryption/kms_client_factory.h" + +#if defined(_WIN32) || defined(__CYGWIN__) // Windows +#if defined(_MSC_VER) +#pragma warning(disable : 4251) +#else +#pragma GCC diagnostic ignored "-Wattributes" +#endif + +#ifdef ARROW_PYTHON_STATIC +#define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT +#elif defined(ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORTING) +#define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __declspec(dllexport) +#else +#define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __declspec(dllimport) +#endif + +#else // Not Windows +#ifndef ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT +#define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __attribute__((visibility("default"))) +#endif +#endif // Non-Windows + +namespace arrow { +namespace py { +namespace parquet { +namespace encryption { + +/// \brief A table of function pointers for calling from C++ into +/// Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientVtable { + public: + std::function + wrap_key; + std::function + unwrap_key; +}; + +/// \brief A helper for KmsClient implementation in Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClient + : public ::parquet::encryption::KmsClient { + public: + PyKmsClient(PyObject* handler, PyKmsClientVtable vtable); + ~PyKmsClient() override; + + std::string WrapKey(const std::string& key_bytes, + const std::string& master_key_identifier) override; + + std::string UnwrapKey(const std::string& wrapped_key, + const std::string& master_key_identifier) override; + + private: + OwnedRefNoGIL handler_; + PyKmsClientVtable vtable_; +}; + +/// \brief A table of function pointers for calling from C++ into +/// Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientFactoryVtable { + public: + std::function* out)> + create_kms_client; +}; + +/// \brief A helper for KmsClientFactory implementation in Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientFactory + : public ::parquet::encryption::KmsClientFactory { + public: + PyKmsClientFactory(PyObject* handler, PyKmsClientFactoryVtable vtable); + ~PyKmsClientFactory() override; + + std::shared_ptr<::parquet::encryption::KmsClient> CreateKmsClient( + const ::parquet::encryption::KmsConnectionConfig& kms_connection_config) override; + + private: + OwnedRefNoGIL handler_; + PyKmsClientFactoryVtable vtable_; +}; + +/// \brief A CryptoFactory that returns Results instead of throwing exceptions. 
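+/// The Safe* methods below wrap the corresponding CryptoFactory accessors and
+/// report failures through arrow::Result instead of C++ exceptions.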
+class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyCryptoFactory + : public ::parquet::encryption::CryptoFactory { + public: + arrow::Result> + SafeGetFileEncryptionProperties( + const ::parquet::encryption::KmsConnectionConfig& kms_connection_config, + const ::parquet::encryption::EncryptionConfiguration& encryption_config); + + /// The returned FileDecryptionProperties object will use the cache inside this + /// CryptoFactory object, so please keep this + /// CryptoFactory object alive along with the returned + /// FileDecryptionProperties object. + arrow::Result> + SafeGetFileDecryptionProperties( + const ::parquet::encryption::KmsConnectionConfig& kms_connection_config, + const ::parquet::encryption::DecryptionConfiguration& decryption_config); +}; + +} // namespace encryption +} // namespace parquet +} // namespace py +} // namespace arrow diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/serialize.h b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/serialize.h new file mode 100644 index 0000000000000000000000000000000000000000..fd207d3e069039351793f3252c5a8eb5d9009cdb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/serialize.h @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/ipc/options.h" +#include "arrow/python/visibility.h" +#include "arrow/sparse_tensor.h" +#include "arrow/status.h" + +// Forward declaring PyObject, see +// https://mail.python.org/pipermail/python-dev/2003-August/037601.html +#ifndef PyObject_HEAD +struct _object; +typedef _object PyObject; +#endif + +namespace arrow { + +class Buffer; +class DataType; +class MemoryPool; +class RecordBatch; +class Tensor; + +namespace io { + +class OutputStream; + +} // namespace io + +namespace py { + +struct ARROW_PYTHON_EXPORT SerializedPyObject { + std::shared_ptr batch; + std::vector> tensors; + std::vector> sparse_tensors; + std::vector> ndarrays; + std::vector> buffers; + ipc::IpcWriteOptions ipc_options; + + SerializedPyObject(); + + /// \brief Write serialized Python object to OutputStream + /// \param[in,out] dst an OutputStream + /// \return Status + Status WriteTo(io::OutputStream* dst); + + /// \brief Convert SerializedPyObject to a dict containing the message + /// components as Buffer instances with minimal memory allocation + /// + /// { + /// 'num_tensors': M, + /// 'num_sparse_tensors': N, + /// 'num_buffers': K, + /// 'data': [Buffer] + /// } + /// + /// Each tensor is written as two buffers, one for the metadata and one for + /// the body. 
Therefore, the number of buffers in 'data' is 2 * M + 2 * N + K + 1, + /// with the first buffer containing the serialized record batch containing + /// the UnionArray that describes the whole object + Status GetComponents(MemoryPool* pool, PyObject** out); +}; + +/// \brief Serialize Python sequence as a SerializedPyObject. +/// \param[in] context Serialization context which contains custom serialization +/// and deserialization callbacks. Can be any Python object with a +/// _serialize_callback method for serialization and a _deserialize_callback +/// method for deserialization. If context is None, no custom serialization +/// will be attempted. +/// \param[in] sequence A Python sequence object to serialize to Arrow data +/// structures +/// \param[out] out The serialized representation +/// \return Status +/// +/// Release GIL before calling +ARROW_PYTHON_EXPORT +Status SerializeObject(PyObject* context, PyObject* sequence, SerializedPyObject* out); + +/// \brief Serialize an Arrow Tensor as a SerializedPyObject. +/// \param[in] tensor Tensor to be serialized +/// \param[out] out The serialized representation +/// \return Status +ARROW_PYTHON_EXPORT +Status SerializeTensor(std::shared_ptr tensor, py::SerializedPyObject* out); + +/// \brief Write the Tensor metadata header to an OutputStream. +/// \param[in] dtype DataType of the Tensor +/// \param[in] shape The shape of the tensor +/// \param[in] tensor_num_bytes The length of the Tensor data in bytes +/// \param[in] dst The OutputStream to write the Tensor header to +/// \return Status +ARROW_PYTHON_EXPORT +Status WriteNdarrayHeader(std::shared_ptr dtype, + const std::vector& shape, int64_t tensor_num_bytes, + io::OutputStream* dst); + +struct PythonType { + enum type { + NONE, + BOOL, + INT, + PY2INT, // Kept for compatibility + BYTES, + STRING, + HALF_FLOAT, + FLOAT, + DOUBLE, + DATE64, + LIST, + DICT, + TUPLE, + SET, + TENSOR, + NDARRAY, + BUFFER, + SPARSECOOTENSOR, + SPARSECSRMATRIX, + SPARSECSCMATRIX, + SPARSECSFTENSOR, + NUM_PYTHON_TYPES + }; +}; + +} // namespace py + +} // namespace arrow diff --git a/parrot/lib/python3.10/site-packages/pyarrow/util.py b/parrot/lib/python3.10/site-packages/pyarrow/util.py new file mode 100644 index 0000000000000000000000000000000000000000..89780da10f7cfe682ce0b12a1a5bb99975e846e9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/util.py @@ -0,0 +1,258 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Miscellaneous utility code + +import os +import contextlib +import functools +import gc +import socket +import sys +import textwrap +import types +import warnings + + +_DEPR_MSG = ( + "pyarrow.{} is deprecated as of {}, please use pyarrow.{} instead." 
+) + + +def doc(*docstrings, **params): + """ + A decorator that takes docstring templates, concatenates them, and finally + performs string substitution on them. + This decorator will add a variable "_docstring_components" to the wrapped + callable to keep track of the original docstring template for potential future use. + If the docstring is a template, it will be saved as a string. + Otherwise, it will be saved as a callable and the docstring will be obtained via + the __doc__ attribute. + This decorator cannot be used on Cython classes due to a CPython constraint, + which enforces the __doc__ attribute to be read-only. + See https://github.com/python/cpython/issues/91309 + + Parameters + ---------- + *docstrings : None, str, or callable + The string / docstring / docstring template to be prepended in order + before the default docstring under the callable. + **params + The key/value pairs used to format the docstring template. + """ + + def decorator(decorated): + docstring_components = [] + + # collect docstrings and docstring templates + for docstring in docstrings: + if docstring is None: + continue + if hasattr(docstring, "_docstring_components"): + docstring_components.extend( + docstring._docstring_components + ) + elif isinstance(docstring, str) or docstring.__doc__: + docstring_components.append(docstring) + + # append the callable's docstring last + if decorated.__doc__: + docstring_components.append(textwrap.dedent(decorated.__doc__)) + + params_applied = [ + component.format(**params) + if isinstance(component, str) and len(params) > 0 + else component + for component in docstring_components + ] + + decorated.__doc__ = "".join( + [ + component + if isinstance(component, str) + else textwrap.dedent(component.__doc__ or "") + for component in params_applied + ] + ) + + decorated._docstring_components = ( + docstring_components + ) + return decorated + + return decorator + + +def _deprecate_api(old_name, new_name, api, next_version, type=FutureWarning): + msg = _DEPR_MSG.format(old_name, next_version, new_name) + + def wrapper(*args, **kwargs): + warnings.warn(msg, type) + return api(*args, **kwargs) + return wrapper + + +def _deprecate_class(old_name, new_class, next_version, + instancecheck=True): + """ + Raise warning if a deprecated class is used in an isinstance check. + """ + class _DeprecatedMeta(type): + def __instancecheck__(self, other): + warnings.warn( + _DEPR_MSG.format(old_name, next_version, new_class.__name__), + FutureWarning, + stacklevel=2 + ) + return isinstance(other, new_class) + + return _DeprecatedMeta(old_name, (new_class,), {}) + + +def _is_iterable(obj): + try: + iter(obj) + return True + except TypeError: + return False + + +def _is_path_like(path): + return isinstance(path, str) or hasattr(path, '__fspath__') + + +def _stringify_path(path): + """ + Convert *path* to a string or unicode path if possible. + """ + if isinstance(path, str): + return os.path.expanduser(path) + + # checking whether path implements the filesystem protocol + try: + return os.path.expanduser(path.__fspath__()) + except AttributeError: + pass + + raise TypeError("not a path-like object") + + +def product(seq): + """ + Return a product of sequence items. + """ + return functools.reduce(lambda a, b: a*b, seq, 1) + + +def get_contiguous_span(shape, strides, itemsize): + """ + Return a contiguous span of N-D array data. 
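+
+    The returned start and end are byte offsets relative to the array's data
+    pointer, so that the array contents occupy exactly end - start bytes; a
+    ValueError is raised when the data is not contiguous.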
+ + Parameters + ---------- + shape : tuple + strides : tuple + itemsize : int + Specify array shape data + + Returns + ------- + start, end : int + The span end points. + """ + if not strides: + start = 0 + end = itemsize * product(shape) + else: + start = 0 + end = itemsize + for i, dim in enumerate(shape): + if dim == 0: + start = end = 0 + break + stride = strides[i] + if stride > 0: + end += stride * (dim - 1) + elif stride < 0: + start += stride * (dim - 1) + if end - start != itemsize * product(shape): + raise ValueError('array data is non-contiguous') + return start, end + + +def find_free_port(): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + with contextlib.closing(sock) as sock: + sock.bind(('', 0)) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + return sock.getsockname()[1] + + +def guid(): + from uuid import uuid4 + return uuid4().hex + + +def _break_traceback_cycle_from_frame(frame): + # Clear local variables in all inner frames, so as to break the + # reference cycle. + this_frame = sys._getframe(0) + refs = gc.get_referrers(frame) + while refs: + for frame in refs: + if frame is not this_frame and isinstance(frame, types.FrameType): + break + else: + # No frame found in referrers (finished?) + break + refs = None + # Clear the frame locals, to try and break the cycle (it is + # somewhere along the chain of execution frames). + frame.clear() + # To visit the inner frame, we need to find it among the + # referrers of this frame (while `frame.f_back` would let + # us visit the outer frame). + refs = gc.get_referrers(frame) + refs = frame = this_frame = None + + +def download_tzdata_on_windows(): + r""" + Download and extract latest IANA timezone database into the + location expected by Arrow which is %USERPROFILE%\Downloads\tzdata. 
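+
+    The tzdata archive is fetched from data.iana.org and the CLDR
+    windowsZones.xml mapping from the unicode-org repository; calling this on
+    a non-Windows platform raises TypeError.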
+ """ + if sys.platform != 'win32': + raise TypeError(f"Timezone database is already provided by {sys.platform}") + + import tarfile + + tzdata_path = os.path.expandvars(r"%USERPROFILE%\Downloads\tzdata") + tzdata_compressed = os.path.join(tzdata_path, "tzdata.tar.gz") + os.makedirs(tzdata_path, exist_ok=True) + + from urllib.request import urlopen + with urlopen('https://data.iana.org/time-zones/tzdata-latest.tar.gz') as response: + with open(tzdata_compressed, 'wb') as f: + f.write(response.read()) + + assert os.path.exists(tzdata_compressed) + + tarfile.open(tzdata_compressed).extractall(tzdata_path) + + with urlopen('https://raw.githubusercontent.com/unicode-org/cldr/master/common/supplemental/windowsZones.xml') as response_zones: # noqa + with open(os.path.join(tzdata_path, "windowsZones.xml"), 'wb') as f: + f.write(response_zones.read()) diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24c74b246ecb22271276815d5b430f3aba15c920 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/custom_actions.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/custom_actions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a208098f72808355bcd0baf18d35c6761b69277 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/custom_actions.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/fields.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/fields.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec3c90573c16641dc99e509636ab3b98a2538dad Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/fields.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/partial.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/partial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..311527d6be1e3b3adf15706e3bdb8e1e2fb7637c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/partial.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/subgroups.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/subgroups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b23a7d8aa123394543b608f15f75f37187ba021 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/subgroups.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/hparam.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/hparam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a3e26c2ec0fd902d416975a3af4e5a91125b1bb Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/hparam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/hyperparameters_test.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/hyperparameters_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc952a18f73722d4e7c5773fa7335a35569f880d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/hyperparameters_test.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/priors_test.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/priors_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a81ba2963923a88727e4293a317feee9da356be9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/priors_test.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/priors.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/priors.py new file mode 100644 index 0000000000000000000000000000000000000000..eeb025aa849534787fd6f215c6fd6cd471108037 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/priors.py @@ -0,0 +1,356 @@ +import importlib.util +import math +import random +from abc import abstractmethod +from dataclasses import dataclass +from typing import ( + Any, + Generic, + List, + Optional, + Sequence, + Tuple, + TypeVar, + Union, + overload, +) + + +class _np_lazy: + def __getattr__(self, attr): + global np + import numpy as np + + return getattr(np, attr) + + +np = _np_lazy() +numpy_installed = importlib.util.find_spec("numpy") is not None + + +T = TypeVar("T") + + +@dataclass # type: ignore +class Prior(Generic[T]): + def __post_init__(self): + if numpy_installed: + self.np_rng = np.random + else: + self.rng: random.Random = random.Random() + + @abstractmethod + def sample(self) -> T: + pass + + def seed(self, seed: Optional[int]) -> None: + # Should this seed this individual prior? + if numpy_installed: + self.np_rng = np.random.RandomState(seed) + else: + self.rng = random.Random(seed) + + @abstractmethod + def get_orion_space_string(self) -> str: + """Gets the 'Orion-formatted space string' for this Prior object.""" + + @abstractmethod + def __contains__(self, v: Union[T, Any]) -> bool: + pass + + +@dataclass +class NormalPrior(Prior): + mu: float = 0.0 + sigma: float = 1.0 + discrete: bool = False + default: Optional[float] = None + shape: Union[int, Tuple[int, ...]] = None + + def __post_init__(self): + super().__post_init__() + if self.shape: + if isinstance(self.default, (int, float)): + self.default = [self.default for _ in range(self.shape)] + + def sample(self) -> Union[float, int]: + if self.shape: + assert isinstance(self.shape, int), "only support int shape for now." 
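+            # With numpy available, draw the whole array in a single vectorized
+            # call; otherwise fall back to repeated scalar draws below.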
+ if numpy_installed: + return self.np_rng.normal(self.mu, self.sigma, size=self.shape) + elif isinstance(self.shape, int): + _shape = self.shape + self.shape = None + values = [self.sample() for _ in range(_shape)] + self.shape = _shape + return values + else: + raise NotImplementedError(self.shape) + if numpy_installed: + value = self.np_rng.normal(self.mu, self.sigma) + else: + value = self.rng.normalvariate(self.mu, self.sigma) + if self.discrete: + return round(value) + return value + + def get_orion_space_string(self) -> str: + raise NotImplementedError( + "TODO: Add this for the normal prior, didn't check how its done in " "Orion yet." + ) + + def __contains__(self, v: Union[T, Any]) -> bool: + # TODO: For normal priors, I guess we only really check if the value is a float? + return isinstance(v, (int, float)) + + +@dataclass +class UniformPrior(Prior): + min: float = 0.0 + max: float = 1.0 + discrete: bool = False + default: Optional[float] = None + shape: Union[int, Tuple[int, ...]] = None + + def __post_init__(self): + super().__post_init__() + assert self.min <= self.max + if self.shape: + if isinstance(self.default, (int, float)): + self.default = [self.default for _ in range(self.shape)] + + def sample(self) -> Union[float, int]: + # TODO: add support for enums? + if self.shape: + assert isinstance(self.shape, int), "only support int shape for now." + if numpy_installed: + values = self.np_rng.uniform(self.min, self.max, size=self.shape) + if self.discrete: + values = np.round(values) + values = values.astype(int) + return values + elif isinstance(self.shape, int): + _shape = self.shape + self.shape = None + values = [self.sample() for _ in range(_shape)] + self.shape = _shape + return values + else: + raise NotImplementedError(self.shape) + if numpy_installed: + value = self.np_rng.uniform(self.min, self.max) + else: + value = self.rng.uniform(self.min, self.max) + if self.discrete: + return round(value) + return value + + def get_orion_space_string(self) -> str: + string = f"uniform({self.min}, {self.max}" + if self.discrete: + string += ", discrete=True" + if self.default is not None: + string += f", default_value={self.default}" + if self.shape is not None: + string += f", shape={self.shape}" + string += ")" + return string + + def __contains__(self, v: Union[T, Any]) -> bool: + # TODO: Include the max value here? or not? + return isinstance(v, (int, float)) and (self.min <= v < self.max) + + +@dataclass +class CategoricalPrior(Prior[T]): + choices: List[T] + probabilities: Optional[List[float]] = None + default_value: Optional[T] = None + + def __post_init__(self): + super().__post_init__() + if isinstance(self.choices, dict): + choices = [] + self.probabilities = [] + for k, v in self.choices.items(): + choices.append(k) + assert isinstance(v, (int, float)), "probs should be int or float" + self.probabilities.append(v) + + @overload + def sample(self, n: int) -> List[T]: + ... + + @overload + def sample(self) -> T: + ... 
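+    # Illustrative usage (mirrors priors_test.py further below):
+    #     prior = CategoricalPrior(["a", "b", "c"])
+    #     prior.sample()      # -> a single choice, e.g. "b"
+    #     prior.sample(1000)  # -> a list of 1000 choices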
+ + def sample(self, n: int = None) -> Union[T, List[T]]: + assert self.choices + # n = n or 1 + # assert isinstance(n, int), n + choices: List = [] + probabilities: List[float] = [] + if isinstance(self.choices, dict): + for k, v in self.choices.items(): + choices.append(k) + probabilities.append(v) + else: + choices = self.choices + probabilities = self.probabilities + + print(choices, n, probabilities) + if numpy_installed: + s = self.np_rng.choice(choices, size=n, p=probabilities) + samples = [(s_i.item() if isinstance(s_i, np.ndarray) else s_i) for s_i in s] + else: + samples = self.rng.choices(choices, weights=probabilities, k=n or 1) + + return samples[0] if n in {None, 1} else samples + + def get_orion_space_string(self) -> str: + string = "choices(" + if self.probabilities: + prob_dict = dict(zip(self.choices, self.probabilities)) + assert sum(self.probabilities) == 1, "probs should sum to 1." + # BUG: Seems like orion still samples entries, even if they have zero + # probability! + # TODO: Remove the entries that have zero prob? + prob_dict = {k: v for k, v in prob_dict.items() if v > 0} + string += str(prob_dict) + else: + string += str(self.choices) + if self.default_value is not None: + assert isinstance(self.default_value, (int, str, float)) + default_value_str = str(self.default_value) + if isinstance(self.default_value, str): + default_value_str = f"'{self.default_value}'" + string += f", default_value={default_value_str}" + string += ")" + return string + + def __contains__(self, v: Union[T, Any]) -> bool: + return v in self.choices + + +@dataclass +class LogUniformPrior(Prior): + min: float = 1e-3 + max: float = 1e3 + base: float = math.e + discrete: bool = False + default: Optional[float] = None + shape: Union[int, Tuple[int, ...]] = None + + def __post_init__(self): + super().__post_init__() + if self.shape: + if isinstance(self.default, (int, float)): + self.default = [self.default for _ in range(self.shape)] + + def sample(self) -> float: + # TODO: Might not be 100% numerically stable. + assert self.min > 0, "min of LogUniform can't be negative!" + assert self.min < self.max, "max should be greater than min!" + if self.shape: + assert isinstance(self.shape, int), "only support in shape for now." 
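+            # Draw uniformly in log space between log_min and log_max, then
+            # exponentiate with the prior's base to obtain log-uniform values.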
+ if numpy_installed: + log_vals = self.np_rng.uniform(self.log_min, self.log_max, size=self.shape) + values = np.power(self.base, log_vals) + if self.discrete: + values = np.round(values) + return values + elif isinstance(self.shape, int): + _shape = self.shape + self.shape = None + values = [self.sample() for _ in range(_shape)] + self.shape = _shape + return values + else: + raise NotImplementedError(self.shape) + if numpy_installed: + log_val = self.np_rng.uniform(self.log_min, self.log_max) + else: + log_val = self.rng.uniform(self.log_min, self.log_max) + value = math.pow(self.base, log_val) + if self.discrete: + return round(value) + return value + + @property + def log_min(self) -> Union[int, float]: + if numpy_installed: + if self.base in {np.e, math.e}: + log_min = np.log(self.min) + else: + log_min = np.log(self.min) + else: + if self.base is math.e: + log_min = math.log(self.min) + else: + log_min = math.log(self.min, self.base) + assert isinstance(log_min, (int, float)) + return log_min + + @property + def log_max(self) -> Union[int, float]: + if numpy_installed: + if self.base in {math.e, np.e}: + log_max = np.log(self.max) + else: + log_max = np.log(self.max) / np.log(self.base) + else: + if self.base is math.e: + log_max = math.log(self.max) + else: + log_max = math.log(self.max, self.base) + assert isinstance(log_max, (int, float)) + return log_max + + def get_orion_space_string(self) -> str: + def format_power(value: float, log_value: float): + if isinstance(value, int) or value.is_integer(): + return int(value) + elif isinstance(log_value, int) or log_value.is_integer(): + log_value = int(log_value) + if self.base == np.e: + return f"np.exp({int(log_value)})" + elif self.base == 10: + return f"{value:.2e}" + if math.log10(value).is_integer(): + return f"{value:.0e}" + else: + return f"{value:g}" + + min_str = format_power(self.min, self.log_min) + max_str = format_power(self.max, self.log_max) + string = f"loguniform({min_str}, {max_str}" + if self.discrete: + string += ", discrete=True" + if self.default is not None: + string += f", default_value={self.default}" + if self.shape is not None: + string += f", shape={self.shape}" + string += ")" + return string + + def __contains__(self, v: Union[T, Any]) -> bool: + if self.shape: + assert isinstance(self.shape, int), "only support int shape for now." 
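+            # Array-shaped prior: broadcast scalar bounds to per-element bounds
+            # and check each element of `v` against them.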
+ mins: Sequence[float] + if isinstance(self.min, (int, float)): + mins = [self.min for _ in range(self.shape)] + else: + mins = self.min + + maxes: Sequence[float] + if isinstance(self.max, (int, float)): + maxes = [self.max for _ in range(self.shape)] + else: + maxes = self.max + + return all( + isinstance(v_i, (int, float)) and mins[i] <= v_i < maxes[i] + for i, v_i in enumerate(v) + ) + return isinstance(v, (int, float)) and (self.min <= v < self.max) diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/priors_test.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/priors_test.py new file mode 100644 index 0000000000000000000000000000000000000000..fca5208a86f3a37d11f9dfafe5307a88ceed77cd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/priors_test.py @@ -0,0 +1,134 @@ +import math +from collections import Counter +from dataclasses import dataclass + +import pytest + +from .hparam import hparam +from .hyperparameters import HyperParameters +from .priors import CategoricalPrior, LogUniformPrior, UniformPrior +from .utils import set_seed + +numpy_installed = False +try: + import numpy as np + + numpy_installed = True +except ImportError: + pass + + +matplotlib_installed = False +try: + import matplotlib.pyplot as plt + + matplotlib_installed = True +except ImportError: + pass + + +@dataclass +class A(HyperParameters): + learning_rate: float = hparam(default=0.001, prior=LogUniformPrior(min=1e-6, max=1)) + + +@pytest.mark.skipif(not matplotlib_installed, reason="Test requires matplotlib.") +@pytest.mark.skipif(not numpy_installed, reason="Test requires numpy.") +def test_log_uniform(): + n_bins = 5 + n_points = 200 + set_seed(123) + x = [A.sample().to_array() for i in range(n_points)] + hist, bins, _ = plt.hist(x, bins=n_bins) + # histogram on log scale. + # Use non-equal bin sizes, such that they look equal on log scale. + logbins = np.logspace(np.log10(bins[0]), np.log10(bins[-1]), len(bins)) + hist, bins, _ = plt.hist(x, bins=logbins) + plt.xscale("log") + + counts = np.sum(hist, axis=0) + mean = np.mean(counts) + std = np.std(counts) + + error_to_mean_ratio = std / mean + # TODO: This is not ideal, since changing the seed might break the test. + # For this particular seed (123), the variance is about 5. 
+ assert ( + error_to_mean_ratio < 0.25 + ), f"Variance is too large!{error_to_mean_ratio} {std}, {counts}," + + +@dataclass +class B(A): + momentum: float = hparam(default=0.0, prior=UniformPrior(min=-2.0, max=2.0)) + + +@pytest.mark.skipif(not numpy_installed, reason="Test requires numpy.") +def test_to_array(): + b = B.sample() + array = b.to_array() + assert np.isclose(array[0], b.learning_rate) + assert np.isclose(array[1], b.momentum) + + +def test_log_uniform_and_uniform(): + n_points = 100 + set_seed(123) + x_samples = [B.sample() for i in range(n_points)] + assert all([0.0 < x.learning_rate < 1 for x in x_samples]), x_samples + assert all([-2 < x.momentum < 2 for x in x_samples]), x_samples + + +@pytest.mark.skipif(not numpy_installed, reason="Test requires numpy.") +def test_log_uniform_and_uniform_np(): + n_points = 100 + set_seed(123) + x_samples = [B.sample() for i in range(n_points)] + + assert all([0.0 < x.learning_rate < 1 for x in x_samples]), x_samples + assert all([-2 < x.momentum < 2 for x in x_samples]), x_samples + + x = np.stack([x.to_array() for x in x_samples]) + + x0 = x[:, 0] # learning rate + x1 = x[:, 1] # momentum + print(x0.mean(), x0.std()) + print(x1.mean(), x1.std()) + + assert np.abs(x1.mean()) <= 0.5 + assert x1.max() - 2 <= 0.2 + assert x1.min() - (-2) <= 0.2 + + +def test_loguniform_prior(): + prior = LogUniformPrior(min=1, max=1e5, base=10) + samples = [prior.sample() for _ in range(1000)] + assert all(1 < x < 1e5 for x in samples) + log_samples = [math.log10(s) for s in samples] + mean = sum(log_samples) / len(log_samples) + # mean base-10 exponent should be around 2.5 + assert 2.35 <= mean <= 2.65 + + +def test_categorical_prior(): + prior = CategoricalPrior(["a", "b", "c"]) + prior.seed(123) + samples = [prior.sample() for _ in range(1000)] + counter = Counter(samples) + assert all(250 < count < 400 for val, count in counter.items()), counter.items() + + prior = CategoricalPrior({"a": 0.1, "b": 0.1, "c": 0.8}) + prior.seed(123) + assert prior.get_orion_space_string() == "choices({'a': 0.1, 'b': 0.1, 'c': 0.8})" + + samples = prior.sample(1000) + counter = Counter(samples) + assert 50 <= counter["a"] <= 150 + assert 50 <= counter["b"] <= 150 + assert 700 <= counter["c"] <= 900 + + +def test_log_uniform_with_shape(): + prior = LogUniformPrior(min=1e-6, max=1, default=0.001, shape=2) + assert len(prior.sample()) == 2 + assert [0.1, 0.2] in prior