Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/INSTALLER +1 -0
- parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/METADATA +15 -0
- parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/WHEEL +5 -0
- parrot/lib/python3.10/site-packages/pyarrow/__init__.py +432 -0
- parrot/lib/python3.10/site-packages/pyarrow/_acero.pyx +608 -0
- parrot/lib/python3.10/site-packages/pyarrow/_azurefs.pyx +134 -0
- parrot/lib/python3.10/site-packages/pyarrow/_compute.pyx +0 -0
- parrot/lib/python3.10/site-packages/pyarrow/_csv.pyx +1542 -0
- parrot/lib/python3.10/site-packages/pyarrow/_cuda.pxd +67 -0
- parrot/lib/python3.10/site-packages/pyarrow/_cuda.pyx +1058 -0
- parrot/lib/python3.10/site-packages/pyarrow/_dataset.pxd +183 -0
- parrot/lib/python3.10/site-packages/pyarrow/_dataset.pyx +0 -0
- parrot/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd +43 -0
- parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx +1053 -0
- parrot/lib/python3.10/site-packages/pyarrow/_flight.pyx +0 -0
- parrot/lib/python3.10/site-packages/pyarrow/_fs.pyx +1628 -0
- parrot/lib/python3.10/site-packages/pyarrow/_orc.pyx +445 -0
- parrot/lib/python3.10/site-packages/pyarrow/_parquet.pxd +679 -0
- parrot/lib/python3.10/site-packages/pyarrow/_parquet.pyx +2253 -0
- parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/lib/python3.10/site-packages/pyarrow/_substrait.pyx +349 -0
- parrot/lib/python3.10/site-packages/pyarrow/benchmark.py +21 -0
- parrot/lib/python3.10/site-packages/pyarrow/builder.pxi +148 -0
- parrot/lib/python3.10/site-packages/pyarrow/cffi.py +81 -0
- parrot/lib/python3.10/site-packages/pyarrow/config.pxi +95 -0
- parrot/lib/python3.10/site-packages/pyarrow/conftest.py +375 -0
- parrot/lib/python3.10/site-packages/pyarrow/device.pxi +162 -0
- parrot/lib/python3.10/site-packages/pyarrow/flight.py +69 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/__init__.pxd +0 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/common.pxd +175 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd +0 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd +118 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd +107 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd +413 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd +105 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd +50 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd +622 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd +357 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd +322 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd +77 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd +298 -0
- parrot/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd +130 -0
- parrot/lib/python3.10/site-packages/pyarrow/interchange/__init__.py +20 -0
- parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/buffer.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/column.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/from_dataframe.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/pyarrow/interchange/buffer.py +107 -0
parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/METADATA
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.2
|
| 2 |
+
Name: antlr4-python3-runtime
|
| 3 |
+
Version: 4.9.3
|
| 4 |
+
Summary: ANTLR 4.9.3 runtime for Python 3.7
|
| 5 |
+
Home-page: http://www.antlr.org
|
| 6 |
+
Author: Eric Vergnaud, Terence Parr, Sam Harwell
|
| 7 |
+
Author-email: eric.vergnaud@wanadoo.fr
|
| 8 |
+
License: BSD
|
| 9 |
+
Requires-Dist: typing; python_version < "3.5"
|
| 10 |
+
Dynamic: author
|
| 11 |
+
Dynamic: author-email
|
| 12 |
+
Dynamic: home-page
|
| 13 |
+
Dynamic: license
|
| 14 |
+
Dynamic: requires-dist
|
| 15 |
+
Dynamic: summary
|
parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: setuptools (75.8.0)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
| 5 |
+
|
parrot/lib/python3.10/site-packages/pyarrow/__init__.py
ADDED
|
@@ -0,0 +1,432 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# flake8: noqa
|
| 19 |
+
|
| 20 |
+
"""
|
| 21 |
+
PyArrow is the python implementation of Apache Arrow.
|
| 22 |
+
|
| 23 |
+
Apache Arrow is a cross-language development platform for in-memory data.
|
| 24 |
+
It specifies a standardized language-independent columnar memory format for
|
| 25 |
+
flat and hierarchical data, organized for efficient analytic operations on
|
| 26 |
+
modern hardware. It also provides computational libraries and zero-copy
|
| 27 |
+
streaming messaging and interprocess communication.
|
| 28 |
+
|
| 29 |
+
For more information see the official page at https://arrow.apache.org
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
import gc as _gc
|
| 33 |
+
import importlib as _importlib
|
| 34 |
+
import os as _os
|
| 35 |
+
import platform as _platform
|
| 36 |
+
import sys as _sys
|
| 37 |
+
import warnings as _warnings
|
| 38 |
+
|
| 39 |
+
try:
|
| 40 |
+
from ._generated_version import version as __version__
|
| 41 |
+
except ImportError:
|
| 42 |
+
# Package is not installed, parse git tag at runtime
|
| 43 |
+
try:
|
| 44 |
+
import setuptools_scm
|
| 45 |
+
# Code duplicated from setup.py to avoid a dependency on each other
|
| 46 |
+
|
| 47 |
+
def parse_git(root, **kwargs):
|
| 48 |
+
"""
|
| 49 |
+
Parse function for setuptools_scm that ignores tags for non-C++
|
| 50 |
+
subprojects, e.g. apache-arrow-js-XXX tags.
|
| 51 |
+
"""
|
| 52 |
+
from setuptools_scm.git import parse
|
| 53 |
+
kwargs['describe_command'] = \
|
| 54 |
+
"git describe --dirty --tags --long --match 'apache-arrow-[0-9]*.*'"
|
| 55 |
+
return parse(root, **kwargs)
|
| 56 |
+
__version__ = setuptools_scm.get_version('../',
|
| 57 |
+
parse=parse_git)
|
| 58 |
+
except ImportError:
|
| 59 |
+
__version__ = None
|
| 60 |
+
|
| 61 |
+
# ARROW-8684: Disable GC while initializing Cython extension module,
|
| 62 |
+
# to workaround Cython bug in https://github.com/cython/cython/issues/3603
|
| 63 |
+
_gc_enabled = _gc.isenabled()
|
| 64 |
+
_gc.disable()
|
| 65 |
+
import pyarrow.lib as _lib
|
| 66 |
+
if _gc_enabled:
|
| 67 |
+
_gc.enable()
|
| 68 |
+
|
| 69 |
+
from pyarrow.lib import (BuildInfo, RuntimeInfo, set_timezone_db_path,
|
| 70 |
+
MonthDayNano, VersionInfo, cpp_build_info,
|
| 71 |
+
cpp_version, cpp_version_info, runtime_info,
|
| 72 |
+
cpu_count, set_cpu_count, enable_signal_handlers,
|
| 73 |
+
io_thread_count, set_io_thread_count)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def show_versions():
|
| 77 |
+
"""
|
| 78 |
+
Print various version information, to help with error reporting.
|
| 79 |
+
"""
|
| 80 |
+
def print_entry(label, value):
|
| 81 |
+
print(f"{label: <26}: {value: <8}")
|
| 82 |
+
|
| 83 |
+
print("pyarrow version info\n--------------------")
|
| 84 |
+
print_entry("Package kind", cpp_build_info.package_kind
|
| 85 |
+
if len(cpp_build_info.package_kind) > 0
|
| 86 |
+
else "not indicated")
|
| 87 |
+
print_entry("Arrow C++ library version", cpp_build_info.version)
|
| 88 |
+
print_entry("Arrow C++ compiler",
|
| 89 |
+
f"{cpp_build_info.compiler_id} {cpp_build_info.compiler_version}")
|
| 90 |
+
print_entry("Arrow C++ compiler flags", cpp_build_info.compiler_flags)
|
| 91 |
+
print_entry("Arrow C++ git revision", cpp_build_info.git_id)
|
| 92 |
+
print_entry("Arrow C++ git description", cpp_build_info.git_description)
|
| 93 |
+
print_entry("Arrow C++ build type", cpp_build_info.build_type)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def _module_is_available(module):
|
| 97 |
+
try:
|
| 98 |
+
_importlib.import_module(f'pyarrow.{module}')
|
| 99 |
+
except ImportError:
|
| 100 |
+
return False
|
| 101 |
+
else:
|
| 102 |
+
return True
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def _filesystem_is_available(fs):
|
| 106 |
+
try:
|
| 107 |
+
import pyarrow.fs
|
| 108 |
+
except ImportError:
|
| 109 |
+
return False
|
| 110 |
+
|
| 111 |
+
try:
|
| 112 |
+
getattr(pyarrow.fs, fs)
|
| 113 |
+
except (ImportError, AttributeError):
|
| 114 |
+
return False
|
| 115 |
+
else:
|
| 116 |
+
return True
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def show_info():
|
| 120 |
+
"""
|
| 121 |
+
Print detailed version and platform information, for error reporting
|
| 122 |
+
"""
|
| 123 |
+
show_versions()
|
| 124 |
+
|
| 125 |
+
def print_entry(label, value):
|
| 126 |
+
print(f" {label: <20}: {value: <8}")
|
| 127 |
+
|
| 128 |
+
print("\nPlatform:")
|
| 129 |
+
print_entry("OS / Arch", f"{_platform.system()} {_platform.machine()}")
|
| 130 |
+
print_entry("SIMD Level", runtime_info().simd_level)
|
| 131 |
+
print_entry("Detected SIMD Level", runtime_info().detected_simd_level)
|
| 132 |
+
|
| 133 |
+
pool = default_memory_pool()
|
| 134 |
+
print("\nMemory:")
|
| 135 |
+
print_entry("Default backend", pool.backend_name)
|
| 136 |
+
print_entry("Bytes allocated", f"{pool.bytes_allocated()} bytes")
|
| 137 |
+
print_entry("Max memory", f"{pool.max_memory()} bytes")
|
| 138 |
+
print_entry("Supported Backends", ', '.join(supported_memory_backends()))
|
| 139 |
+
|
| 140 |
+
print("\nOptional modules:")
|
| 141 |
+
modules = ["csv", "cuda", "dataset", "feather", "flight", "fs", "gandiva", "json",
|
| 142 |
+
"orc", "parquet"]
|
| 143 |
+
for module in modules:
|
| 144 |
+
status = "Enabled" if _module_is_available(module) else "-"
|
| 145 |
+
print(f" {module: <20}: {status: <8}")
|
| 146 |
+
|
| 147 |
+
print("\nFilesystems:")
|
| 148 |
+
filesystems = ["AzureFileSystem", "GcsFileSystem",
|
| 149 |
+
"HadoopFileSystem", "S3FileSystem"]
|
| 150 |
+
for fs in filesystems:
|
| 151 |
+
status = "Enabled" if _filesystem_is_available(fs) else "-"
|
| 152 |
+
print(f" {fs: <20}: {status: <8}")
|
| 153 |
+
|
| 154 |
+
print("\nCompression Codecs:")
|
| 155 |
+
codecs = ["brotli", "bz2", "gzip", "lz4_frame", "lz4", "snappy", "zstd"]
|
| 156 |
+
for codec in codecs:
|
| 157 |
+
status = "Enabled" if Codec.is_available(codec) else "-"
|
| 158 |
+
print(f" {codec: <20}: {status: <8}")
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
from pyarrow.lib import (null, bool_,
|
| 162 |
+
int8, int16, int32, int64,
|
| 163 |
+
uint8, uint16, uint32, uint64,
|
| 164 |
+
time32, time64, timestamp, date32, date64, duration,
|
| 165 |
+
month_day_nano_interval,
|
| 166 |
+
float16, float32, float64,
|
| 167 |
+
binary, string, utf8, binary_view, string_view,
|
| 168 |
+
large_binary, large_string, large_utf8,
|
| 169 |
+
decimal128, decimal256,
|
| 170 |
+
list_, large_list, list_view, large_list_view,
|
| 171 |
+
map_, struct,
|
| 172 |
+
union, sparse_union, dense_union,
|
| 173 |
+
dictionary,
|
| 174 |
+
run_end_encoded,
|
| 175 |
+
fixed_shape_tensor,
|
| 176 |
+
field,
|
| 177 |
+
type_for_alias,
|
| 178 |
+
DataType, DictionaryType, StructType,
|
| 179 |
+
ListType, LargeListType, FixedSizeListType,
|
| 180 |
+
ListViewType, LargeListViewType,
|
| 181 |
+
MapType, UnionType, SparseUnionType, DenseUnionType,
|
| 182 |
+
TimestampType, Time32Type, Time64Type, DurationType,
|
| 183 |
+
FixedSizeBinaryType, Decimal128Type, Decimal256Type,
|
| 184 |
+
BaseExtensionType, ExtensionType,
|
| 185 |
+
RunEndEncodedType, FixedShapeTensorType,
|
| 186 |
+
PyExtensionType, UnknownExtensionType,
|
| 187 |
+
register_extension_type, unregister_extension_type,
|
| 188 |
+
DictionaryMemo,
|
| 189 |
+
KeyValueMetadata,
|
| 190 |
+
Field,
|
| 191 |
+
Schema,
|
| 192 |
+
schema,
|
| 193 |
+
unify_schemas,
|
| 194 |
+
Array, Tensor,
|
| 195 |
+
array, chunked_array, record_batch, nulls, repeat,
|
| 196 |
+
SparseCOOTensor, SparseCSRMatrix, SparseCSCMatrix,
|
| 197 |
+
SparseCSFTensor,
|
| 198 |
+
infer_type, from_numpy_dtype,
|
| 199 |
+
NullArray,
|
| 200 |
+
NumericArray, IntegerArray, FloatingPointArray,
|
| 201 |
+
BooleanArray,
|
| 202 |
+
Int8Array, UInt8Array,
|
| 203 |
+
Int16Array, UInt16Array,
|
| 204 |
+
Int32Array, UInt32Array,
|
| 205 |
+
Int64Array, UInt64Array,
|
| 206 |
+
HalfFloatArray, FloatArray, DoubleArray,
|
| 207 |
+
ListArray, LargeListArray, FixedSizeListArray,
|
| 208 |
+
ListViewArray, LargeListViewArray,
|
| 209 |
+
MapArray, UnionArray,
|
| 210 |
+
BinaryArray, StringArray,
|
| 211 |
+
LargeBinaryArray, LargeStringArray,
|
| 212 |
+
BinaryViewArray, StringViewArray,
|
| 213 |
+
FixedSizeBinaryArray,
|
| 214 |
+
DictionaryArray,
|
| 215 |
+
Date32Array, Date64Array, TimestampArray,
|
| 216 |
+
Time32Array, Time64Array, DurationArray,
|
| 217 |
+
MonthDayNanoIntervalArray,
|
| 218 |
+
Decimal128Array, Decimal256Array, StructArray, ExtensionArray,
|
| 219 |
+
RunEndEncodedArray, FixedShapeTensorArray,
|
| 220 |
+
scalar, NA, _NULL as NULL, Scalar,
|
| 221 |
+
NullScalar, BooleanScalar,
|
| 222 |
+
Int8Scalar, Int16Scalar, Int32Scalar, Int64Scalar,
|
| 223 |
+
UInt8Scalar, UInt16Scalar, UInt32Scalar, UInt64Scalar,
|
| 224 |
+
HalfFloatScalar, FloatScalar, DoubleScalar,
|
| 225 |
+
Decimal128Scalar, Decimal256Scalar,
|
| 226 |
+
ListScalar, LargeListScalar, FixedSizeListScalar,
|
| 227 |
+
ListViewScalar, LargeListViewScalar,
|
| 228 |
+
Date32Scalar, Date64Scalar,
|
| 229 |
+
Time32Scalar, Time64Scalar,
|
| 230 |
+
TimestampScalar, DurationScalar,
|
| 231 |
+
MonthDayNanoIntervalScalar,
|
| 232 |
+
BinaryScalar, LargeBinaryScalar, BinaryViewScalar,
|
| 233 |
+
StringScalar, LargeStringScalar, StringViewScalar,
|
| 234 |
+
FixedSizeBinaryScalar, DictionaryScalar,
|
| 235 |
+
MapScalar, StructScalar, UnionScalar,
|
| 236 |
+
RunEndEncodedScalar, ExtensionScalar)
|
| 237 |
+
|
| 238 |
+
# Buffers, allocation
|
| 239 |
+
from pyarrow.lib import (DeviceAllocationType, Device, MemoryManager,
|
| 240 |
+
default_cpu_memory_manager)
|
| 241 |
+
|
| 242 |
+
from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer,
|
| 243 |
+
Codec, compress, decompress, allocate_buffer)
|
| 244 |
+
|
| 245 |
+
from pyarrow.lib import (MemoryPool, LoggingMemoryPool, ProxyMemoryPool,
|
| 246 |
+
total_allocated_bytes, set_memory_pool,
|
| 247 |
+
default_memory_pool, system_memory_pool,
|
| 248 |
+
jemalloc_memory_pool, mimalloc_memory_pool,
|
| 249 |
+
logging_memory_pool, proxy_memory_pool,
|
| 250 |
+
log_memory_allocations, jemalloc_set_decay_ms,
|
| 251 |
+
supported_memory_backends)
|
| 252 |
+
|
| 253 |
+
# I/O
|
| 254 |
+
from pyarrow.lib import (NativeFile, PythonFile,
|
| 255 |
+
BufferedInputStream, BufferedOutputStream, CacheOptions,
|
| 256 |
+
CompressedInputStream, CompressedOutputStream,
|
| 257 |
+
TransformInputStream, transcoding_input_stream,
|
| 258 |
+
FixedSizeBufferWriter,
|
| 259 |
+
BufferReader, BufferOutputStream,
|
| 260 |
+
OSFile, MemoryMappedFile, memory_map,
|
| 261 |
+
create_memory_map, MockOutputStream,
|
| 262 |
+
input_stream, output_stream,
|
| 263 |
+
have_libhdfs)
|
| 264 |
+
|
| 265 |
+
from pyarrow.lib import (ChunkedArray, RecordBatch, Table, table,
|
| 266 |
+
concat_arrays, concat_tables, TableGroupBy,
|
| 267 |
+
RecordBatchReader)
|
| 268 |
+
|
| 269 |
+
# Exceptions
|
| 270 |
+
from pyarrow.lib import (ArrowCancelled,
|
| 271 |
+
ArrowCapacityError,
|
| 272 |
+
ArrowException,
|
| 273 |
+
ArrowKeyError,
|
| 274 |
+
ArrowIndexError,
|
| 275 |
+
ArrowInvalid,
|
| 276 |
+
ArrowIOError,
|
| 277 |
+
ArrowMemoryError,
|
| 278 |
+
ArrowNotImplementedError,
|
| 279 |
+
ArrowTypeError,
|
| 280 |
+
ArrowSerializationError)
|
| 281 |
+
|
| 282 |
+
from pyarrow.ipc import serialize_pandas, deserialize_pandas
|
| 283 |
+
import pyarrow.ipc as ipc
|
| 284 |
+
|
| 285 |
+
import pyarrow.types as types
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
# ----------------------------------------------------------------------
|
| 289 |
+
# Deprecations
|
| 290 |
+
|
| 291 |
+
from pyarrow.util import _deprecate_api, _deprecate_class
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
# TODO: Deprecate these somehow in the pyarrow namespace
|
| 295 |
+
from pyarrow.ipc import (Message, MessageReader, MetadataVersion,
|
| 296 |
+
RecordBatchFileReader, RecordBatchFileWriter,
|
| 297 |
+
RecordBatchStreamReader, RecordBatchStreamWriter)
|
| 298 |
+
|
| 299 |
+
# ----------------------------------------------------------------------
|
| 300 |
+
# Returning absolute path to the pyarrow include directory (if bundled, e.g. in
|
| 301 |
+
# wheels)
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def get_include():
|
| 305 |
+
"""
|
| 306 |
+
Return absolute path to directory containing Arrow C++ include
|
| 307 |
+
headers. Similar to numpy.get_include
|
| 308 |
+
"""
|
| 309 |
+
return _os.path.join(_os.path.dirname(__file__), 'include')
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def _get_pkg_config_executable():
|
| 313 |
+
return _os.environ.get('PKG_CONFIG', 'pkg-config')
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
def _has_pkg_config(pkgname):
|
| 317 |
+
import subprocess
|
| 318 |
+
try:
|
| 319 |
+
return subprocess.call([_get_pkg_config_executable(),
|
| 320 |
+
'--exists', pkgname]) == 0
|
| 321 |
+
except FileNotFoundError:
|
| 322 |
+
return False
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def _read_pkg_config_variable(pkgname, cli_args):
|
| 326 |
+
import subprocess
|
| 327 |
+
cmd = [_get_pkg_config_executable(), pkgname] + cli_args
|
| 328 |
+
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
|
| 329 |
+
stderr=subprocess.PIPE)
|
| 330 |
+
out, err = proc.communicate()
|
| 331 |
+
if proc.returncode != 0:
|
| 332 |
+
raise RuntimeError("pkg-config failed: " + err.decode('utf8'))
|
| 333 |
+
return out.rstrip().decode('utf8')
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def get_libraries():
|
| 337 |
+
"""
|
| 338 |
+
Return list of library names to include in the `libraries` argument for C
|
| 339 |
+
or Cython extensions using pyarrow
|
| 340 |
+
"""
|
| 341 |
+
return ['arrow_python', 'arrow']
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
def create_library_symlinks():
|
| 345 |
+
"""
|
| 346 |
+
With Linux and macOS wheels, the bundled shared libraries have an embedded
|
| 347 |
+
ABI version like libarrow.so.17 or libarrow.17.dylib and so linking to them
|
| 348 |
+
with -larrow won't work unless we create symlinks at locations like
|
| 349 |
+
site-packages/pyarrow/libarrow.so. This unfortunate workaround addresses
|
| 350 |
+
prior problems we had with shipping two copies of the shared libraries to
|
| 351 |
+
permit third party projects like turbodbc to build their C++ extensions
|
| 352 |
+
against the pyarrow wheels.
|
| 353 |
+
|
| 354 |
+
This function must only be invoked once and only when the shared libraries
|
| 355 |
+
are bundled with the Python package, which should only apply to wheel-based
|
| 356 |
+
installs. It requires write access to the site-packages/pyarrow directory
|
| 357 |
+
and so depending on your system may need to be run with root.
|
| 358 |
+
"""
|
| 359 |
+
import glob
|
| 360 |
+
if _sys.platform == 'win32':
|
| 361 |
+
return
|
| 362 |
+
package_cwd = _os.path.dirname(__file__)
|
| 363 |
+
|
| 364 |
+
if _sys.platform == 'linux':
|
| 365 |
+
bundled_libs = glob.glob(_os.path.join(package_cwd, '*.so.*'))
|
| 366 |
+
|
| 367 |
+
def get_symlink_path(hard_path):
|
| 368 |
+
return hard_path.rsplit('.', 1)[0]
|
| 369 |
+
else:
|
| 370 |
+
bundled_libs = glob.glob(_os.path.join(package_cwd, '*.*.dylib'))
|
| 371 |
+
|
| 372 |
+
def get_symlink_path(hard_path):
|
| 373 |
+
return '.'.join((hard_path.rsplit('.', 2)[0], 'dylib'))
|
| 374 |
+
|
| 375 |
+
for lib_hard_path in bundled_libs:
|
| 376 |
+
symlink_path = get_symlink_path(lib_hard_path)
|
| 377 |
+
if _os.path.exists(symlink_path):
|
| 378 |
+
continue
|
| 379 |
+
try:
|
| 380 |
+
_os.symlink(lib_hard_path, symlink_path)
|
| 381 |
+
except PermissionError:
|
| 382 |
+
print("Tried creating symlink {}. If you need to link to "
|
| 383 |
+
"bundled shared libraries, run "
|
| 384 |
+
"pyarrow.create_library_symlinks() as root")
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
def get_library_dirs():
|
| 388 |
+
"""
|
| 389 |
+
Return lists of directories likely to contain Arrow C++ libraries for
|
| 390 |
+
linking C or Cython extensions using pyarrow
|
| 391 |
+
"""
|
| 392 |
+
package_cwd = _os.path.dirname(__file__)
|
| 393 |
+
library_dirs = [package_cwd]
|
| 394 |
+
|
| 395 |
+
def append_library_dir(library_dir):
|
| 396 |
+
if library_dir not in library_dirs:
|
| 397 |
+
library_dirs.append(library_dir)
|
| 398 |
+
|
| 399 |
+
# Search library paths via pkg-config. This is necessary if the user
|
| 400 |
+
# installed libarrow and the other shared libraries manually and they
|
| 401 |
+
# are not shipped inside the pyarrow package (see also ARROW-2976).
|
| 402 |
+
pkg_config_executable = _os.environ.get('PKG_CONFIG') or 'pkg-config'
|
| 403 |
+
for pkgname in ["arrow", "arrow_python"]:
|
| 404 |
+
if _has_pkg_config(pkgname):
|
| 405 |
+
library_dir = _read_pkg_config_variable(pkgname,
|
| 406 |
+
["--libs-only-L"])
|
| 407 |
+
# pkg-config output could be empty if Arrow is installed
|
| 408 |
+
# as a system package.
|
| 409 |
+
if library_dir:
|
| 410 |
+
if not library_dir.startswith("-L"):
|
| 411 |
+
raise ValueError(
|
| 412 |
+
"pkg-config --libs-only-L returned unexpected "
|
| 413 |
+
"value {!r}".format(library_dir))
|
| 414 |
+
append_library_dir(library_dir[2:])
|
| 415 |
+
|
| 416 |
+
if _sys.platform == 'win32':
|
| 417 |
+
# TODO(wesm): Is this necessary, or does setuptools within a conda
|
| 418 |
+
# installation add Library\lib to the linker path for MSVC?
|
| 419 |
+
python_base_install = _os.path.dirname(_sys.executable)
|
| 420 |
+
library_dir = _os.path.join(python_base_install, 'Library', 'lib')
|
| 421 |
+
|
| 422 |
+
if _os.path.exists(_os.path.join(library_dir, 'arrow.lib')):
|
| 423 |
+
append_library_dir(library_dir)
|
| 424 |
+
|
| 425 |
+
# ARROW-4074: Allow for ARROW_HOME to be set to some other directory
|
| 426 |
+
if _os.environ.get('ARROW_HOME'):
|
| 427 |
+
append_library_dir(_os.path.join(_os.environ['ARROW_HOME'], 'lib'))
|
| 428 |
+
else:
|
| 429 |
+
# Python wheels bundle the Arrow libraries in the pyarrow directory.
|
| 430 |
+
append_library_dir(_os.path.dirname(_os.path.abspath(__file__)))
|
| 431 |
+
|
| 432 |
+
return library_dirs
|
parrot/lib/python3.10/site-packages/pyarrow/_acero.pyx
ADDED
|
@@ -0,0 +1,608 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# ---------------------------------------------------------------------
|
| 19 |
+
# Low-level Acero bindings
|
| 20 |
+
|
| 21 |
+
# cython: profile=False
|
| 22 |
+
# distutils: language = c++
|
| 23 |
+
# cython: language_level = 3
|
| 24 |
+
|
| 25 |
+
from pyarrow.includes.common cimport *
|
| 26 |
+
from pyarrow.includes.libarrow cimport *
|
| 27 |
+
from pyarrow.includes.libarrow_acero cimport *
|
| 28 |
+
from pyarrow.lib cimport (Table, pyarrow_unwrap_table, pyarrow_wrap_table,
|
| 29 |
+
RecordBatchReader)
|
| 30 |
+
from pyarrow.lib import frombytes, tobytes
|
| 31 |
+
from pyarrow._compute cimport (
|
| 32 |
+
Expression, FunctionOptions, _ensure_field_ref, _true,
|
| 33 |
+
unwrap_null_placement, unwrap_sort_order
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
cdef class ExecNodeOptions(_Weakrefable):
|
| 38 |
+
"""
|
| 39 |
+
Base class for the node options.
|
| 40 |
+
|
| 41 |
+
Use one of the subclasses to construct an options object.
|
| 42 |
+
"""
|
| 43 |
+
__slots__ = () # avoid mistakingly creating attributes
|
| 44 |
+
|
| 45 |
+
cdef void init(self, const shared_ptr[CExecNodeOptions]& sp):
|
| 46 |
+
self.wrapped = sp
|
| 47 |
+
|
| 48 |
+
cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil:
|
| 49 |
+
return self.wrapped
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
cdef class _TableSourceNodeOptions(ExecNodeOptions):
|
| 53 |
+
|
| 54 |
+
def _set_options(self, Table table):
|
| 55 |
+
cdef:
|
| 56 |
+
shared_ptr[CTable] c_table
|
| 57 |
+
|
| 58 |
+
c_table = pyarrow_unwrap_table(table)
|
| 59 |
+
self.wrapped.reset(
|
| 60 |
+
new CTableSourceNodeOptions(c_table)
|
| 61 |
+
)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class TableSourceNodeOptions(_TableSourceNodeOptions):
|
| 65 |
+
"""
|
| 66 |
+
A Source node which accepts a table.
|
| 67 |
+
|
| 68 |
+
This is the option class for the "table_source" node factory.
|
| 69 |
+
|
| 70 |
+
Parameters
|
| 71 |
+
----------
|
| 72 |
+
table : pyarrow.Table
|
| 73 |
+
The table which acts as the data source.
|
| 74 |
+
"""
|
| 75 |
+
|
| 76 |
+
def __init__(self, Table table):
|
| 77 |
+
self._set_options(table)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
cdef class _FilterNodeOptions(ExecNodeOptions):
|
| 81 |
+
|
| 82 |
+
def _set_options(self, Expression filter_expression not None):
|
| 83 |
+
self.wrapped.reset(
|
| 84 |
+
new CFilterNodeOptions(<CExpression>filter_expression.unwrap())
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class FilterNodeOptions(_FilterNodeOptions):
|
| 89 |
+
"""
|
| 90 |
+
Make a node which excludes some rows from batches passed through it.
|
| 91 |
+
|
| 92 |
+
This is the option class for the "filter" node factory.
|
| 93 |
+
|
| 94 |
+
The "filter" operation provides an option to define data filtering
|
| 95 |
+
criteria. It selects rows where the given expression evaluates to true.
|
| 96 |
+
Filters can be written using pyarrow.compute.Expression, and the
|
| 97 |
+
expression must have a return type of boolean.
|
| 98 |
+
|
| 99 |
+
Parameters
|
| 100 |
+
----------
|
| 101 |
+
filter_expression : pyarrow.compute.Expression
|
| 102 |
+
"""
|
| 103 |
+
|
| 104 |
+
def __init__(self, Expression filter_expression):
|
| 105 |
+
self._set_options(filter_expression)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
cdef class _ProjectNodeOptions(ExecNodeOptions):
|
| 109 |
+
|
| 110 |
+
def _set_options(self, expressions, names=None):
|
| 111 |
+
cdef:
|
| 112 |
+
Expression expr
|
| 113 |
+
vector[CExpression] c_expressions
|
| 114 |
+
vector[c_string] c_names
|
| 115 |
+
|
| 116 |
+
for expr in expressions:
|
| 117 |
+
c_expressions.push_back(expr.unwrap())
|
| 118 |
+
|
| 119 |
+
if names is not None:
|
| 120 |
+
if len(names) != len(expressions):
|
| 121 |
+
raise ValueError(
|
| 122 |
+
"The number of names should be equal to the number of expressions"
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
for name in names:
|
| 126 |
+
c_names.push_back(<c_string>tobytes(name))
|
| 127 |
+
|
| 128 |
+
self.wrapped.reset(
|
| 129 |
+
new CProjectNodeOptions(c_expressions, c_names)
|
| 130 |
+
)
|
| 131 |
+
else:
|
| 132 |
+
self.wrapped.reset(
|
| 133 |
+
new CProjectNodeOptions(c_expressions)
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class ProjectNodeOptions(_ProjectNodeOptions):
|
| 138 |
+
"""
|
| 139 |
+
Make a node which executes expressions on input batches,
|
| 140 |
+
producing batches of the same length with new columns.
|
| 141 |
+
|
| 142 |
+
This is the option class for the "project" node factory.
|
| 143 |
+
|
| 144 |
+
The "project" operation rearranges, deletes, transforms, and
|
| 145 |
+
creates columns. Each output column is computed by evaluating
|
| 146 |
+
an expression against the source record batch. These must be
|
| 147 |
+
scalar expressions (expressions consisting of scalar literals,
|
| 148 |
+
field references and scalar functions, i.e. elementwise functions
|
| 149 |
+
that return one value for each input row independent of the value
|
| 150 |
+
of all other rows).
|
| 151 |
+
|
| 152 |
+
Parameters
|
| 153 |
+
----------
|
| 154 |
+
expressions : list of pyarrow.compute.Expression
|
| 155 |
+
List of expressions to evaluate against the source batch. This must
|
| 156 |
+
be scalar expressions.
|
| 157 |
+
names : list of str, optional
|
| 158 |
+
List of names for each of the output columns (same length as
|
| 159 |
+
`expressions`). If `names` is not provided, the string
|
| 160 |
+
representations of exprs will be used.
|
| 161 |
+
"""
|
| 162 |
+
|
| 163 |
+
def __init__(self, expressions, names=None):
|
| 164 |
+
self._set_options(expressions, names)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
cdef class _AggregateNodeOptions(ExecNodeOptions):
|
| 168 |
+
|
| 169 |
+
def _set_options(self, aggregates, keys=None):
|
| 170 |
+
cdef:
|
| 171 |
+
CAggregate c_aggr
|
| 172 |
+
vector[CAggregate] c_aggregations
|
| 173 |
+
vector[CFieldRef] c_keys
|
| 174 |
+
|
| 175 |
+
for arg_names, func_name, opts, name in aggregates:
|
| 176 |
+
c_aggr.function = tobytes(func_name)
|
| 177 |
+
if opts is not None:
|
| 178 |
+
c_aggr.options = (<FunctionOptions?>opts).wrapped
|
| 179 |
+
else:
|
| 180 |
+
c_aggr.options = <shared_ptr[CFunctionOptions]>nullptr
|
| 181 |
+
if not isinstance(arg_names, (list, tuple)):
|
| 182 |
+
arg_names = [arg_names]
|
| 183 |
+
for arg in arg_names:
|
| 184 |
+
c_aggr.target.push_back(_ensure_field_ref(arg))
|
| 185 |
+
c_aggr.name = tobytes(name)
|
| 186 |
+
|
| 187 |
+
c_aggregations.push_back(move(c_aggr))
|
| 188 |
+
|
| 189 |
+
if keys is None:
|
| 190 |
+
keys = []
|
| 191 |
+
for name in keys:
|
| 192 |
+
c_keys.push_back(_ensure_field_ref(name))
|
| 193 |
+
|
| 194 |
+
self.wrapped.reset(
|
| 195 |
+
new CAggregateNodeOptions(c_aggregations, c_keys)
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
class AggregateNodeOptions(_AggregateNodeOptions):
|
| 200 |
+
"""
|
| 201 |
+
Make a node which aggregates input batches, optionally grouped by keys.
|
| 202 |
+
|
| 203 |
+
This is the option class for the "aggregate" node factory.
|
| 204 |
+
|
| 205 |
+
Acero supports two types of aggregates: "scalar" aggregates,
|
| 206 |
+
and "hash" aggregates. Scalar aggregates reduce an array or scalar
|
| 207 |
+
input to a single scalar output (e.g. computing the mean of a column).
|
| 208 |
+
Hash aggregates act like GROUP BY in SQL and first partition data
|
| 209 |
+
based on one or more key columns, then reduce the data in each partition.
|
| 210 |
+
The aggregate node supports both types of computation, and can compute
|
| 211 |
+
any number of aggregations at once.
|
| 212 |
+
|
| 213 |
+
Parameters
|
| 214 |
+
----------
|
| 215 |
+
aggregates : list of tuples
|
| 216 |
+
Aggregations which will be applied to the targeted fields.
|
| 217 |
+
Specified as a list of tuples, where each tuple is one aggregation
|
| 218 |
+
specification and consists of: aggregation target column(s) followed
|
| 219 |
+
by function name, aggregation function options object and the
|
| 220 |
+
output field name.
|
| 221 |
+
The target column(s) specification can be a single field reference,
|
| 222 |
+
an empty list or a list of fields unary, nullary and n-ary aggregation
|
| 223 |
+
functions respectively. Each field reference can be a string
|
| 224 |
+
column name or expression.
|
| 225 |
+
keys : list of field references, optional
|
| 226 |
+
Keys by which aggregations will be grouped. Each key can reference
|
| 227 |
+
a field using a string name or expression.
|
| 228 |
+
"""
|
| 229 |
+
|
| 230 |
+
def __init__(self, aggregates, keys=None):
|
| 231 |
+
self._set_options(aggregates, keys)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
cdef class _OrderByNodeOptions(ExecNodeOptions):
|
| 235 |
+
|
| 236 |
+
def _set_options(self, sort_keys, null_placement):
|
| 237 |
+
cdef:
|
| 238 |
+
vector[CSortKey] c_sort_keys
|
| 239 |
+
|
| 240 |
+
for name, order in sort_keys:
|
| 241 |
+
c_sort_keys.push_back(
|
| 242 |
+
CSortKey(_ensure_field_ref(name), unwrap_sort_order(order))
|
| 243 |
+
)
|
| 244 |
+
|
| 245 |
+
self.wrapped.reset(
|
| 246 |
+
new COrderByNodeOptions(
|
| 247 |
+
COrdering(c_sort_keys, unwrap_null_placement(null_placement))
|
| 248 |
+
)
|
| 249 |
+
)
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
class OrderByNodeOptions(_OrderByNodeOptions):
|
| 253 |
+
"""
|
| 254 |
+
Make a node which applies a new ordering to the data.
|
| 255 |
+
|
| 256 |
+
Currently this node works by accumulating all data, sorting, and then
|
| 257 |
+
emitting the new data with an updated batch index.
|
| 258 |
+
Larger-than-memory sort is not currently supported.
|
| 259 |
+
|
| 260 |
+
This is the option class for the "order_by" node factory.
|
| 261 |
+
|
| 262 |
+
Parameters
|
| 263 |
+
----------
|
| 264 |
+
sort_keys : sequence of (name, order) tuples
|
| 265 |
+
Names of field/column keys to sort the input on,
|
| 266 |
+
along with the order each field/column is sorted in.
|
| 267 |
+
Accepted values for `order` are "ascending", "descending".
|
| 268 |
+
Each field reference can be a string column name or expression.
|
| 269 |
+
null_placement : str, default "at_end"
|
| 270 |
+
Where nulls in input should be sorted, only applying to
|
| 271 |
+
columns/fields mentioned in `sort_keys`.
|
| 272 |
+
Accepted values are "at_start", "at_end".
|
| 273 |
+
"""
|
| 274 |
+
|
| 275 |
+
def __init__(self, sort_keys=(), *, null_placement="at_end"):
|
| 276 |
+
self._set_options(sort_keys, null_placement)
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
cdef class _HashJoinNodeOptions(ExecNodeOptions):
|
| 280 |
+
|
| 281 |
+
def _set_options(
|
| 282 |
+
self, join_type, left_keys, right_keys, left_output=None, right_output=None,
|
| 283 |
+
output_suffix_for_left="", output_suffix_for_right="",
|
| 284 |
+
):
|
| 285 |
+
cdef:
|
| 286 |
+
CJoinType c_join_type
|
| 287 |
+
vector[CFieldRef] c_left_keys
|
| 288 |
+
vector[CFieldRef] c_right_keys
|
| 289 |
+
vector[CFieldRef] c_left_output
|
| 290 |
+
vector[CFieldRef] c_right_output
|
| 291 |
+
|
| 292 |
+
# join type
|
| 293 |
+
if join_type == "left semi":
|
| 294 |
+
c_join_type = CJoinType_LEFT_SEMI
|
| 295 |
+
elif join_type == "right semi":
|
| 296 |
+
c_join_type = CJoinType_RIGHT_SEMI
|
| 297 |
+
elif join_type == "left anti":
|
| 298 |
+
c_join_type = CJoinType_LEFT_ANTI
|
| 299 |
+
elif join_type == "right anti":
|
| 300 |
+
c_join_type = CJoinType_RIGHT_ANTI
|
| 301 |
+
elif join_type == "inner":
|
| 302 |
+
c_join_type = CJoinType_INNER
|
| 303 |
+
elif join_type == "left outer":
|
| 304 |
+
c_join_type = CJoinType_LEFT_OUTER
|
| 305 |
+
elif join_type == "right outer":
|
| 306 |
+
c_join_type = CJoinType_RIGHT_OUTER
|
| 307 |
+
elif join_type == "full outer":
|
| 308 |
+
c_join_type = CJoinType_FULL_OUTER
|
| 309 |
+
else:
|
| 310 |
+
raise ValueError("Unsupported join type")
|
| 311 |
+
|
| 312 |
+
# left/right keys
|
| 313 |
+
if not isinstance(left_keys, (list, tuple)):
|
| 314 |
+
left_keys = [left_keys]
|
| 315 |
+
for key in left_keys:
|
| 316 |
+
c_left_keys.push_back(_ensure_field_ref(key))
|
| 317 |
+
if not isinstance(right_keys, (list, tuple)):
|
| 318 |
+
right_keys = [right_keys]
|
| 319 |
+
for key in right_keys:
|
| 320 |
+
c_right_keys.push_back(_ensure_field_ref(key))
|
| 321 |
+
|
| 322 |
+
# left/right output fields
|
| 323 |
+
if left_output is not None and right_output is not None:
|
| 324 |
+
for colname in left_output:
|
| 325 |
+
c_left_output.push_back(_ensure_field_ref(colname))
|
| 326 |
+
for colname in right_output:
|
| 327 |
+
c_right_output.push_back(_ensure_field_ref(colname))
|
| 328 |
+
|
| 329 |
+
self.wrapped.reset(
|
| 330 |
+
new CHashJoinNodeOptions(
|
| 331 |
+
c_join_type, c_left_keys, c_right_keys,
|
| 332 |
+
c_left_output, c_right_output,
|
| 333 |
+
_true,
|
| 334 |
+
<c_string>tobytes(output_suffix_for_left),
|
| 335 |
+
<c_string>tobytes(output_suffix_for_right)
|
| 336 |
+
)
|
| 337 |
+
)
|
| 338 |
+
else:
|
| 339 |
+
self.wrapped.reset(
|
| 340 |
+
new CHashJoinNodeOptions(
|
| 341 |
+
c_join_type, c_left_keys, c_right_keys,
|
| 342 |
+
_true,
|
| 343 |
+
<c_string>tobytes(output_suffix_for_left),
|
| 344 |
+
<c_string>tobytes(output_suffix_for_right)
|
| 345 |
+
)
|
| 346 |
+
)
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
class HashJoinNodeOptions(_HashJoinNodeOptions):
|
| 350 |
+
"""
|
| 351 |
+
Make a node which implements join operation using hash join strategy.
|
| 352 |
+
|
| 353 |
+
This is the option class for the "hashjoin" node factory.
|
| 354 |
+
|
| 355 |
+
Parameters
|
| 356 |
+
----------
|
| 357 |
+
join_type : str
|
| 358 |
+
Type of join. One of "left semi", "right semi", "left anti",
|
| 359 |
+
"right anti", "inner", "left outer", "right outer", "full outer".
|
| 360 |
+
left_keys : str, Expression or list
|
| 361 |
+
Key fields from left input. Each key can be a string column name
|
| 362 |
+
or a field expression, or a list of such field references.
|
| 363 |
+
right_keys : str, Expression or list
|
| 364 |
+
Key fields from right input. See `left_keys` for details.
|
| 365 |
+
left_output : list, optional
|
| 366 |
+
List of output fields passed from left input. If left and right
|
| 367 |
+
output fields are not specified, all valid fields from both left and
|
| 368 |
+
right input will be output. Each field can be a string column name
|
| 369 |
+
or a field expression.
|
| 370 |
+
right_output : list, optional
|
| 371 |
+
List of output fields passed from right input. If left and right
|
| 372 |
+
output fields are not specified, all valid fields from both left and
|
| 373 |
+
right input will be output. Each field can be a string column name
|
| 374 |
+
or a field expression.
|
| 375 |
+
output_suffix_for_left : str
|
| 376 |
+
Suffix added to names of output fields coming from left input
|
| 377 |
+
(used to distinguish, if necessary, between fields of the same
|
| 378 |
+
name in left and right input and can be left empty if there are
|
| 379 |
+
no name collisions).
|
| 380 |
+
output_suffix_for_right : str
|
| 381 |
+
Suffix added to names of output fields coming from right input,
|
| 382 |
+
see `output_suffix_for_left` for details.
|
| 383 |
+
"""
|
| 384 |
+
|
| 385 |
+
def __init__(
|
| 386 |
+
self, join_type, left_keys, right_keys, left_output=None, right_output=None,
|
| 387 |
+
output_suffix_for_left="", output_suffix_for_right=""
|
| 388 |
+
):
|
| 389 |
+
self._set_options(
|
| 390 |
+
join_type, left_keys, right_keys, left_output, right_output,
|
| 391 |
+
output_suffix_for_left, output_suffix_for_right
|
| 392 |
+
)
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
cdef class _AsofJoinNodeOptions(ExecNodeOptions):
|
| 396 |
+
|
| 397 |
+
def _set_options(self, left_on, left_by, right_on, right_by, tolerance):
|
| 398 |
+
cdef:
|
| 399 |
+
vector[CFieldRef] c_left_by
|
| 400 |
+
vector[CFieldRef] c_right_by
|
| 401 |
+
CAsofJoinKeys c_left_keys
|
| 402 |
+
CAsofJoinKeys c_right_keys
|
| 403 |
+
vector[CAsofJoinKeys] c_input_keys
|
| 404 |
+
|
| 405 |
+
# Prepare left AsofJoinNodeOption::Keys
|
| 406 |
+
if not isinstance(left_by, (list, tuple)):
|
| 407 |
+
left_by = [left_by]
|
| 408 |
+
for key in left_by:
|
| 409 |
+
c_left_by.push_back(_ensure_field_ref(key))
|
| 410 |
+
|
| 411 |
+
c_left_keys.on_key = _ensure_field_ref(left_on)
|
| 412 |
+
c_left_keys.by_key = c_left_by
|
| 413 |
+
|
| 414 |
+
c_input_keys.push_back(c_left_keys)
|
| 415 |
+
|
| 416 |
+
# Prepare right AsofJoinNodeOption::Keys
|
| 417 |
+
if not isinstance(right_by, (list, tuple)):
|
| 418 |
+
right_by = [right_by]
|
| 419 |
+
for key in right_by:
|
| 420 |
+
c_right_by.push_back(_ensure_field_ref(key))
|
| 421 |
+
|
| 422 |
+
c_right_keys.on_key = _ensure_field_ref(right_on)
|
| 423 |
+
c_right_keys.by_key = c_right_by
|
| 424 |
+
|
| 425 |
+
c_input_keys.push_back(c_right_keys)
|
| 426 |
+
|
| 427 |
+
self.wrapped.reset(
|
| 428 |
+
new CAsofJoinNodeOptions(
|
| 429 |
+
c_input_keys,
|
| 430 |
+
tolerance,
|
| 431 |
+
)
|
| 432 |
+
)
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
class AsofJoinNodeOptions(_AsofJoinNodeOptions):
|
| 436 |
+
"""
|
| 437 |
+
Make a node which implements 'as of join' operation.
|
| 438 |
+
|
| 439 |
+
This is the option class for the "asofjoin" node factory.
|
| 440 |
+
|
| 441 |
+
Parameters
|
| 442 |
+
----------
|
| 443 |
+
left_on : str, Expression
|
| 444 |
+
The left key on which the join operation should be performed.
|
| 445 |
+
Can be a string column name or a field expression.
|
| 446 |
+
|
| 447 |
+
An inexact match is used on the "on" key, i.e. a row is considered a
|
| 448 |
+
match if and only if left_on - tolerance <= right_on <= left_on.
|
| 449 |
+
|
| 450 |
+
The input dataset must be sorted by the "on" key. Must be a single
|
| 451 |
+
field of a common type.
|
| 452 |
+
|
| 453 |
+
Currently, the "on" key must be an integer, date, or timestamp type.
|
| 454 |
+
left_by: str, Expression or list
|
| 455 |
+
The left keys on which the join operation should be performed.
|
| 456 |
+
Exact equality is used for each field of the "by" keys.
|
| 457 |
+
Each key can be a string column name or a field expression,
|
| 458 |
+
or a list of such field references.
|
| 459 |
+
right_on : str, Expression
|
| 460 |
+
The right key on which the join operation should be performed.
|
| 461 |
+
See `left_on` for details.
|
| 462 |
+
right_by: str, Expression or list
|
| 463 |
+
The right keys on which the join operation should be performed.
|
| 464 |
+
See `left_by` for details.
|
| 465 |
+
tolerance : int
|
| 466 |
+
The tolerance to use for the asof join. The tolerance is interpreted in
|
| 467 |
+
the same units as the "on" key.
|
| 468 |
+
"""
|
| 469 |
+
|
| 470 |
+
def __init__(self, left_on, left_by, right_on, right_by, tolerance):
|
| 471 |
+
self._set_options(left_on, left_by, right_on, right_by, tolerance)
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
cdef class Declaration(_Weakrefable):
|
| 475 |
+
"""
|
| 476 |
+
Helper class for declaring the nodes of an ExecPlan.
|
| 477 |
+
|
| 478 |
+
A Declaration represents an unconstructed ExecNode, and potentially
|
| 479 |
+
more since its inputs may also be Declarations or when constructed
|
| 480 |
+
with ``from_sequence``.
|
| 481 |
+
|
| 482 |
+
The possible ExecNodes to use are registered with a name,
|
| 483 |
+
the "factory name", and need to be specified using this name, together
|
| 484 |
+
with its corresponding ExecNodeOptions subclass.
|
| 485 |
+
|
| 486 |
+
Parameters
|
| 487 |
+
----------
|
| 488 |
+
factory_name : str
|
| 489 |
+
The ExecNode factory name, such as "table_source", "filter",
|
| 490 |
+
"project" etc. See the ExecNodeOptions subclasses for the exact
|
| 491 |
+
factory names to use.
|
| 492 |
+
options : ExecNodeOptions
|
| 493 |
+
Corresponding ExecNodeOptions subclass (matching the factory name).
|
| 494 |
+
inputs : list of Declaration, optional
|
| 495 |
+
Input nodes for this declaration. Optional if the node is a source
|
| 496 |
+
node, or when the declaration gets combined later with
|
| 497 |
+
``from_sequence``.
|
| 498 |
+
|
| 499 |
+
Returns
|
| 500 |
+
-------
|
| 501 |
+
Declaration
|
| 502 |
+
"""
|
| 503 |
+
cdef void init(self, const CDeclaration& c_decl):
|
| 504 |
+
self.decl = c_decl
|
| 505 |
+
|
| 506 |
+
@staticmethod
|
| 507 |
+
cdef wrap(const CDeclaration& c_decl):
|
| 508 |
+
cdef Declaration self = Declaration.__new__(Declaration)
|
| 509 |
+
self.init(c_decl)
|
| 510 |
+
return self
|
| 511 |
+
|
| 512 |
+
cdef inline CDeclaration unwrap(self) nogil:
|
| 513 |
+
return self.decl
|
| 514 |
+
|
| 515 |
+
def __init__(self, factory_name, ExecNodeOptions options, inputs=None):
|
| 516 |
+
cdef:
|
| 517 |
+
c_string c_factory_name
|
| 518 |
+
CDeclaration c_decl
|
| 519 |
+
vector[CDeclaration.Input] c_inputs
|
| 520 |
+
|
| 521 |
+
c_factory_name = tobytes(factory_name)
|
| 522 |
+
|
| 523 |
+
if inputs is not None:
|
| 524 |
+
for ipt in inputs:
|
| 525 |
+
c_inputs.push_back(
|
| 526 |
+
CDeclaration.Input((<Declaration>ipt).unwrap())
|
| 527 |
+
)
|
| 528 |
+
|
| 529 |
+
c_decl = CDeclaration(c_factory_name, c_inputs, options.unwrap())
|
| 530 |
+
self.init(c_decl)
|
| 531 |
+
|
| 532 |
+
@staticmethod
|
| 533 |
+
def from_sequence(decls):
|
| 534 |
+
"""
|
| 535 |
+
Convenience factory for the common case of a simple sequence of nodes.
|
| 536 |
+
|
| 537 |
+
Each of the declarations will be appended to the inputs of the
|
| 538 |
+
subsequent declaration, and the final modified declaration will
|
| 539 |
+
be returned.
|
| 540 |
+
|
| 541 |
+
Parameters
|
| 542 |
+
----------
|
| 543 |
+
decls : list of Declaration
|
| 544 |
+
|
| 545 |
+
Returns
|
| 546 |
+
-------
|
| 547 |
+
Declaration
|
| 548 |
+
"""
|
| 549 |
+
cdef:
|
| 550 |
+
vector[CDeclaration] c_decls
|
| 551 |
+
CDeclaration c_decl
|
| 552 |
+
|
| 553 |
+
for decl in decls:
|
| 554 |
+
c_decls.push_back((<Declaration> decl).unwrap())
|
| 555 |
+
|
| 556 |
+
c_decl = CDeclaration.Sequence(c_decls)
|
| 557 |
+
return Declaration.wrap(c_decl)
|
| 558 |
+
|
| 559 |
+
def __str__(self):
|
| 560 |
+
return frombytes(GetResultValue(DeclarationToString(self.decl)))
|
| 561 |
+
|
| 562 |
+
def __repr__(self):
|
| 563 |
+
return "<pyarrow.acero.Declaration>\n{0}".format(str(self))
|
| 564 |
+
|
| 565 |
+
def to_table(self, bint use_threads=True):
|
| 566 |
+
"""
|
| 567 |
+
Run the declaration and collect the results into a table.
|
| 568 |
+
|
| 569 |
+
This method will implicitly add a sink node to the declaration
|
| 570 |
+
to collect results into a table. It will then create an ExecPlan
|
| 571 |
+
from the declaration, start the exec plan, block until the plan
|
| 572 |
+
has finished, and return the created table.
|
| 573 |
+
|
| 574 |
+
Parameters
|
| 575 |
+
----------
|
| 576 |
+
use_threads : bool, default True
|
| 577 |
+
If set to False, then all CPU work will be done on the calling
|
| 578 |
+
thread. I/O tasks will still happen on the I/O executor
|
| 579 |
+
and may be multi-threaded (but should not use significant CPU
|
| 580 |
+
resources).
|
| 581 |
+
|
| 582 |
+
Returns
|
| 583 |
+
-------
|
| 584 |
+
pyarrow.Table
|
| 585 |
+
"""
|
| 586 |
+
cdef:
|
| 587 |
+
shared_ptr[CTable] c_table
|
| 588 |
+
|
| 589 |
+
with nogil:
|
| 590 |
+
c_table = GetResultValue(DeclarationToTable(self.unwrap(), use_threads))
|
| 591 |
+
return pyarrow_wrap_table(c_table)
|
| 592 |
+
|
| 593 |
+
def to_reader(self, bint use_threads=True):
|
| 594 |
+
"""Run the declaration and return results as a RecordBatchReader.
|
| 595 |
+
|
| 596 |
+
For details about the parameters, see `to_table`.
|
| 597 |
+
|
| 598 |
+
Returns
|
| 599 |
+
-------
|
| 600 |
+
pyarrow.RecordBatchReader
|
| 601 |
+
"""
|
| 602 |
+
cdef:
|
| 603 |
+
RecordBatchReader reader
|
| 604 |
+
reader = RecordBatchReader.__new__(RecordBatchReader)
|
| 605 |
+
reader.reader.reset(
|
| 606 |
+
GetResultValue(DeclarationToReader(self.unwrap(), use_threads)).release()
|
| 607 |
+
)
|
| 608 |
+
return reader
|
parrot/lib/python3.10/site-packages/pyarrow/_azurefs.pyx
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# cython: language_level = 3
|
| 19 |
+
|
| 20 |
+
from cython cimport binding
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
from pyarrow.lib import frombytes, tobytes
|
| 24 |
+
from pyarrow.includes.libarrow_fs cimport *
|
| 25 |
+
from pyarrow._fs cimport FileSystem
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
cdef class AzureFileSystem(FileSystem):
|
| 29 |
+
"""
|
| 30 |
+
Azure Blob Storage backed FileSystem implementation
|
| 31 |
+
|
| 32 |
+
This implementation supports flat namespace and hierarchical namespace (HNS) a.k.a.
|
| 33 |
+
Data Lake Gen2 storage accounts. HNS will be automatically detected and HNS specific
|
| 34 |
+
features will be used when they provide a performance advantage. Azurite emulator is
|
| 35 |
+
also supported. Note: `/` is the only supported delimiter.
|
| 36 |
+
|
| 37 |
+
The storage account is considered the root of the filesystem. When enabled, containers
|
| 38 |
+
will be created or deleted during relevant directory operations. Obviously, this also
|
| 39 |
+
requires authentication with the additional permissions.
|
| 40 |
+
|
| 41 |
+
By default `DefaultAzureCredential <https://github.com/Azure/azure-sdk-for-cpp/blob/main/sdk/identity/azure-identity/README.md#defaultazurecredential>`__
|
| 42 |
+
is used for authentication. This means it will try several types of authentication
|
| 43 |
+
and go with the first one that works. If any authentication parameters are provided when
|
| 44 |
+
initialising the FileSystem, they will be used instead of the default credential.
|
| 45 |
+
|
| 46 |
+
Parameters
|
| 47 |
+
----------
|
| 48 |
+
account_name : str
|
| 49 |
+
Azure Blob Storage account name. This is the globally unique identifier for the
|
| 50 |
+
storage account.
|
| 51 |
+
account_key : str, default None
|
| 52 |
+
Account key of the storage account. Pass None to use default credential.
|
| 53 |
+
blob_storage_authority : str, default None
|
| 54 |
+
hostname[:port] of the Blob Service. Defaults to `.blob.core.windows.net`. Useful
|
| 55 |
+
for connecting to a local emulator, like Azurite.
|
| 56 |
+
dfs_storage_authority : str, default None
|
| 57 |
+
hostname[:port] of the Data Lake Gen 2 Service. Defaults to
|
| 58 |
+
`.dfs.core.windows.net`. Useful for connecting to a local emulator, like Azurite.
|
| 59 |
+
blob_storage_scheme : str, default None
|
| 60 |
+
Either `http` or `https`. Defaults to `https`. Useful for connecting to a local
|
| 61 |
+
emulator, like Azurite.
|
| 62 |
+
dfs_storage_scheme : str, default None
|
| 63 |
+
Either `http` or `https`. Defaults to `https`. Useful for connecting to a local
|
| 64 |
+
emulator, like Azurite.
|
| 65 |
+
|
| 66 |
+
Examples
|
| 67 |
+
--------
|
| 68 |
+
>>> from pyarrow import fs
|
| 69 |
+
>>> azure_fs = fs.AzureFileSystem(account_name='myaccount')
|
| 70 |
+
>>> azurite_fs = fs.AzureFileSystem(
|
| 71 |
+
... account_name='devstoreaccount1',
|
| 72 |
+
... account_key='Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==',
|
| 73 |
+
... blob_storage_authority='127.0.0.1:10000',
|
| 74 |
+
... dfs_storage_authority='127.0.0.1:10000',
|
| 75 |
+
... blob_storage_scheme='http',
|
| 76 |
+
... dfs_storage_scheme='http',
|
| 77 |
+
... )
|
| 78 |
+
|
| 79 |
+
For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
|
| 80 |
+
"""
|
| 81 |
+
cdef:
|
| 82 |
+
CAzureFileSystem* azurefs
|
| 83 |
+
c_string account_key
|
| 84 |
+
|
| 85 |
+
def __init__(self, account_name, *, account_key=None, blob_storage_authority=None,
|
| 86 |
+
dfs_storage_authority=None, blob_storage_scheme=None,
|
| 87 |
+
dfs_storage_scheme=None):
|
| 88 |
+
cdef:
|
| 89 |
+
CAzureOptions options
|
| 90 |
+
shared_ptr[CAzureFileSystem] wrapped
|
| 91 |
+
|
| 92 |
+
options.account_name = tobytes(account_name)
|
| 93 |
+
if blob_storage_authority:
|
| 94 |
+
options.blob_storage_authority = tobytes(blob_storage_authority)
|
| 95 |
+
if dfs_storage_authority:
|
| 96 |
+
options.dfs_storage_authority = tobytes(dfs_storage_authority)
|
| 97 |
+
if blob_storage_scheme:
|
| 98 |
+
options.blob_storage_scheme = tobytes(blob_storage_scheme)
|
| 99 |
+
if dfs_storage_scheme:
|
| 100 |
+
options.dfs_storage_scheme = tobytes(dfs_storage_scheme)
|
| 101 |
+
|
| 102 |
+
if account_key:
|
| 103 |
+
options.ConfigureAccountKeyCredential(tobytes(account_key))
|
| 104 |
+
self.account_key = tobytes(account_key)
|
| 105 |
+
else:
|
| 106 |
+
options.ConfigureDefaultCredential()
|
| 107 |
+
|
| 108 |
+
with nogil:
|
| 109 |
+
wrapped = GetResultValue(CAzureFileSystem.Make(options))
|
| 110 |
+
|
| 111 |
+
self.init(<shared_ptr[CFileSystem]> wrapped)
|
| 112 |
+
|
| 113 |
+
cdef init(self, const shared_ptr[CFileSystem]& wrapped):
|
| 114 |
+
FileSystem.init(self, wrapped)
|
| 115 |
+
self.azurefs = <CAzureFileSystem*> wrapped.get()
|
| 116 |
+
|
| 117 |
+
@staticmethod
|
| 118 |
+
@binding(True) # Required for cython < 3
|
| 119 |
+
def _reconstruct(kwargs):
|
| 120 |
+
# __reduce__ doesn't allow passing named arguments directly to the
|
| 121 |
+
# reconstructor, hence this wrapper.
|
| 122 |
+
return AzureFileSystem(**kwargs)
|
| 123 |
+
|
| 124 |
+
def __reduce__(self):
|
| 125 |
+
cdef CAzureOptions opts = self.azurefs.options()
|
| 126 |
+
return (
|
| 127 |
+
AzureFileSystem._reconstruct, (dict(
|
| 128 |
+
account_name=frombytes(opts.account_name),
|
| 129 |
+
account_key=frombytes(self.account_key),
|
| 130 |
+
blob_storage_authority=frombytes(opts.blob_storage_authority),
|
| 131 |
+
dfs_storage_authority=frombytes(opts.dfs_storage_authority),
|
| 132 |
+
blob_storage_scheme=frombytes(opts.blob_storage_scheme),
|
| 133 |
+
dfs_storage_scheme=frombytes(opts.dfs_storage_scheme)
|
| 134 |
+
),))
|
parrot/lib/python3.10/site-packages/pyarrow/_compute.pyx
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/pyarrow/_csv.pyx
ADDED
|
@@ -0,0 +1,1542 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# cython: profile=False
|
| 19 |
+
# distutils: language = c++
|
| 20 |
+
# cython: language_level = 3
|
| 21 |
+
|
| 22 |
+
from cython.operator cimport dereference as deref
|
| 23 |
+
|
| 24 |
+
from collections import namedtuple
|
| 25 |
+
from collections.abc import Mapping
|
| 26 |
+
|
| 27 |
+
from pyarrow.includes.common cimport *
|
| 28 |
+
from pyarrow.includes.libarrow cimport *
|
| 29 |
+
from pyarrow.includes.libarrow_python cimport *
|
| 30 |
+
from pyarrow.lib cimport (check_status, Field, MemoryPool, Schema,
|
| 31 |
+
RecordBatchReader, ensure_type,
|
| 32 |
+
maybe_unbox_memory_pool, get_input_stream,
|
| 33 |
+
get_writer, native_transcoding_input_stream,
|
| 34 |
+
pyarrow_unwrap_batch, pyarrow_unwrap_schema,
|
| 35 |
+
pyarrow_unwrap_table, pyarrow_wrap_schema,
|
| 36 |
+
pyarrow_wrap_table, pyarrow_wrap_data_type,
|
| 37 |
+
pyarrow_unwrap_data_type, Table, RecordBatch,
|
| 38 |
+
StopToken, _CRecordBatchWriter)
|
| 39 |
+
from pyarrow.lib import frombytes, tobytes, SignalStopHandler
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
cdef unsigned char _single_char(s) except 0:
|
| 43 |
+
val = ord(s)
|
| 44 |
+
if val == 0 or val > 127:
|
| 45 |
+
raise ValueError("Expecting an ASCII character")
|
| 46 |
+
return <unsigned char> val
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
_InvalidRow = namedtuple(
|
| 50 |
+
"_InvalidRow", ("expected_columns", "actual_columns", "number", "text"),
|
| 51 |
+
module=__name__)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class InvalidRow(_InvalidRow):
|
| 55 |
+
"""
|
| 56 |
+
Description of an invalid row in a CSV file.
|
| 57 |
+
|
| 58 |
+
Parameters
|
| 59 |
+
----------
|
| 60 |
+
expected_columns : int
|
| 61 |
+
The expected number of columns in the row.
|
| 62 |
+
actual_columns : int
|
| 63 |
+
The actual number of columns in the row.
|
| 64 |
+
number : int or None
|
| 65 |
+
The physical row number if known, otherwise None.
|
| 66 |
+
text : str
|
| 67 |
+
The contents of the row.
|
| 68 |
+
"""
|
| 69 |
+
__slots__ = ()
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
cdef CInvalidRowResult _handle_invalid_row(
|
| 73 |
+
handler, const CCSVInvalidRow& c_row) except CInvalidRowResult_Error:
|
| 74 |
+
# A negative row number means undetermined (because of parallel reading)
|
| 75 |
+
row_number = c_row.number if c_row.number >= 0 else None
|
| 76 |
+
row = InvalidRow(c_row.expected_columns, c_row.actual_columns,
|
| 77 |
+
row_number, frombytes(<c_string> c_row.text))
|
| 78 |
+
result = handler(row)
|
| 79 |
+
if result == 'error':
|
| 80 |
+
return CInvalidRowResult_Error
|
| 81 |
+
elif result == 'skip':
|
| 82 |
+
return CInvalidRowResult_Skip
|
| 83 |
+
else:
|
| 84 |
+
raise ValueError("Invalid return value for invalid row handler: "
|
| 85 |
+
f"expected 'error' or 'skip', got {result!r}")
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
cdef class ReadOptions(_Weakrefable):
|
| 89 |
+
"""
|
| 90 |
+
Options for reading CSV files.
|
| 91 |
+
|
| 92 |
+
Parameters
|
| 93 |
+
----------
|
| 94 |
+
use_threads : bool, optional (default True)
|
| 95 |
+
Whether to use multiple threads to accelerate reading
|
| 96 |
+
block_size : int, optional
|
| 97 |
+
How much bytes to process at a time from the input stream.
|
| 98 |
+
This will determine multi-threading granularity as well as
|
| 99 |
+
the size of individual record batches or table chunks.
|
| 100 |
+
Minimum valid value for block size is 1
|
| 101 |
+
skip_rows : int, optional (default 0)
|
| 102 |
+
The number of rows to skip before the column names (if any)
|
| 103 |
+
and the CSV data.
|
| 104 |
+
skip_rows_after_names : int, optional (default 0)
|
| 105 |
+
The number of rows to skip after the column names.
|
| 106 |
+
This number can be larger than the number of rows in one
|
| 107 |
+
block, and empty rows are counted.
|
| 108 |
+
The order of application is as follows:
|
| 109 |
+
- `skip_rows` is applied (if non-zero);
|
| 110 |
+
- column names are read (unless `column_names` is set);
|
| 111 |
+
- `skip_rows_after_names` is applied (if non-zero).
|
| 112 |
+
column_names : list, optional
|
| 113 |
+
The column names of the target table. If empty, fall back on
|
| 114 |
+
`autogenerate_column_names`.
|
| 115 |
+
autogenerate_column_names : bool, optional (default False)
|
| 116 |
+
Whether to autogenerate column names if `column_names` is empty.
|
| 117 |
+
If true, column names will be of the form "f0", "f1"...
|
| 118 |
+
If false, column names will be read from the first CSV row
|
| 119 |
+
after `skip_rows`.
|
| 120 |
+
encoding : str, optional (default 'utf8')
|
| 121 |
+
The character encoding of the CSV data. Columns that cannot
|
| 122 |
+
decode using this encoding can still be read as Binary.
|
| 123 |
+
|
| 124 |
+
Examples
|
| 125 |
+
--------
|
| 126 |
+
|
| 127 |
+
Defining an example data:
|
| 128 |
+
|
| 129 |
+
>>> import io
|
| 130 |
+
>>> s = "1,2,3\\nFlamingo,2,2022-03-01\\nHorse,4,2022-03-02\\nBrittle stars,5,2022-03-03\\nCentipede,100,2022-03-04"
|
| 131 |
+
>>> print(s)
|
| 132 |
+
1,2,3
|
| 133 |
+
Flamingo,2,2022-03-01
|
| 134 |
+
Horse,4,2022-03-02
|
| 135 |
+
Brittle stars,5,2022-03-03
|
| 136 |
+
Centipede,100,2022-03-04
|
| 137 |
+
|
| 138 |
+
Ignore the first numbered row and substitute it with defined
|
| 139 |
+
or autogenerated column names:
|
| 140 |
+
|
| 141 |
+
>>> from pyarrow import csv
|
| 142 |
+
>>> read_options = csv.ReadOptions(
|
| 143 |
+
... column_names=["animals", "n_legs", "entry"],
|
| 144 |
+
... skip_rows=1)
|
| 145 |
+
>>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options)
|
| 146 |
+
pyarrow.Table
|
| 147 |
+
animals: string
|
| 148 |
+
n_legs: int64
|
| 149 |
+
entry: date32[day]
|
| 150 |
+
----
|
| 151 |
+
animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
|
| 152 |
+
n_legs: [[2,4,5,100]]
|
| 153 |
+
entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
|
| 154 |
+
|
| 155 |
+
>>> read_options = csv.ReadOptions(autogenerate_column_names=True,
|
| 156 |
+
... skip_rows=1)
|
| 157 |
+
>>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options)
|
| 158 |
+
pyarrow.Table
|
| 159 |
+
f0: string
|
| 160 |
+
f1: int64
|
| 161 |
+
f2: date32[day]
|
| 162 |
+
----
|
| 163 |
+
f0: [["Flamingo","Horse","Brittle stars","Centipede"]]
|
| 164 |
+
f1: [[2,4,5,100]]
|
| 165 |
+
f2: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
|
| 166 |
+
|
| 167 |
+
Remove the first 2 rows of the data:
|
| 168 |
+
|
| 169 |
+
>>> read_options = csv.ReadOptions(skip_rows_after_names=2)
|
| 170 |
+
>>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options)
|
| 171 |
+
pyarrow.Table
|
| 172 |
+
1: string
|
| 173 |
+
2: int64
|
| 174 |
+
3: date32[day]
|
| 175 |
+
----
|
| 176 |
+
1: [["Brittle stars","Centipede"]]
|
| 177 |
+
2: [[5,100]]
|
| 178 |
+
3: [[2022-03-03,2022-03-04]]
|
| 179 |
+
"""
|
| 180 |
+
|
| 181 |
+
# Avoid mistakingly creating attributes
|
| 182 |
+
__slots__ = ()
|
| 183 |
+
|
| 184 |
+
# __init__() is not called when unpickling, initialize storage here
|
| 185 |
+
def __cinit__(self, *argw, **kwargs):
|
| 186 |
+
self.options.reset(new CCSVReadOptions(CCSVReadOptions.Defaults()))
|
| 187 |
+
|
| 188 |
+
def __init__(self, *, use_threads=None, block_size=None, skip_rows=None,
|
| 189 |
+
skip_rows_after_names=None, column_names=None,
|
| 190 |
+
autogenerate_column_names=None, encoding='utf8'):
|
| 191 |
+
if use_threads is not None:
|
| 192 |
+
self.use_threads = use_threads
|
| 193 |
+
if block_size is not None:
|
| 194 |
+
self.block_size = block_size
|
| 195 |
+
if skip_rows is not None:
|
| 196 |
+
self.skip_rows = skip_rows
|
| 197 |
+
if skip_rows_after_names is not None:
|
| 198 |
+
self.skip_rows_after_names = skip_rows_after_names
|
| 199 |
+
if column_names is not None:
|
| 200 |
+
self.column_names = column_names
|
| 201 |
+
if autogenerate_column_names is not None:
|
| 202 |
+
self.autogenerate_column_names= autogenerate_column_names
|
| 203 |
+
# Python-specific option
|
| 204 |
+
self.encoding = encoding
|
| 205 |
+
|
| 206 |
+
@property
|
| 207 |
+
def use_threads(self):
|
| 208 |
+
"""
|
| 209 |
+
Whether to use multiple threads to accelerate reading.
|
| 210 |
+
"""
|
| 211 |
+
return deref(self.options).use_threads
|
| 212 |
+
|
| 213 |
+
@use_threads.setter
|
| 214 |
+
def use_threads(self, value):
|
| 215 |
+
deref(self.options).use_threads = value
|
| 216 |
+
|
| 217 |
+
@property
|
| 218 |
+
def block_size(self):
|
| 219 |
+
"""
|
| 220 |
+
How much bytes to process at a time from the input stream.
|
| 221 |
+
This will determine multi-threading granularity as well as
|
| 222 |
+
the size of individual record batches or table chunks.
|
| 223 |
+
"""
|
| 224 |
+
return deref(self.options).block_size
|
| 225 |
+
|
| 226 |
+
@block_size.setter
|
| 227 |
+
def block_size(self, value):
|
| 228 |
+
deref(self.options).block_size = value
|
| 229 |
+
|
| 230 |
+
@property
|
| 231 |
+
def skip_rows(self):
|
| 232 |
+
"""
|
| 233 |
+
The number of rows to skip before the column names (if any)
|
| 234 |
+
and the CSV data.
|
| 235 |
+
See `skip_rows_after_names` for interaction description
|
| 236 |
+
"""
|
| 237 |
+
return deref(self.options).skip_rows
|
| 238 |
+
|
| 239 |
+
@skip_rows.setter
|
| 240 |
+
def skip_rows(self, value):
|
| 241 |
+
deref(self.options).skip_rows = value
|
| 242 |
+
|
| 243 |
+
@property
|
| 244 |
+
def skip_rows_after_names(self):
|
| 245 |
+
"""
|
| 246 |
+
The number of rows to skip after the column names.
|
| 247 |
+
This number can be larger than the number of rows in one
|
| 248 |
+
block, and empty rows are counted.
|
| 249 |
+
The order of application is as follows:
|
| 250 |
+
- `skip_rows` is applied (if non-zero);
|
| 251 |
+
- column names are read (unless `column_names` is set);
|
| 252 |
+
- `skip_rows_after_names` is applied (if non-zero).
|
| 253 |
+
"""
|
| 254 |
+
return deref(self.options).skip_rows_after_names
|
| 255 |
+
|
| 256 |
+
@skip_rows_after_names.setter
|
| 257 |
+
def skip_rows_after_names(self, value):
|
| 258 |
+
deref(self.options).skip_rows_after_names = value
|
| 259 |
+
|
| 260 |
+
@property
|
| 261 |
+
def column_names(self):
|
| 262 |
+
"""
|
| 263 |
+
The column names of the target table. If empty, fall back on
|
| 264 |
+
`autogenerate_column_names`.
|
| 265 |
+
"""
|
| 266 |
+
return [frombytes(s) for s in deref(self.options).column_names]
|
| 267 |
+
|
| 268 |
+
@column_names.setter
|
| 269 |
+
def column_names(self, value):
|
| 270 |
+
deref(self.options).column_names.clear()
|
| 271 |
+
for item in value:
|
| 272 |
+
deref(self.options).column_names.push_back(tobytes(item))
|
| 273 |
+
|
| 274 |
+
@property
|
| 275 |
+
def autogenerate_column_names(self):
|
| 276 |
+
"""
|
| 277 |
+
Whether to autogenerate column names if `column_names` is empty.
|
| 278 |
+
If true, column names will be of the form "f0", "f1"...
|
| 279 |
+
If false, column names will be read from the first CSV row
|
| 280 |
+
after `skip_rows`.
|
| 281 |
+
"""
|
| 282 |
+
return deref(self.options).autogenerate_column_names
|
| 283 |
+
|
| 284 |
+
@autogenerate_column_names.setter
|
| 285 |
+
def autogenerate_column_names(self, value):
|
| 286 |
+
deref(self.options).autogenerate_column_names = value
|
| 287 |
+
|
| 288 |
+
def validate(self):
|
| 289 |
+
check_status(deref(self.options).Validate())
|
| 290 |
+
|
| 291 |
+
def equals(self, ReadOptions other):
|
| 292 |
+
"""
|
| 293 |
+
Parameters
|
| 294 |
+
----------
|
| 295 |
+
other : pyarrow.csv.ReadOptions
|
| 296 |
+
|
| 297 |
+
Returns
|
| 298 |
+
-------
|
| 299 |
+
bool
|
| 300 |
+
"""
|
| 301 |
+
return (
|
| 302 |
+
self.use_threads == other.use_threads and
|
| 303 |
+
self.block_size == other.block_size and
|
| 304 |
+
self.skip_rows == other.skip_rows and
|
| 305 |
+
self.skip_rows_after_names == other.skip_rows_after_names and
|
| 306 |
+
self.column_names == other.column_names and
|
| 307 |
+
self.autogenerate_column_names ==
|
| 308 |
+
other.autogenerate_column_names and
|
| 309 |
+
self.encoding == other.encoding
|
| 310 |
+
)
|
| 311 |
+
|
| 312 |
+
@staticmethod
|
| 313 |
+
cdef ReadOptions wrap(CCSVReadOptions options):
|
| 314 |
+
out = ReadOptions()
|
| 315 |
+
out.options.reset(new CCSVReadOptions(move(options)))
|
| 316 |
+
out.encoding = 'utf8' # No way to know this
|
| 317 |
+
return out
|
| 318 |
+
|
| 319 |
+
def __getstate__(self):
|
| 320 |
+
return (self.use_threads, self.block_size, self.skip_rows,
|
| 321 |
+
self.column_names, self.autogenerate_column_names,
|
| 322 |
+
self.encoding, self.skip_rows_after_names)
|
| 323 |
+
|
| 324 |
+
def __setstate__(self, state):
|
| 325 |
+
(self.use_threads, self.block_size, self.skip_rows,
|
| 326 |
+
self.column_names, self.autogenerate_column_names,
|
| 327 |
+
self.encoding, self.skip_rows_after_names) = state
|
| 328 |
+
|
| 329 |
+
def __eq__(self, other):
|
| 330 |
+
try:
|
| 331 |
+
return self.equals(other)
|
| 332 |
+
except TypeError:
|
| 333 |
+
return False
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
cdef class ParseOptions(_Weakrefable):
|
| 337 |
+
"""
|
| 338 |
+
Options for parsing CSV files.
|
| 339 |
+
|
| 340 |
+
Parameters
|
| 341 |
+
----------
|
| 342 |
+
delimiter : 1-character string, optional (default ',')
|
| 343 |
+
The character delimiting individual cells in the CSV data.
|
| 344 |
+
quote_char : 1-character string or False, optional (default '"')
|
| 345 |
+
The character used optionally for quoting CSV values
|
| 346 |
+
(False if quoting is not allowed).
|
| 347 |
+
double_quote : bool, optional (default True)
|
| 348 |
+
Whether two quotes in a quoted CSV value denote a single quote
|
| 349 |
+
in the data.
|
| 350 |
+
escape_char : 1-character string or False, optional (default False)
|
| 351 |
+
The character used optionally for escaping special characters
|
| 352 |
+
(False if escaping is not allowed).
|
| 353 |
+
newlines_in_values : bool, optional (default False)
|
| 354 |
+
Whether newline characters are allowed in CSV values.
|
| 355 |
+
Setting this to True reduces the performance of multi-threaded
|
| 356 |
+
CSV reading.
|
| 357 |
+
ignore_empty_lines : bool, optional (default True)
|
| 358 |
+
Whether empty lines are ignored in CSV input.
|
| 359 |
+
If False, an empty line is interpreted as containing a single empty
|
| 360 |
+
value (assuming a one-column CSV file).
|
| 361 |
+
invalid_row_handler : callable, optional (default None)
|
| 362 |
+
If not None, this object is called for each CSV row that fails
|
| 363 |
+
parsing (because of a mismatching number of columns).
|
| 364 |
+
It should accept a single InvalidRow argument and return either
|
| 365 |
+
"skip" or "error" depending on the desired outcome.
|
| 366 |
+
|
| 367 |
+
Examples
|
| 368 |
+
--------
|
| 369 |
+
|
| 370 |
+
Defining an example file from bytes object:
|
| 371 |
+
|
| 372 |
+
>>> import io
|
| 373 |
+
>>> s = (
|
| 374 |
+
... "animals;n_legs;entry\\n"
|
| 375 |
+
... "Flamingo;2;2022-03-01\\n"
|
| 376 |
+
... "# Comment here:\\n"
|
| 377 |
+
... "Horse;4;2022-03-02\\n"
|
| 378 |
+
... "Brittle stars;5;2022-03-03\\n"
|
| 379 |
+
... "Centipede;100;2022-03-04"
|
| 380 |
+
... )
|
| 381 |
+
>>> print(s)
|
| 382 |
+
animals;n_legs;entry
|
| 383 |
+
Flamingo;2;2022-03-01
|
| 384 |
+
# Comment here:
|
| 385 |
+
Horse;4;2022-03-02
|
| 386 |
+
Brittle stars;5;2022-03-03
|
| 387 |
+
Centipede;100;2022-03-04
|
| 388 |
+
>>> source = io.BytesIO(s.encode())
|
| 389 |
+
|
| 390 |
+
Read the data from a file skipping rows with comments
|
| 391 |
+
and defining the delimiter:
|
| 392 |
+
|
| 393 |
+
>>> from pyarrow import csv
|
| 394 |
+
>>> def skip_comment(row):
|
| 395 |
+
... if row.text.startswith("# "):
|
| 396 |
+
... return 'skip'
|
| 397 |
+
... else:
|
| 398 |
+
... return 'error'
|
| 399 |
+
...
|
| 400 |
+
>>> parse_options = csv.ParseOptions(delimiter=";", invalid_row_handler=skip_comment)
|
| 401 |
+
>>> csv.read_csv(source, parse_options=parse_options)
|
| 402 |
+
pyarrow.Table
|
| 403 |
+
animals: string
|
| 404 |
+
n_legs: int64
|
| 405 |
+
entry: date32[day]
|
| 406 |
+
----
|
| 407 |
+
animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
|
| 408 |
+
n_legs: [[2,4,5,100]]
|
| 409 |
+
entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
|
| 410 |
+
"""
|
| 411 |
+
__slots__ = ()
|
| 412 |
+
|
| 413 |
+
def __cinit__(self, *argw, **kwargs):
|
| 414 |
+
self._invalid_row_handler = None
|
| 415 |
+
self.options.reset(new CCSVParseOptions(CCSVParseOptions.Defaults()))
|
| 416 |
+
|
| 417 |
+
def __init__(self, *, delimiter=None, quote_char=None, double_quote=None,
|
| 418 |
+
escape_char=None, newlines_in_values=None,
|
| 419 |
+
ignore_empty_lines=None, invalid_row_handler=None):
|
| 420 |
+
if delimiter is not None:
|
| 421 |
+
self.delimiter = delimiter
|
| 422 |
+
if quote_char is not None:
|
| 423 |
+
self.quote_char = quote_char
|
| 424 |
+
if double_quote is not None:
|
| 425 |
+
self.double_quote = double_quote
|
| 426 |
+
if escape_char is not None:
|
| 427 |
+
self.escape_char = escape_char
|
| 428 |
+
if newlines_in_values is not None:
|
| 429 |
+
self.newlines_in_values = newlines_in_values
|
| 430 |
+
if ignore_empty_lines is not None:
|
| 431 |
+
self.ignore_empty_lines = ignore_empty_lines
|
| 432 |
+
if invalid_row_handler is not None:
|
| 433 |
+
self.invalid_row_handler = invalid_row_handler
|
| 434 |
+
|
| 435 |
+
@property
|
| 436 |
+
def delimiter(self):
|
| 437 |
+
"""
|
| 438 |
+
The character delimiting individual cells in the CSV data.
|
| 439 |
+
"""
|
| 440 |
+
return chr(deref(self.options).delimiter)
|
| 441 |
+
|
| 442 |
+
@delimiter.setter
|
| 443 |
+
def delimiter(self, value):
|
| 444 |
+
deref(self.options).delimiter = _single_char(value)
|
| 445 |
+
|
| 446 |
+
@property
|
| 447 |
+
def quote_char(self):
|
| 448 |
+
"""
|
| 449 |
+
The character used optionally for quoting CSV values
|
| 450 |
+
(False if quoting is not allowed).
|
| 451 |
+
"""
|
| 452 |
+
if deref(self.options).quoting:
|
| 453 |
+
return chr(deref(self.options).quote_char)
|
| 454 |
+
else:
|
| 455 |
+
return False
|
| 456 |
+
|
| 457 |
+
@quote_char.setter
|
| 458 |
+
def quote_char(self, value):
|
| 459 |
+
if value is False:
|
| 460 |
+
deref(self.options).quoting = False
|
| 461 |
+
else:
|
| 462 |
+
deref(self.options).quote_char = _single_char(value)
|
| 463 |
+
deref(self.options).quoting = True
|
| 464 |
+
|
| 465 |
+
@property
|
| 466 |
+
def double_quote(self):
|
| 467 |
+
"""
|
| 468 |
+
Whether two quotes in a quoted CSV value denote a single quote
|
| 469 |
+
in the data.
|
| 470 |
+
"""
|
| 471 |
+
return deref(self.options).double_quote
|
| 472 |
+
|
| 473 |
+
@double_quote.setter
|
| 474 |
+
def double_quote(self, value):
|
| 475 |
+
deref(self.options).double_quote = value
|
| 476 |
+
|
| 477 |
+
@property
|
| 478 |
+
def escape_char(self):
|
| 479 |
+
"""
|
| 480 |
+
The character used optionally for escaping special characters
|
| 481 |
+
(False if escaping is not allowed).
|
| 482 |
+
"""
|
| 483 |
+
if deref(self.options).escaping:
|
| 484 |
+
return chr(deref(self.options).escape_char)
|
| 485 |
+
else:
|
| 486 |
+
return False
|
| 487 |
+
|
| 488 |
+
@escape_char.setter
|
| 489 |
+
def escape_char(self, value):
|
| 490 |
+
if value is False:
|
| 491 |
+
deref(self.options).escaping = False
|
| 492 |
+
else:
|
| 493 |
+
deref(self.options).escape_char = _single_char(value)
|
| 494 |
+
deref(self.options).escaping = True
|
| 495 |
+
|
| 496 |
+
@property
|
| 497 |
+
def newlines_in_values(self):
|
| 498 |
+
"""
|
| 499 |
+
Whether newline characters are allowed in CSV values.
|
| 500 |
+
Setting this to True reduces the performance of multi-threaded
|
| 501 |
+
CSV reading.
|
| 502 |
+
"""
|
| 503 |
+
return deref(self.options).newlines_in_values
|
| 504 |
+
|
| 505 |
+
@newlines_in_values.setter
|
| 506 |
+
def newlines_in_values(self, value):
|
| 507 |
+
deref(self.options).newlines_in_values = value
|
| 508 |
+
|
| 509 |
+
@property
|
| 510 |
+
def ignore_empty_lines(self):
|
| 511 |
+
"""
|
| 512 |
+
Whether empty lines are ignored in CSV input.
|
| 513 |
+
If False, an empty line is interpreted as containing a single empty
|
| 514 |
+
value (assuming a one-column CSV file).
|
| 515 |
+
"""
|
| 516 |
+
return deref(self.options).ignore_empty_lines
|
| 517 |
+
|
| 518 |
+
@property
|
| 519 |
+
def invalid_row_handler(self):
|
| 520 |
+
"""
|
| 521 |
+
Optional handler for invalid rows.
|
| 522 |
+
|
| 523 |
+
If not None, this object is called for each CSV row that fails
|
| 524 |
+
parsing (because of a mismatching number of columns).
|
| 525 |
+
It should accept a single InvalidRow argument and return either
|
| 526 |
+
"skip" or "error" depending on the desired outcome.
|
| 527 |
+
"""
|
| 528 |
+
return self._invalid_row_handler
|
| 529 |
+
|
| 530 |
+
@invalid_row_handler.setter
|
| 531 |
+
def invalid_row_handler(self, value):
|
| 532 |
+
if value is not None and not callable(value):
|
| 533 |
+
raise TypeError("Expected callable or None, "
|
| 534 |
+
f"got instance of {type(value)!r}")
|
| 535 |
+
self._invalid_row_handler = value
|
| 536 |
+
deref(self.options).invalid_row_handler = MakeInvalidRowHandler(
|
| 537 |
+
<function[PyInvalidRowCallback]> &_handle_invalid_row, value)
|
| 538 |
+
|
| 539 |
+
@ignore_empty_lines.setter
|
| 540 |
+
def ignore_empty_lines(self, value):
|
| 541 |
+
deref(self.options).ignore_empty_lines = value
|
| 542 |
+
|
| 543 |
+
def validate(self):
|
| 544 |
+
check_status(deref(self.options).Validate())
|
| 545 |
+
|
| 546 |
+
def equals(self, ParseOptions other):
|
| 547 |
+
"""
|
| 548 |
+
Parameters
|
| 549 |
+
----------
|
| 550 |
+
other : pyarrow.csv.ParseOptions
|
| 551 |
+
|
| 552 |
+
Returns
|
| 553 |
+
-------
|
| 554 |
+
bool
|
| 555 |
+
"""
|
| 556 |
+
return (
|
| 557 |
+
self.delimiter == other.delimiter and
|
| 558 |
+
self.quote_char == other.quote_char and
|
| 559 |
+
self.double_quote == other.double_quote and
|
| 560 |
+
self.escape_char == other.escape_char and
|
| 561 |
+
self.newlines_in_values == other.newlines_in_values and
|
| 562 |
+
self.ignore_empty_lines == other.ignore_empty_lines and
|
| 563 |
+
self._invalid_row_handler == other._invalid_row_handler
|
| 564 |
+
)
|
| 565 |
+
|
| 566 |
+
    @staticmethod
    cdef ParseOptions wrap(CCSVParseOptions options):
        # Build a Python wrapper that owns a heap copy of the given C++
        # options (move-constructed to avoid a deep copy).
        out = ParseOptions()
        out.options.reset(new CCSVParseOptions(move(options)))
        return out
|
| 571 |
+
|
| 572 |
+
    def __getstate__(self):
        # Pickle as a plain tuple of the Python-visible option values;
        # order must match __setstate__ below.
        return (self.delimiter, self.quote_char, self.double_quote,
                self.escape_char, self.newlines_in_values,
                self.ignore_empty_lines, self.invalid_row_handler)
|
| 576 |
+
|
| 577 |
+
    def __setstate__(self, state):
        # Restore via the property setters so the C++ struct is updated too.
        (self.delimiter, self.quote_char, self.double_quote,
         self.escape_char, self.newlines_in_values,
         self.ignore_empty_lines, self.invalid_row_handler) = state
|
| 581 |
+
|
| 582 |
+
    def __eq__(self, other):
        # equals() raises TypeError for non-ParseOptions arguments
        # (typed parameter); treat that as "not equal".
        try:
            return self.equals(other)
        except TypeError:
            return False
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
cdef class _ISO8601(_Weakrefable):
    """
    A special object indicating ISO-8601 parsing.
    """
    __slots__ = ()

    def __str__(self):
        return 'ISO8601'

    def __eq__(self, other):
        # All _ISO8601 instances compare equal; identity is irrelevant.
        return isinstance(other, _ISO8601)
|
| 600 |
+
|
| 601 |
+
|
| 602 |
+
# Module-level sentinel accepted in ConvertOptions.timestamp_parsers.
ISO8601 = _ISO8601()
|
| 603 |
+
|
| 604 |
+
|
| 605 |
+
cdef class ConvertOptions(_Weakrefable):
    """
    Options for converting CSV data.

    Parameters
    ----------
    check_utf8 : bool, optional (default True)
        Whether to check UTF8 validity of string columns.
    column_types : pyarrow.Schema or dict, optional
        Explicitly map column names to column types. Passing this argument
        disables type inference on the defined columns.
    null_values : list, optional
        A sequence of strings that denote nulls in the data
        (defaults are appropriate in most cases). Note that by default,
        string columns are not checked for null values. To enable
        null checking for those, specify ``strings_can_be_null=True``.
    true_values : list, optional
        A sequence of strings that denote true booleans in the data
        (defaults are appropriate in most cases).
    false_values : list, optional
        A sequence of strings that denote false booleans in the data
        (defaults are appropriate in most cases).
    decimal_point : 1-character string, optional (default '.')
        The character used as decimal point in floating-point and decimal
        data.
    strings_can_be_null : bool, optional (default False)
        Whether string / binary columns can have null values.
        If true, then strings in null_values are considered null for
        string columns.
        If false, then all strings are valid string values.
    quoted_strings_can_be_null : bool, optional (default True)
        Whether quoted values can be null.
        If true, then strings in "null_values" are also considered null
        when they appear quoted in the CSV file. Otherwise, quoted values
        are never considered null.
    include_columns : list, optional
        The names of columns to include in the Table.
        If empty, the Table will include all columns from the CSV file.
        If not empty, only these columns will be included, in this order.
    include_missing_columns : bool, optional (default False)
        If false, columns in `include_columns` but not in the CSV file will
        error out.
        If true, columns in `include_columns` but not in the CSV file will
        produce a column of nulls (whose type is selected using
        `column_types`, or null by default).
        This option is ignored if `include_columns` is empty.
    auto_dict_encode : bool, optional (default False)
        Whether to try to automatically dict-encode string / binary data.
        If true, then when type inference detects a string or binary column,
        it is dict-encoded up to `auto_dict_max_cardinality` distinct values
        (per chunk), after which it switches to regular encoding.
        This setting is ignored for non-inferred columns (those in
        `column_types`).
    auto_dict_max_cardinality : int, optional
        The maximum dictionary cardinality for `auto_dict_encode`.
        This value is per chunk.
    timestamp_parsers : list, optional
        A sequence of strptime()-compatible format strings, tried in order
        when attempting to infer or convert timestamp values (the special
        value ISO8601() can also be given). By default, a fast built-in
        ISO-8601 parser is used.

    Examples
    --------

    Defining an example data:

    >>> import io
    >>> s = (
    ...     "animals,n_legs,entry,fast\\n"
    ...     "Flamingo,2,01/03/2022,Yes\\n"
    ...     "Horse,4,02/03/2022,Yes\\n"
    ...     "Brittle stars,5,03/03/2022,No\\n"
    ...     "Centipede,100,04/03/2022,No\\n"
    ...     ",6,05/03/2022,"
    ... )
    >>> print(s)
    animals,n_legs,entry,fast
    Flamingo,2,01/03/2022,Yes
    Horse,4,02/03/2022,Yes
    Brittle stars,5,03/03/2022,No
    Centipede,100,04/03/2022,No
    ,6,05/03/2022,

    Change the type of a column:

    >>> import pyarrow as pa
    >>> from pyarrow import csv
    >>> convert_options = csv.ConvertOptions(column_types={"n_legs": pa.float64()})
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: double
    entry: string
    fast: string
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
    n_legs: [[2,4,5,100,6]]
    entry: [["01/03/2022","02/03/2022","03/03/2022","04/03/2022","05/03/2022"]]
    fast: [["Yes","Yes","No","No",""]]

    Define a date parsing format to get a timestamp type column
    (in case dates are not in ISO format and not converted by default):

    >>> convert_options = csv.ConvertOptions(
    ...     timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"])
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    entry: timestamp[s]
    fast: string
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
    n_legs: [[2,4,5,100,6]]
    entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]]
    fast: [["Yes","Yes","No","No",""]]

    Specify a subset of columns to be read:

    >>> convert_options = csv.ConvertOptions(
    ...     include_columns=["animals", "n_legs"])
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
    n_legs: [[2,4,5,100,6]]

    List additional column to be included as a null typed column:

    >>> convert_options = csv.ConvertOptions(
    ...     include_columns=["animals", "n_legs", "location"],
    ...     include_missing_columns=True)
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    location: null
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
    n_legs: [[2,4,5,100,6]]
    location: [5 nulls]

    Define columns as dictionary type (by default only the
    string/binary columns are dictionary encoded):

    >>> convert_options = csv.ConvertOptions(
    ...     timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"],
    ...     auto_dict_encode=True)
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: dictionary<values=string, indices=int32, ordered=0>
    n_legs: int64
    entry: timestamp[s]
    fast: dictionary<values=string, indices=int32, ordered=0>
    ----
    animals: [  -- dictionary:
    ["Flamingo","Horse","Brittle stars","Centipede",""]  -- indices:
    [0,1,2,3,4]]
    n_legs: [[2,4,5,100,6]]
    entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]]
    fast: [  -- dictionary:
    ["Yes","No",""]  -- indices:
    [0,0,1,1,2]]

    Set upper limit for the number of categories. If the categories
    is more than the limit, the conversion to dictionary will not
    happen:

    >>> convert_options = csv.ConvertOptions(
    ...     include_columns=["animals"],
    ...     auto_dict_encode=True,
    ...     auto_dict_max_cardinality=2)
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]

    Set empty strings to missing values:

    >>> convert_options = csv.ConvertOptions(include_columns=["animals", "n_legs"],
    ...                                      strings_can_be_null=True)
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",null]]
    n_legs: [[2,4,5,100,6]]

    Define values to be True and False when converting a column
    into a bool type:

    >>> convert_options = csv.ConvertOptions(
    ...     include_columns=["fast"],
    ...     false_values=["No"],
    ...     true_values=["Yes"])
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    fast: bool
    ----
    fast: [[true,true,false,false,null]]
    """

    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __cinit__(self, *argw, **kwargs):
        # Always start from the C++ defaults; __init__ then overrides
        # only the keywords actually given.
        self.options.reset(
            new CCSVConvertOptions(CCSVConvertOptions.Defaults()))

    def __init__(self, *, check_utf8=None, column_types=None, null_values=None,
                 true_values=None, false_values=None, decimal_point=None,
                 strings_can_be_null=None, quoted_strings_can_be_null=None,
                 include_columns=None, include_missing_columns=None,
                 auto_dict_encode=None, auto_dict_max_cardinality=None,
                 timestamp_parsers=None):
        if check_utf8 is not None:
            self.check_utf8 = check_utf8
        if column_types is not None:
            self.column_types = column_types
        if null_values is not None:
            self.null_values = null_values
        if true_values is not None:
            self.true_values = true_values
        if false_values is not None:
            self.false_values = false_values
        if decimal_point is not None:
            self.decimal_point = decimal_point
        if strings_can_be_null is not None:
            self.strings_can_be_null = strings_can_be_null
        if quoted_strings_can_be_null is not None:
            self.quoted_strings_can_be_null = quoted_strings_can_be_null
        if include_columns is not None:
            self.include_columns = include_columns
        if include_missing_columns is not None:
            self.include_missing_columns = include_missing_columns
        if auto_dict_encode is not None:
            self.auto_dict_encode = auto_dict_encode
        if auto_dict_max_cardinality is not None:
            self.auto_dict_max_cardinality = auto_dict_max_cardinality
        if timestamp_parsers is not None:
            self.timestamp_parsers = timestamp_parsers

    @property
    def check_utf8(self):
        """
        Whether to check UTF8 validity of string columns.
        """
        return deref(self.options).check_utf8

    @check_utf8.setter
    def check_utf8(self, value):
        deref(self.options).check_utf8 = value

    @property
    def strings_can_be_null(self):
        """
        Whether string / binary columns can have null values.
        """
        return deref(self.options).strings_can_be_null

    @strings_can_be_null.setter
    def strings_can_be_null(self, value):
        deref(self.options).strings_can_be_null = value

    @property
    def quoted_strings_can_be_null(self):
        """
        Whether quoted values can be null.
        """
        return deref(self.options).quoted_strings_can_be_null

    @quoted_strings_can_be_null.setter
    def quoted_strings_can_be_null(self, value):
        deref(self.options).quoted_strings_can_be_null = value

    @property
    def column_types(self):
        """
        Explicitly map column names to column types.
        """
        # Rebuild a Python dict from the C++ map on each access.
        d = {frombytes(item.first): pyarrow_wrap_data_type(item.second)
             for item in deref(self.options).column_types}
        return d

    @column_types.setter
    def column_types(self, value):
        cdef:
            shared_ptr[CDataType] typ

        # Accept a mapping, a Schema / iterable of Fields, or (name, type)
        # pairs; everything is normalized to (name, type) below.
        if isinstance(value, Mapping):
            value = value.items()

        deref(self.options).column_types.clear()
        for item in value:
            if isinstance(item, Field):
                k = item.name
                v = item.type
            else:
                k, v = item
            typ = pyarrow_unwrap_data_type(ensure_type(v))
            assert typ != NULL
            deref(self.options).column_types[tobytes(k)] = typ

    @property
    def null_values(self):
        """
        A sequence of strings that denote nulls in the data.
        """
        return [frombytes(x) for x in deref(self.options).null_values]

    @null_values.setter
    def null_values(self, value):
        deref(self.options).null_values = [tobytes(x) for x in value]

    @property
    def true_values(self):
        """
        A sequence of strings that denote true booleans in the data.
        """
        return [frombytes(x) for x in deref(self.options).true_values]

    @true_values.setter
    def true_values(self, value):
        deref(self.options).true_values = [tobytes(x) for x in value]

    @property
    def false_values(self):
        """
        A sequence of strings that denote false booleans in the data.
        """
        return [frombytes(x) for x in deref(self.options).false_values]

    @false_values.setter
    def false_values(self, value):
        deref(self.options).false_values = [tobytes(x) for x in value]

    @property
    def decimal_point(self):
        """
        The character used as decimal point in floating-point and decimal
        data.
        """
        # Stored as a raw char on the C++ side.
        return chr(deref(self.options).decimal_point)

    @decimal_point.setter
    def decimal_point(self, value):
        deref(self.options).decimal_point = _single_char(value)

    @property
    def auto_dict_encode(self):
        """
        Whether to try to automatically dict-encode string / binary data.
        """
        return deref(self.options).auto_dict_encode

    @auto_dict_encode.setter
    def auto_dict_encode(self, value):
        deref(self.options).auto_dict_encode = value

    @property
    def auto_dict_max_cardinality(self):
        """
        The maximum dictionary cardinality for `auto_dict_encode`.

        This value is per chunk.
        """
        return deref(self.options).auto_dict_max_cardinality

    @auto_dict_max_cardinality.setter
    def auto_dict_max_cardinality(self, value):
        deref(self.options).auto_dict_max_cardinality = value

    @property
    def include_columns(self):
        """
        The names of columns to include in the Table.

        If empty, the Table will include all columns from the CSV file.
        If not empty, only these columns will be included, in this order.
        """
        return [frombytes(s) for s in deref(self.options).include_columns]

    @include_columns.setter
    def include_columns(self, value):
        deref(self.options).include_columns.clear()
        for item in value:
            deref(self.options).include_columns.push_back(tobytes(item))

    @property
    def include_missing_columns(self):
        """
        If false, columns in `include_columns` but not in the CSV file will
        error out.
        If true, columns in `include_columns` but not in the CSV file will
        produce a null column (whose type is selected using `column_types`,
        or null by default).
        This option is ignored if `include_columns` is empty.
        """
        return deref(self.options).include_missing_columns

    @include_missing_columns.setter
    def include_missing_columns(self, value):
        deref(self.options).include_missing_columns = value

    @property
    def timestamp_parsers(self):
        """
        A sequence of strptime()-compatible format strings, tried in order
        when attempting to infer or convert timestamp values (the special
        value ISO8601() can also be given). By default, a fast built-in
        ISO-8601 parser is used.
        """
        cdef:
            shared_ptr[CTimestampParser] c_parser
            c_string kind

        # Translate each C++ parser back to its Python representation:
        # a strptime format string, or the ISO8601 sentinel.
        parsers = []
        for c_parser in deref(self.options).timestamp_parsers:
            kind = deref(c_parser).kind()
            if kind == b'strptime':
                parsers.append(frombytes(deref(c_parser).format()))
            else:
                assert kind == b'iso8601'
                parsers.append(ISO8601)

        return parsers

    @timestamp_parsers.setter
    def timestamp_parsers(self, value):
        cdef:
            vector[shared_ptr[CTimestampParser]] c_parsers

        for v in value:
            if isinstance(v, str):
                c_parsers.push_back(CTimestampParser.MakeStrptime(tobytes(v)))
            elif v == ISO8601:
                c_parsers.push_back(CTimestampParser.MakeISO8601())
            else:
                raise TypeError("Expected list of str or ISO8601 objects")

        deref(self.options).timestamp_parsers = move(c_parsers)

    @staticmethod
    cdef ConvertOptions wrap(CCSVConvertOptions options):
        # Build a Python wrapper owning a heap copy of the C++ options
        # (move-constructed to avoid a deep copy).
        out = ConvertOptions()
        out.options.reset(new CCSVConvertOptions(move(options)))
        return out

    def validate(self):
        # Delegate to C++ ConvertOptions::Validate(); raises on failure.
        check_status(deref(self.options).Validate())

    def equals(self, ConvertOptions other):
        """
        Parameters
        ----------
        other : pyarrow.csv.ConvertOptions

        Returns
        -------
        bool
        """
        return (
            self.check_utf8 == other.check_utf8 and
            self.column_types == other.column_types and
            self.null_values == other.null_values and
            self.true_values == other.true_values and
            self.false_values == other.false_values and
            self.decimal_point == other.decimal_point and
            self.timestamp_parsers == other.timestamp_parsers and
            self.strings_can_be_null == other.strings_can_be_null and
            self.quoted_strings_can_be_null ==
            other.quoted_strings_can_be_null and
            self.auto_dict_encode == other.auto_dict_encode and
            self.auto_dict_max_cardinality ==
            other.auto_dict_max_cardinality and
            self.include_columns == other.include_columns and
            self.include_missing_columns == other.include_missing_columns
        )

    def __getstate__(self):
        # Pickle as a tuple of Python-visible values; order must match
        # __setstate__ below.
        return (self.check_utf8, self.column_types, self.null_values,
                self.true_values, self.false_values, self.decimal_point,
                self.timestamp_parsers, self.strings_can_be_null,
                self.quoted_strings_can_be_null, self.auto_dict_encode,
                self.auto_dict_max_cardinality, self.include_columns,
                self.include_missing_columns)

    def __setstate__(self, state):
        # Restore via the property setters so the C++ struct is updated.
        (self.check_utf8, self.column_types, self.null_values,
         self.true_values, self.false_values, self.decimal_point,
         self.timestamp_parsers, self.strings_can_be_null,
         self.quoted_strings_can_be_null, self.auto_dict_encode,
         self.auto_dict_max_cardinality, self.include_columns,
         self.include_missing_columns) = state

    def __eq__(self, other):
        # equals() raises TypeError for non-ConvertOptions arguments;
        # treat that as "not equal".
        try:
            return self.equals(other)
        except TypeError:
            return False
|
| 1110 |
+
|
| 1111 |
+
|
| 1112 |
+
cdef _get_reader(input_file, ReadOptions read_options,
                 shared_ptr[CInputStream]* out):
    # Open `input_file` as a C++ input stream into *out.  When read options
    # are given, wrap the stream so its encoding is transcoded to UTF-8,
    # which is what the CSV reader consumes.
    use_memory_map = False
    get_input_stream(input_file, use_memory_map, out)
    if read_options is not None:
        out[0] = native_transcoding_input_stream(out[0],
                                                 read_options.encoding,
                                                 'utf8')
|
| 1120 |
+
|
| 1121 |
+
|
| 1122 |
+
cdef _get_read_options(ReadOptions read_options, CCSVReadOptions* out):
    # Copy the C++ read options into *out, falling back to defaults
    # when the caller passed None.
    if read_options is None:
        out[0] = CCSVReadOptions.Defaults()
    else:
        out[0] = deref(read_options.options)
|
| 1127 |
+
|
| 1128 |
+
|
| 1129 |
+
cdef _get_parse_options(ParseOptions parse_options, CCSVParseOptions* out):
    # Copy the C++ parse options into *out, falling back to defaults
    # when the caller passed None.
    if parse_options is None:
        out[0] = CCSVParseOptions.Defaults()
    else:
        out[0] = deref(parse_options.options)
|
| 1134 |
+
|
| 1135 |
+
|
| 1136 |
+
cdef _get_convert_options(ConvertOptions convert_options,
                          CCSVConvertOptions* out):
    # Copy the C++ convert options into *out, falling back to defaults
    # when the caller passed None.
    if convert_options is None:
        out[0] = CCSVConvertOptions.Defaults()
    else:
        out[0] = deref(convert_options.options)
|
| 1142 |
+
|
| 1143 |
+
|
| 1144 |
+
cdef class CSVStreamingReader(RecordBatchReader):
    """An object that reads record batches incrementally from a CSV file.

    Should not be instantiated directly by user code.
    """
    cdef readonly:
        # Schema of the record batches produced by this reader.
        Schema schema

    def __init__(self):
        # Construction must go through open_csv(), which uses __new__ and
        # then calls _open() with fully-resolved C++ options.
        raise TypeError("Do not call {}'s constructor directly, "
                        "use pyarrow.csv.open_csv() instead."
                        .format(self.__class__.__name__))

    # Note about cancellation: we cannot create a SignalStopHandler
    # by default here, as several CSVStreamingReader instances may be
    # created (including by the same thread). Handling cancellation
    # would require having the user pass the SignalStopHandler.
    # (in addition to solving ARROW-11853)

    cdef _open(self, shared_ptr[CInputStream] stream,
               CCSVReadOptions c_read_options,
               CCSVParseOptions c_parse_options,
               CCSVConvertOptions c_convert_options,
               MemoryPool memory_pool):
        cdef:
            shared_ptr[CSchema] c_schema
            CIOContext io_context

        io_context = CIOContext(maybe_unbox_memory_pool(memory_pool))

        # Creating the streaming reader performs blocking I/O (it reads the
        # first block to infer the schema), so release the GIL.
        with nogil:
            self.reader = <shared_ptr[CRecordBatchReader]> GetResultValue(
                CCSVStreamingReader.Make(
                    io_context, stream,
                    move(c_read_options), move(c_parse_options),
                    move(c_convert_options)))
            c_schema = self.reader.get().schema()

        self.schema = pyarrow_wrap_schema(c_schema)
|
| 1183 |
+
|
| 1184 |
+
|
| 1185 |
+
def read_csv(input_file, read_options=None, parse_options=None,
             convert_options=None, MemoryPool memory_pool=None):
    """
    Read a Table from a stream of CSV data.

    Parameters
    ----------
    input_file : string, path or file-like object
        The location of CSV data. If a string or path, and if it ends
        with a recognized compressed file extension (e.g. ".gz" or ".bz2"),
        the data is automatically decompressed when reading.
    read_options : pyarrow.csv.ReadOptions, optional
        Options for the CSV reader (see pyarrow.csv.ReadOptions constructor
        for defaults)
    parse_options : pyarrow.csv.ParseOptions, optional
        Options for the CSV parser
        (see pyarrow.csv.ParseOptions constructor for defaults)
    convert_options : pyarrow.csv.ConvertOptions, optional
        Options for converting CSV data
        (see pyarrow.csv.ConvertOptions constructor for defaults)
    memory_pool : MemoryPool, optional
        Pool to allocate Table memory from

    Returns
    -------
    :class:`pyarrow.Table`
        Contents of the CSV file as a in-memory table.

    Examples
    --------

    Defining an example file from bytes object:

    >>> import io
    >>> s = (
    ...     "animals,n_legs,entry\\n"
    ...     "Flamingo,2,2022-03-01\\n"
    ...     "Horse,4,2022-03-02\\n"
    ...     "Brittle stars,5,2022-03-03\\n"
    ...     "Centipede,100,2022-03-04"
    ... )
    >>> print(s)
    animals,n_legs,entry
    Flamingo,2,2022-03-01
    Horse,4,2022-03-02
    Brittle stars,5,2022-03-03
    Centipede,100,2022-03-04
    >>> source = io.BytesIO(s.encode())

    Reading from the file

    >>> from pyarrow import csv
    >>> csv.read_csv(source)
    pyarrow.Table
    animals: string
    n_legs: int64
    entry: date32[day]
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
    n_legs: [[2,4,5,100]]
    entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
    """
    cdef:
        shared_ptr[CInputStream] stream
        CCSVReadOptions c_read_options
        CCSVParseOptions c_parse_options
        CCSVConvertOptions c_convert_options
        CIOContext io_context
        SharedPtrNoGIL[CCSVReader] reader
        shared_ptr[CTable] table

    # Resolve the input stream and the three option structs (each falls
    # back to C++ defaults when None).
    _get_reader(input_file, read_options, &stream)
    _get_read_options(read_options, &c_read_options)
    _get_parse_options(parse_options, &c_parse_options)
    _get_convert_options(convert_options, &c_convert_options)

    # A SignalStopHandler lets e.g. Ctrl-C interrupt the blocking read
    # through the IO context's stop token.
    with SignalStopHandler() as stop_handler:
        io_context = CIOContext(
            maybe_unbox_memory_pool(memory_pool),
            (<StopToken> stop_handler.stop_token).stop_token)
        reader = GetResultValue(CCSVReader.Make(
            io_context, stream,
            c_read_options, c_parse_options, c_convert_options))

        # The actual parsing/conversion happens here; release the GIL.
        with nogil:
            table = GetResultValue(reader.get().Read())

    return pyarrow_wrap_table(table)
|
| 1273 |
+
|
| 1274 |
+
|
| 1275 |
+
def open_csv(input_file, read_options=None, parse_options=None,
             convert_options=None, MemoryPool memory_pool=None):
    """
    Open a streaming reader of CSV data.

    Reading using this function is always single-threaded.

    Parameters
    ----------
    input_file : string, path or file-like object
        The location of CSV data. If a string or path, and if it ends
        with a recognized compressed file extension (e.g. ".gz" or ".bz2"),
        the data is automatically decompressed when reading.
    read_options : pyarrow.csv.ReadOptions, optional
        Options for the CSV reader (see pyarrow.csv.ReadOptions constructor
        for defaults)
    parse_options : pyarrow.csv.ParseOptions, optional
        Options for the CSV parser
        (see pyarrow.csv.ParseOptions constructor for defaults)
    convert_options : pyarrow.csv.ConvertOptions, optional
        Options for converting CSV data
        (see pyarrow.csv.ConvertOptions constructor for defaults)
    memory_pool : MemoryPool, optional
        Pool to allocate Table memory from

    Returns
    -------
    :class:`pyarrow.csv.CSVStreamingReader`
    """
    cdef:
        shared_ptr[CInputStream] stream
        CCSVReadOptions c_read_options
        CCSVParseOptions c_parse_options
        CCSVConvertOptions c_convert_options
        CSVStreamingReader reader

    # Resolve the input stream and the three option structs (each falls
    # back to C++ defaults when None).
    _get_reader(input_file, read_options, &stream)
    _get_read_options(read_options, &c_read_options)
    _get_parse_options(parse_options, &c_parse_options)
    _get_convert_options(convert_options, &c_convert_options)

    # Bypass __init__ (which raises) and hand the moved option structs
    # to the internal opener.
    reader = CSVStreamingReader.__new__(CSVStreamingReader)
    reader._open(stream, move(c_read_options), move(c_parse_options),
                 move(c_convert_options), memory_pool)
    return reader
|
| 1320 |
+
|
| 1321 |
+
|
| 1322 |
+
def _raise_invalid_function_option(value, description, *,
|
| 1323 |
+
exception_class=ValueError):
|
| 1324 |
+
raise exception_class(f"\"{value}\" is not a valid {description}")
|
| 1325 |
+
|
| 1326 |
+
|
| 1327 |
+
cdef CQuotingStyle unwrap_quoting_style(quoting_style) except *:
|
| 1328 |
+
if quoting_style == "needed":
|
| 1329 |
+
return CQuotingStyle_Needed
|
| 1330 |
+
elif quoting_style == "all_valid":
|
| 1331 |
+
return CQuotingStyle_AllValid
|
| 1332 |
+
elif quoting_style == "none":
|
| 1333 |
+
return CQuotingStyle_None
|
| 1334 |
+
_raise_invalid_function_option(quoting_style, "quoting style")
|
| 1335 |
+
|
| 1336 |
+
|
| 1337 |
+
cdef wrap_quoting_style(quoting_style):
|
| 1338 |
+
if quoting_style == CQuotingStyle_Needed:
|
| 1339 |
+
return 'needed'
|
| 1340 |
+
elif quoting_style == CQuotingStyle_AllValid:
|
| 1341 |
+
return 'all_valid'
|
| 1342 |
+
elif quoting_style == CQuotingStyle_None:
|
| 1343 |
+
return 'none'
|
| 1344 |
+
|
| 1345 |
+
|
| 1346 |
+
cdef class WriteOptions(_Weakrefable):
|
| 1347 |
+
"""
|
| 1348 |
+
Options for writing CSV files.
|
| 1349 |
+
|
| 1350 |
+
Parameters
|
| 1351 |
+
----------
|
| 1352 |
+
include_header : bool, optional (default True)
|
| 1353 |
+
Whether to write an initial header line with column names
|
| 1354 |
+
batch_size : int, optional (default 1024)
|
| 1355 |
+
How many rows to process together when converting and writing
|
| 1356 |
+
CSV data
|
| 1357 |
+
delimiter : 1-character string, optional (default ",")
|
| 1358 |
+
The character delimiting individual cells in the CSV data.
|
| 1359 |
+
quoting_style : str, optional (default "needed")
|
| 1360 |
+
Whether to quote values, and if so, which quoting style to use.
|
| 1361 |
+
The following values are accepted:
|
| 1362 |
+
|
| 1363 |
+
- "needed" (default): only enclose values in quotes when needed.
|
| 1364 |
+
- "all_valid": enclose all valid values in quotes; nulls are not quoted.
|
| 1365 |
+
- "none": do not enclose any values in quotes; values containing
|
| 1366 |
+
special characters (such as quotes, cell delimiters or line endings)
|
| 1367 |
+
will raise an error.
|
| 1368 |
+
"""
|
| 1369 |
+
|
| 1370 |
+
# Avoid mistakingly creating attributes
|
| 1371 |
+
__slots__ = ()
|
| 1372 |
+
|
| 1373 |
+
def __init__(self, *, include_header=None, batch_size=None,
|
| 1374 |
+
delimiter=None, quoting_style=None):
|
| 1375 |
+
self.options.reset(new CCSVWriteOptions(CCSVWriteOptions.Defaults()))
|
| 1376 |
+
if include_header is not None:
|
| 1377 |
+
self.include_header = include_header
|
| 1378 |
+
if batch_size is not None:
|
| 1379 |
+
self.batch_size = batch_size
|
| 1380 |
+
if delimiter is not None:
|
| 1381 |
+
self.delimiter = delimiter
|
| 1382 |
+
if quoting_style is not None:
|
| 1383 |
+
self.quoting_style = quoting_style
|
| 1384 |
+
|
| 1385 |
+
@property
|
| 1386 |
+
def include_header(self):
|
| 1387 |
+
"""
|
| 1388 |
+
Whether to write an initial header line with column names.
|
| 1389 |
+
"""
|
| 1390 |
+
return deref(self.options).include_header
|
| 1391 |
+
|
| 1392 |
+
@include_header.setter
|
| 1393 |
+
def include_header(self, value):
|
| 1394 |
+
deref(self.options).include_header = value
|
| 1395 |
+
|
| 1396 |
+
@property
|
| 1397 |
+
def batch_size(self):
|
| 1398 |
+
"""
|
| 1399 |
+
How many rows to process together when converting and writing
|
| 1400 |
+
CSV data.
|
| 1401 |
+
"""
|
| 1402 |
+
return deref(self.options).batch_size
|
| 1403 |
+
|
| 1404 |
+
@batch_size.setter
|
| 1405 |
+
def batch_size(self, value):
|
| 1406 |
+
deref(self.options).batch_size = value
|
| 1407 |
+
|
| 1408 |
+
@property
|
| 1409 |
+
def delimiter(self):
|
| 1410 |
+
"""
|
| 1411 |
+
The character delimiting individual cells in the CSV data.
|
| 1412 |
+
"""
|
| 1413 |
+
return chr(deref(self.options).delimiter)
|
| 1414 |
+
|
| 1415 |
+
@delimiter.setter
|
| 1416 |
+
def delimiter(self, value):
|
| 1417 |
+
deref(self.options).delimiter = _single_char(value)
|
| 1418 |
+
|
| 1419 |
+
@property
|
| 1420 |
+
def quoting_style(self):
|
| 1421 |
+
"""
|
| 1422 |
+
Whether to quote values, and if so, which quoting style to use.
|
| 1423 |
+
The following values are accepted:
|
| 1424 |
+
|
| 1425 |
+
- "needed" (default): only enclose values in quotes when needed.
|
| 1426 |
+
- "all_valid": enclose all valid values in quotes; nulls are not quoted.
|
| 1427 |
+
- "none": do not enclose any values in quotes; values containing
|
| 1428 |
+
special characters (such as quotes, cell delimiters or line endings)
|
| 1429 |
+
will raise an error.
|
| 1430 |
+
"""
|
| 1431 |
+
return wrap_quoting_style(deref(self.options).quoting_style)
|
| 1432 |
+
|
| 1433 |
+
@quoting_style.setter
|
| 1434 |
+
def quoting_style(self, value):
|
| 1435 |
+
deref(self.options).quoting_style = unwrap_quoting_style(value)
|
| 1436 |
+
|
| 1437 |
+
@staticmethod
|
| 1438 |
+
cdef WriteOptions wrap(CCSVWriteOptions options):
|
| 1439 |
+
out = WriteOptions()
|
| 1440 |
+
out.options.reset(new CCSVWriteOptions(move(options)))
|
| 1441 |
+
return out
|
| 1442 |
+
|
| 1443 |
+
def validate(self):
|
| 1444 |
+
check_status(self.options.get().Validate())
|
| 1445 |
+
|
| 1446 |
+
|
| 1447 |
+
cdef _get_write_options(WriteOptions write_options, CCSVWriteOptions* out):
|
| 1448 |
+
if write_options is None:
|
| 1449 |
+
out[0] = CCSVWriteOptions.Defaults()
|
| 1450 |
+
else:
|
| 1451 |
+
out[0] = deref(write_options.options)
|
| 1452 |
+
|
| 1453 |
+
|
| 1454 |
+
def write_csv(data, output_file, write_options=None,
|
| 1455 |
+
MemoryPool memory_pool=None):
|
| 1456 |
+
"""
|
| 1457 |
+
Write record batch or table to a CSV file.
|
| 1458 |
+
|
| 1459 |
+
Parameters
|
| 1460 |
+
----------
|
| 1461 |
+
data : pyarrow.RecordBatch or pyarrow.Table
|
| 1462 |
+
The data to write.
|
| 1463 |
+
output_file : string, path, pyarrow.NativeFile, or file-like object
|
| 1464 |
+
The location where to write the CSV data.
|
| 1465 |
+
write_options : pyarrow.csv.WriteOptions
|
| 1466 |
+
Options to configure writing the CSV data.
|
| 1467 |
+
memory_pool : MemoryPool, optional
|
| 1468 |
+
Pool for temporary allocations.
|
| 1469 |
+
|
| 1470 |
+
Examples
|
| 1471 |
+
--------
|
| 1472 |
+
|
| 1473 |
+
>>> import pyarrow as pa
|
| 1474 |
+
>>> from pyarrow import csv
|
| 1475 |
+
|
| 1476 |
+
>>> legs = pa.array([2, 4, 5, 100])
|
| 1477 |
+
>>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"])
|
| 1478 |
+
>>> entry_date = pa.array(["01/03/2022", "02/03/2022",
|
| 1479 |
+
... "03/03/2022", "04/03/2022"])
|
| 1480 |
+
>>> table = pa.table([animals, legs, entry_date],
|
| 1481 |
+
... names=["animals", "n_legs", "entry"])
|
| 1482 |
+
|
| 1483 |
+
>>> csv.write_csv(table, "animals.csv")
|
| 1484 |
+
|
| 1485 |
+
>>> write_options = csv.WriteOptions(include_header=False)
|
| 1486 |
+
>>> csv.write_csv(table, "animals.csv", write_options=write_options)
|
| 1487 |
+
|
| 1488 |
+
>>> write_options = csv.WriteOptions(delimiter=";")
|
| 1489 |
+
>>> csv.write_csv(table, "animals.csv", write_options=write_options)
|
| 1490 |
+
"""
|
| 1491 |
+
cdef:
|
| 1492 |
+
shared_ptr[COutputStream] stream
|
| 1493 |
+
CCSVWriteOptions c_write_options
|
| 1494 |
+
CMemoryPool* c_memory_pool
|
| 1495 |
+
CRecordBatch* batch
|
| 1496 |
+
CTable* table
|
| 1497 |
+
_get_write_options(write_options, &c_write_options)
|
| 1498 |
+
|
| 1499 |
+
get_writer(output_file, &stream)
|
| 1500 |
+
c_memory_pool = maybe_unbox_memory_pool(memory_pool)
|
| 1501 |
+
c_write_options.io_context = CIOContext(c_memory_pool)
|
| 1502 |
+
if isinstance(data, RecordBatch):
|
| 1503 |
+
batch = pyarrow_unwrap_batch(data).get()
|
| 1504 |
+
with nogil:
|
| 1505 |
+
check_status(WriteCSV(deref(batch), c_write_options, stream.get()))
|
| 1506 |
+
elif isinstance(data, Table):
|
| 1507 |
+
table = pyarrow_unwrap_table(data).get()
|
| 1508 |
+
with nogil:
|
| 1509 |
+
check_status(WriteCSV(deref(table), c_write_options, stream.get()))
|
| 1510 |
+
else:
|
| 1511 |
+
raise TypeError(f"Expected Table or RecordBatch, got '{type(data)}'")
|
| 1512 |
+
|
| 1513 |
+
|
| 1514 |
+
cdef class CSVWriter(_CRecordBatchWriter):
|
| 1515 |
+
"""
|
| 1516 |
+
Writer to create a CSV file.
|
| 1517 |
+
|
| 1518 |
+
Parameters
|
| 1519 |
+
----------
|
| 1520 |
+
sink : str, path, pyarrow.OutputStream or file-like object
|
| 1521 |
+
The location where to write the CSV data.
|
| 1522 |
+
schema : pyarrow.Schema
|
| 1523 |
+
The schema of the data to be written.
|
| 1524 |
+
write_options : pyarrow.csv.WriteOptions
|
| 1525 |
+
Options to configure writing the CSV data.
|
| 1526 |
+
memory_pool : MemoryPool, optional
|
| 1527 |
+
Pool for temporary allocations.
|
| 1528 |
+
"""
|
| 1529 |
+
|
| 1530 |
+
def __init__(self, sink, Schema schema, *,
|
| 1531 |
+
WriteOptions write_options=None, MemoryPool memory_pool=None):
|
| 1532 |
+
cdef:
|
| 1533 |
+
shared_ptr[COutputStream] c_stream
|
| 1534 |
+
shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema)
|
| 1535 |
+
CCSVWriteOptions c_write_options
|
| 1536 |
+
CMemoryPool* c_memory_pool = maybe_unbox_memory_pool(memory_pool)
|
| 1537 |
+
_get_write_options(write_options, &c_write_options)
|
| 1538 |
+
c_write_options.io_context = CIOContext(c_memory_pool)
|
| 1539 |
+
get_writer(sink, &c_stream)
|
| 1540 |
+
with nogil:
|
| 1541 |
+
self.writer = GetResultValue(MakeCSVWriter(
|
| 1542 |
+
c_stream, c_schema, c_write_options))
|
parrot/lib/python3.10/site-packages/pyarrow/_cuda.pxd
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# cython: language_level = 3
|
| 19 |
+
|
| 20 |
+
from pyarrow.lib cimport *
|
| 21 |
+
from pyarrow.includes.common cimport *
|
| 22 |
+
from pyarrow.includes.libarrow cimport *
|
| 23 |
+
from pyarrow.includes.libarrow_cuda cimport *
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
cdef class Context(_Weakrefable):
|
| 27 |
+
cdef:
|
| 28 |
+
shared_ptr[CCudaContext] context
|
| 29 |
+
int device_number
|
| 30 |
+
|
| 31 |
+
cdef void init(self, const shared_ptr[CCudaContext]& ctx)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
cdef class IpcMemHandle(_Weakrefable):
|
| 35 |
+
cdef:
|
| 36 |
+
shared_ptr[CCudaIpcMemHandle] handle
|
| 37 |
+
|
| 38 |
+
cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
cdef class CudaBuffer(Buffer):
|
| 42 |
+
cdef:
|
| 43 |
+
shared_ptr[CCudaBuffer] cuda_buffer
|
| 44 |
+
object base
|
| 45 |
+
|
| 46 |
+
cdef void init_cuda(self,
|
| 47 |
+
const shared_ptr[CCudaBuffer]& buffer,
|
| 48 |
+
object base)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
cdef class HostBuffer(Buffer):
|
| 52 |
+
cdef:
|
| 53 |
+
shared_ptr[CCudaHostBuffer] host_buffer
|
| 54 |
+
|
| 55 |
+
cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
cdef class BufferReader(NativeFile):
|
| 59 |
+
cdef:
|
| 60 |
+
CCudaBufferReader* reader
|
| 61 |
+
CudaBuffer buffer
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
cdef class BufferWriter(NativeFile):
|
| 65 |
+
cdef:
|
| 66 |
+
CCudaBufferWriter* writer
|
| 67 |
+
CudaBuffer buffer
|
parrot/lib/python3.10/site-packages/pyarrow/_cuda.pyx
ADDED
|
@@ -0,0 +1,1058 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
from pyarrow.lib cimport *
|
| 20 |
+
from pyarrow.includes.libarrow_cuda cimport *
|
| 21 |
+
from pyarrow.lib import allocate_buffer, as_buffer, ArrowTypeError
|
| 22 |
+
from pyarrow.util import get_contiguous_span
|
| 23 |
+
cimport cpython as cp
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
cdef class Context(_Weakrefable):
|
| 27 |
+
"""
|
| 28 |
+
CUDA driver context.
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
def __init__(self, *args, **kwargs):
|
| 32 |
+
"""
|
| 33 |
+
Create a CUDA driver context for a particular device.
|
| 34 |
+
|
| 35 |
+
If a CUDA context handle is passed, it is wrapped, otherwise
|
| 36 |
+
a default CUDA context for the given device is requested.
|
| 37 |
+
|
| 38 |
+
Parameters
|
| 39 |
+
----------
|
| 40 |
+
device_number : int (default 0)
|
| 41 |
+
Specify the GPU device for which the CUDA driver context is
|
| 42 |
+
requested.
|
| 43 |
+
handle : int, optional
|
| 44 |
+
Specify CUDA handle for a shared context that has been created
|
| 45 |
+
by another library.
|
| 46 |
+
"""
|
| 47 |
+
# This method exposed because autodoc doesn't pick __cinit__
|
| 48 |
+
|
| 49 |
+
def __cinit__(self, int device_number=0, uintptr_t handle=0):
|
| 50 |
+
cdef CCudaDeviceManager* manager
|
| 51 |
+
manager = GetResultValue(CCudaDeviceManager.Instance())
|
| 52 |
+
cdef int n = manager.num_devices()
|
| 53 |
+
if device_number >= n or device_number < 0:
|
| 54 |
+
self.context.reset()
|
| 55 |
+
raise ValueError('device_number argument must be '
|
| 56 |
+
'non-negative less than %s' % (n))
|
| 57 |
+
if handle == 0:
|
| 58 |
+
self.context = GetResultValue(manager.GetContext(device_number))
|
| 59 |
+
else:
|
| 60 |
+
self.context = GetResultValue(manager.GetSharedContext(
|
| 61 |
+
device_number, <void*>handle))
|
| 62 |
+
self.device_number = device_number
|
| 63 |
+
|
| 64 |
+
@staticmethod
|
| 65 |
+
def from_numba(context=None):
|
| 66 |
+
"""
|
| 67 |
+
Create a Context instance from a Numba CUDA context.
|
| 68 |
+
|
| 69 |
+
Parameters
|
| 70 |
+
----------
|
| 71 |
+
context : {numba.cuda.cudadrv.driver.Context, None}
|
| 72 |
+
A Numba CUDA context instance.
|
| 73 |
+
If None, the current Numba context is used.
|
| 74 |
+
|
| 75 |
+
Returns
|
| 76 |
+
-------
|
| 77 |
+
shared_context : pyarrow.cuda.Context
|
| 78 |
+
Context instance.
|
| 79 |
+
"""
|
| 80 |
+
if context is None:
|
| 81 |
+
import numba.cuda
|
| 82 |
+
context = numba.cuda.current_context()
|
| 83 |
+
return Context(device_number=context.device.id,
|
| 84 |
+
handle=context.handle.value)
|
| 85 |
+
|
| 86 |
+
def to_numba(self):
|
| 87 |
+
"""
|
| 88 |
+
Convert Context to a Numba CUDA context.
|
| 89 |
+
|
| 90 |
+
Returns
|
| 91 |
+
-------
|
| 92 |
+
context : numba.cuda.cudadrv.driver.Context
|
| 93 |
+
Numba CUDA context instance.
|
| 94 |
+
"""
|
| 95 |
+
import ctypes
|
| 96 |
+
import numba.cuda
|
| 97 |
+
device = numba.cuda.gpus[self.device_number]
|
| 98 |
+
handle = ctypes.c_void_p(self.handle)
|
| 99 |
+
context = numba.cuda.cudadrv.driver.Context(device, handle)
|
| 100 |
+
|
| 101 |
+
class DummyPendingDeallocs(object):
|
| 102 |
+
# Context is managed by pyarrow
|
| 103 |
+
def add_item(self, *args, **kwargs):
|
| 104 |
+
pass
|
| 105 |
+
|
| 106 |
+
context.deallocations = DummyPendingDeallocs()
|
| 107 |
+
return context
|
| 108 |
+
|
| 109 |
+
@staticmethod
|
| 110 |
+
def get_num_devices():
|
| 111 |
+
""" Return the number of GPU devices.
|
| 112 |
+
"""
|
| 113 |
+
cdef CCudaDeviceManager* manager
|
| 114 |
+
manager = GetResultValue(CCudaDeviceManager.Instance())
|
| 115 |
+
return manager.num_devices()
|
| 116 |
+
|
| 117 |
+
@property
|
| 118 |
+
def device_number(self):
|
| 119 |
+
""" Return context device number.
|
| 120 |
+
"""
|
| 121 |
+
return self.device_number
|
| 122 |
+
|
| 123 |
+
@property
|
| 124 |
+
def handle(self):
|
| 125 |
+
""" Return pointer to context handle.
|
| 126 |
+
"""
|
| 127 |
+
return <uintptr_t>self.context.get().handle()
|
| 128 |
+
|
| 129 |
+
cdef void init(self, const shared_ptr[CCudaContext]& ctx):
|
| 130 |
+
self.context = ctx
|
| 131 |
+
|
| 132 |
+
def synchronize(self):
|
| 133 |
+
"""Blocks until the device has completed all preceding requested
|
| 134 |
+
tasks.
|
| 135 |
+
"""
|
| 136 |
+
check_status(self.context.get().Synchronize())
|
| 137 |
+
|
| 138 |
+
@property
|
| 139 |
+
def bytes_allocated(self):
|
| 140 |
+
"""Return the number of allocated bytes.
|
| 141 |
+
"""
|
| 142 |
+
return self.context.get().bytes_allocated()
|
| 143 |
+
|
| 144 |
+
def get_device_address(self, uintptr_t address):
|
| 145 |
+
"""Return the device address that is reachable from kernels running in
|
| 146 |
+
the context
|
| 147 |
+
|
| 148 |
+
Parameters
|
| 149 |
+
----------
|
| 150 |
+
address : int
|
| 151 |
+
Specify memory address value
|
| 152 |
+
|
| 153 |
+
Returns
|
| 154 |
+
-------
|
| 155 |
+
device_address : int
|
| 156 |
+
Device address accessible from device context
|
| 157 |
+
|
| 158 |
+
Notes
|
| 159 |
+
-----
|
| 160 |
+
The device address is defined as a memory address accessible
|
| 161 |
+
by device. While it is often a device memory address but it
|
| 162 |
+
can be also a host memory address, for instance, when the
|
| 163 |
+
memory is allocated as host memory (using cudaMallocHost or
|
| 164 |
+
cudaHostAlloc) or as managed memory (using cudaMallocManaged)
|
| 165 |
+
or the host memory is page-locked (using cudaHostRegister).
|
| 166 |
+
"""
|
| 167 |
+
return GetResultValue(self.context.get().GetDeviceAddress(address))
|
| 168 |
+
|
| 169 |
+
def new_buffer(self, int64_t nbytes):
|
| 170 |
+
"""Return new device buffer.
|
| 171 |
+
|
| 172 |
+
Parameters
|
| 173 |
+
----------
|
| 174 |
+
nbytes : int
|
| 175 |
+
Specify the number of bytes to be allocated.
|
| 176 |
+
|
| 177 |
+
Returns
|
| 178 |
+
-------
|
| 179 |
+
buf : CudaBuffer
|
| 180 |
+
Allocated buffer.
|
| 181 |
+
"""
|
| 182 |
+
cdef:
|
| 183 |
+
shared_ptr[CCudaBuffer] cudabuf
|
| 184 |
+
with nogil:
|
| 185 |
+
cudabuf = GetResultValue(self.context.get().Allocate(nbytes))
|
| 186 |
+
return pyarrow_wrap_cudabuffer(cudabuf)
|
| 187 |
+
|
| 188 |
+
def foreign_buffer(self, address, size, base=None):
|
| 189 |
+
"""
|
| 190 |
+
Create device buffer from address and size as a view.
|
| 191 |
+
|
| 192 |
+
The caller is responsible for allocating and freeing the
|
| 193 |
+
memory. When `address==size==0` then a new zero-sized buffer
|
| 194 |
+
is returned.
|
| 195 |
+
|
| 196 |
+
Parameters
|
| 197 |
+
----------
|
| 198 |
+
address : int
|
| 199 |
+
Specify the starting address of the buffer. The address can
|
| 200 |
+
refer to both device or host memory but it must be
|
| 201 |
+
accessible from device after mapping it with
|
| 202 |
+
`get_device_address` method.
|
| 203 |
+
size : int
|
| 204 |
+
Specify the size of device buffer in bytes.
|
| 205 |
+
base : {None, object}
|
| 206 |
+
Specify object that owns the referenced memory.
|
| 207 |
+
|
| 208 |
+
Returns
|
| 209 |
+
-------
|
| 210 |
+
cbuf : CudaBuffer
|
| 211 |
+
Device buffer as a view of device reachable memory.
|
| 212 |
+
|
| 213 |
+
"""
|
| 214 |
+
if not address and size == 0:
|
| 215 |
+
return self.new_buffer(0)
|
| 216 |
+
cdef:
|
| 217 |
+
uintptr_t c_addr = self.get_device_address(address)
|
| 218 |
+
int64_t c_size = size
|
| 219 |
+
shared_ptr[CCudaBuffer] cudabuf
|
| 220 |
+
|
| 221 |
+
cudabuf = GetResultValue(self.context.get().View(
|
| 222 |
+
<uint8_t*>c_addr, c_size))
|
| 223 |
+
return pyarrow_wrap_cudabuffer_base(cudabuf, base)
|
| 224 |
+
|
| 225 |
+
def open_ipc_buffer(self, ipc_handle):
|
| 226 |
+
""" Open existing CUDA IPC memory handle
|
| 227 |
+
|
| 228 |
+
Parameters
|
| 229 |
+
----------
|
| 230 |
+
ipc_handle : IpcMemHandle
|
| 231 |
+
Specify opaque pointer to CUipcMemHandle (driver API).
|
| 232 |
+
|
| 233 |
+
Returns
|
| 234 |
+
-------
|
| 235 |
+
buf : CudaBuffer
|
| 236 |
+
referencing device buffer
|
| 237 |
+
"""
|
| 238 |
+
handle = pyarrow_unwrap_cudaipcmemhandle(ipc_handle)
|
| 239 |
+
cdef shared_ptr[CCudaBuffer] cudabuf
|
| 240 |
+
with nogil:
|
| 241 |
+
cudabuf = GetResultValue(
|
| 242 |
+
self.context.get().OpenIpcBuffer(handle.get()[0]))
|
| 243 |
+
return pyarrow_wrap_cudabuffer(cudabuf)
|
| 244 |
+
|
| 245 |
+
def buffer_from_data(self, object data, int64_t offset=0, int64_t size=-1):
|
| 246 |
+
"""Create device buffer and initialize with data.
|
| 247 |
+
|
| 248 |
+
Parameters
|
| 249 |
+
----------
|
| 250 |
+
data : {CudaBuffer, HostBuffer, Buffer, array-like}
|
| 251 |
+
Specify data to be copied to device buffer.
|
| 252 |
+
offset : int
|
| 253 |
+
Specify the offset of input buffer for device data
|
| 254 |
+
buffering. Default: 0.
|
| 255 |
+
size : int
|
| 256 |
+
Specify the size of device buffer in bytes. Default: all
|
| 257 |
+
(starting from input offset)
|
| 258 |
+
|
| 259 |
+
Returns
|
| 260 |
+
-------
|
| 261 |
+
cbuf : CudaBuffer
|
| 262 |
+
Device buffer with copied data.
|
| 263 |
+
"""
|
| 264 |
+
is_host_data = not pyarrow_is_cudabuffer(data)
|
| 265 |
+
buf = as_buffer(data) if is_host_data else data
|
| 266 |
+
|
| 267 |
+
bsize = buf.size
|
| 268 |
+
if offset < 0 or (bsize and offset >= bsize):
|
| 269 |
+
raise ValueError('offset argument is out-of-range')
|
| 270 |
+
if size < 0:
|
| 271 |
+
size = bsize - offset
|
| 272 |
+
elif offset + size > bsize:
|
| 273 |
+
raise ValueError(
|
| 274 |
+
'requested larger slice than available in device buffer')
|
| 275 |
+
|
| 276 |
+
if offset != 0 or size != bsize:
|
| 277 |
+
buf = buf.slice(offset, size)
|
| 278 |
+
|
| 279 |
+
result = self.new_buffer(size)
|
| 280 |
+
if is_host_data:
|
| 281 |
+
result.copy_from_host(buf, position=0, nbytes=size)
|
| 282 |
+
else:
|
| 283 |
+
result.copy_from_device(buf, position=0, nbytes=size)
|
| 284 |
+
return result
|
| 285 |
+
|
| 286 |
+
def buffer_from_object(self, obj):
|
| 287 |
+
"""Create device buffer view of arbitrary object that references
|
| 288 |
+
device accessible memory.
|
| 289 |
+
|
| 290 |
+
When the object contains a non-contiguous view of device
|
| 291 |
+
accessible memory then the returned device buffer will contain
|
| 292 |
+
contiguous view of the memory, that is, including the
|
| 293 |
+
intermediate data that is otherwise invisible to the input
|
| 294 |
+
object.
|
| 295 |
+
|
| 296 |
+
Parameters
|
| 297 |
+
----------
|
| 298 |
+
obj : {object, Buffer, HostBuffer, CudaBuffer, ...}
|
| 299 |
+
Specify an object that holds (device or host) address that
|
| 300 |
+
can be accessed from device. This includes objects with
|
| 301 |
+
types defined in pyarrow.cuda as well as arbitrary objects
|
| 302 |
+
that implement the CUDA array interface as defined by numba.
|
| 303 |
+
|
| 304 |
+
Returns
|
| 305 |
+
-------
|
| 306 |
+
cbuf : CudaBuffer
|
| 307 |
+
Device buffer as a view of device accessible memory.
|
| 308 |
+
|
| 309 |
+
"""
|
| 310 |
+
if isinstance(obj, HostBuffer):
|
| 311 |
+
return self.foreign_buffer(obj.address, obj.size, base=obj)
|
| 312 |
+
elif isinstance(obj, Buffer):
|
| 313 |
+
return CudaBuffer.from_buffer(obj)
|
| 314 |
+
elif isinstance(obj, CudaBuffer):
|
| 315 |
+
return obj
|
| 316 |
+
elif hasattr(obj, '__cuda_array_interface__'):
|
| 317 |
+
desc = obj.__cuda_array_interface__
|
| 318 |
+
addr = desc['data'][0]
|
| 319 |
+
if addr is None:
|
| 320 |
+
return self.new_buffer(0)
|
| 321 |
+
import numpy as np
|
| 322 |
+
start, end = get_contiguous_span(
|
| 323 |
+
desc['shape'], desc.get('strides'),
|
| 324 |
+
np.dtype(desc['typestr']).itemsize)
|
| 325 |
+
return self.foreign_buffer(addr + start, end - start, base=obj)
|
| 326 |
+
raise ArrowTypeError('cannot create device buffer view from'
|
| 327 |
+
' `%s` object' % (type(obj)))
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
cdef class IpcMemHandle(_Weakrefable):
    """A serializable container for a CUDA IPC handle.
    """
    cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h):
        # Take shared ownership of the C++ handle; used by
        # pyarrow_wrap_cudaipcmemhandle after __new__.
        self.handle = h

    @staticmethod
    def from_buffer(Buffer opaque_handle):
        """Create IpcMemHandle from opaque buffer (e.g. from another
        process)

        Parameters
        ----------
        opaque_handle :
            a CUipcMemHandle as a const void*

        Returns
        -------
        ipc_handle : IpcMemHandle
        """
        c_buf = pyarrow_unwrap_buffer(opaque_handle)
        cdef:
            shared_ptr[CCudaIpcMemHandle] handle

        # FromBuffer deserializes the raw bytes back into a CUDA IPC handle.
        handle = GetResultValue(
            CCudaIpcMemHandle.FromBuffer(c_buf.get().data()))
        return pyarrow_wrap_cudaipcmemhandle(handle)

    def serialize(self, pool=None):
        """Write IpcMemHandle to a Buffer

        Parameters
        ----------
        pool : {MemoryPool, None}
            Specify a pool to allocate memory from

        Returns
        -------
        buf : Buffer
            The serialized buffer.
        """
        cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool)
        cdef shared_ptr[CBuffer] buf
        cdef CCudaIpcMemHandle* h = self.handle.get()
        # Serialization allocates host memory only; release the GIL while
        # the C++ side does the work.
        with nogil:
            buf = GetResultValue(h.Serialize(pool_))
        return pyarrow_wrap_buffer(buf)
| 378 |
+
|
| 379 |
+
cdef class CudaBuffer(Buffer):
    """An Arrow buffer with data located in a GPU device.

    To create a CudaBuffer instance, use Context.device_buffer().

    The memory allocated in a CudaBuffer is freed when the buffer object
    is deleted.
    """

    def __init__(self):
        # Direct construction is forbidden: instances must be created via
        # __new__ and init_cuda (see pyarrow_wrap_cudabuffer*).
        raise TypeError("Do not call CudaBuffer's constructor directly, use "
                        "`<pyarrow.Context instance>.device_buffer`"
                        " method instead.")

    cdef void init_cuda(self,
                        const shared_ptr[CCudaBuffer]& buffer,
                        object base):
        # Store the typed pointer, then initialize the Buffer base with the
        # same object upcast to CBuffer.
        self.cuda_buffer = buffer
        self.init(<shared_ptr[CBuffer]> buffer)
        # `base` keeps the owner of the device memory alive (e.g. a numba
        # MemoryPointer or a HostBuffer) for the lifetime of this view.
        self.base = base

    @staticmethod
    def from_buffer(buf):
        """ Convert back generic buffer into CudaBuffer

        Parameters
        ----------
        buf : Buffer
            Specify buffer containing CudaBuffer

        Returns
        -------
        dbuf : CudaBuffer
            Resulting device buffer.
        """
        c_buf = pyarrow_unwrap_buffer(buf)
        cuda_buffer = GetResultValue(CCudaBuffer.FromBuffer(c_buf))
        return pyarrow_wrap_cudabuffer(cuda_buffer)

    @staticmethod
    def from_numba(mem):
        """Create a CudaBuffer view from numba MemoryPointer instance.

        Parameters
        ----------
        mem : numba.cuda.cudadrv.driver.MemoryPointer

        Returns
        -------
        cbuf : CudaBuffer
            Device buffer as a view of numba MemoryPointer.
        """
        ctx = Context.from_numba(mem.context)
        # A null pointer with zero size is numba's empty allocation; map it
        # to a fresh zero-sized device buffer.
        if mem.device_pointer.value is None and mem.size==0:
            return ctx.new_buffer(0)
        # `base=mem` ties the numba allocation's lifetime to this view.
        return ctx.foreign_buffer(mem.device_pointer.value, mem.size, base=mem)

    def to_numba(self):
        """Return numba memory pointer of CudaBuffer instance.
        """
        import ctypes
        from numba.cuda.cudadrv.driver import MemoryPointer
        # NOTE(review): the returned MemoryPointer does not hold a reference
        # to this CudaBuffer — caller must keep the buffer alive.
        return MemoryPointer(self.context.to_numba(),
                             pointer=ctypes.c_void_p(self.address),
                             size=self.size)

    cdef getitem(self, int64_t i):
        # Single-byte indexing requires a device-to-host copy.
        return self.copy_to_host(position=i, nbytes=1)[0]

    def copy_to_host(self, int64_t position=0, int64_t nbytes=-1,
                     Buffer buf=None,
                     MemoryPool memory_pool=None, c_bool resizable=False):
        """Copy memory from GPU device to CPU host

        Caller is responsible for ensuring that all tasks affecting
        the memory are finished. Use

        `<CudaBuffer instance>.context.synchronize()`

        when needed.

        Parameters
        ----------
        position : int
            Specify the starting position of the source data in GPU
            device buffer. Default: 0.
        nbytes : int
            Specify the number of bytes to copy. Default: -1 (all from
            the position until host buffer is full).
        buf : Buffer
            Specify a pre-allocated output buffer in host. Default: None
            (allocate new output buffer).
        memory_pool : MemoryPool
        resizable : bool
            Specify extra arguments to allocate_buffer. Used only when
            buf is None.

        Returns
        -------
        buf : Buffer
            Output buffer in host.

        """
        # position == size is allowed only for zero-byte copies of a
        # non-empty buffer; an empty buffer accepts only position 0.
        if position < 0 or (self.size and position > self.size) \
                or (self.size == 0 and position != 0):
            raise ValueError('position argument is out-of-range')
        cdef:
            int64_t c_nbytes
        if buf is None:
            if nbytes < 0:
                # copy all starting from position to new host buffer
                c_nbytes = self.size - position
            else:
                if nbytes > self.size - position:
                    raise ValueError(
                        'requested more to copy than available from '
                        'device buffer')
                # copy nbytes starting from position to new host buffer
                c_nbytes = nbytes
            buf = allocate_buffer(c_nbytes, memory_pool=memory_pool,
                                  resizable=resizable)
        else:
            if nbytes < 0:
                # copy all from position until given host buffer is full
                c_nbytes = min(self.size - position, buf.size)
            else:
                if nbytes > buf.size:
                    raise ValueError(
                        'requested copy does not fit into host buffer')
                # copy nbytes from position to given host buffer
                c_nbytes = nbytes

        cdef:
            shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf)
            int64_t c_position = position
        # The actual cudaMemcpy runs without the GIL.
        with nogil:
            check_status(self.cuda_buffer.get()
                         .CopyToHost(c_position, c_nbytes,
                                     c_buf.get().mutable_data()))
        return buf

    def copy_from_host(self, data, int64_t position=0, int64_t nbytes=-1):
        """Copy data from host to device.

        The device buffer must be pre-allocated.

        Parameters
        ----------
        data : {Buffer, array-like}
            Specify data in host. It can be array-like that is valid
            argument to py_buffer
        position : int
            Specify the starting position of the copy in device buffer.
            Default: 0.
        nbytes : int
            Specify the number of bytes to copy. Default: -1 (all from
            source until device buffer, starting from position, is full)

        Returns
        -------
        nbytes : int
            Number of bytes copied.
        """
        if position < 0 or position > self.size:
            raise ValueError('position argument is out-of-range')
        cdef:
            int64_t c_nbytes
        # Accept anything implementing the buffer protocol.
        buf = as_buffer(data)

        if nbytes < 0:
            # copy from host buffer to device buffer starting from
            # position until device buffer is full
            c_nbytes = min(self.size - position, buf.size)
        else:
            if nbytes > buf.size:
                raise ValueError(
                    'requested more to copy than available from host buffer')
            if nbytes > self.size - position:
                raise ValueError(
                    'requested more to copy than available in device buffer')
            # copy nbytes from host buffer to device buffer starting
            # from position
            c_nbytes = nbytes

        cdef:
            shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf)
            int64_t c_position = position
        with nogil:
            check_status(self.cuda_buffer.get().
                         CopyFromHost(c_position, c_buf.get().data(),
                                      c_nbytes))
        return c_nbytes

    def copy_from_device(self, buf, int64_t position=0, int64_t nbytes=-1):
        """Copy data from device to device.

        Parameters
        ----------
        buf : CudaBuffer
            Specify source device buffer.
        position : int
            Specify the starting position of the copy in device buffer.
            Default: 0.
        nbytes : int
            Specify the number of bytes to copy. Default: -1 (all from
            source until device buffer, starting from position, is full)

        Returns
        -------
        nbytes : int
            Number of bytes copied.

        """
        if position < 0 or position > self.size:
            raise ValueError('position argument is out-of-range')
        cdef:
            int64_t c_nbytes

        if nbytes < 0:
            # copy from source device buffer to device buffer starting
            # from position until device buffer is full
            c_nbytes = min(self.size - position, buf.size)
        else:
            if nbytes > buf.size:
                raise ValueError(
                    'requested more to copy than available from device buffer')
            if nbytes > self.size - position:
                raise ValueError(
                    'requested more to copy than available in device buffer')
            # copy nbytes from source device buffer to device buffer
            # starting from position
            c_nbytes = nbytes

        cdef:
            shared_ptr[CCudaBuffer] c_buf = pyarrow_unwrap_cudabuffer(buf)
            int64_t c_position = position
            shared_ptr[CCudaContext] c_src_ctx = pyarrow_unwrap_cudacontext(
                buf.context)
            void* c_source_data = <void*>(c_buf.get().address())

        # Cross-context copies need the peer-to-peer path
        # (CopyFromAnotherDevice); same-context copies use plain
        # device-to-device copy.
        if self.context.handle != buf.context.handle:
            with nogil:
                check_status(self.cuda_buffer.get().
                             CopyFromAnotherDevice(c_src_ctx, c_position,
                                                   c_source_data, c_nbytes))
        else:
            with nogil:
                check_status(self.cuda_buffer.get().
                             CopyFromDevice(c_position, c_source_data,
                                            c_nbytes))
        return c_nbytes

    def export_for_ipc(self):
        """
        Expose this device buffer as IPC memory which can be used in other
        processes.

        After calling this function, this device memory will not be
        freed when the CudaBuffer is destructed.

        Returns
        -------
        ipc_handle : IpcMemHandle
            The exported IPC handle

        """
        cdef shared_ptr[CCudaIpcMemHandle] handle
        with nogil:
            handle = GetResultValue(self.cuda_buffer.get().ExportForIpc())
        return pyarrow_wrap_cudaipcmemhandle(handle)

    @property
    def context(self):
        """Returns the CUDA driver context of this buffer.
        """
        return pyarrow_wrap_cudacontext(self.cuda_buffer.get().context())

    def slice(self, offset=0, length=None):
        """Return slice of device buffer

        Parameters
        ----------
        offset : int, default 0
            Specify offset from the start of device buffer to slice
        length : int, default None
            Specify the length of slice (default is until end of device
            buffer starting from offset). If the length is larger than
            the data available, the returned slice will have a size of
            the available data starting from the offset.

        Returns
        -------
        sliced : CudaBuffer
            Zero-copy slice of device buffer.

        """
        if offset < 0 or (self.size and offset >= self.size):
            raise ValueError('offset argument is out-of-range')
        cdef int64_t offset_ = offset
        cdef int64_t size
        if length is None:
            size = self.size - offset_
        elif offset + length <= self.size:
            size = length
        else:
            # Over-long requests are clamped to the available bytes.
            size = self.size - offset
        # The new CCudaBuffer shares ownership with the parent, so the
        # slice keeps the device memory alive.
        parent = pyarrow_unwrap_cudabuffer(self)
        return pyarrow_wrap_cudabuffer(make_shared[CCudaBuffer](parent,
                                                                offset_, size))

    def to_pybytes(self):
        """Return device buffer content as Python bytes.
        """
        # Requires a full device-to-host copy.
        return self.copy_to_host().to_pybytes()

    def __getbuffer__(self, cp.Py_buffer* buffer, int flags):
        # Device buffer contains data pointers on the device. Hence,
        # cannot support buffer protocol PEP-3118 for CudaBuffer.
        raise BufferError('buffer protocol for device buffer not supported')
| 699 |
+
|
| 700 |
+
cdef class HostBuffer(Buffer):
    """Device-accessible CPU memory created using cudaHostAlloc.

    To create a HostBuffer instance, use

      cuda.new_host_buffer(<nbytes>)
    """

    def __init__(self):
        # Direct construction is forbidden; see new_host_buffer().
        raise TypeError("Do not call HostBuffer's constructor directly,"
                        " use `cuda.new_host_buffer` function instead.")

    cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer):
        # Keep the typed pointer, then initialize the Buffer base with the
        # same object upcast to CBuffer.
        self.host_buffer = buffer
        self.init(<shared_ptr[CBuffer]> buffer)

    @property
    def size(self):
        # Delegate to the underlying C++ host buffer.
        return self.host_buffer.get().size()
| 720 |
+
|
| 721 |
+
cdef class BufferReader(NativeFile):
    """File interface for zero-copy read from CUDA buffers.

    Note: Read methods return pointers to device memory. This means
    you must be careful using this interface with any Arrow code which
    may expect to be able to do anything other than pointer arithmetic
    on the returned buffers.
    """

    def __cinit__(self, CudaBuffer obj):
        self.buffer = obj
        # Ownership of the raw reader is transferred to the shared_ptr
        # handed to set_random_access_file below.
        self.reader = new CCudaBufferReader(self.buffer.buffer)
        self.set_random_access_file(
            shared_ptr[CRandomAccessFile](self.reader))
        self.is_readable = True

    def read_buffer(self, nbytes=None):
        """Return a slice view of the underlying device buffer.

        The slice will start at the current reader position and will
        have specified size in bytes.

        Parameters
        ----------
        nbytes : int, default None
            Specify the number of bytes to read. Default: None (read all
            remaining bytes).

        Returns
        -------
        cbuf : CudaBuffer
            New device buffer.

        """
        cdef:
            int64_t c_nbytes
            shared_ptr[CCudaBuffer] output

        if nbytes is None:
            # Read everything from the current position to the end.
            c_nbytes = self.size() - self.tell()
        else:
            c_nbytes = nbytes

        # Read() on a CudaBufferReader returns a device buffer (zero-copy),
        # so the downcast from CBuffer to CCudaBuffer is safe here.
        with nogil:
            output = static_pointer_cast[CCudaBuffer, CBuffer](
                GetResultValue(self.reader.Read(c_nbytes)))

        return pyarrow_wrap_cudabuffer(output)
| 770 |
+
|
| 771 |
+
cdef class BufferWriter(NativeFile):
    """File interface for writing to CUDA buffers.

    By default writes are unbuffered. Set the ``buffer_size`` property
    to a positive value to enable buffering.
    """

    def __cinit__(self, CudaBuffer buffer):
        self.buffer = buffer
        # Ownership of the raw writer is transferred to the shared_ptr
        # handed to set_output_stream below.
        self.writer = new CCudaBufferWriter(self.buffer.cuda_buffer)
        self.set_output_stream(shared_ptr[COutputStream](self.writer))
        self.is_writable = True

    def writeat(self, int64_t position, object data):
        """Write data to buffer starting from position.

        Parameters
        ----------
        position : int
            Specify device buffer position where the data will be
            written.
        data : array-like
            Specify data, the data instance must implement buffer
            protocol.
        """
        cdef:
            Buffer buf = as_buffer(data)
            const uint8_t* c_data = buf.buffer.get().data()
            int64_t c_size = buf.buffer.get().size()

        with nogil:
            check_status(self.writer.WriteAt(position, c_data, c_size))

    def flush(self):
        """ Flush the buffer stream """
        with nogil:
            check_status(self.writer.Flush())

    def seek(self, int64_t position, int whence=0):
        # TODO: remove this method after NativeFile.seek supports
        # writable files.
        cdef int64_t offset

        with nogil:
            if whence == 0:
                # Absolute position.
                offset = position
            elif whence == 1:
                # Relative to current position.
                offset = GetResultValue(self.writer.Tell())
                offset = offset + position
            else:
                # Re-acquire the GIL only on the error path, to raise.
                with gil:
                    raise ValueError("Invalid value of whence: {0}"
                                     .format(whence))
            check_status(self.writer.Seek(offset))
        return self.tell()

    @property
    def buffer_size(self):
        """Returns size of host (CPU) buffer, 0 for unbuffered
        """
        return self.writer.buffer_size()

    @buffer_size.setter
    def buffer_size(self, int64_t buffer_size):
        """Set CPU buffer size to limit calls to cudaMemcpy

        Parameters
        ----------
        buffer_size : int
            Specify the size of CPU buffer to allocate in bytes.
        """
        # SetBufferSize flushes any pending buffered bytes first.
        with nogil:
            check_status(self.writer.SetBufferSize(buffer_size))

    @property
    def num_bytes_buffered(self):
        """Returns number of bytes buffered on host
        """
        return self.writer.num_bytes_buffered()
| 851 |
+
# Functions
|
| 852 |
+
|
| 853 |
+
|
| 854 |
+
def new_host_buffer(const int64_t size, int device=0):
    """Return buffer with CUDA-accessible memory on CPU host

    Parameters
    ----------
    size : int
        Specify the number of bytes to be allocated.
    device : int
        Specify GPU device number.

    Returns
    -------
    dbuf : HostBuffer
        Allocated host buffer
    """
    cdef shared_ptr[CCudaHostBuffer] buffer
    # Pinned-memory allocation happens in C++; release the GIL meanwhile.
    with nogil:
        buffer = GetResultValue(AllocateCudaHostBuffer(device, size))
    return pyarrow_wrap_cudahostbuffer(buffer)
| 874 |
+
|
| 875 |
+
def serialize_record_batch(object batch, object ctx):
    """ Write record batch message to GPU device memory

    Parameters
    ----------
    batch : RecordBatch
        Record batch to write
    ctx : Context
        CUDA Context to allocate device memory from

    Returns
    -------
    dbuf : CudaBuffer
        device buffer which contains the record batch message
    """
    cdef shared_ptr[CCudaBuffer] buffer
    # Raw pointers are safe here: `batch` and `ctx` keep the shared_ptrs
    # alive for the duration of the call.
    cdef CRecordBatch* batch_ = pyarrow_unwrap_batch(batch).get()
    cdef CCudaContext* ctx_ = pyarrow_unwrap_cudacontext(ctx).get()
    with nogil:
        buffer = GetResultValue(CudaSerializeRecordBatch(batch_[0], ctx_))
    return pyarrow_wrap_cudabuffer(buffer)
| 897 |
+
|
| 898 |
+
def read_message(object source, pool=None):
    """ Read Arrow IPC message located on GPU device

    Parameters
    ----------
    source : {CudaBuffer, cuda.BufferReader}
        Device buffer or reader of device buffer.
    pool : MemoryPool (optional)
        Pool to allocate CPU memory for the metadata

    Returns
    -------
    message : Message
        The deserialized message, body still on device
    """
    cdef:
        Message result = Message.__new__(Message)
        CMemoryPool* pool_ = maybe_unbox_memory_pool(pool)
        BufferReader reader
    # BUG FIX: the original only assigned `reader` when `source` was NOT a
    # BufferReader, so passing a BufferReader (a documented input type)
    # raised an unbound-local error at `reader.reader` below.
    if isinstance(source, BufferReader):
        reader = source
    else:
        # Wrap a raw device buffer in a reader positioned at offset 0.
        reader = BufferReader(source)
    # Only the metadata is copied to host (into `pool_`); the body stays
    # on the device.
    with nogil:
        result.message = move(
            GetResultValue(ReadMessage(reader.reader, pool_)))
    return result
| 923 |
+
|
| 924 |
+
def read_record_batch(object buffer, object schema, *,
                      DictionaryMemo dictionary_memo=None, pool=None):
    """Construct RecordBatch referencing IPC message located on CUDA device.

    While the metadata is copied to host memory for deserialization,
    the record batch data remains on the device.

    Parameters
    ----------
    buffer :
        Device buffer containing the complete IPC message
    schema : Schema
        The schema for the record batch
    dictionary_memo : DictionaryMemo, optional
        If message contains dictionaries, must pass a populated
        DictionaryMemo
    pool : MemoryPool (optional)
        Pool to allocate metadata from

    Returns
    -------
    batch : RecordBatch
        Reconstructed record batch, with device pointers

    """
    cdef:
        shared_ptr[CSchema] schema_ = pyarrow_unwrap_schema(schema)
        shared_ptr[CCudaBuffer] buffer_ = pyarrow_unwrap_cudabuffer(buffer)
        CDictionaryMemo temp_memo
        CDictionaryMemo* arg_dict_memo
        CMemoryPool* pool_ = maybe_unbox_memory_pool(pool)
        shared_ptr[CRecordBatch] batch

    # Use caller-provided memo if given; otherwise a throwaway stack memo
    # satisfies the C++ API for dictionary-free messages.
    if dictionary_memo is not None:
        arg_dict_memo = dictionary_memo.memo
    else:
        arg_dict_memo = &temp_memo

    with nogil:
        batch = GetResultValue(CudaReadRecordBatch(
            schema_, arg_dict_memo, buffer_, pool_))
    return pyarrow_wrap_batch(batch)
| 967 |
+
|
| 968 |
+
# Public API
|
| 969 |
+
|
| 970 |
+
|
| 971 |
+
cdef public api bint pyarrow_is_buffer(object buffer):
    # C-API predicate: True if `buffer` is a pyarrow Buffer (or subclass).
    return isinstance(buffer, Buffer)
| 974 |
+
# cudabuffer
|
| 975 |
+
|
| 976 |
+
cdef public api bint pyarrow_is_cudabuffer(object buffer):
    # C-API predicate: True if `buffer` is a CudaBuffer.
    return isinstance(buffer, CudaBuffer)
| 979 |
+
|
| 980 |
+
cdef public api object \
        pyarrow_wrap_cudabuffer_base(const shared_ptr[CCudaBuffer]& buf, base):
    # Wrap a C++ CudaBuffer, keeping `base` alive as the memory owner.
    cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer)
    result.init_cuda(buf, base)
    return result
| 986 |
+
|
| 987 |
+
cdef public api object \
        pyarrow_wrap_cudabuffer(const shared_ptr[CCudaBuffer]& buf):
    # Wrap a C++ CudaBuffer with no extra Python owner.
    cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer)
    result.init_cuda(buf, None)
    return result
| 993 |
+
|
| 994 |
+
cdef public api shared_ptr[CCudaBuffer] pyarrow_unwrap_cudabuffer(object obj):
    # Extract the underlying shared_ptr; raises TypeError for other types.
    if pyarrow_is_cudabuffer(obj):
        return (<CudaBuffer>obj).cuda_buffer
    raise TypeError('expected CudaBuffer instance, got %s'
                    % (type(obj).__name__))
| 1000 |
+
# cudahostbuffer
|
| 1001 |
+
|
| 1002 |
+
cdef public api bint pyarrow_is_cudahostbuffer(object buffer):
    # C-API predicate: True if `buffer` is a (pinned) HostBuffer.
    return isinstance(buffer, HostBuffer)
| 1005 |
+
|
| 1006 |
+
cdef public api object \
        pyarrow_wrap_cudahostbuffer(const shared_ptr[CCudaHostBuffer]& buf):
    # Wrap a C++ CudaHostBuffer in a Python HostBuffer.
    cdef HostBuffer result = HostBuffer.__new__(HostBuffer)
    result.init_host(buf)
    return result
| 1012 |
+
|
| 1013 |
+
cdef public api shared_ptr[CCudaHostBuffer] \
        pyarrow_unwrap_cudahostbuffer(object obj):
    # Extract the underlying shared_ptr; raises TypeError for other types.
    if pyarrow_is_cudahostbuffer(obj):
        return (<HostBuffer>obj).host_buffer
    raise TypeError('expected HostBuffer instance, got %s'
                    % (type(obj).__name__))
| 1020 |
+
# cudacontext
|
| 1021 |
+
|
| 1022 |
+
cdef public api bint pyarrow_is_cudacontext(object ctx):
    # C-API predicate: True if `ctx` is a CUDA Context wrapper.
    return isinstance(ctx, Context)
| 1025 |
+
|
| 1026 |
+
cdef public api object \
        pyarrow_wrap_cudacontext(const shared_ptr[CCudaContext]& ctx):
    # Wrap a C++ CudaContext in a Python Context.
    cdef Context result = Context.__new__(Context)
    result.init(ctx)
    return result
| 1032 |
+
|
| 1033 |
+
cdef public api shared_ptr[CCudaContext] \
        pyarrow_unwrap_cudacontext(object obj):
    # Extract the underlying shared_ptr; raises TypeError for other types.
    if pyarrow_is_cudacontext(obj):
        return (<Context>obj).context
    raise TypeError('expected Context instance, got %s'
                    % (type(obj).__name__))
| 1040 |
+
# cudaipcmemhandle
|
| 1041 |
+
|
| 1042 |
+
cdef public api bint pyarrow_is_cudaipcmemhandle(object handle):
    # C-API predicate: True if `handle` is an IpcMemHandle.
    return isinstance(handle, IpcMemHandle)
| 1045 |
+
|
| 1046 |
+
cdef public api object \
        pyarrow_wrap_cudaipcmemhandle(shared_ptr[CCudaIpcMemHandle]& h):
    # Wrap a C++ CudaIpcMemHandle in a Python IpcMemHandle.
    cdef IpcMemHandle result = IpcMemHandle.__new__(IpcMemHandle)
    result.init(h)
    return result
| 1052 |
+
|
| 1053 |
+
cdef public api shared_ptr[CCudaIpcMemHandle] \
        pyarrow_unwrap_cudaipcmemhandle(object obj):
    # Extract the underlying shared_ptr; raises TypeError for other types.
    if pyarrow_is_cudaipcmemhandle(obj):
        return (<IpcMemHandle>obj).handle
    raise TypeError('expected IpcMemHandle instance, got %s'
                    % (type(obj).__name__))
|
parrot/lib/python3.10/site-packages/pyarrow/_dataset.pxd
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# cython: language_level = 3
|
| 19 |
+
|
| 20 |
+
"""Dataset is currently unstable. APIs subject to change without notice."""
|
| 21 |
+
|
| 22 |
+
from pyarrow.includes.common cimport *
|
| 23 |
+
from pyarrow.includes.libarrow_dataset cimport *
|
| 24 |
+
from pyarrow.lib cimport *
|
| 25 |
+
from pyarrow._fs cimport FileSystem, FileInfo
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Declaration only (implemented in _dataset.pyx): build a CFileSource from a
# path, buffer or file-like object.  `=*` marks arguments with defaults.
cdef CFileSource _make_file_source(object file, FileSystem filesystem=*, object file_size=*)
|
| 30 |
+
cdef class DatasetFactory(_Weakrefable):

    cdef:
        # `factory` is a borrowed raw pointer into `wrapped`; valid only
        # while `wrapped` is alive.
        SharedPtrNoGIL[CDatasetFactory] wrapped
        CDatasetFactory* factory

    cdef init(self, const shared_ptr[CDatasetFactory]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CDatasetFactory]& sp)

    cdef inline shared_ptr[CDatasetFactory] unwrap(self) nogil
| 43 |
+
|
| 44 |
+
cdef class Dataset(_Weakrefable):

    cdef:
        # `dataset` is a borrowed raw pointer into `wrapped`.
        SharedPtrNoGIL[CDataset] wrapped
        CDataset* dataset
        # Python-visible dict of default scan options for this dataset.
        public dict _scan_options

    cdef void init(self, const shared_ptr[CDataset]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CDataset]& sp)

    cdef shared_ptr[CDataset] unwrap(self) nogil
| 58 |
+
|
| 59 |
+
cdef class Scanner(_Weakrefable):
    cdef:
        # `scanner` is a borrowed raw pointer into `wrapped`.
        SharedPtrNoGIL[CScanner] wrapped
        CScanner* scanner

    cdef void init(self, const shared_ptr[CScanner]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CScanner]& sp)

    cdef shared_ptr[CScanner] unwrap(self)

    # Translate a Python dict of scan options into C++ CScanOptions for
    # `dataset`; `except *` propagates Python exceptions.
    @staticmethod
    cdef shared_ptr[CScanOptions] _make_scan_options(Dataset dataset, dict py_scanoptions) except *
| 74 |
+
|
| 75 |
+
cdef class FragmentScanOptions(_Weakrefable):

    cdef:
        shared_ptr[CFragmentScanOptions] wrapped

    cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CFragmentScanOptions]& sp)
| 85 |
+
|
| 86 |
+
cdef class FileFormat(_Weakrefable):

    cdef:
        # `format` is a borrowed raw pointer into `wrapped`.
        shared_ptr[CFileFormat] wrapped
        CFileFormat* format

    cdef void init(self, const shared_ptr[CFileFormat]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CFileFormat]& sp)

    cdef inline shared_ptr[CFileFormat] unwrap(self)

    cdef _set_default_fragment_scan_options(self, FragmentScanOptions options)

    # Return a WrittenFile after a file was written.
    # May be overridden by subclasses, e.g. to add metadata.
    cdef WrittenFile _finish_write(self, path, base_dir,
                                   CFileWriter* file_writer)
| 106 |
+
|
| 107 |
+
cdef class FileWriteOptions(_Weakrefable):

    cdef:
        # `c_options` is a borrowed raw pointer into `wrapped`.
        shared_ptr[CFileWriteOptions] wrapped
        CFileWriteOptions* c_options

    cdef void init(self, const shared_ptr[CFileWriteOptions]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CFileWriteOptions]& sp)

    cdef inline shared_ptr[CFileWriteOptions] unwrap(self)
| 120 |
+
|
| 121 |
+
cdef class Fragment(_Weakrefable):
|
| 122 |
+
|
| 123 |
+
cdef:
|
| 124 |
+
SharedPtrNoGIL[CFragment] wrapped
|
| 125 |
+
CFragment* fragment
|
| 126 |
+
|
| 127 |
+
cdef void init(self, const shared_ptr[CFragment]& sp)
|
| 128 |
+
|
| 129 |
+
@staticmethod
|
| 130 |
+
cdef wrap(const shared_ptr[CFragment]& sp)
|
| 131 |
+
|
| 132 |
+
cdef inline shared_ptr[CFragment] unwrap(self)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
cdef class FileFragment(Fragment):
|
| 136 |
+
|
| 137 |
+
cdef:
|
| 138 |
+
CFileFragment* file_fragment
|
| 139 |
+
|
| 140 |
+
cdef void init(self, const shared_ptr[CFragment]& sp)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
cdef class Partitioning(_Weakrefable):
|
| 144 |
+
|
| 145 |
+
cdef:
|
| 146 |
+
shared_ptr[CPartitioning] wrapped
|
| 147 |
+
CPartitioning* partitioning
|
| 148 |
+
|
| 149 |
+
cdef init(self, const shared_ptr[CPartitioning]& sp)
|
| 150 |
+
|
| 151 |
+
@staticmethod
|
| 152 |
+
cdef wrap(const shared_ptr[CPartitioning]& sp)
|
| 153 |
+
|
| 154 |
+
cdef inline shared_ptr[CPartitioning] unwrap(self)
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
cdef class PartitioningFactory(_Weakrefable):
|
| 158 |
+
|
| 159 |
+
cdef:
|
| 160 |
+
shared_ptr[CPartitioningFactory] wrapped
|
| 161 |
+
CPartitioningFactory* factory
|
| 162 |
+
object constructor
|
| 163 |
+
object options
|
| 164 |
+
|
| 165 |
+
cdef init(self, const shared_ptr[CPartitioningFactory]& sp)
|
| 166 |
+
|
| 167 |
+
@staticmethod
|
| 168 |
+
cdef wrap(const shared_ptr[CPartitioningFactory]& sp,
|
| 169 |
+
object constructor, object options)
|
| 170 |
+
|
| 171 |
+
cdef inline shared_ptr[CPartitioningFactory] unwrap(self)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
cdef class WrittenFile(_Weakrefable):
|
| 175 |
+
|
| 176 |
+
# The full path to the created file
|
| 177 |
+
cdef public str path
|
| 178 |
+
# Optional Parquet metadata
|
| 179 |
+
# This metadata will have the file path attribute set to the path of
|
| 180 |
+
# the written file.
|
| 181 |
+
cdef public object metadata
|
| 182 |
+
# The size of the file in bytes
|
| 183 |
+
cdef public int64_t size
|
parrot/lib/python3.10/site-packages/pyarrow/_dataset.pyx
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (78.5 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# cython: language_level = 3
|
| 19 |
+
|
| 20 |
+
"""Dataset support for Parquet file format."""
|
| 21 |
+
|
| 22 |
+
from pyarrow.includes.libarrow_dataset cimport *
|
| 23 |
+
from pyarrow.includes.libarrow_dataset_parquet cimport *
|
| 24 |
+
|
| 25 |
+
from pyarrow._dataset cimport FragmentScanOptions, FileWriteOptions
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
cdef class ParquetFragmentScanOptions(FragmentScanOptions):
|
| 29 |
+
cdef:
|
| 30 |
+
CParquetFragmentScanOptions* parquet_options
|
| 31 |
+
object _parquet_decryption_config
|
| 32 |
+
object _decryption_properties
|
| 33 |
+
|
| 34 |
+
cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp)
|
| 35 |
+
cdef CReaderProperties* reader_properties(self)
|
| 36 |
+
cdef ArrowReaderProperties* arrow_reader_properties(self)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
cdef class ParquetFileWriteOptions(FileWriteOptions):
|
| 40 |
+
|
| 41 |
+
cdef:
|
| 42 |
+
CParquetFileWriteOptions* parquet_options
|
| 43 |
+
object _properties
|
parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx
ADDED
|
@@ -0,0 +1,1053 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# cython: language_level = 3
|
| 19 |
+
|
| 20 |
+
"""Dataset support for Parquet file format."""
|
| 21 |
+
|
| 22 |
+
from cython cimport binding
|
| 23 |
+
from cython.operator cimport dereference as deref
|
| 24 |
+
|
| 25 |
+
import os
|
| 26 |
+
import warnings
|
| 27 |
+
|
| 28 |
+
import pyarrow as pa
|
| 29 |
+
from pyarrow.lib cimport *
|
| 30 |
+
from pyarrow.lib import frombytes, tobytes, is_threading_enabled
|
| 31 |
+
from pyarrow.includes.libarrow cimport *
|
| 32 |
+
from pyarrow.includes.libarrow_dataset cimport *
|
| 33 |
+
from pyarrow.includes.libarrow_dataset_parquet cimport *
|
| 34 |
+
from pyarrow._fs cimport FileSystem
|
| 35 |
+
|
| 36 |
+
from pyarrow._compute cimport Expression, _bind
|
| 37 |
+
from pyarrow._dataset cimport (
|
| 38 |
+
_make_file_source,
|
| 39 |
+
DatasetFactory,
|
| 40 |
+
FileFormat,
|
| 41 |
+
FileFragment,
|
| 42 |
+
FileWriteOptions,
|
| 43 |
+
Fragment,
|
| 44 |
+
FragmentScanOptions,
|
| 45 |
+
CacheOptions,
|
| 46 |
+
Partitioning,
|
| 47 |
+
PartitioningFactory,
|
| 48 |
+
WrittenFile
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
from pyarrow._parquet cimport (
|
| 52 |
+
_create_writer_properties, _create_arrow_writer_properties,
|
| 53 |
+
FileMetaData,
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
try:
|
| 58 |
+
from pyarrow._dataset_parquet_encryption import (
|
| 59 |
+
set_encryption_config, set_decryption_config, set_decryption_properties
|
| 60 |
+
)
|
| 61 |
+
parquet_encryption_enabled = True
|
| 62 |
+
except ImportError:
|
| 63 |
+
parquet_encryption_enabled = False
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
cdef Expression _true = Expression._scalar(True)
|
| 67 |
+
|
| 68 |
+
ctypedef CParquetFileWriter* _CParquetFileWriterPtr
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
cdef class ParquetFileFormat(FileFormat):
|
| 72 |
+
"""
|
| 73 |
+
FileFormat for Parquet
|
| 74 |
+
|
| 75 |
+
Parameters
|
| 76 |
+
----------
|
| 77 |
+
read_options : ParquetReadOptions
|
| 78 |
+
Read options for the file.
|
| 79 |
+
default_fragment_scan_options : ParquetFragmentScanOptions
|
| 80 |
+
Scan Options for the file.
|
| 81 |
+
**kwargs : dict
|
| 82 |
+
Additional options for read option or scan option
|
| 83 |
+
"""
|
| 84 |
+
|
| 85 |
+
cdef:
|
| 86 |
+
CParquetFileFormat* parquet_format
|
| 87 |
+
|
| 88 |
+
def __init__(self, read_options=None,
|
| 89 |
+
default_fragment_scan_options=None,
|
| 90 |
+
**kwargs):
|
| 91 |
+
cdef:
|
| 92 |
+
shared_ptr[CParquetFileFormat] wrapped
|
| 93 |
+
CParquetFileFormatReaderOptions* options
|
| 94 |
+
|
| 95 |
+
# Read/scan options
|
| 96 |
+
read_options_args = {option: kwargs[option] for option in kwargs
|
| 97 |
+
if option in _PARQUET_READ_OPTIONS}
|
| 98 |
+
scan_args = {option: kwargs[option] for option in kwargs
|
| 99 |
+
if option not in _PARQUET_READ_OPTIONS}
|
| 100 |
+
if read_options and read_options_args:
|
| 101 |
+
duplicates = ', '.join(sorted(read_options_args))
|
| 102 |
+
raise ValueError(f'If `read_options` is given, '
|
| 103 |
+
f'cannot specify {duplicates}')
|
| 104 |
+
if default_fragment_scan_options and scan_args:
|
| 105 |
+
duplicates = ', '.join(sorted(scan_args))
|
| 106 |
+
raise ValueError(f'If `default_fragment_scan_options` is given, '
|
| 107 |
+
f'cannot specify {duplicates}')
|
| 108 |
+
|
| 109 |
+
if read_options is None:
|
| 110 |
+
read_options = ParquetReadOptions(**read_options_args)
|
| 111 |
+
elif isinstance(read_options, dict):
|
| 112 |
+
# For backwards compatibility
|
| 113 |
+
duplicates = []
|
| 114 |
+
for option, value in read_options.items():
|
| 115 |
+
if option in _PARQUET_READ_OPTIONS:
|
| 116 |
+
read_options_args[option] = value
|
| 117 |
+
else:
|
| 118 |
+
duplicates.append(option)
|
| 119 |
+
scan_args[option] = value
|
| 120 |
+
if duplicates:
|
| 121 |
+
duplicates = ", ".join(duplicates)
|
| 122 |
+
warnings.warn(f'The scan options {duplicates} should be '
|
| 123 |
+
'specified directly as keyword arguments')
|
| 124 |
+
read_options = ParquetReadOptions(**read_options_args)
|
| 125 |
+
elif not isinstance(read_options, ParquetReadOptions):
|
| 126 |
+
raise TypeError('`read_options` must be either a dictionary or an '
|
| 127 |
+
'instance of ParquetReadOptions')
|
| 128 |
+
|
| 129 |
+
if default_fragment_scan_options is None:
|
| 130 |
+
default_fragment_scan_options = ParquetFragmentScanOptions(**scan_args)
|
| 131 |
+
elif isinstance(default_fragment_scan_options, dict):
|
| 132 |
+
default_fragment_scan_options = ParquetFragmentScanOptions(
|
| 133 |
+
**default_fragment_scan_options)
|
| 134 |
+
elif not isinstance(default_fragment_scan_options,
|
| 135 |
+
ParquetFragmentScanOptions):
|
| 136 |
+
raise TypeError('`default_fragment_scan_options` must be either a '
|
| 137 |
+
'dictionary or an instance of '
|
| 138 |
+
'ParquetFragmentScanOptions')
|
| 139 |
+
|
| 140 |
+
wrapped = make_shared[CParquetFileFormat]()
|
| 141 |
+
|
| 142 |
+
options = &(wrapped.get().reader_options)
|
| 143 |
+
if read_options.dictionary_columns is not None:
|
| 144 |
+
for column in read_options.dictionary_columns:
|
| 145 |
+
options.dict_columns.insert(tobytes(column))
|
| 146 |
+
options.coerce_int96_timestamp_unit = \
|
| 147 |
+
read_options._coerce_int96_timestamp_unit
|
| 148 |
+
|
| 149 |
+
self.init(<shared_ptr[CFileFormat]> wrapped)
|
| 150 |
+
self.default_fragment_scan_options = default_fragment_scan_options
|
| 151 |
+
|
| 152 |
+
cdef void init(self, const shared_ptr[CFileFormat]& sp):
|
| 153 |
+
FileFormat.init(self, sp)
|
| 154 |
+
self.parquet_format = <CParquetFileFormat*> sp.get()
|
| 155 |
+
|
| 156 |
+
cdef WrittenFile _finish_write(self, path, base_dir,
|
| 157 |
+
CFileWriter* file_writer):
|
| 158 |
+
cdef:
|
| 159 |
+
FileMetaData parquet_metadata
|
| 160 |
+
CParquetFileWriter* parquet_file_writer
|
| 161 |
+
|
| 162 |
+
parquet_metadata = None
|
| 163 |
+
parquet_file_writer = dynamic_cast[_CParquetFileWriterPtr](file_writer)
|
| 164 |
+
with nogil:
|
| 165 |
+
metadata = deref(
|
| 166 |
+
deref(parquet_file_writer).parquet_writer()).metadata()
|
| 167 |
+
if metadata:
|
| 168 |
+
parquet_metadata = FileMetaData()
|
| 169 |
+
parquet_metadata.init(metadata)
|
| 170 |
+
parquet_metadata.set_file_path(os.path.relpath(path, base_dir))
|
| 171 |
+
|
| 172 |
+
size = GetResultValue(file_writer.GetBytesWritten())
|
| 173 |
+
|
| 174 |
+
return WrittenFile(path, parquet_metadata, size)
|
| 175 |
+
|
| 176 |
+
@property
|
| 177 |
+
def read_options(self):
|
| 178 |
+
cdef CParquetFileFormatReaderOptions* options
|
| 179 |
+
options = &self.parquet_format.reader_options
|
| 180 |
+
parquet_read_options = ParquetReadOptions(
|
| 181 |
+
dictionary_columns={frombytes(col)
|
| 182 |
+
for col in options.dict_columns},
|
| 183 |
+
)
|
| 184 |
+
# Read options getter/setter works with strings so setting
|
| 185 |
+
# the private property which uses the C Type
|
| 186 |
+
parquet_read_options._coerce_int96_timestamp_unit = \
|
| 187 |
+
options.coerce_int96_timestamp_unit
|
| 188 |
+
return parquet_read_options
|
| 189 |
+
|
| 190 |
+
def make_write_options(self, **kwargs):
|
| 191 |
+
"""
|
| 192 |
+
Parameters
|
| 193 |
+
----------
|
| 194 |
+
**kwargs : dict
|
| 195 |
+
|
| 196 |
+
Returns
|
| 197 |
+
-------
|
| 198 |
+
pyarrow.dataset.FileWriteOptions
|
| 199 |
+
"""
|
| 200 |
+
# Safeguard from calling make_write_options as a static class method
|
| 201 |
+
if not isinstance(self, ParquetFileFormat):
|
| 202 |
+
raise TypeError("make_write_options() should be called on "
|
| 203 |
+
"an instance of ParquetFileFormat")
|
| 204 |
+
opts = FileFormat.make_write_options(self)
|
| 205 |
+
(<ParquetFileWriteOptions> opts).update(**kwargs)
|
| 206 |
+
return opts
|
| 207 |
+
|
| 208 |
+
cdef _set_default_fragment_scan_options(self, FragmentScanOptions options):
|
| 209 |
+
if options.type_name == 'parquet':
|
| 210 |
+
self.parquet_format.default_fragment_scan_options = options.wrapped
|
| 211 |
+
else:
|
| 212 |
+
super()._set_default_fragment_scan_options(options)
|
| 213 |
+
|
| 214 |
+
def equals(self, ParquetFileFormat other):
|
| 215 |
+
"""
|
| 216 |
+
Parameters
|
| 217 |
+
----------
|
| 218 |
+
other : pyarrow.dataset.ParquetFileFormat
|
| 219 |
+
|
| 220 |
+
Returns
|
| 221 |
+
-------
|
| 222 |
+
bool
|
| 223 |
+
"""
|
| 224 |
+
return (
|
| 225 |
+
self.read_options.equals(other.read_options) and
|
| 226 |
+
self.default_fragment_scan_options ==
|
| 227 |
+
other.default_fragment_scan_options
|
| 228 |
+
)
|
| 229 |
+
|
| 230 |
+
@property
|
| 231 |
+
def default_extname(self):
|
| 232 |
+
return "parquet"
|
| 233 |
+
|
| 234 |
+
def __reduce__(self):
|
| 235 |
+
return ParquetFileFormat, (self.read_options,
|
| 236 |
+
self.default_fragment_scan_options)
|
| 237 |
+
|
| 238 |
+
def __repr__(self):
|
| 239 |
+
return f"<ParquetFileFormat read_options={self.read_options}>"
|
| 240 |
+
|
| 241 |
+
def make_fragment(self, file, filesystem=None,
|
| 242 |
+
Expression partition_expression=None, row_groups=None, *, file_size=None):
|
| 243 |
+
"""
|
| 244 |
+
Make a FileFragment from a given file.
|
| 245 |
+
|
| 246 |
+
Parameters
|
| 247 |
+
----------
|
| 248 |
+
file : file-like object, path-like or str
|
| 249 |
+
The file or file path to make a fragment from.
|
| 250 |
+
filesystem : Filesystem, optional
|
| 251 |
+
If `filesystem` is given, `file` must be a string and specifies
|
| 252 |
+
the path of the file to read from the filesystem.
|
| 253 |
+
partition_expression : Expression, optional
|
| 254 |
+
An expression that is guaranteed true for all rows in the fragment. Allows
|
| 255 |
+
fragment to be potentially skipped while scanning with a filter.
|
| 256 |
+
row_groups : Iterable, optional
|
| 257 |
+
The indices of the row groups to include
|
| 258 |
+
file_size : int, optional
|
| 259 |
+
The size of the file in bytes. Can improve performance with high-latency filesystems
|
| 260 |
+
when file size needs to be known before reading.
|
| 261 |
+
|
| 262 |
+
Returns
|
| 263 |
+
-------
|
| 264 |
+
fragment : Fragment
|
| 265 |
+
The file fragment
|
| 266 |
+
"""
|
| 267 |
+
cdef:
|
| 268 |
+
vector[int] c_row_groups
|
| 269 |
+
if partition_expression is None:
|
| 270 |
+
partition_expression = _true
|
| 271 |
+
if row_groups is None:
|
| 272 |
+
return super().make_fragment(file, filesystem,
|
| 273 |
+
partition_expression, file_size=file_size)
|
| 274 |
+
|
| 275 |
+
c_source = _make_file_source(file, filesystem, file_size)
|
| 276 |
+
c_row_groups = [<int> row_group for row_group in set(row_groups)]
|
| 277 |
+
|
| 278 |
+
c_fragment = <shared_ptr[CFragment]> GetResultValue(
|
| 279 |
+
self.parquet_format.MakeFragment(move(c_source),
|
| 280 |
+
partition_expression.unwrap(),
|
| 281 |
+
<shared_ptr[CSchema]>nullptr,
|
| 282 |
+
move(c_row_groups)))
|
| 283 |
+
return Fragment.wrap(move(c_fragment))
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
class RowGroupInfo:
|
| 287 |
+
"""
|
| 288 |
+
A wrapper class for RowGroup information
|
| 289 |
+
|
| 290 |
+
Parameters
|
| 291 |
+
----------
|
| 292 |
+
id : integer
|
| 293 |
+
The group ID.
|
| 294 |
+
metadata : FileMetaData
|
| 295 |
+
The rowgroup metadata.
|
| 296 |
+
schema : Schema
|
| 297 |
+
Schema of the rows.
|
| 298 |
+
"""
|
| 299 |
+
|
| 300 |
+
def __init__(self, id, metadata, schema):
|
| 301 |
+
self.id = id
|
| 302 |
+
self.metadata = metadata
|
| 303 |
+
self.schema = schema
|
| 304 |
+
|
| 305 |
+
@property
|
| 306 |
+
def num_rows(self):
|
| 307 |
+
return self.metadata.num_rows
|
| 308 |
+
|
| 309 |
+
@property
|
| 310 |
+
def total_byte_size(self):
|
| 311 |
+
return self.metadata.total_byte_size
|
| 312 |
+
|
| 313 |
+
@property
|
| 314 |
+
def statistics(self):
|
| 315 |
+
def name_stats(i):
|
| 316 |
+
col = self.metadata.column(i)
|
| 317 |
+
|
| 318 |
+
stats = col.statistics
|
| 319 |
+
if stats is None or not stats.has_min_max:
|
| 320 |
+
return None, None
|
| 321 |
+
|
| 322 |
+
name = col.path_in_schema
|
| 323 |
+
field_index = self.schema.get_field_index(name)
|
| 324 |
+
if field_index < 0:
|
| 325 |
+
return None, None
|
| 326 |
+
|
| 327 |
+
typ = self.schema.field(field_index).type
|
| 328 |
+
return col.path_in_schema, {
|
| 329 |
+
'min': pa.scalar(stats.min, type=typ).as_py(),
|
| 330 |
+
'max': pa.scalar(stats.max, type=typ).as_py()
|
| 331 |
+
}
|
| 332 |
+
|
| 333 |
+
return {
|
| 334 |
+
name: stats for name, stats
|
| 335 |
+
in map(name_stats, range(self.metadata.num_columns))
|
| 336 |
+
if stats is not None
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
def __repr__(self):
|
| 340 |
+
return "RowGroupInfo({})".format(self.id)
|
| 341 |
+
|
| 342 |
+
def __eq__(self, other):
|
| 343 |
+
if isinstance(other, int):
|
| 344 |
+
return self.id == other
|
| 345 |
+
if not isinstance(other, RowGroupInfo):
|
| 346 |
+
return False
|
| 347 |
+
return self.id == other.id
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
cdef class ParquetFileFragment(FileFragment):
|
| 351 |
+
"""A Fragment representing a parquet file."""
|
| 352 |
+
|
| 353 |
+
cdef:
|
| 354 |
+
CParquetFileFragment* parquet_file_fragment
|
| 355 |
+
|
| 356 |
+
cdef void init(self, const shared_ptr[CFragment]& sp):
|
| 357 |
+
FileFragment.init(self, sp)
|
| 358 |
+
self.parquet_file_fragment = <CParquetFileFragment*> sp.get()
|
| 359 |
+
|
| 360 |
+
def __reduce__(self):
|
| 361 |
+
buffer = self.buffer
|
| 362 |
+
# parquet_file_fragment.row_groups() is empty if the metadata
|
| 363 |
+
# information of the file is not yet populated
|
| 364 |
+
if not bool(self.parquet_file_fragment.row_groups()):
|
| 365 |
+
row_groups = None
|
| 366 |
+
else:
|
| 367 |
+
row_groups = [row_group.id for row_group in self.row_groups]
|
| 368 |
+
|
| 369 |
+
return self.format.make_fragment, (
|
| 370 |
+
self.path if buffer is None else buffer,
|
| 371 |
+
self.filesystem,
|
| 372 |
+
self.partition_expression,
|
| 373 |
+
row_groups
|
| 374 |
+
)
|
| 375 |
+
|
| 376 |
+
def ensure_complete_metadata(self):
|
| 377 |
+
"""
|
| 378 |
+
Ensure that all metadata (statistics, physical schema, ...) have
|
| 379 |
+
been read and cached in this fragment.
|
| 380 |
+
"""
|
| 381 |
+
with nogil:
|
| 382 |
+
check_status(self.parquet_file_fragment.EnsureCompleteMetadata())
|
| 383 |
+
|
| 384 |
+
@property
|
| 385 |
+
def row_groups(self):
|
| 386 |
+
metadata = self.metadata
|
| 387 |
+
cdef vector[int] row_groups = self.parquet_file_fragment.row_groups()
|
| 388 |
+
return [RowGroupInfo(i, metadata.row_group(i), self.physical_schema)
|
| 389 |
+
for i in row_groups]
|
| 390 |
+
|
| 391 |
+
@property
|
| 392 |
+
def metadata(self):
|
| 393 |
+
self.ensure_complete_metadata()
|
| 394 |
+
cdef FileMetaData metadata = FileMetaData()
|
| 395 |
+
metadata.init(self.parquet_file_fragment.metadata())
|
| 396 |
+
return metadata
|
| 397 |
+
|
| 398 |
+
@property
|
| 399 |
+
def num_row_groups(self):
|
| 400 |
+
"""
|
| 401 |
+
Return the number of row groups viewed by this fragment (not the
|
| 402 |
+
number of row groups in the origin file).
|
| 403 |
+
"""
|
| 404 |
+
self.ensure_complete_metadata()
|
| 405 |
+
return self.parquet_file_fragment.row_groups().size()
|
| 406 |
+
|
| 407 |
+
def split_by_row_group(self, Expression filter=None,
|
| 408 |
+
Schema schema=None):
|
| 409 |
+
"""
|
| 410 |
+
Split the fragment into multiple fragments.
|
| 411 |
+
|
| 412 |
+
Yield a Fragment wrapping each row group in this ParquetFileFragment.
|
| 413 |
+
Row groups will be excluded whose metadata contradicts the optional
|
| 414 |
+
filter.
|
| 415 |
+
|
| 416 |
+
Parameters
|
| 417 |
+
----------
|
| 418 |
+
filter : Expression, default None
|
| 419 |
+
Only include the row groups which satisfy this predicate (using
|
| 420 |
+
the Parquet RowGroup statistics).
|
| 421 |
+
schema : Schema, default None
|
| 422 |
+
Schema to use when filtering row groups. Defaults to the
|
| 423 |
+
Fragment's physical schema
|
| 424 |
+
|
| 425 |
+
Returns
|
| 426 |
+
-------
|
| 427 |
+
A list of Fragments
|
| 428 |
+
"""
|
| 429 |
+
cdef:
|
| 430 |
+
vector[shared_ptr[CFragment]] c_fragments
|
| 431 |
+
CExpression c_filter
|
| 432 |
+
shared_ptr[CFragment] c_fragment
|
| 433 |
+
|
| 434 |
+
schema = schema or self.physical_schema
|
| 435 |
+
c_filter = _bind(filter, schema)
|
| 436 |
+
with nogil:
|
| 437 |
+
c_fragments = move(GetResultValue(
|
| 438 |
+
self.parquet_file_fragment.SplitByRowGroup(move(c_filter))))
|
| 439 |
+
|
| 440 |
+
return [Fragment.wrap(c_fragment) for c_fragment in c_fragments]
|
| 441 |
+
|
| 442 |
+
def subset(self, Expression filter=None, Schema schema=None,
|
| 443 |
+
object row_group_ids=None):
|
| 444 |
+
"""
|
| 445 |
+
Create a subset of the fragment (viewing a subset of the row groups).
|
| 446 |
+
|
| 447 |
+
Subset can be specified by either a filter predicate (with optional
|
| 448 |
+
schema) or by a list of row group IDs. Note that when using a filter,
|
| 449 |
+
the resulting fragment can be empty (viewing no row groups).
|
| 450 |
+
|
| 451 |
+
Parameters
|
| 452 |
+
----------
|
| 453 |
+
filter : Expression, default None
|
| 454 |
+
Only include the row groups which satisfy this predicate (using
|
| 455 |
+
the Parquet RowGroup statistics).
|
| 456 |
+
schema : Schema, default None
|
| 457 |
+
Schema to use when filtering row groups. Defaults to the
|
| 458 |
+
Fragment's physical schema
|
| 459 |
+
row_group_ids : list of ints
|
| 460 |
+
The row group IDs to include in the subset. Can only be specified
|
| 461 |
+
if `filter` is None.
|
| 462 |
+
|
| 463 |
+
Returns
|
| 464 |
+
-------
|
| 465 |
+
ParquetFileFragment
|
| 466 |
+
"""
|
| 467 |
+
cdef:
|
| 468 |
+
CExpression c_filter
|
| 469 |
+
vector[int] c_row_group_ids
|
| 470 |
+
shared_ptr[CFragment] c_fragment
|
| 471 |
+
|
| 472 |
+
if filter is not None and row_group_ids is not None:
|
| 473 |
+
raise ValueError(
|
| 474 |
+
"Cannot specify both 'filter' and 'row_group_ids'."
|
| 475 |
+
)
|
| 476 |
+
|
| 477 |
+
if filter is not None:
|
| 478 |
+
schema = schema or self.physical_schema
|
| 479 |
+
c_filter = _bind(filter, schema)
|
| 480 |
+
with nogil:
|
| 481 |
+
c_fragment = move(GetResultValue(
|
| 482 |
+
self.parquet_file_fragment.SubsetWithFilter(
|
| 483 |
+
move(c_filter))))
|
| 484 |
+
elif row_group_ids is not None:
|
| 485 |
+
c_row_group_ids = [
|
| 486 |
+
<int> row_group for row_group in sorted(set(row_group_ids))
|
| 487 |
+
]
|
| 488 |
+
with nogil:
|
| 489 |
+
c_fragment = move(GetResultValue(
|
| 490 |
+
self.parquet_file_fragment.SubsetWithIds(
|
| 491 |
+
move(c_row_group_ids))))
|
| 492 |
+
else:
|
| 493 |
+
raise ValueError(
|
| 494 |
+
"Need to specify one of 'filter' or 'row_group_ids'"
|
| 495 |
+
)
|
| 496 |
+
|
| 497 |
+
return Fragment.wrap(c_fragment)
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
cdef class ParquetReadOptions(_Weakrefable):
|
| 501 |
+
"""
|
| 502 |
+
Parquet format specific options for reading.
|
| 503 |
+
|
| 504 |
+
Parameters
|
| 505 |
+
----------
|
| 506 |
+
dictionary_columns : list of string, default None
|
| 507 |
+
Names of columns which should be dictionary encoded as
|
| 508 |
+
they are read
|
| 509 |
+
coerce_int96_timestamp_unit : str, default None
|
| 510 |
+
Cast timestamps that are stored in INT96 format to a particular
|
| 511 |
+
resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
|
| 512 |
+
and therefore INT96 timestamps will be inferred as timestamps
|
| 513 |
+
in nanoseconds
|
| 514 |
+
"""
|
| 515 |
+
|
| 516 |
+
cdef public:
|
| 517 |
+
set dictionary_columns
|
| 518 |
+
TimeUnit _coerce_int96_timestamp_unit
|
| 519 |
+
|
| 520 |
+
# Also see _PARQUET_READ_OPTIONS
|
| 521 |
+
def __init__(self, dictionary_columns=None,
|
| 522 |
+
coerce_int96_timestamp_unit=None):
|
| 523 |
+
self.dictionary_columns = set(dictionary_columns or set())
|
| 524 |
+
self.coerce_int96_timestamp_unit = coerce_int96_timestamp_unit
|
| 525 |
+
|
| 526 |
+
@property
|
| 527 |
+
def coerce_int96_timestamp_unit(self):
|
| 528 |
+
return timeunit_to_string(self._coerce_int96_timestamp_unit)
|
| 529 |
+
|
| 530 |
+
@coerce_int96_timestamp_unit.setter
|
| 531 |
+
def coerce_int96_timestamp_unit(self, unit):
|
| 532 |
+
if unit is not None:
|
| 533 |
+
self._coerce_int96_timestamp_unit = string_to_timeunit(unit)
|
| 534 |
+
else:
|
| 535 |
+
self._coerce_int96_timestamp_unit = TimeUnit_NANO
|
| 536 |
+
|
| 537 |
+
def equals(self, ParquetReadOptions other):
|
| 538 |
+
"""
|
| 539 |
+
Parameters
|
| 540 |
+
----------
|
| 541 |
+
other : pyarrow.dataset.ParquetReadOptions
|
| 542 |
+
|
| 543 |
+
Returns
|
| 544 |
+
-------
|
| 545 |
+
bool
|
| 546 |
+
"""
|
| 547 |
+
return (self.dictionary_columns == other.dictionary_columns and
|
| 548 |
+
self.coerce_int96_timestamp_unit ==
|
| 549 |
+
other.coerce_int96_timestamp_unit)
|
| 550 |
+
|
| 551 |
+
def __eq__(self, other):
|
| 552 |
+
try:
|
| 553 |
+
return self.equals(other)
|
| 554 |
+
except TypeError:
|
| 555 |
+
return False
|
| 556 |
+
|
| 557 |
+
def __repr__(self):
|
| 558 |
+
return (
|
| 559 |
+
f"<ParquetReadOptions"
|
| 560 |
+
f" dictionary_columns={self.dictionary_columns}"
|
| 561 |
+
f" coerce_int96_timestamp_unit={self.coerce_int96_timestamp_unit}>"
|
| 562 |
+
)
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
cdef class ParquetFileWriteOptions(FileWriteOptions):
|
| 566 |
+
|
| 567 |
+
def update(self, **kwargs):
|
| 568 |
+
"""
|
| 569 |
+
Parameters
|
| 570 |
+
----------
|
| 571 |
+
**kwargs : dict
|
| 572 |
+
"""
|
| 573 |
+
arrow_fields = {
|
| 574 |
+
"use_deprecated_int96_timestamps",
|
| 575 |
+
"coerce_timestamps",
|
| 576 |
+
"allow_truncated_timestamps",
|
| 577 |
+
"use_compliant_nested_type",
|
| 578 |
+
}
|
| 579 |
+
|
| 580 |
+
setters = set()
|
| 581 |
+
for name, value in kwargs.items():
|
| 582 |
+
if name not in self._properties:
|
| 583 |
+
raise TypeError("unexpected parquet write option: " + name)
|
| 584 |
+
self._properties[name] = value
|
| 585 |
+
if name in arrow_fields:
|
| 586 |
+
setters.add(self._set_arrow_properties)
|
| 587 |
+
elif name == "encryption_config" and value is not None:
|
| 588 |
+
setters.add(self._set_encryption_config)
|
| 589 |
+
else:
|
| 590 |
+
setters.add(self._set_properties)
|
| 591 |
+
|
| 592 |
+
for setter in setters:
|
| 593 |
+
setter()
|
| 594 |
+
|
| 595 |
+
def _set_properties(self):
|
| 596 |
+
cdef CParquetFileWriteOptions* opts = self.parquet_options
|
| 597 |
+
|
| 598 |
+
opts.writer_properties = _create_writer_properties(
|
| 599 |
+
use_dictionary=self._properties["use_dictionary"],
|
| 600 |
+
compression=self._properties["compression"],
|
| 601 |
+
version=self._properties["version"],
|
| 602 |
+
write_statistics=self._properties["write_statistics"],
|
| 603 |
+
data_page_size=self._properties["data_page_size"],
|
| 604 |
+
compression_level=self._properties["compression_level"],
|
| 605 |
+
use_byte_stream_split=(
|
| 606 |
+
self._properties["use_byte_stream_split"]
|
| 607 |
+
),
|
| 608 |
+
column_encoding=self._properties["column_encoding"],
|
| 609 |
+
data_page_version=self._properties["data_page_version"],
|
| 610 |
+
encryption_properties=self._properties["encryption_properties"],
|
| 611 |
+
write_batch_size=self._properties["write_batch_size"],
|
| 612 |
+
dictionary_pagesize_limit=self._properties["dictionary_pagesize_limit"],
|
| 613 |
+
write_page_index=self._properties["write_page_index"],
|
| 614 |
+
write_page_checksum=self._properties["write_page_checksum"],
|
| 615 |
+
sorting_columns=self._properties["sorting_columns"],
|
| 616 |
+
store_decimal_as_integer=self._properties["store_decimal_as_integer"],
|
| 617 |
+
)
|
| 618 |
+
|
| 619 |
+
def _set_arrow_properties(self):
|
| 620 |
+
cdef CParquetFileWriteOptions* opts = self.parquet_options
|
| 621 |
+
|
| 622 |
+
opts.arrow_writer_properties = _create_arrow_writer_properties(
|
| 623 |
+
use_deprecated_int96_timestamps=(
|
| 624 |
+
self._properties["use_deprecated_int96_timestamps"]
|
| 625 |
+
),
|
| 626 |
+
coerce_timestamps=self._properties["coerce_timestamps"],
|
| 627 |
+
allow_truncated_timestamps=(
|
| 628 |
+
self._properties["allow_truncated_timestamps"]
|
| 629 |
+
),
|
| 630 |
+
writer_engine_version="V2",
|
| 631 |
+
use_compliant_nested_type=(
|
| 632 |
+
self._properties["use_compliant_nested_type"]
|
| 633 |
+
)
|
| 634 |
+
)
|
| 635 |
+
|
| 636 |
+
def _set_encryption_config(self):
|
| 637 |
+
if not parquet_encryption_enabled:
|
| 638 |
+
raise NotImplementedError(
|
| 639 |
+
"Encryption is not enabled in your installation of pyarrow, but an "
|
| 640 |
+
"encryption_config was provided."
|
| 641 |
+
)
|
| 642 |
+
set_encryption_config(self, self._properties["encryption_config"])
|
| 643 |
+
|
| 644 |
+
cdef void init(self, const shared_ptr[CFileWriteOptions]& sp):
|
| 645 |
+
FileWriteOptions.init(self, sp)
|
| 646 |
+
self.parquet_options = <CParquetFileWriteOptions*> sp.get()
|
| 647 |
+
self._properties = dict(
|
| 648 |
+
use_dictionary=True,
|
| 649 |
+
compression="snappy",
|
| 650 |
+
version="2.6",
|
| 651 |
+
write_statistics=None,
|
| 652 |
+
data_page_size=None,
|
| 653 |
+
compression_level=None,
|
| 654 |
+
use_byte_stream_split=False,
|
| 655 |
+
column_encoding=None,
|
| 656 |
+
data_page_version="1.0",
|
| 657 |
+
use_deprecated_int96_timestamps=False,
|
| 658 |
+
coerce_timestamps=None,
|
| 659 |
+
allow_truncated_timestamps=False,
|
| 660 |
+
use_compliant_nested_type=True,
|
| 661 |
+
encryption_properties=None,
|
| 662 |
+
write_batch_size=None,
|
| 663 |
+
dictionary_pagesize_limit=None,
|
| 664 |
+
write_page_index=False,
|
| 665 |
+
encryption_config=None,
|
| 666 |
+
write_page_checksum=False,
|
| 667 |
+
sorting_columns=None,
|
| 668 |
+
store_decimal_as_integer=False,
|
| 669 |
+
)
|
| 670 |
+
|
| 671 |
+
self._set_properties()
|
| 672 |
+
self._set_arrow_properties()
|
| 673 |
+
|
| 674 |
+
def __repr__(self):
|
| 675 |
+
return "<pyarrow.dataset.ParquetFileWriteOptions {0}>".format(
|
| 676 |
+
" ".join([f"{key}={value}" for key, value in self._properties.items()])
|
| 677 |
+
)
|
| 678 |
+
|
| 679 |
+
|
| 680 |
+
cdef set _PARQUET_READ_OPTIONS = {
|
| 681 |
+
'dictionary_columns', 'coerce_int96_timestamp_unit'
|
| 682 |
+
}
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
cdef class ParquetFragmentScanOptions(FragmentScanOptions):
|
| 686 |
+
"""
|
| 687 |
+
Scan-specific options for Parquet fragments.
|
| 688 |
+
|
| 689 |
+
Parameters
|
| 690 |
+
----------
|
| 691 |
+
use_buffered_stream : bool, default False
|
| 692 |
+
Read files through buffered input streams rather than loading entire
|
| 693 |
+
row groups at once. This may be enabled to reduce memory overhead.
|
| 694 |
+
Disabled by default.
|
| 695 |
+
buffer_size : int, default 8192
|
| 696 |
+
Size of buffered stream, if enabled. Default is 8KB.
|
| 697 |
+
pre_buffer : bool, default True
|
| 698 |
+
If enabled, pre-buffer the raw Parquet data instead of issuing one
|
| 699 |
+
read per column chunk. This can improve performance on high-latency
|
| 700 |
+
filesystems (e.g. S3, GCS) by coalescing and issuing file reads in
|
| 701 |
+
parallel using a background I/O thread pool.
|
| 702 |
+
Set to False if you want to prioritize minimal memory usage
|
| 703 |
+
over maximum speed.
|
| 704 |
+
cache_options : pyarrow.CacheOptions, default None
|
| 705 |
+
Cache options used when pre_buffer is enabled. The default values should
|
| 706 |
+
be good for most use cases. You may want to adjust these for example if
|
| 707 |
+
you have exceptionally high latency to the file system.
|
| 708 |
+
thrift_string_size_limit : int, default None
|
| 709 |
+
If not None, override the maximum total string size allocated
|
| 710 |
+
when decoding Thrift structures. The default limit should be
|
| 711 |
+
sufficient for most Parquet files.
|
| 712 |
+
thrift_container_size_limit : int, default None
|
| 713 |
+
If not None, override the maximum total size of containers allocated
|
| 714 |
+
when decoding Thrift structures. The default limit should be
|
| 715 |
+
sufficient for most Parquet files.
|
| 716 |
+
decryption_config : pyarrow.dataset.ParquetDecryptionConfig, default None
|
| 717 |
+
If not None, use the provided ParquetDecryptionConfig to decrypt the
|
| 718 |
+
Parquet file.
|
| 719 |
+
decryption_properties : pyarrow.parquet.FileDecryptionProperties, default None
|
| 720 |
+
If not None, use the provided FileDecryptionProperties to decrypt encrypted
|
| 721 |
+
Parquet file.
|
| 722 |
+
page_checksum_verification : bool, default False
|
| 723 |
+
If True, verify the page checksum for each page read from the file.
|
| 724 |
+
"""
|
| 725 |
+
|
| 726 |
+
# Avoid mistakingly creating attributes
|
| 727 |
+
__slots__ = ()
|
| 728 |
+
|
| 729 |
+
def __init__(self, *, bint use_buffered_stream=False,
|
| 730 |
+
buffer_size=8192,
|
| 731 |
+
bint pre_buffer=True,
|
| 732 |
+
cache_options=None,
|
| 733 |
+
thrift_string_size_limit=None,
|
| 734 |
+
thrift_container_size_limit=None,
|
| 735 |
+
decryption_config=None,
|
| 736 |
+
decryption_properties=None,
|
| 737 |
+
bint page_checksum_verification=False):
|
| 738 |
+
self.init(shared_ptr[CFragmentScanOptions](
|
| 739 |
+
new CParquetFragmentScanOptions()))
|
| 740 |
+
self.use_buffered_stream = use_buffered_stream
|
| 741 |
+
self.buffer_size = buffer_size
|
| 742 |
+
if pre_buffer and not is_threading_enabled():
|
| 743 |
+
pre_buffer = False
|
| 744 |
+
self.pre_buffer = pre_buffer
|
| 745 |
+
if cache_options is not None:
|
| 746 |
+
self.cache_options = cache_options
|
| 747 |
+
if thrift_string_size_limit is not None:
|
| 748 |
+
self.thrift_string_size_limit = thrift_string_size_limit
|
| 749 |
+
if thrift_container_size_limit is not None:
|
| 750 |
+
self.thrift_container_size_limit = thrift_container_size_limit
|
| 751 |
+
if decryption_config is not None:
|
| 752 |
+
self.parquet_decryption_config = decryption_config
|
| 753 |
+
if decryption_properties is not None:
|
| 754 |
+
self.decryption_properties = decryption_properties
|
| 755 |
+
self.page_checksum_verification = page_checksum_verification
|
| 756 |
+
|
| 757 |
+
cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp):
|
| 758 |
+
FragmentScanOptions.init(self, sp)
|
| 759 |
+
self.parquet_options = <CParquetFragmentScanOptions*> sp.get()
|
| 760 |
+
|
| 761 |
+
cdef CReaderProperties* reader_properties(self):
|
| 762 |
+
return self.parquet_options.reader_properties.get()
|
| 763 |
+
|
| 764 |
+
cdef ArrowReaderProperties* arrow_reader_properties(self):
|
| 765 |
+
return self.parquet_options.arrow_reader_properties.get()
|
| 766 |
+
|
| 767 |
+
@property
|
| 768 |
+
def use_buffered_stream(self):
|
| 769 |
+
return self.reader_properties().is_buffered_stream_enabled()
|
| 770 |
+
|
| 771 |
+
@use_buffered_stream.setter
|
| 772 |
+
def use_buffered_stream(self, bint use_buffered_stream):
|
| 773 |
+
if use_buffered_stream:
|
| 774 |
+
self.reader_properties().enable_buffered_stream()
|
| 775 |
+
else:
|
| 776 |
+
self.reader_properties().disable_buffered_stream()
|
| 777 |
+
|
| 778 |
+
@property
|
| 779 |
+
def buffer_size(self):
|
| 780 |
+
return self.reader_properties().buffer_size()
|
| 781 |
+
|
| 782 |
+
@buffer_size.setter
|
| 783 |
+
def buffer_size(self, buffer_size):
|
| 784 |
+
if buffer_size <= 0:
|
| 785 |
+
raise ValueError("Buffer size must be larger than zero")
|
| 786 |
+
self.reader_properties().set_buffer_size(buffer_size)
|
| 787 |
+
|
| 788 |
+
@property
|
| 789 |
+
def pre_buffer(self):
|
| 790 |
+
return self.arrow_reader_properties().pre_buffer()
|
| 791 |
+
|
| 792 |
+
@pre_buffer.setter
|
| 793 |
+
def pre_buffer(self, bint pre_buffer):
|
| 794 |
+
if pre_buffer and not is_threading_enabled():
|
| 795 |
+
return
|
| 796 |
+
self.arrow_reader_properties().set_pre_buffer(pre_buffer)
|
| 797 |
+
|
| 798 |
+
@property
|
| 799 |
+
def cache_options(self):
|
| 800 |
+
return CacheOptions.wrap(self.arrow_reader_properties().cache_options())
|
| 801 |
+
|
| 802 |
+
@cache_options.setter
|
| 803 |
+
def cache_options(self, CacheOptions options):
|
| 804 |
+
self.arrow_reader_properties().set_cache_options(options.unwrap())
|
| 805 |
+
|
| 806 |
+
@property
|
| 807 |
+
def thrift_string_size_limit(self):
|
| 808 |
+
return self.reader_properties().thrift_string_size_limit()
|
| 809 |
+
|
| 810 |
+
@thrift_string_size_limit.setter
|
| 811 |
+
def thrift_string_size_limit(self, size):
|
| 812 |
+
if size <= 0:
|
| 813 |
+
raise ValueError("size must be larger than zero")
|
| 814 |
+
self.reader_properties().set_thrift_string_size_limit(size)
|
| 815 |
+
|
| 816 |
+
@property
|
| 817 |
+
def thrift_container_size_limit(self):
|
| 818 |
+
return self.reader_properties().thrift_container_size_limit()
|
| 819 |
+
|
| 820 |
+
@thrift_container_size_limit.setter
|
| 821 |
+
def thrift_container_size_limit(self, size):
|
| 822 |
+
if size <= 0:
|
| 823 |
+
raise ValueError("size must be larger than zero")
|
| 824 |
+
self.reader_properties().set_thrift_container_size_limit(size)
|
| 825 |
+
|
| 826 |
+
@property
|
| 827 |
+
def decryption_properties(self):
|
| 828 |
+
if not parquet_encryption_enabled:
|
| 829 |
+
raise NotImplementedError(
|
| 830 |
+
"Unable to access encryption features. "
|
| 831 |
+
"Encryption is not enabled in your installation of pyarrow."
|
| 832 |
+
)
|
| 833 |
+
return self._decryption_properties
|
| 834 |
+
|
| 835 |
+
@decryption_properties.setter
|
| 836 |
+
def decryption_properties(self, config):
|
| 837 |
+
if not parquet_encryption_enabled:
|
| 838 |
+
raise NotImplementedError(
|
| 839 |
+
"Encryption is not enabled in your installation of pyarrow, but "
|
| 840 |
+
"decryption_properties were provided."
|
| 841 |
+
)
|
| 842 |
+
set_decryption_properties(self, config)
|
| 843 |
+
self._decryption_properties = config
|
| 844 |
+
|
| 845 |
+
@property
|
| 846 |
+
def parquet_decryption_config(self):
|
| 847 |
+
if not parquet_encryption_enabled:
|
| 848 |
+
raise NotImplementedError(
|
| 849 |
+
"Unable to access encryption features. "
|
| 850 |
+
"Encryption is not enabled in your installation of pyarrow."
|
| 851 |
+
)
|
| 852 |
+
return self._parquet_decryption_config
|
| 853 |
+
|
| 854 |
+
@parquet_decryption_config.setter
|
| 855 |
+
def parquet_decryption_config(self, config):
|
| 856 |
+
if not parquet_encryption_enabled:
|
| 857 |
+
raise NotImplementedError(
|
| 858 |
+
"Encryption is not enabled in your installation of pyarrow, but a "
|
| 859 |
+
"decryption_config was provided."
|
| 860 |
+
)
|
| 861 |
+
set_decryption_config(self, config)
|
| 862 |
+
self._parquet_decryption_config = config
|
| 863 |
+
|
| 864 |
+
@property
|
| 865 |
+
def page_checksum_verification(self):
|
| 866 |
+
return self.reader_properties().page_checksum_verification()
|
| 867 |
+
|
| 868 |
+
@page_checksum_verification.setter
|
| 869 |
+
def page_checksum_verification(self, bint page_checksum_verification):
|
| 870 |
+
self.reader_properties().set_page_checksum_verification(page_checksum_verification)
|
| 871 |
+
|
| 872 |
+
def equals(self, ParquetFragmentScanOptions other):
|
| 873 |
+
"""
|
| 874 |
+
Parameters
|
| 875 |
+
----------
|
| 876 |
+
other : pyarrow.dataset.ParquetFragmentScanOptions
|
| 877 |
+
|
| 878 |
+
Returns
|
| 879 |
+
-------
|
| 880 |
+
bool
|
| 881 |
+
"""
|
| 882 |
+
attrs = (
|
| 883 |
+
self.use_buffered_stream, self.buffer_size, self.pre_buffer, self.cache_options,
|
| 884 |
+
self.thrift_string_size_limit, self.thrift_container_size_limit,
|
| 885 |
+
self.page_checksum_verification)
|
| 886 |
+
other_attrs = (
|
| 887 |
+
other.use_buffered_stream, other.buffer_size, other.pre_buffer, other.cache_options,
|
| 888 |
+
other.thrift_string_size_limit,
|
| 889 |
+
other.thrift_container_size_limit, other.page_checksum_verification)
|
| 890 |
+
return attrs == other_attrs
|
| 891 |
+
|
| 892 |
+
@staticmethod
|
| 893 |
+
@binding(True) # Required for Cython < 3
|
| 894 |
+
def _reconstruct(kwargs):
|
| 895 |
+
# __reduce__ doesn't allow passing named arguments directly to the
|
| 896 |
+
# reconstructor, hence this wrapper.
|
| 897 |
+
return ParquetFragmentScanOptions(**kwargs)
|
| 898 |
+
|
| 899 |
+
def __reduce__(self):
|
| 900 |
+
kwargs = dict(
|
| 901 |
+
use_buffered_stream=self.use_buffered_stream,
|
| 902 |
+
buffer_size=self.buffer_size,
|
| 903 |
+
pre_buffer=self.pre_buffer,
|
| 904 |
+
cache_options=self.cache_options,
|
| 905 |
+
thrift_string_size_limit=self.thrift_string_size_limit,
|
| 906 |
+
thrift_container_size_limit=self.thrift_container_size_limit,
|
| 907 |
+
page_checksum_verification=self.page_checksum_verification
|
| 908 |
+
)
|
| 909 |
+
return ParquetFragmentScanOptions._reconstruct, (kwargs,)
|
| 910 |
+
|
| 911 |
+
|
| 912 |
+
cdef class ParquetFactoryOptions(_Weakrefable):
|
| 913 |
+
"""
|
| 914 |
+
Influences the discovery of parquet dataset.
|
| 915 |
+
|
| 916 |
+
Parameters
|
| 917 |
+
----------
|
| 918 |
+
partition_base_dir : str, optional
|
| 919 |
+
For the purposes of applying the partitioning, paths will be
|
| 920 |
+
stripped of the partition_base_dir. Files not matching the
|
| 921 |
+
partition_base_dir prefix will be skipped for partitioning discovery.
|
| 922 |
+
The ignored files will still be part of the Dataset, but will not
|
| 923 |
+
have partition information.
|
| 924 |
+
partitioning : Partitioning, PartitioningFactory, optional
|
| 925 |
+
The partitioning scheme applied to fragments, see ``Partitioning``.
|
| 926 |
+
validate_column_chunk_paths : bool, default False
|
| 927 |
+
Assert that all ColumnChunk paths are consistent. The parquet spec
|
| 928 |
+
allows for ColumnChunk data to be stored in multiple files, but
|
| 929 |
+
ParquetDatasetFactory supports only a single file with all ColumnChunk
|
| 930 |
+
data. If this flag is set construction of a ParquetDatasetFactory will
|
| 931 |
+
raise an error if ColumnChunk data is not resident in a single file.
|
| 932 |
+
"""
|
| 933 |
+
|
| 934 |
+
cdef:
|
| 935 |
+
CParquetFactoryOptions options
|
| 936 |
+
|
| 937 |
+
__slots__ = () # avoid mistakingly creating attributes
|
| 938 |
+
|
| 939 |
+
def __init__(self, partition_base_dir=None, partitioning=None,
|
| 940 |
+
validate_column_chunk_paths=False):
|
| 941 |
+
if isinstance(partitioning, PartitioningFactory):
|
| 942 |
+
self.partitioning_factory = partitioning
|
| 943 |
+
elif isinstance(partitioning, Partitioning):
|
| 944 |
+
self.partitioning = partitioning
|
| 945 |
+
|
| 946 |
+
if partition_base_dir is not None:
|
| 947 |
+
self.partition_base_dir = partition_base_dir
|
| 948 |
+
|
| 949 |
+
self.options.validate_column_chunk_paths = validate_column_chunk_paths
|
| 950 |
+
|
| 951 |
+
cdef inline CParquetFactoryOptions unwrap(self):
|
| 952 |
+
return self.options
|
| 953 |
+
|
| 954 |
+
@property
|
| 955 |
+
def partitioning(self):
|
| 956 |
+
"""Partitioning to apply to discovered files.
|
| 957 |
+
|
| 958 |
+
NOTE: setting this property will overwrite partitioning_factory.
|
| 959 |
+
"""
|
| 960 |
+
c_partitioning = self.options.partitioning.partitioning()
|
| 961 |
+
if c_partitioning.get() == nullptr:
|
| 962 |
+
return None
|
| 963 |
+
return Partitioning.wrap(c_partitioning)
|
| 964 |
+
|
| 965 |
+
@partitioning.setter
|
| 966 |
+
def partitioning(self, Partitioning value):
|
| 967 |
+
self.options.partitioning = (<Partitioning> value).unwrap()
|
| 968 |
+
|
| 969 |
+
@property
|
| 970 |
+
def partitioning_factory(self):
|
| 971 |
+
"""PartitioningFactory to apply to discovered files and
|
| 972 |
+
discover a Partitioning.
|
| 973 |
+
|
| 974 |
+
NOTE: setting this property will overwrite partitioning.
|
| 975 |
+
"""
|
| 976 |
+
c_factory = self.options.partitioning.factory()
|
| 977 |
+
if c_factory.get() == nullptr:
|
| 978 |
+
return None
|
| 979 |
+
return PartitioningFactory.wrap(c_factory, None, None)
|
| 980 |
+
|
| 981 |
+
@partitioning_factory.setter
|
| 982 |
+
def partitioning_factory(self, PartitioningFactory value):
|
| 983 |
+
self.options.partitioning = (<PartitioningFactory> value).unwrap()
|
| 984 |
+
|
| 985 |
+
@property
|
| 986 |
+
def partition_base_dir(self):
|
| 987 |
+
"""
|
| 988 |
+
Base directory to strip paths before applying the partitioning.
|
| 989 |
+
"""
|
| 990 |
+
return frombytes(self.options.partition_base_dir)
|
| 991 |
+
|
| 992 |
+
@partition_base_dir.setter
|
| 993 |
+
def partition_base_dir(self, value):
|
| 994 |
+
self.options.partition_base_dir = tobytes(value)
|
| 995 |
+
|
| 996 |
+
@property
|
| 997 |
+
def validate_column_chunk_paths(self):
|
| 998 |
+
"""
|
| 999 |
+
Base directory to strip paths before applying the partitioning.
|
| 1000 |
+
"""
|
| 1001 |
+
return self.options.validate_column_chunk_paths
|
| 1002 |
+
|
| 1003 |
+
@validate_column_chunk_paths.setter
|
| 1004 |
+
def validate_column_chunk_paths(self, value):
|
| 1005 |
+
self.options.validate_column_chunk_paths = value
|
| 1006 |
+
|
| 1007 |
+
|
| 1008 |
+
cdef class ParquetDatasetFactory(DatasetFactory):
|
| 1009 |
+
"""
|
| 1010 |
+
Create a ParquetDatasetFactory from a Parquet `_metadata` file.
|
| 1011 |
+
|
| 1012 |
+
Parameters
|
| 1013 |
+
----------
|
| 1014 |
+
metadata_path : str
|
| 1015 |
+
Path to the `_metadata` parquet metadata-only file generated with
|
| 1016 |
+
`pyarrow.parquet.write_metadata`.
|
| 1017 |
+
filesystem : pyarrow.fs.FileSystem
|
| 1018 |
+
Filesystem to read the metadata_path from, and subsequent parquet
|
| 1019 |
+
files.
|
| 1020 |
+
format : ParquetFileFormat
|
| 1021 |
+
Parquet format options.
|
| 1022 |
+
options : ParquetFactoryOptions, optional
|
| 1023 |
+
Various flags influencing the discovery of filesystem paths.
|
| 1024 |
+
"""
|
| 1025 |
+
|
| 1026 |
+
cdef:
|
| 1027 |
+
CParquetDatasetFactory* parquet_factory
|
| 1028 |
+
|
| 1029 |
+
def __init__(self, metadata_path, FileSystem filesystem not None,
|
| 1030 |
+
FileFormat format not None,
|
| 1031 |
+
ParquetFactoryOptions options=None):
|
| 1032 |
+
cdef:
|
| 1033 |
+
c_string c_path
|
| 1034 |
+
shared_ptr[CFileSystem] c_filesystem
|
| 1035 |
+
shared_ptr[CParquetFileFormat] c_format
|
| 1036 |
+
CResult[shared_ptr[CDatasetFactory]] result
|
| 1037 |
+
CParquetFactoryOptions c_options
|
| 1038 |
+
|
| 1039 |
+
c_path = tobytes(metadata_path)
|
| 1040 |
+
c_filesystem = filesystem.unwrap()
|
| 1041 |
+
c_format = static_pointer_cast[CParquetFileFormat, CFileFormat](
|
| 1042 |
+
format.unwrap())
|
| 1043 |
+
options = options or ParquetFactoryOptions()
|
| 1044 |
+
c_options = options.unwrap()
|
| 1045 |
+
|
| 1046 |
+
with nogil:
|
| 1047 |
+
result = CParquetDatasetFactory.MakeFromMetaDataPath(
|
| 1048 |
+
c_path, c_filesystem, c_format, c_options)
|
| 1049 |
+
self.init(GetResultValue(result))
|
| 1050 |
+
|
| 1051 |
+
cdef init(self, shared_ptr[CDatasetFactory]& sp):
|
| 1052 |
+
DatasetFactory.init(self, sp)
|
| 1053 |
+
self.parquet_factory = <CParquetDatasetFactory*> sp.get()
|
parrot/lib/python3.10/site-packages/pyarrow/_flight.pyx
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/pyarrow/_fs.pyx
ADDED
|
@@ -0,0 +1,1628 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# cython: language_level = 3
|
| 19 |
+
|
| 20 |
+
from cpython.datetime cimport datetime, PyDateTime_DateTime
|
| 21 |
+
from cython cimport binding
|
| 22 |
+
|
| 23 |
+
from pyarrow.includes.common cimport *
|
| 24 |
+
from pyarrow.includes.libarrow_python cimport PyDateTime_to_TimePoint
|
| 25 |
+
from pyarrow.lib import _detect_compression, frombytes, tobytes
|
| 26 |
+
from pyarrow.lib cimport *
|
| 27 |
+
from pyarrow.util import _stringify_path
|
| 28 |
+
|
| 29 |
+
from abc import ABC, abstractmethod
|
| 30 |
+
from datetime import datetime, timezone
|
| 31 |
+
import os
|
| 32 |
+
import pathlib
|
| 33 |
+
import sys
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
cdef _init_ca_paths():
    # Point Arrow's filesystem layer at the TLS CA certificates that
    # Python's `ssl` module resolved for this system, instead of the
    # paths hardcoded into libcurl at compile time.
    cdef CFileSystemGlobalOptions options

    import ssl
    paths = ssl.get_default_verify_paths()
    if paths.cafile:
        # os.fsencode: paths cross the C boundary as bytes
        options.tls_ca_file_path = os.fsencode(paths.cafile)
    if paths.capath:
        options.tls_ca_dir_path = os.fsencode(paths.capath)
    check_status(CFileSystemsInitialize(options))
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
if sys.platform == 'linux':
    # ARROW-9261: On Linux, we may need to fixup the paths to TLS CA certs
    # (especially in manylinux packages) since the values hardcoded at
    # compile-time in libcurl may be wrong.
    _init_ca_paths()
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
cdef inline c_string _path_as_bytes(path) except *:
    # Convert a user-supplied path to the bytes form expected by the C++
    # filesystem APIs, rejecting non-string inputs.
    # handle only abstract paths, not bound to any filesystem like pathlib is,
    # so we only accept plain strings
    if not isinstance(path, (bytes, str)):
        raise TypeError('Path must be a string')
    # tobytes always uses utf-8, which is more or less ok, at least on Windows
    # since the C++ side then decodes from utf-8. On Unix, os.fsencode may be
    # better.
    return tobytes(path)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
cdef object _wrap_file_type(CFileType ty):
    # Convert a C++ CFileType enum value into the Python-level FileType enum.
    return FileType(<int8_t> ty)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
cdef CFileType _unwrap_file_type(FileType ty) except *:
    """
    Convert a Python FileType enum member to its C++ CFileType value.

    Raises
    ------
    ValueError
        If `ty` is not one of the known FileType members.
    """
    if ty == FileType.Unknown:
        return CFileType_Unknown
    elif ty == FileType.NotFound:
        return CFileType_NotFound
    elif ty == FileType.File:
        return CFileType_File
    elif ty == FileType.Directory:
        return CFileType_Directory
    # The original `assert 0` is silently stripped when assertions are
    # disabled and carries no diagnostic; raise an explicit error so an
    # unexpected enum member can never fall through undetected.
    raise ValueError(f"Unhandled FileType value: {ty!r}")
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def _file_type_to_string(ty):
|
| 83 |
+
# Python 3.11 changed str(IntEnum) to return the string representation
|
| 84 |
+
# of the integer value: https://github.com/python/cpython/issues/94763
|
| 85 |
+
return f"{ty.__class__.__name__}.{ty._name_}"
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
cdef class FileInfo(_Weakrefable):
    """
    FileSystem entry info.

    Parameters
    ----------
    path : str
        The full path to the filesystem entry.
    type : FileType
        The type of the filesystem entry.
    mtime : datetime or float, default None
        If given, the modification time of the filesystem entry.
        If a float is given, it is the number of seconds since the
        Unix epoch.
    mtime_ns : int, default None
        If given, the modification time of the filesystem entry,
        in nanoseconds since the Unix epoch.
        `mtime` and `mtime_ns` are mutually exclusive.
    size : int, default None
        If given, the filesystem entry size in bytes. This should only
        be given if `type` is `FileType.File`.

    Examples
    --------
    Generate a file:

    >>> from pyarrow import fs
    >>> local = fs.LocalFileSystem()
    >>> path_fs = local_path + '/pyarrow-fs-example.dat'
    >>> with local.open_output_stream(path_fs) as stream:
    ...     stream.write(b'data')
    4

    Get FileInfo object using ``get_file_info()``:

    >>> file_info = local.get_file_info(path_fs)
    >>> file_info
    <FileInfo for '.../pyarrow-fs-example.dat': type=FileType.File, size=4>

    Inspect FileInfo attributes:

    >>> file_info.type
    <FileType.File: 2>

    >>> file_info.is_file
    True

    >>> file_info.path
    '/.../pyarrow-fs-example.dat'

    >>> file_info.base_name
    'pyarrow-fs-example.dat'

    >>> file_info.size
    4

    >>> file_info.extension
    'dat'

    >>> file_info.mtime # doctest: +SKIP
    datetime.datetime(2022, 6, 29, 7, 56, 10, 873922, tzinfo=datetime.timezone.utc)

    >>> file_info.mtime_ns # doctest: +SKIP
    1656489370873922073
    """

    def __init__(self, path, FileType type=FileType.Unknown, *,
                 mtime=None, mtime_ns=None, size=None):
        self.info.set_path(tobytes(path))
        self.info.set_type(_unwrap_file_type(type))
        if mtime is not None:
            if mtime_ns is not None:
                raise TypeError("Only one of mtime and mtime_ns "
                                "can be given")
            if isinstance(mtime, datetime):
                self.info.set_mtime(PyDateTime_to_TimePoint(
                    <PyDateTime_DateTime*> mtime))
            else:
                # A number is taken as seconds since the Unix epoch.
                self.info.set_mtime(TimePoint_from_s(mtime))
        elif mtime_ns is not None:
            self.info.set_mtime(TimePoint_from_ns(mtime_ns))
        if size is not None:
            self.info.set_size(size)

    @staticmethod
    cdef wrap(CFileInfo info):
        # Construct without running __init__, then move the C++ value in.
        cdef FileInfo self = FileInfo.__new__(FileInfo)
        self.info = move(info)
        return self

    cdef inline CFileInfo unwrap(self) nogil:
        # Copy out the underlying C++ FileInfo value.
        return self.info

    @staticmethod
    cdef CFileInfo unwrap_safe(obj):
        # Like unwrap(), but validates the Python-level type first so a
        # bad argument raises TypeError instead of crashing on a cast.
        if not isinstance(obj, FileInfo):
            raise TypeError("Expected FileInfo instance, got {0}"
                            .format(type(obj)))
        return (<FileInfo> obj).unwrap()

    def __repr__(self):
        # NOTE(review): `getvalue` is currently unused in this method.
        def getvalue(attr):
            try:
                return getattr(self, attr)
            except ValueError:
                return ''

        s = (f'<FileInfo for {self.path!r}: '
             f'type={_file_type_to_string(self.type)}')
        if self.is_file:
            s += f', size={self.size}'
        s += '>'
        return s

    @property
    def type(self):
        """
        Type of the file.

        The returned enum values can be the following:

        - FileType.NotFound: target does not exist
        - FileType.Unknown: target exists but its type is unknown (could be a
          special file such as a Unix socket or character device, or
          Windows NUL / CON / ...)
        - FileType.File: target is a regular file
        - FileType.Directory: target is a regular directory

        Returns
        -------
        type : FileType
        """
        return _wrap_file_type(self.info.type())

    @property
    def is_file(self):
        """
        Whether this entry is a regular file (``type == FileType.File``).

        Returns
        -------
        bool
        """
        return self.type == FileType.File

    @property
    def path(self):
        """
        The full file path in the filesystem.

        Examples
        --------
        >>> file_info = local.get_file_info(path)
        >>> file_info.path
        '/.../pyarrow-fs-example.dat'
        """
        return frombytes(self.info.path())

    @property
    def base_name(self):
        """
        The file base name.

        Component after the last directory separator.

        Examples
        --------
        >>> file_info = local.get_file_info(path)
        >>> file_info.base_name
        'pyarrow-fs-example.dat'
        """
        return frombytes(self.info.base_name())

    @property
    def size(self):
        """
        The size in bytes, if available.

        Only regular files are guaranteed to have a size.

        Returns
        -------
        size : int or None
        """
        cdef int64_t size
        size = self.info.size()
        # -1 is the C++ sentinel for "size unknown / not applicable"
        return (size if size != -1 else None)

    @property
    def extension(self):
        """
        The file extension.

        Examples
        --------
        >>> file_info = local.get_file_info(path)
        >>> file_info.extension
        'dat'
        """
        return frombytes(self.info.extension())

    @property
    def mtime(self):
        """
        The time of last modification, if available.

        Returns
        -------
        mtime : datetime.datetime or None

        Examples
        --------
        >>> file_info = local.get_file_info(path)
        >>> file_info.mtime # doctest: +SKIP
        datetime.datetime(2022, 6, 29, 7, 56, 10, 873922, tzinfo=datetime.timezone.utc)
        """
        cdef int64_t nanoseconds
        nanoseconds = TimePoint_to_ns(self.info.mtime())
        # -1 is the C++ sentinel for "mtime not available"
        return (datetime.fromtimestamp(nanoseconds / 1.0e9, timezone.utc)
                if nanoseconds != -1 else None)

    @property
    def mtime_ns(self):
        """
        The time of last modification, if available, expressed in nanoseconds
        since the Unix epoch.

        Returns
        -------
        mtime_ns : int or None

        Examples
        --------
        >>> file_info = local.get_file_info(path)
        >>> file_info.mtime_ns # doctest: +SKIP
        1656489370873922073
        """
        cdef int64_t nanoseconds
        nanoseconds = TimePoint_to_ns(self.info.mtime())
        return (nanoseconds if nanoseconds != -1 else None)
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
cdef class FileSelector(_Weakrefable):
    """
    File and directory selector.

    It contains a set of options that describes how to search for files and
    directories.

    Parameters
    ----------
    base_dir : str
        The directory in which to select files. Relative paths also work, use
        '.' for the current directory and '..' for the parent.
    allow_not_found : bool, default False
        The behavior if `base_dir` doesn't exist in the filesystem.
        If false, an error is returned.
        If true, an empty selection is returned.
    recursive : bool, default False
        Whether to recurse into subdirectories.

    Examples
    --------
    List the contents of a directory and subdirectories:

    >>> selector_1 = fs.FileSelector(local_path, recursive=True)
    >>> local.get_file_info(selector_1) # doctest: +SKIP
    [<FileInfo for 'tmp/alphabet/example.dat': type=FileType.File, size=4>,
    <FileInfo for 'tmp/alphabet/subdir': type=FileType.Directory>,
    <FileInfo for 'tmp/alphabet/subdir/example_copy.dat': type=FileType.File, size=4>]

    List only the contents of the base directory:

    >>> selector_2 = fs.FileSelector(local_path)
    >>> local.get_file_info(selector_2) # doctest: +SKIP
    [<FileInfo for 'tmp/alphabet/example.dat': type=FileType.File, size=4>,
    <FileInfo for 'tmp/alphabet/subdir': type=FileType.Directory>]

    Return empty selection if the directory doesn't exist:

    >>> selector_not_found = fs.FileSelector(local_path + '/missing',
    ...                                      recursive=True,
    ...                                      allow_not_found=True)
    >>> local.get_file_info(selector_not_found)
    []
    """

    def __init__(self, base_dir, bint allow_not_found=False,
                 bint recursive=False):
        # The property setters below forward each value into the underlying
        # C++ CFileSelector struct.
        self.base_dir = base_dir
        self.recursive = recursive
        self.allow_not_found = allow_not_found

    @staticmethod
    cdef FileSelector wrap(CFileSelector wrapped):
        # Construct without running __init__, then move the C++ value in.
        cdef FileSelector self = FileSelector.__new__(FileSelector)
        self.selector = move(wrapped)
        return self

    cdef inline CFileSelector unwrap(self) nogil:
        # Copy out the underlying C++ FileSelector value.
        return self.selector

    @property
    def base_dir(self):
        return frombytes(self.selector.base_dir)

    @base_dir.setter
    def base_dir(self, base_dir):
        self.selector.base_dir = _path_as_bytes(base_dir)

    @property
    def allow_not_found(self):
        return self.selector.allow_not_found

    @allow_not_found.setter
    def allow_not_found(self, bint allow_not_found):
        self.selector.allow_not_found = allow_not_found

    @property
    def recursive(self):
        return self.selector.recursive

    @recursive.setter
    def recursive(self, bint recursive):
        self.selector.recursive = recursive

    def __repr__(self):
        return ("<FileSelector base_dir={0.base_dir!r} "
                "recursive={0.recursive}>".format(self))
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
cdef class FileSystem(_Weakrefable):
|
| 415 |
+
"""
|
| 416 |
+
Abstract file system API.
|
| 417 |
+
"""
|
| 418 |
+
|
| 419 |
+
    def __init__(self):
        # FileSystem is abstract: direct construction is always an error.
        raise TypeError("FileSystem is an abstract class, instantiate one of "
                        "the subclasses instead: LocalFileSystem or "
                        "SubTreeFileSystem")
|
| 423 |
+
|
| 424 |
+
@staticmethod
|
| 425 |
+
@binding(True) # Required for cython < 3
|
| 426 |
+
def _from_uri(uri):
|
| 427 |
+
fs, _path = FileSystem.from_uri(uri)
|
| 428 |
+
return fs
|
| 429 |
+
|
| 430 |
+
    @staticmethod
    def from_uri(uri):
        """
        Create a new FileSystem from URI or Path.

        Recognized URI schemes are "file", "mock", "s3fs", "gs", "gcs", "hdfs" and "viewfs".
        In addition, the argument can be a pathlib.Path object, or a string
        describing an absolute local path.

        Parameters
        ----------
        uri : string
            URI-based path, for example: file:///some/local/path.

        Returns
        -------
        tuple of (FileSystem, str path)
            With (filesystem, path) tuple where path is the abstract path
            inside the FileSystem instance.

        Examples
        --------
        Create a new FileSystem subclass from a URI:

        >>> uri = 'file:///{}/pyarrow-fs-example.dat'.format(local_path)
        >>> local_new, path_new = fs.FileSystem.from_uri(uri)
        >>> local_new
        <pyarrow._fs.LocalFileSystem object at ...
        >>> path_new
        '/.../pyarrow-fs-example.dat'

        Or from a s3 bucket:

        >>> fs.FileSystem.from_uri("s3://usgs-landsat/collection02/")
        (<pyarrow._s3fs.S3FileSystem object at ...>, 'usgs-landsat/collection02')
        """
        cdef:
            c_string c_path
            c_string c_uri
            CResult[shared_ptr[CFileSystem]] result

        if isinstance(uri, pathlib.Path):
            # Make absolute
            uri = uri.resolve().absolute()
        c_uri = tobytes(_stringify_path(uri))
        with nogil:
            # The C++ side parses the URI (or bare path) and fills c_path
            # with the filesystem-internal portion.
            result = CFileSystemFromUriOrPath(c_uri, &c_path)
        return FileSystem.wrap(GetResultValue(result)), frombytes(c_path)
|
| 478 |
+
|
| 479 |
+
    cdef init(self, const shared_ptr[CFileSystem]& wrapped):
        # Keep the shared_ptr alive and cache the raw pointer for fast access.
        self.wrapped = wrapped
        self.fs = wrapped.get()
|
| 482 |
+
|
| 483 |
+
    @staticmethod
    cdef wrap(const shared_ptr[CFileSystem]& sp):
        # Dispatch on the C++ filesystem's type name to instantiate the
        # matching Python subclass (importing optional backends lazily).
        cdef FileSystem self

        typ = frombytes(sp.get().type_name())
        if typ == 'local':
            self = LocalFileSystem.__new__(LocalFileSystem)
        elif typ == 'mock':
            self = _MockFileSystem.__new__(_MockFileSystem)
        elif typ == 'subtree':
            self = SubTreeFileSystem.__new__(SubTreeFileSystem)
        elif typ == 's3':
            from pyarrow._s3fs import S3FileSystem
            self = S3FileSystem.__new__(S3FileSystem)
        elif typ == 'gcs':
            from pyarrow._gcsfs import GcsFileSystem
            self = GcsFileSystem.__new__(GcsFileSystem)
        elif typ == 'abfs':
            from pyarrow._azurefs import AzureFileSystem
            self = AzureFileSystem.__new__(AzureFileSystem)
        elif typ == 'hdfs':
            from pyarrow._hdfs import HadoopFileSystem
            self = HadoopFileSystem.__new__(HadoopFileSystem)
        elif typ.startswith('py::'):
            # Python-implemented filesystems carry a "py::" prefix.
            self = PyFileSystem.__new__(PyFileSystem)
        else:
            raise TypeError('Cannot wrap FileSystem pointer')

        self.init(sp)
        return self
|
| 513 |
+
|
| 514 |
+
    cdef inline shared_ptr[CFileSystem] unwrap(self) nogil:
        # Hand back a shared_ptr copy of the underlying C++ filesystem.
        return self.wrapped
|
| 516 |
+
|
| 517 |
+
    def equals(self, FileSystem other not None):
        """
        Test if this filesystem equals another.

        Parameters
        ----------
        other : pyarrow.fs.FileSystem

        Returns
        -------
        bool
        """
        return self.fs.Equals(other.unwrap())
|
| 528 |
+
|
| 529 |
+
def __eq__(self, other):
|
| 530 |
+
try:
|
| 531 |
+
return self.equals(other)
|
| 532 |
+
except TypeError:
|
| 533 |
+
return NotImplemented
|
| 534 |
+
|
| 535 |
+
    @property
    def type_name(self):
        """
        The filesystem's type name.
        """
        return frombytes(self.fs.type_name())
|
| 541 |
+
|
| 542 |
+
    def get_file_info(self, paths_or_selector):
        """
        Get info for the given files.

        Any symlink is automatically dereferenced, recursively. A non-existing
        or unreachable file returns a FileStat object and has a FileType of
        value NotFound. An exception indicates a truly exceptional condition
        (low-level I/O error, etc.).

        Parameters
        ----------
        paths_or_selector : FileSelector, path-like or list of path-likes
            Either a selector object, a path-like object or a list of
            path-like objects. The selector's base directory will not be
            part of the results, even if it exists. If it doesn't exist,
            use `allow_not_found`.

        Returns
        -------
        FileInfo or list of FileInfo
            Single FileInfo object is returned for a single path, otherwise
            a list of FileInfo objects is returned.

        Examples
        --------
        >>> local
        <pyarrow._fs.LocalFileSystem object at ...>
        >>> local.get_file_info("/{}/pyarrow-fs-example.dat".format(local_path))
        <FileInfo for '/.../pyarrow-fs-example.dat': type=FileType.File, size=4>
        """
        cdef:
            CFileInfo info
            c_string path
            vector[CFileInfo] infos
            vector[c_string] paths
            CFileSelector selector

        # Three input shapes: a selector, a list/tuple of paths, or one path.
        # The GIL is released around each blocking C++ call.
        if isinstance(paths_or_selector, FileSelector):
            with nogil:
                selector = (<FileSelector>paths_or_selector).selector
                infos = GetResultValue(self.fs.GetFileInfo(selector))
        elif isinstance(paths_or_selector, (list, tuple)):
            paths = [_path_as_bytes(s) for s in paths_or_selector]
            with nogil:
                infos = GetResultValue(self.fs.GetFileInfo(paths))
        elif isinstance(paths_or_selector, (bytes, str)):
            path = _path_as_bytes(paths_or_selector)
            with nogil:
                info = GetResultValue(self.fs.GetFileInfo(path))
            # Single path: return a single FileInfo, not a list.
            return FileInfo.wrap(info)
        else:
            raise TypeError('Must pass either path(s) or a FileSelector')

        return [FileInfo.wrap(info) for info in infos]
|
| 596 |
+
|
| 597 |
+
    def create_dir(self, path, *, bint recursive=True):
        """
        Create a directory and subdirectories.

        This function succeeds if the directory already exists.

        Parameters
        ----------
        path : str
            The path of the new directory.
        recursive : bool, default True
            Create nested directories as well.
        """
        cdef c_string directory = _path_as_bytes(path)
        with nogil:
            check_status(self.fs.CreateDir(directory, recursive=recursive))
|
| 613 |
+
|
| 614 |
+
    def delete_dir(self, path):
        """
        Delete a directory and its contents, recursively.

        Parameters
        ----------
        path : str
            The path of the directory to be deleted.
        """
        cdef c_string directory = _path_as_bytes(path)
        with nogil:
            check_status(self.fs.DeleteDir(directory))
|
| 626 |
+
|
| 627 |
+
    def delete_dir_contents(self, path, *,
                            bint accept_root_dir=False,
                            bint missing_dir_ok=False):
        """
        Delete a directory's contents, recursively.

        Like delete_dir, but doesn't delete the directory itself.

        Parameters
        ----------
        path : str
            The path of the directory to be deleted.
        accept_root_dir : boolean, default False
            Allow deleting the root directory's contents
            (if path is empty or "/")
        missing_dir_ok : boolean, default False
            If False then an error is raised if path does
            not exist
        """
        cdef c_string directory = _path_as_bytes(path)
        # A path that is empty or only slashes denotes the filesystem root;
        # wiping it is destructive enough to require explicit opt-in.
        if accept_root_dir and directory.strip(b"/") == b"":
            with nogil:
                check_status(self.fs.DeleteRootDirContents())
        else:
            with nogil:
                check_status(self.fs.DeleteDirContents(directory,
                                                       missing_dir_ok))
|
| 654 |
+
|
| 655 |
+
    def move(self, src, dest):
        """
        Move / rename a file or directory.

        If the destination exists:
        - if it is a non-empty directory, an error is returned
        - otherwise, if it has the same type as the source, it is replaced
        - otherwise, behavior is unspecified (implementation-dependent).

        Parameters
        ----------
        src : str
            The path of the file or the directory to be moved.
        dest : str
            The destination path where the file or directory is moved to.

        Examples
        --------
        Create a new folder with a file:

        >>> local.create_dir('/tmp/other_dir')
        >>> local.copy_file(path,'/tmp/move_example.dat')

        Move the file:

        >>> local.move('/tmp/move_example.dat',
        ...            '/tmp/other_dir/move_example_2.dat')

        Inspect the file info:

        >>> local.get_file_info('/tmp/other_dir/move_example_2.dat')
        <FileInfo for '/tmp/other_dir/move_example_2.dat': type=FileType.File, size=4>
        >>> local.get_file_info('/tmp/move_example.dat')
        <FileInfo for '/tmp/move_example.dat': type=FileType.NotFound>

        Delete the folder:
        >>> local.delete_dir('/tmp/other_dir')
        """
        cdef:
            c_string source = _path_as_bytes(src)
            c_string destination = _path_as_bytes(dest)
        with nogil:
            check_status(self.fs.Move(source, destination))
|
| 698 |
+
|
| 699 |
+
    def copy_file(self, src, dest):
        """
        Copy a file.

        If the destination exists and is a directory, an error is returned.
        Otherwise, it is replaced.

        Parameters
        ----------
        src : str
            The path of the file to be copied from.
        dest : str
            The destination path where the file is copied to.

        Examples
        --------
        >>> local.copy_file(path,
        ...                 local_path + '/pyarrow-fs-example_copy.dat')

        Inspect the file info:

        >>> local.get_file_info(local_path + '/pyarrow-fs-example_copy.dat')
        <FileInfo for '/.../pyarrow-fs-example_copy.dat': type=FileType.File, size=4>
        >>> local.get_file_info(path)
        <FileInfo for '/.../pyarrow-fs-example.dat': type=FileType.File, size=4>
        """
        cdef:
            c_string source = _path_as_bytes(src)
            c_string destination = _path_as_bytes(dest)
        with nogil:
            check_status(self.fs.CopyFile(source, destination))
|
| 730 |
+
|
| 731 |
+
    def delete_file(self, path):
        """
        Delete a file.

        Parameters
        ----------
        path : str
            The path of the file to be deleted.
        """
        cdef c_string file = _path_as_bytes(path)
        with nogil:
            check_status(self.fs.DeleteFile(file))
|
| 743 |
+
|
| 744 |
+
    def _wrap_input_stream(self, stream, path, compression, buffer_size):
        # Layer optional buffering first, then optional decompression on top,
        # so decompression reads through the buffer.
        if buffer_size is not None and buffer_size != 0:
            stream = BufferedInputStream(stream, buffer_size)
        if compression == 'detect':
            # Infer the codec from the path's file extension.
            compression = _detect_compression(path)
        if compression is not None:
            stream = CompressedInputStream(stream, compression)
        return stream
|
| 752 |
+
|
| 753 |
+
    def _wrap_output_stream(self, stream, path, compression, buffer_size):
        # Layer optional buffering first, then optional compression on top,
        # so compressed bytes pass through the buffer.
        if buffer_size is not None and buffer_size != 0:
            stream = BufferedOutputStream(stream, buffer_size)
        if compression == 'detect':
            # Infer the codec from the path's file extension.
            compression = _detect_compression(path)
        if compression is not None:
            stream = CompressedOutputStream(stream, compression)
        return stream
|
| 761 |
+
|
| 762 |
+
    def open_input_file(self, path):
        """
        Open an input file for random access reading.

        Parameters
        ----------
        path : str
            The source to open for reading.

        Returns
        -------
        stream : NativeFile

        Examples
        --------
        Print the data from the file with `open_input_file()`:

        >>> with local.open_input_file(path) as f:
        ...     print(f.readall())
        b'data'
        """
        cdef:
            c_string pathstr = _path_as_bytes(path)
            NativeFile stream = NativeFile()
            shared_ptr[CRandomAccessFile] in_handle

        with nogil:
            in_handle = GetResultValue(self.fs.OpenInputFile(pathstr))

        stream.set_random_access_file(in_handle)
        stream.is_readable = True
        return stream
|
| 794 |
+
|
| 795 |
+
    def open_input_stream(self, path, compression='detect', buffer_size=None):
        """
        Open an input stream for sequential reading.

        Parameters
        ----------
        path : str
            The source to open for reading.
        compression : str optional, default 'detect'
            The compression algorithm to use for on-the-fly decompression.
            If "detect" and source is a file path, then compression will be
            chosen based on the file extension.
            If None, no compression will be applied. Otherwise, a well-known
            algorithm name must be supplied (e.g. "gzip").
        buffer_size : int optional, default None
            If None or 0, no buffering will happen. Otherwise the size of the
            temporary read buffer.

        Returns
        -------
        stream : NativeFile

        Examples
        --------
        Print the data from the file with `open_input_stream()`:

        >>> with local.open_input_stream(path) as f:
        ...     print(f.readall())
        b'data'
        """
        cdef:
            c_string pathstr = _path_as_bytes(path)
            NativeFile stream = NativeFile()
            shared_ptr[CInputStream] in_handle

        with nogil:
            in_handle = GetResultValue(self.fs.OpenInputStream(pathstr))

        stream.set_input_stream(in_handle)
        stream.is_readable = True

        # Apply optional buffering / decompression layers on top.
        return self._wrap_input_stream(
            stream, path=path, compression=compression, buffer_size=buffer_size
        )
|
| 839 |
+
|
| 840 |
+
    def open_output_stream(self, path, compression='detect',
                           buffer_size=None, metadata=None):
        """
        Open an output stream for sequential writing.

        If the target already exists, existing data is truncated.

        Parameters
        ----------
        path : str
            The source to open for writing.
        compression : str optional, default 'detect'
            The compression algorithm to use for on-the-fly compression.
            If "detect" and source is a file path, then compression will be
            chosen based on the file extension.
            If None, no compression will be applied. Otherwise, a well-known
            algorithm name must be supplied (e.g. "gzip").
        buffer_size : int optional, default None
            If None or 0, no buffering will happen. Otherwise the size of the
            temporary write buffer.
        metadata : dict optional, default None
            If not None, a mapping of string keys to string values.
            Some filesystems support storing metadata along the file
            (such as "Content-Type").
            Unsupported metadata keys will be ignored.

        Returns
        -------
        stream : NativeFile

        Examples
        --------
        >>> local = fs.LocalFileSystem()
        >>> with local.open_output_stream(path) as stream:
        ...     stream.write(b'data')
        4
        """
        cdef:
            c_string pathstr = _path_as_bytes(path)
            NativeFile stream = NativeFile()
            shared_ptr[COutputStream] out_handle
            shared_ptr[const CKeyValueMetadata] c_metadata

        if metadata is not None:
            c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(metadata))

        with nogil:
            out_handle = GetResultValue(
                self.fs.OpenOutputStream(pathstr, c_metadata))

        stream.set_output_stream(out_handle)
        stream.is_writable = True

        # Apply optional buffering / compression layers on top.
        return self._wrap_output_stream(
            stream, path=path, compression=compression, buffer_size=buffer_size
        )
|
| 896 |
+
|
| 897 |
+
def open_append_stream(self, path, compression='detect',
|
| 898 |
+
buffer_size=None, metadata=None):
|
| 899 |
+
"""
|
| 900 |
+
Open an output stream for appending.
|
| 901 |
+
|
| 902 |
+
If the target doesn't exist, a new empty file is created.
|
| 903 |
+
|
| 904 |
+
.. note::
|
| 905 |
+
Some filesystem implementations do not support efficient
|
| 906 |
+
appending to an existing file, in which case this method will
|
| 907 |
+
raise NotImplementedError.
|
| 908 |
+
Consider writing to multiple files (using e.g. the dataset layer)
|
| 909 |
+
instead.
|
| 910 |
+
|
| 911 |
+
Parameters
|
| 912 |
+
----------
|
| 913 |
+
path : str
|
| 914 |
+
The source to open for writing.
|
| 915 |
+
compression : str optional, default 'detect'
|
| 916 |
+
The compression algorithm to use for on-the-fly compression.
|
| 917 |
+
If "detect" and source is a file path, then compression will be
|
| 918 |
+
chosen based on the file extension.
|
| 919 |
+
If None, no compression will be applied. Otherwise, a well-known
|
| 920 |
+
algorithm name must be supplied (e.g. "gzip").
|
| 921 |
+
buffer_size : int optional, default None
|
| 922 |
+
If None or 0, no buffering will happen. Otherwise the size of the
|
| 923 |
+
temporary write buffer.
|
| 924 |
+
metadata : dict optional, default None
|
| 925 |
+
If not None, a mapping of string keys to string values.
|
| 926 |
+
Some filesystems support storing metadata along the file
|
| 927 |
+
(such as "Content-Type").
|
| 928 |
+
Unsupported metadata keys will be ignored.
|
| 929 |
+
|
| 930 |
+
Returns
|
| 931 |
+
-------
|
| 932 |
+
stream : NativeFile
|
| 933 |
+
|
| 934 |
+
Examples
|
| 935 |
+
--------
|
| 936 |
+
Append new data to a FileSystem subclass with nonempty file:
|
| 937 |
+
|
| 938 |
+
>>> with local.open_append_stream(path) as f:
|
| 939 |
+
... f.write(b'+newly added')
|
| 940 |
+
12
|
| 941 |
+
|
| 942 |
+
Print out the content fo the file:
|
| 943 |
+
|
| 944 |
+
>>> with local.open_input_file(path) as f:
|
| 945 |
+
... print(f.readall())
|
| 946 |
+
b'data+newly added'
|
| 947 |
+
"""
|
| 948 |
+
cdef:
|
| 949 |
+
c_string pathstr = _path_as_bytes(path)
|
| 950 |
+
NativeFile stream = NativeFile()
|
| 951 |
+
shared_ptr[COutputStream] out_handle
|
| 952 |
+
shared_ptr[const CKeyValueMetadata] c_metadata
|
| 953 |
+
|
| 954 |
+
if metadata is not None:
|
| 955 |
+
c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(metadata))
|
| 956 |
+
|
| 957 |
+
with nogil:
|
| 958 |
+
out_handle = GetResultValue(
|
| 959 |
+
self.fs.OpenAppendStream(pathstr, c_metadata))
|
| 960 |
+
|
| 961 |
+
stream.set_output_stream(out_handle)
|
| 962 |
+
stream.is_writable = True
|
| 963 |
+
|
| 964 |
+
return self._wrap_output_stream(
|
| 965 |
+
stream, path=path, compression=compression, buffer_size=buffer_size
|
| 966 |
+
)
|
| 967 |
+
|
| 968 |
+
def normalize_path(self, path):
|
| 969 |
+
"""
|
| 970 |
+
Normalize filesystem path.
|
| 971 |
+
|
| 972 |
+
Parameters
|
| 973 |
+
----------
|
| 974 |
+
path : str
|
| 975 |
+
The path to normalize
|
| 976 |
+
|
| 977 |
+
Returns
|
| 978 |
+
-------
|
| 979 |
+
normalized_path : str
|
| 980 |
+
The normalized path
|
| 981 |
+
"""
|
| 982 |
+
cdef:
|
| 983 |
+
c_string c_path = _path_as_bytes(path)
|
| 984 |
+
c_string c_path_normalized
|
| 985 |
+
|
| 986 |
+
c_path_normalized = GetResultValue(self.fs.NormalizePath(c_path))
|
| 987 |
+
return frombytes(c_path_normalized)
|
| 988 |
+
|
| 989 |
+
|
| 990 |
+
cdef class LocalFileSystem(FileSystem):
|
| 991 |
+
"""
|
| 992 |
+
A FileSystem implementation accessing files on the local machine.
|
| 993 |
+
|
| 994 |
+
Details such as symlinks are abstracted away (symlinks are always followed,
|
| 995 |
+
except when deleting an entry).
|
| 996 |
+
|
| 997 |
+
Parameters
|
| 998 |
+
----------
|
| 999 |
+
use_mmap : bool, default False
|
| 1000 |
+
Whether open_input_stream and open_input_file should return
|
| 1001 |
+
a mmap'ed file or a regular file.
|
| 1002 |
+
|
| 1003 |
+
Examples
|
| 1004 |
+
--------
|
| 1005 |
+
Create a FileSystem object with LocalFileSystem constructor:
|
| 1006 |
+
|
| 1007 |
+
>>> from pyarrow import fs
|
| 1008 |
+
>>> local = fs.LocalFileSystem()
|
| 1009 |
+
>>> local
|
| 1010 |
+
<pyarrow._fs.LocalFileSystem object at ...>
|
| 1011 |
+
|
| 1012 |
+
and write data on to the file:
|
| 1013 |
+
|
| 1014 |
+
>>> with local.open_output_stream('/tmp/local_fs.dat') as stream:
|
| 1015 |
+
... stream.write(b'data')
|
| 1016 |
+
4
|
| 1017 |
+
>>> with local.open_input_stream('/tmp/local_fs.dat') as stream:
|
| 1018 |
+
... print(stream.readall())
|
| 1019 |
+
b'data'
|
| 1020 |
+
|
| 1021 |
+
Create a FileSystem object inferred from a URI of the saved file:
|
| 1022 |
+
|
| 1023 |
+
>>> local_new, path = fs.LocalFileSystem().from_uri('/tmp/local_fs.dat')
|
| 1024 |
+
>>> local_new
|
| 1025 |
+
<pyarrow._fs.LocalFileSystem object at ...
|
| 1026 |
+
>>> path
|
| 1027 |
+
'/tmp/local_fs.dat'
|
| 1028 |
+
|
| 1029 |
+
Check if FileSystems `local` and `local_new` are equal:
|
| 1030 |
+
|
| 1031 |
+
>>> local.equals(local_new)
|
| 1032 |
+
True
|
| 1033 |
+
|
| 1034 |
+
Compare two different FileSystems:
|
| 1035 |
+
|
| 1036 |
+
>>> local2 = fs.LocalFileSystem(use_mmap=True)
|
| 1037 |
+
>>> local.equals(local2)
|
| 1038 |
+
False
|
| 1039 |
+
|
| 1040 |
+
Copy a file and print out the data:
|
| 1041 |
+
|
| 1042 |
+
>>> local.copy_file('/tmp/local_fs.dat', '/tmp/local_fs-copy.dat')
|
| 1043 |
+
>>> with local.open_input_stream('/tmp/local_fs-copy.dat') as stream:
|
| 1044 |
+
... print(stream.readall())
|
| 1045 |
+
...
|
| 1046 |
+
b'data'
|
| 1047 |
+
|
| 1048 |
+
Open an output stream for appending, add text and print the new data:
|
| 1049 |
+
|
| 1050 |
+
>>> with local.open_append_stream('/tmp/local_fs-copy.dat') as f:
|
| 1051 |
+
... f.write(b'+newly added')
|
| 1052 |
+
12
|
| 1053 |
+
|
| 1054 |
+
>>> with local.open_input_stream('/tmp/local_fs-copy.dat') as f:
|
| 1055 |
+
... print(f.readall())
|
| 1056 |
+
b'data+newly added'
|
| 1057 |
+
|
| 1058 |
+
Create a directory, copy a file into it and then delete the whole directory:
|
| 1059 |
+
|
| 1060 |
+
>>> local.create_dir('/tmp/new_folder')
|
| 1061 |
+
>>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat')
|
| 1062 |
+
>>> local.get_file_info('/tmp/new_folder')
|
| 1063 |
+
<FileInfo for '/tmp/new_folder': type=FileType.Directory>
|
| 1064 |
+
>>> local.delete_dir('/tmp/new_folder')
|
| 1065 |
+
>>> local.get_file_info('/tmp/new_folder')
|
| 1066 |
+
<FileInfo for '/tmp/new_folder': type=FileType.NotFound>
|
| 1067 |
+
|
| 1068 |
+
Create a directory, copy a file into it and then delete
|
| 1069 |
+
the content of the directory:
|
| 1070 |
+
|
| 1071 |
+
>>> local.create_dir('/tmp/new_folder')
|
| 1072 |
+
>>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat')
|
| 1073 |
+
>>> local.get_file_info('/tmp/new_folder/local_fs.dat')
|
| 1074 |
+
<FileInfo for '/tmp/new_folder/local_fs.dat': type=FileType.File, size=4>
|
| 1075 |
+
>>> local.delete_dir_contents('/tmp/new_folder')
|
| 1076 |
+
>>> local.get_file_info('/tmp/new_folder')
|
| 1077 |
+
<FileInfo for '/tmp/new_folder': type=FileType.Directory>
|
| 1078 |
+
>>> local.get_file_info('/tmp/new_folder/local_fs.dat')
|
| 1079 |
+
<FileInfo for '/tmp/new_folder/local_fs.dat': type=FileType.NotFound>
|
| 1080 |
+
|
| 1081 |
+
Create a directory, copy a file into it and then delete
|
| 1082 |
+
the file from the directory:
|
| 1083 |
+
|
| 1084 |
+
>>> local.create_dir('/tmp/new_folder')
|
| 1085 |
+
>>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat')
|
| 1086 |
+
>>> local.delete_file('/tmp/new_folder/local_fs.dat')
|
| 1087 |
+
>>> local.get_file_info('/tmp/new_folder/local_fs.dat')
|
| 1088 |
+
<FileInfo for '/tmp/new_folder/local_fs.dat': type=FileType.NotFound>
|
| 1089 |
+
>>> local.get_file_info('/tmp/new_folder')
|
| 1090 |
+
<FileInfo for '/tmp/new_folder': type=FileType.Directory>
|
| 1091 |
+
|
| 1092 |
+
Move the file:
|
| 1093 |
+
|
| 1094 |
+
>>> local.move('/tmp/local_fs-copy.dat', '/tmp/new_folder/local_fs-copy.dat')
|
| 1095 |
+
>>> local.get_file_info('/tmp/new_folder/local_fs-copy.dat')
|
| 1096 |
+
<FileInfo for '/tmp/new_folder/local_fs-copy.dat': type=FileType.File, size=16>
|
| 1097 |
+
>>> local.get_file_info('/tmp/local_fs-copy.dat')
|
| 1098 |
+
<FileInfo for '/tmp/local_fs-copy.dat': type=FileType.NotFound>
|
| 1099 |
+
|
| 1100 |
+
To finish delete the file left:
|
| 1101 |
+
>>> local.delete_file('/tmp/local_fs.dat')
|
| 1102 |
+
"""
|
| 1103 |
+
|
| 1104 |
+
def __init__(self, *, use_mmap=False):
|
| 1105 |
+
cdef:
|
| 1106 |
+
shared_ptr[CFileSystem] fs
|
| 1107 |
+
c_string c_uri
|
| 1108 |
+
|
| 1109 |
+
# from_uri needs a non-empty path, so just use a placeholder of /_
|
| 1110 |
+
c_uri = tobytes(f"file:///_?use_mmap={int(use_mmap)}")
|
| 1111 |
+
with nogil:
|
| 1112 |
+
fs = GetResultValue(CFileSystemFromUri(c_uri))
|
| 1113 |
+
self.init(<shared_ptr[CFileSystem]> fs)
|
| 1114 |
+
|
| 1115 |
+
def __reduce__(self):
|
| 1116 |
+
uri = frombytes(GetResultValue(self.fs.MakeUri(b"/_")))
|
| 1117 |
+
return FileSystem._from_uri, (uri,)
|
| 1118 |
+
|
| 1119 |
+
|
| 1120 |
+
cdef class SubTreeFileSystem(FileSystem):
|
| 1121 |
+
"""
|
| 1122 |
+
Delegates to another implementation after prepending a fixed base path.
|
| 1123 |
+
|
| 1124 |
+
This is useful to expose a logical view of a subtree of a filesystem,
|
| 1125 |
+
for example a directory in a LocalFileSystem.
|
| 1126 |
+
|
| 1127 |
+
Note, that this makes no security guarantee. For example, symlinks may
|
| 1128 |
+
allow to "escape" the subtree and access other parts of the underlying
|
| 1129 |
+
filesystem.
|
| 1130 |
+
|
| 1131 |
+
Parameters
|
| 1132 |
+
----------
|
| 1133 |
+
base_path : str
|
| 1134 |
+
The root of the subtree.
|
| 1135 |
+
base_fs : FileSystem
|
| 1136 |
+
FileSystem object the operations delegated to.
|
| 1137 |
+
|
| 1138 |
+
Examples
|
| 1139 |
+
--------
|
| 1140 |
+
Create a LocalFileSystem instance:
|
| 1141 |
+
|
| 1142 |
+
>>> from pyarrow import fs
|
| 1143 |
+
>>> local = fs.LocalFileSystem()
|
| 1144 |
+
>>> with local.open_output_stream('/tmp/local_fs.dat') as stream:
|
| 1145 |
+
... stream.write(b'data')
|
| 1146 |
+
4
|
| 1147 |
+
|
| 1148 |
+
Create a directory and a SubTreeFileSystem instance:
|
| 1149 |
+
|
| 1150 |
+
>>> local.create_dir('/tmp/sub_tree')
|
| 1151 |
+
>>> subtree = fs.SubTreeFileSystem('/tmp/sub_tree', local)
|
| 1152 |
+
|
| 1153 |
+
Write data into the existing file:
|
| 1154 |
+
|
| 1155 |
+
>>> with subtree.open_append_stream('sub_tree_fs.dat') as f:
|
| 1156 |
+
... f.write(b'+newly added')
|
| 1157 |
+
12
|
| 1158 |
+
|
| 1159 |
+
Print out the attributes:
|
| 1160 |
+
|
| 1161 |
+
>>> subtree.base_fs
|
| 1162 |
+
<pyarrow._fs.LocalFileSystem object at ...>
|
| 1163 |
+
>>> subtree.base_path
|
| 1164 |
+
'/tmp/sub_tree/'
|
| 1165 |
+
|
| 1166 |
+
Get info for the given directory or given file:
|
| 1167 |
+
|
| 1168 |
+
>>> subtree.get_file_info('')
|
| 1169 |
+
<FileInfo for '': type=FileType.Directory>
|
| 1170 |
+
>>> subtree.get_file_info('sub_tree_fs.dat')
|
| 1171 |
+
<FileInfo for 'sub_tree_fs.dat': type=FileType.File, size=12>
|
| 1172 |
+
|
| 1173 |
+
Delete the file and directory:
|
| 1174 |
+
|
| 1175 |
+
>>> subtree.delete_file('sub_tree_fs.dat')
|
| 1176 |
+
>>> local.delete_dir('/tmp/sub_tree')
|
| 1177 |
+
>>> local.delete_file('/tmp/local_fs.dat')
|
| 1178 |
+
|
| 1179 |
+
For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
|
| 1180 |
+
"""
|
| 1181 |
+
|
| 1182 |
+
def __init__(self, base_path, FileSystem base_fs):
|
| 1183 |
+
cdef:
|
| 1184 |
+
c_string pathstr
|
| 1185 |
+
shared_ptr[CSubTreeFileSystem] wrapped
|
| 1186 |
+
|
| 1187 |
+
pathstr = _path_as_bytes(base_path)
|
| 1188 |
+
wrapped = make_shared[CSubTreeFileSystem](pathstr, base_fs.wrapped)
|
| 1189 |
+
|
| 1190 |
+
self.init(<shared_ptr[CFileSystem]> wrapped)
|
| 1191 |
+
|
| 1192 |
+
cdef init(self, const shared_ptr[CFileSystem]& wrapped):
|
| 1193 |
+
FileSystem.init(self, wrapped)
|
| 1194 |
+
self.subtreefs = <CSubTreeFileSystem*> wrapped.get()
|
| 1195 |
+
|
| 1196 |
+
def __repr__(self):
|
| 1197 |
+
return ("SubTreeFileSystem(base_path={}, base_fs={}"
|
| 1198 |
+
.format(self.base_path, self.base_fs))
|
| 1199 |
+
|
| 1200 |
+
def __reduce__(self):
|
| 1201 |
+
return SubTreeFileSystem, (
|
| 1202 |
+
frombytes(self.subtreefs.base_path()),
|
| 1203 |
+
FileSystem.wrap(self.subtreefs.base_fs())
|
| 1204 |
+
)
|
| 1205 |
+
|
| 1206 |
+
@property
|
| 1207 |
+
def base_path(self):
|
| 1208 |
+
return frombytes(self.subtreefs.base_path())
|
| 1209 |
+
|
| 1210 |
+
@property
|
| 1211 |
+
def base_fs(self):
|
| 1212 |
+
return FileSystem.wrap(self.subtreefs.base_fs())
|
| 1213 |
+
|
| 1214 |
+
|
| 1215 |
+
cdef class _MockFileSystem(FileSystem):
|
| 1216 |
+
|
| 1217 |
+
def __init__(self, datetime current_time=None):
|
| 1218 |
+
cdef shared_ptr[CMockFileSystem] wrapped
|
| 1219 |
+
|
| 1220 |
+
current_time = current_time or datetime.now()
|
| 1221 |
+
wrapped = make_shared[CMockFileSystem](
|
| 1222 |
+
PyDateTime_to_TimePoint(<PyDateTime_DateTime*> current_time)
|
| 1223 |
+
)
|
| 1224 |
+
|
| 1225 |
+
self.init(<shared_ptr[CFileSystem]> wrapped)
|
| 1226 |
+
|
| 1227 |
+
cdef init(self, const shared_ptr[CFileSystem]& wrapped):
|
| 1228 |
+
FileSystem.init(self, wrapped)
|
| 1229 |
+
self.mockfs = <CMockFileSystem*> wrapped.get()
|
| 1230 |
+
|
| 1231 |
+
|
| 1232 |
+
cdef class PyFileSystem(FileSystem):
|
| 1233 |
+
"""
|
| 1234 |
+
A FileSystem with behavior implemented in Python.
|
| 1235 |
+
|
| 1236 |
+
Parameters
|
| 1237 |
+
----------
|
| 1238 |
+
handler : FileSystemHandler
|
| 1239 |
+
The handler object implementing custom filesystem behavior.
|
| 1240 |
+
|
| 1241 |
+
Examples
|
| 1242 |
+
--------
|
| 1243 |
+
Create an fsspec-based filesystem object for GitHub:
|
| 1244 |
+
|
| 1245 |
+
>>> from fsspec.implementations import github
|
| 1246 |
+
>>> gfs = github.GithubFileSystem('apache', 'arrow') # doctest: +SKIP
|
| 1247 |
+
|
| 1248 |
+
Get a PyArrow FileSystem object:
|
| 1249 |
+
|
| 1250 |
+
>>> from pyarrow.fs import PyFileSystem, FSSpecHandler
|
| 1251 |
+
>>> pa_fs = PyFileSystem(FSSpecHandler(gfs)) # doctest: +SKIP
|
| 1252 |
+
|
| 1253 |
+
Use :func:`~pyarrow.fs.FileSystem` functionality ``get_file_info()``:
|
| 1254 |
+
|
| 1255 |
+
>>> pa_fs.get_file_info('README.md') # doctest: +SKIP
|
| 1256 |
+
<FileInfo for 'README.md': type=FileType.File, size=...>
|
| 1257 |
+
"""
|
| 1258 |
+
|
| 1259 |
+
def __init__(self, handler):
|
| 1260 |
+
cdef:
|
| 1261 |
+
CPyFileSystemVtable vtable
|
| 1262 |
+
shared_ptr[CPyFileSystem] wrapped
|
| 1263 |
+
|
| 1264 |
+
if not isinstance(handler, FileSystemHandler):
|
| 1265 |
+
raise TypeError("Expected a FileSystemHandler instance, got {0}"
|
| 1266 |
+
.format(type(handler)))
|
| 1267 |
+
|
| 1268 |
+
vtable.get_type_name = _cb_get_type_name
|
| 1269 |
+
vtable.equals = _cb_equals
|
| 1270 |
+
vtable.get_file_info = _cb_get_file_info
|
| 1271 |
+
vtable.get_file_info_vector = _cb_get_file_info_vector
|
| 1272 |
+
vtable.get_file_info_selector = _cb_get_file_info_selector
|
| 1273 |
+
vtable.create_dir = _cb_create_dir
|
| 1274 |
+
vtable.delete_dir = _cb_delete_dir
|
| 1275 |
+
vtable.delete_dir_contents = _cb_delete_dir_contents
|
| 1276 |
+
vtable.delete_root_dir_contents = _cb_delete_root_dir_contents
|
| 1277 |
+
vtable.delete_file = _cb_delete_file
|
| 1278 |
+
vtable.move = _cb_move
|
| 1279 |
+
vtable.copy_file = _cb_copy_file
|
| 1280 |
+
vtable.open_input_stream = _cb_open_input_stream
|
| 1281 |
+
vtable.open_input_file = _cb_open_input_file
|
| 1282 |
+
vtable.open_output_stream = _cb_open_output_stream
|
| 1283 |
+
vtable.open_append_stream = _cb_open_append_stream
|
| 1284 |
+
vtable.normalize_path = _cb_normalize_path
|
| 1285 |
+
|
| 1286 |
+
wrapped = CPyFileSystem.Make(handler, move(vtable))
|
| 1287 |
+
self.init(<shared_ptr[CFileSystem]> wrapped)
|
| 1288 |
+
|
| 1289 |
+
cdef init(self, const shared_ptr[CFileSystem]& wrapped):
|
| 1290 |
+
FileSystem.init(self, wrapped)
|
| 1291 |
+
self.pyfs = <CPyFileSystem*> wrapped.get()
|
| 1292 |
+
|
| 1293 |
+
@property
|
| 1294 |
+
def handler(self):
|
| 1295 |
+
"""
|
| 1296 |
+
The filesystem's underlying handler.
|
| 1297 |
+
|
| 1298 |
+
Returns
|
| 1299 |
+
-------
|
| 1300 |
+
handler : FileSystemHandler
|
| 1301 |
+
"""
|
| 1302 |
+
return <object> self.pyfs.handler()
|
| 1303 |
+
|
| 1304 |
+
def __reduce__(self):
|
| 1305 |
+
return PyFileSystem, (self.handler,)
|
| 1306 |
+
|
| 1307 |
+
|
| 1308 |
+
class FileSystemHandler(ABC):
|
| 1309 |
+
"""
|
| 1310 |
+
An abstract class exposing methods to implement PyFileSystem's behavior.
|
| 1311 |
+
"""
|
| 1312 |
+
|
| 1313 |
+
@abstractmethod
|
| 1314 |
+
def get_type_name(self):
|
| 1315 |
+
"""
|
| 1316 |
+
Implement PyFileSystem.type_name.
|
| 1317 |
+
"""
|
| 1318 |
+
|
| 1319 |
+
@abstractmethod
|
| 1320 |
+
def get_file_info(self, paths):
|
| 1321 |
+
"""
|
| 1322 |
+
Implement PyFileSystem.get_file_info(paths).
|
| 1323 |
+
|
| 1324 |
+
Parameters
|
| 1325 |
+
----------
|
| 1326 |
+
paths : list of str
|
| 1327 |
+
paths for which we want to retrieve the info.
|
| 1328 |
+
"""
|
| 1329 |
+
|
| 1330 |
+
@abstractmethod
|
| 1331 |
+
def get_file_info_selector(self, selector):
|
| 1332 |
+
"""
|
| 1333 |
+
Implement PyFileSystem.get_file_info(selector).
|
| 1334 |
+
|
| 1335 |
+
Parameters
|
| 1336 |
+
----------
|
| 1337 |
+
selector : FileSelector
|
| 1338 |
+
selector for which we want to retrieve the info.
|
| 1339 |
+
"""
|
| 1340 |
+
|
| 1341 |
+
@abstractmethod
|
| 1342 |
+
def create_dir(self, path, recursive):
|
| 1343 |
+
"""
|
| 1344 |
+
Implement PyFileSystem.create_dir(...).
|
| 1345 |
+
|
| 1346 |
+
Parameters
|
| 1347 |
+
----------
|
| 1348 |
+
path : str
|
| 1349 |
+
path of the directory.
|
| 1350 |
+
recursive : bool
|
| 1351 |
+
if the parent directories should be created too.
|
| 1352 |
+
"""
|
| 1353 |
+
|
| 1354 |
+
@abstractmethod
|
| 1355 |
+
def delete_dir(self, path):
|
| 1356 |
+
"""
|
| 1357 |
+
Implement PyFileSystem.delete_dir(...).
|
| 1358 |
+
|
| 1359 |
+
Parameters
|
| 1360 |
+
----------
|
| 1361 |
+
path : str
|
| 1362 |
+
path of the directory.
|
| 1363 |
+
"""
|
| 1364 |
+
|
| 1365 |
+
@abstractmethod
|
| 1366 |
+
def delete_dir_contents(self, path, missing_dir_ok=False):
|
| 1367 |
+
"""
|
| 1368 |
+
Implement PyFileSystem.delete_dir_contents(...).
|
| 1369 |
+
|
| 1370 |
+
Parameters
|
| 1371 |
+
----------
|
| 1372 |
+
path : str
|
| 1373 |
+
path of the directory.
|
| 1374 |
+
missing_dir_ok : bool
|
| 1375 |
+
if False an error should be raised if path does not exist
|
| 1376 |
+
"""
|
| 1377 |
+
|
| 1378 |
+
@abstractmethod
|
| 1379 |
+
def delete_root_dir_contents(self):
|
| 1380 |
+
"""
|
| 1381 |
+
Implement PyFileSystem.delete_dir_contents("/", accept_root_dir=True).
|
| 1382 |
+
"""
|
| 1383 |
+
|
| 1384 |
+
@abstractmethod
|
| 1385 |
+
def delete_file(self, path):
|
| 1386 |
+
"""
|
| 1387 |
+
Implement PyFileSystem.delete_file(...).
|
| 1388 |
+
|
| 1389 |
+
Parameters
|
| 1390 |
+
----------
|
| 1391 |
+
path : str
|
| 1392 |
+
path of the file.
|
| 1393 |
+
"""
|
| 1394 |
+
|
| 1395 |
+
@abstractmethod
|
| 1396 |
+
def move(self, src, dest):
|
| 1397 |
+
"""
|
| 1398 |
+
Implement PyFileSystem.move(...).
|
| 1399 |
+
|
| 1400 |
+
Parameters
|
| 1401 |
+
----------
|
| 1402 |
+
src : str
|
| 1403 |
+
path of what should be moved.
|
| 1404 |
+
dest : str
|
| 1405 |
+
path of where it should be moved to.
|
| 1406 |
+
"""
|
| 1407 |
+
|
| 1408 |
+
@abstractmethod
|
| 1409 |
+
def copy_file(self, src, dest):
|
| 1410 |
+
"""
|
| 1411 |
+
Implement PyFileSystem.copy_file(...).
|
| 1412 |
+
|
| 1413 |
+
Parameters
|
| 1414 |
+
----------
|
| 1415 |
+
src : str
|
| 1416 |
+
path of what should be copied.
|
| 1417 |
+
dest : str
|
| 1418 |
+
path of where it should be copied to.
|
| 1419 |
+
"""
|
| 1420 |
+
|
| 1421 |
+
@abstractmethod
|
| 1422 |
+
def open_input_stream(self, path):
|
| 1423 |
+
"""
|
| 1424 |
+
Implement PyFileSystem.open_input_stream(...).
|
| 1425 |
+
|
| 1426 |
+
Parameters
|
| 1427 |
+
----------
|
| 1428 |
+
path : str
|
| 1429 |
+
path of what should be opened.
|
| 1430 |
+
"""
|
| 1431 |
+
|
| 1432 |
+
@abstractmethod
|
| 1433 |
+
def open_input_file(self, path):
|
| 1434 |
+
"""
|
| 1435 |
+
Implement PyFileSystem.open_input_file(...).
|
| 1436 |
+
|
| 1437 |
+
Parameters
|
| 1438 |
+
----------
|
| 1439 |
+
path : str
|
| 1440 |
+
path of what should be opened.
|
| 1441 |
+
"""
|
| 1442 |
+
|
| 1443 |
+
@abstractmethod
|
| 1444 |
+
def open_output_stream(self, path, metadata):
|
| 1445 |
+
"""
|
| 1446 |
+
Implement PyFileSystem.open_output_stream(...).
|
| 1447 |
+
|
| 1448 |
+
Parameters
|
| 1449 |
+
----------
|
| 1450 |
+
path : str
|
| 1451 |
+
path of what should be opened.
|
| 1452 |
+
metadata : mapping
|
| 1453 |
+
Mapping of string keys to string values.
|
| 1454 |
+
Some filesystems support storing metadata along the file
|
| 1455 |
+
(such as "Content-Type").
|
| 1456 |
+
"""
|
| 1457 |
+
|
| 1458 |
+
@abstractmethod
|
| 1459 |
+
def open_append_stream(self, path, metadata):
|
| 1460 |
+
"""
|
| 1461 |
+
Implement PyFileSystem.open_append_stream(...).
|
| 1462 |
+
|
| 1463 |
+
Parameters
|
| 1464 |
+
----------
|
| 1465 |
+
path : str
|
| 1466 |
+
path of what should be opened.
|
| 1467 |
+
metadata : mapping
|
| 1468 |
+
Mapping of string keys to string values.
|
| 1469 |
+
Some filesystems support storing metadata along the file
|
| 1470 |
+
(such as "Content-Type").
|
| 1471 |
+
"""
|
| 1472 |
+
|
| 1473 |
+
@abstractmethod
|
| 1474 |
+
def normalize_path(self, path):
|
| 1475 |
+
"""
|
| 1476 |
+
Implement PyFileSystem.normalize_path(...).
|
| 1477 |
+
|
| 1478 |
+
Parameters
|
| 1479 |
+
----------
|
| 1480 |
+
path : str
|
| 1481 |
+
path of what should be normalized.
|
| 1482 |
+
"""
|
| 1483 |
+
|
| 1484 |
+
# Callback definitions for CPyFileSystemVtable
|
| 1485 |
+
|
| 1486 |
+
|
| 1487 |
+
cdef void _cb_get_type_name(handler, c_string* out) except *:
|
| 1488 |
+
out[0] = tobytes("py::" + handler.get_type_name())
|
| 1489 |
+
|
| 1490 |
+
cdef c_bool _cb_equals(handler, const CFileSystem& c_other) except False:
|
| 1491 |
+
if c_other.type_name().startswith(b"py::"):
|
| 1492 |
+
return <object> (<const CPyFileSystem&> c_other).handler() == handler
|
| 1493 |
+
|
| 1494 |
+
return False
|
| 1495 |
+
|
| 1496 |
+
cdef void _cb_get_file_info(handler, const c_string& path,
|
| 1497 |
+
CFileInfo* out) except *:
|
| 1498 |
+
infos = handler.get_file_info([frombytes(path)])
|
| 1499 |
+
if not isinstance(infos, list) or len(infos) != 1:
|
| 1500 |
+
raise TypeError("get_file_info should have returned a 1-element list")
|
| 1501 |
+
out[0] = FileInfo.unwrap_safe(infos[0])
|
| 1502 |
+
|
| 1503 |
+
cdef void _cb_get_file_info_vector(handler, const vector[c_string]& paths,
|
| 1504 |
+
vector[CFileInfo]* out) except *:
|
| 1505 |
+
py_paths = [frombytes(paths[i]) for i in range(len(paths))]
|
| 1506 |
+
infos = handler.get_file_info(py_paths)
|
| 1507 |
+
if not isinstance(infos, list):
|
| 1508 |
+
raise TypeError("get_file_info should have returned a list")
|
| 1509 |
+
out[0].clear()
|
| 1510 |
+
out[0].reserve(len(infos))
|
| 1511 |
+
for info in infos:
|
| 1512 |
+
out[0].push_back(FileInfo.unwrap_safe(info))
|
| 1513 |
+
|
| 1514 |
+
cdef void _cb_get_file_info_selector(handler, const CFileSelector& selector,
|
| 1515 |
+
vector[CFileInfo]* out) except *:
|
| 1516 |
+
infos = handler.get_file_info_selector(FileSelector.wrap(selector))
|
| 1517 |
+
if not isinstance(infos, list):
|
| 1518 |
+
raise TypeError("get_file_info_selector should have returned a list")
|
| 1519 |
+
out[0].clear()
|
| 1520 |
+
out[0].reserve(len(infos))
|
| 1521 |
+
for info in infos:
|
| 1522 |
+
out[0].push_back(FileInfo.unwrap_safe(info))
|
| 1523 |
+
|
| 1524 |
+
cdef void _cb_create_dir(handler, const c_string& path,
|
| 1525 |
+
c_bool recursive) except *:
|
| 1526 |
+
handler.create_dir(frombytes(path), recursive)
|
| 1527 |
+
|
| 1528 |
+
cdef void _cb_delete_dir(handler, const c_string& path) except *:
|
| 1529 |
+
handler.delete_dir(frombytes(path))
|
| 1530 |
+
|
| 1531 |
+
cdef void _cb_delete_dir_contents(handler, const c_string& path,
|
| 1532 |
+
c_bool missing_dir_ok) except *:
|
| 1533 |
+
handler.delete_dir_contents(frombytes(path), missing_dir_ok)
|
| 1534 |
+
|
| 1535 |
+
cdef void _cb_delete_root_dir_contents(handler) except *:
|
| 1536 |
+
handler.delete_root_dir_contents()
|
| 1537 |
+
|
| 1538 |
+
cdef void _cb_delete_file(handler, const c_string& path) except *:
|
| 1539 |
+
handler.delete_file(frombytes(path))
|
| 1540 |
+
|
| 1541 |
+
cdef void _cb_move(handler, const c_string& src,
|
| 1542 |
+
const c_string& dest) except *:
|
| 1543 |
+
handler.move(frombytes(src), frombytes(dest))
|
| 1544 |
+
|
| 1545 |
+
cdef void _cb_copy_file(handler, const c_string& src,
|
| 1546 |
+
const c_string& dest) except *:
|
| 1547 |
+
handler.copy_file(frombytes(src), frombytes(dest))
|
| 1548 |
+
|
| 1549 |
+
cdef void _cb_open_input_stream(handler, const c_string& path,
|
| 1550 |
+
shared_ptr[CInputStream]* out) except *:
|
| 1551 |
+
stream = handler.open_input_stream(frombytes(path))
|
| 1552 |
+
if not isinstance(stream, NativeFile):
|
| 1553 |
+
raise TypeError("open_input_stream should have returned "
|
| 1554 |
+
"a PyArrow file")
|
| 1555 |
+
out[0] = (<NativeFile> stream).get_input_stream()
|
| 1556 |
+
|
| 1557 |
+
cdef void _cb_open_input_file(handler, const c_string& path,
|
| 1558 |
+
shared_ptr[CRandomAccessFile]* out) except *:
|
| 1559 |
+
stream = handler.open_input_file(frombytes(path))
|
| 1560 |
+
if not isinstance(stream, NativeFile):
|
| 1561 |
+
raise TypeError("open_input_file should have returned "
|
| 1562 |
+
"a PyArrow file")
|
| 1563 |
+
out[0] = (<NativeFile> stream).get_random_access_file()
|
| 1564 |
+
|
| 1565 |
+
cdef void _cb_open_output_stream(
|
| 1566 |
+
handler, const c_string& path,
|
| 1567 |
+
const shared_ptr[const CKeyValueMetadata]& metadata,
|
| 1568 |
+
shared_ptr[COutputStream]* out) except *:
|
| 1569 |
+
stream = handler.open_output_stream(
|
| 1570 |
+
frombytes(path), pyarrow_wrap_metadata(metadata))
|
| 1571 |
+
if not isinstance(stream, NativeFile):
|
| 1572 |
+
raise TypeError("open_output_stream should have returned "
|
| 1573 |
+
"a PyArrow file")
|
| 1574 |
+
out[0] = (<NativeFile> stream).get_output_stream()
|
| 1575 |
+
|
| 1576 |
+
cdef void _cb_open_append_stream(
|
| 1577 |
+
handler, const c_string& path,
|
| 1578 |
+
const shared_ptr[const CKeyValueMetadata]& metadata,
|
| 1579 |
+
shared_ptr[COutputStream]* out) except *:
|
| 1580 |
+
stream = handler.open_append_stream(
|
| 1581 |
+
frombytes(path), pyarrow_wrap_metadata(metadata))
|
| 1582 |
+
if not isinstance(stream, NativeFile):
|
| 1583 |
+
raise TypeError("open_append_stream should have returned "
|
| 1584 |
+
"a PyArrow file")
|
| 1585 |
+
out[0] = (<NativeFile> stream).get_output_stream()
|
| 1586 |
+
|
| 1587 |
+
cdef void _cb_normalize_path(handler, const c_string& path,
|
| 1588 |
+
c_string* out) except *:
|
| 1589 |
+
out[0] = tobytes(handler.normalize_path(frombytes(path)))
|
| 1590 |
+
|
| 1591 |
+
|
| 1592 |
+
def _copy_files(FileSystem source_fs, str source_path,
|
| 1593 |
+
FileSystem destination_fs, str destination_path,
|
| 1594 |
+
int64_t chunk_size, c_bool use_threads):
|
| 1595 |
+
# low-level helper exposed through pyarrow/fs.py::copy_files
|
| 1596 |
+
cdef:
|
| 1597 |
+
CFileLocator c_source
|
| 1598 |
+
vector[CFileLocator] c_sources
|
| 1599 |
+
CFileLocator c_destination
|
| 1600 |
+
vector[CFileLocator] c_destinations
|
| 1601 |
+
|
| 1602 |
+
c_source.filesystem = source_fs.unwrap()
|
| 1603 |
+
c_source.path = tobytes(source_path)
|
| 1604 |
+
c_sources.push_back(c_source)
|
| 1605 |
+
|
| 1606 |
+
c_destination.filesystem = destination_fs.unwrap()
|
| 1607 |
+
c_destination.path = tobytes(destination_path)
|
| 1608 |
+
c_destinations.push_back(c_destination)
|
| 1609 |
+
|
| 1610 |
+
with nogil:
|
| 1611 |
+
check_status(CCopyFiles(
|
| 1612 |
+
c_sources, c_destinations,
|
| 1613 |
+
c_default_io_context(), chunk_size, use_threads,
|
| 1614 |
+
))
|
| 1615 |
+
|
| 1616 |
+
|
| 1617 |
+
def _copy_files_selector(FileSystem source_fs, FileSelector source_sel,
|
| 1618 |
+
FileSystem destination_fs, str destination_base_dir,
|
| 1619 |
+
int64_t chunk_size, c_bool use_threads):
|
| 1620 |
+
# low-level helper exposed through pyarrow/fs.py::copy_files
|
| 1621 |
+
cdef c_string c_destination_base_dir = tobytes(destination_base_dir)
|
| 1622 |
+
|
| 1623 |
+
with nogil:
|
| 1624 |
+
check_status(CCopyFilesWithSelector(
|
| 1625 |
+
source_fs.unwrap(), source_sel.unwrap(),
|
| 1626 |
+
destination_fs.unwrap(), c_destination_base_dir,
|
| 1627 |
+
c_default_io_context(), chunk_size, use_threads,
|
| 1628 |
+
))
|
parrot/lib/python3.10/site-packages/pyarrow/_orc.pyx
ADDED
|
@@ -0,0 +1,445 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# cython: profile=False
|
| 19 |
+
# distutils: language = c++
|
| 20 |
+
|
| 21 |
+
from cython.operator cimport dereference as deref
|
| 22 |
+
from libcpp.vector cimport vector as std_vector
|
| 23 |
+
from libcpp.utility cimport move
|
| 24 |
+
from pyarrow.includes.common cimport *
|
| 25 |
+
from pyarrow.includes.libarrow cimport *
|
| 26 |
+
from pyarrow.lib cimport (check_status, _Weakrefable,
|
| 27 |
+
MemoryPool, maybe_unbox_memory_pool,
|
| 28 |
+
pyarrow_wrap_schema,
|
| 29 |
+
pyarrow_wrap_batch,
|
| 30 |
+
Table,
|
| 31 |
+
pyarrow_wrap_table,
|
| 32 |
+
pyarrow_wrap_metadata,
|
| 33 |
+
pyarrow_unwrap_table,
|
| 34 |
+
get_reader,
|
| 35 |
+
get_writer)
|
| 36 |
+
from pyarrow.lib import frombytes, tobytes
|
| 37 |
+
from pyarrow.util import _stringify_path
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
cdef compression_type_from_enum(CCompressionType compression_type):
    """Return the ORC compression-kind name for an Arrow compression enum.

    Note that Arrow's GZIP codec corresponds to ORC's 'ZLIB' kind.
    Raises ValueError for enum values with no ORC equivalent.
    """
    name = {
        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
        CCompressionType_GZIP: 'ZLIB',
        CCompressionType_SNAPPY: 'SNAPPY',
        CCompressionType_LZ4: 'LZ4',
        CCompressionType_ZSTD: 'ZSTD',
    }.get(compression_type)
    if name is None:
        raise ValueError('Unsupported compression')
    return name
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
cdef CCompressionType compression_type_from_name(name) except *:
    """Translate an ORC compression-kind name to the Arrow compression enum.

    Raises TypeError for non-string input and ValueError for names that
    do not match a supported kind.  Matching is case-insensitive.
    """
    if not isinstance(name, str):
        raise TypeError('compression must be a string')
    name = name.upper()
    if name == 'UNCOMPRESSED':
        return CCompressionType_UNCOMPRESSED
    if name == 'ZLIB':
        # ORC's ZLIB kind maps onto Arrow's GZIP codec.
        return CCompressionType_GZIP
    if name == 'SNAPPY':
        return CCompressionType_SNAPPY
    if name == 'LZ4':
        return CCompressionType_LZ4
    if name == 'ZSTD':
        return CCompressionType_ZSTD
    raise ValueError(f'Unknown CompressionKind: {name}')
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
cdef compression_strategy_from_enum(
    CompressionStrategy compression_strategy
):
    """Return the symbolic name of an ORC compression strategy enum value."""
    name = {
        _CompressionStrategy_SPEED: 'SPEED',
        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
    }.get(compression_strategy)
    if name is None:
        raise ValueError('Unsupported compression strategy')
    return name
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
cdef CompressionStrategy compression_strategy_from_name(name) except *:
    """Translate a strategy name ('SPEED' or 'COMPRESSION') to the enum.

    Raises TypeError for non-string input and ValueError for any other
    name.  Matching is case-insensitive.
    """
    if not isinstance(name, str):
        raise TypeError('compression strategy must be a string')
    name = name.upper()
    if name == 'SPEED':
        return _CompressionStrategy_SPEED
    if name == 'COMPRESSION':
        return _CompressionStrategy_COMPRESSION
    raise ValueError(f'Unknown CompressionStrategy: {name}')
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
cdef file_version_from_class(FileVersion file_version):
    # Render the C++ FileVersion via its ToString() (e.g. "0.12") and
    # decode the C string to a Python str.
    return frombytes(file_version.ToString())
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
cdef writer_id_from_enum(WriterId writer_id):
    """Return the symbolic name of the ORC writer implementation id."""
    known_writers = {
        _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA',
        _WriterId_ORC_CPP_WRITER: 'ORC_CPP',
        _WriterId_PRESTO_WRITER: 'PRESTO',
        _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO',
        _WriterId_TRINO_WRITER: 'TRINO',
    }
    try:
        return known_writers[writer_id]
    except KeyError:
        raise ValueError('Unsupported writer ID')
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
cdef writer_version_from_enum(WriterVersion writer_version):
    """Return the symbolic name of the writer version recorded in a file."""
    known_versions = {
        _WriterVersion_ORIGINAL: 'ORIGINAL',
        _WriterVersion_HIVE_8732: 'HIVE_8732',
        _WriterVersion_HIVE_4243: 'HIVE_4243',
        _WriterVersion_HIVE_12055: 'HIVE_12055',
        _WriterVersion_HIVE_13083: 'HIVE_13083',
        _WriterVersion_ORC_101: 'ORC_101',
        _WriterVersion_ORC_135: 'ORC_135',
        _WriterVersion_ORC_517: 'ORC_517',
        _WriterVersion_ORC_203: 'ORC_203',
        _WriterVersion_ORC_14: 'ORC_14',
    }
    try:
        return known_versions[writer_version]
    except KeyError:
        raise ValueError('Unsupported writer version')
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
cdef shared_ptr[WriteOptions] _create_write_options(
    file_version=None,
    batch_size=None,
    stripe_size=None,
    compression=None,
    compression_block_size=None,
    compression_strategy=None,
    row_index_stride=None,
    padding_tolerance=None,
    dictionary_key_size_threshold=None,
    bloom_filter_columns=None,
    bloom_filter_fpp=None
) except *:
    """General writer options.

    Validate the given keyword arguments and translate them into a C++
    ``WriteOptions`` struct.  Arguments left as ``None`` keep the C++
    defaults.  Raises ``ValueError`` on invalid values and ``TypeError``
    when a string-typed option is not a string.
    """
    cdef:
        shared_ptr[WriteOptions] options
    options = make_shared[WriteOptions]()
    # batch_size: rows per write batch, must be a positive integer
    if batch_size is not None:
        if isinstance(batch_size, int) and batch_size > 0:
            deref(options).batch_size = batch_size
        else:
            raise ValueError(f"Invalid ORC writer batch size: {batch_size}")
    # file_version: only ORC format versions "0.11" and "0.12" exist
    if file_version is not None:
        if file_version == "0.12":
            deref(options).file_version = FileVersion(0, 12)
        elif file_version == "0.11":
            deref(options).file_version = FileVersion(0, 11)
        else:
            raise ValueError(f"Unsupported ORC file version: {file_version}")
    # stripe_size: target stripe size in bytes, must be a positive integer
    if stripe_size is not None:
        if isinstance(stripe_size, int) and stripe_size > 0:
            deref(options).stripe_size = stripe_size
        else:
            raise ValueError(f"Invalid ORC stripe size: {stripe_size}")
    # compression: kind name such as 'ZLIB', 'SNAPPY', 'ZSTD', ...
    if compression is not None:
        if isinstance(compression, str):
            deref(options).compression = compression_type_from_name(
                compression)
        else:
            raise TypeError("Unsupported ORC compression type: "
                            f"{compression}")
    # compression_block_size: in bytes, must be a positive integer
    if compression_block_size is not None:
        if (isinstance(compression_block_size, int) and
                compression_block_size > 0):
            deref(options).compression_block_size = compression_block_size
        else:
            raise ValueError("Invalid ORC compression block size: "
                             f"{compression_block_size}")
    # compression_strategy: 'SPEED' or 'COMPRESSION'
    if compression_strategy is not None:
        # Bug fix: this previously tested isinstance(compression, str),
        # so validation keyed off the wrong argument — a non-string
        # strategy slipped through whenever `compression` was a string,
        # and a valid strategy was rejected otherwise.
        if isinstance(compression_strategy, str):
            deref(options).compression_strategy = \
                compression_strategy_from_name(compression_strategy)
        else:
            raise TypeError("Unsupported ORC compression strategy: "
                            f"{compression_strategy}")
    # row_index_stride: rows between row-index entries, positive integer
    if row_index_stride is not None:
        if isinstance(row_index_stride, int) and row_index_stride > 0:
            deref(options).row_index_stride = row_index_stride
        else:
            raise ValueError("Invalid ORC row index stride: "
                             f"{row_index_stride}")
    # padding_tolerance: anything coercible to float is accepted
    if padding_tolerance is not None:
        try:
            padding_tolerance = float(padding_tolerance)
            deref(options).padding_tolerance = padding_tolerance
        except Exception:
            raise ValueError("Invalid ORC padding tolerance: "
                             f"{padding_tolerance}")
    # dictionary_key_size_threshold: float in [0, 1]
    if dictionary_key_size_threshold is not None:
        try:
            dictionary_key_size_threshold = float(
                dictionary_key_size_threshold)
            assert 0 <= dictionary_key_size_threshold <= 1
            deref(options).dictionary_key_size_threshold = \
                dictionary_key_size_threshold
        except Exception:
            raise ValueError("Invalid ORC dictionary key size threshold: "
                             f"{dictionary_key_size_threshold}")
    # bloom_filter_columns: iterable of non-negative column indices
    if bloom_filter_columns is not None:
        try:
            bloom_filter_columns = list(bloom_filter_columns)
            for col in bloom_filter_columns:
                assert isinstance(col, int) and col >= 0
            deref(options).bloom_filter_columns = bloom_filter_columns
        except Exception:
            raise ValueError("Invalid ORC BloomFilter columns: "
                             f"{bloom_filter_columns}")
    # Max false positive rate of the Bloom Filter: float in [0, 1]
    if bloom_filter_fpp is not None:
        try:
            bloom_filter_fpp = float(bloom_filter_fpp)
            assert 0 <= bloom_filter_fpp <= 1
            deref(options).bloom_filter_fpp = bloom_filter_fpp
        except Exception:
            raise ValueError("Invalid ORC BloomFilter false positive rate: "
                             f"{bloom_filter_fpp}")
    return options
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
cdef class ORCReader(_Weakrefable):
    """Low-level wrapper around the Arrow C++ ORCFileReader."""
    cdef:
        object source                     # Python source kept alive for the reader's lifetime
        CMemoryPool* allocator            # memory pool used for all reads
        unique_ptr[ORCFileReader] reader  # owned C++ reader, populated by open()

    def __cinit__(self, MemoryPool memory_pool=None):
        # Resolve the optional Python-level pool to a C++ pool pointer
        # (falls back to the default pool when None).
        self.allocator = maybe_unbox_memory_pool(memory_pool)

    def open(self, object source, c_bool use_memory_map=True):
        # Open `source` (path or file-like; get_reader handles both) and
        # construct the underlying ORCFileReader.
        cdef:
            shared_ptr[CRandomAccessFile] rd_handle

        # Hold a reference so the Python source outlives the C++ reader.
        self.source = source

        get_reader(source, use_memory_map, &rd_handle)
        with nogil:
            # move() transfers ownership of the unique_ptr result.
            self.reader = move(GetResultValue(
                ORCFileReader.Open(rd_handle, self.allocator)
            ))

    def metadata(self):
        """
        The arrow metadata for this file.

        Returns
        -------
        metadata : pyarrow.KeyValueMetadata
        """
        cdef:
            shared_ptr[const CKeyValueMetadata] sp_arrow_metadata

        with nogil:
            sp_arrow_metadata = GetResultValue(
                deref(self.reader).ReadMetadata()
            )

        return pyarrow_wrap_metadata(sp_arrow_metadata)

    def schema(self):
        """
        The arrow schema for this file.

        Returns
        -------
        schema : pyarrow.Schema
        """
        cdef:
            shared_ptr[CSchema] sp_arrow_schema

        with nogil:
            sp_arrow_schema = GetResultValue(deref(self.reader).ReadSchema())

        return pyarrow_wrap_schema(sp_arrow_schema)

    def nrows(self):
        # Total number of rows in the file.
        return deref(self.reader).NumberOfRows()

    def nstripes(self):
        # Number of stripes in the file.
        return deref(self.reader).NumberOfStripes()

    def file_version(self):
        # ORC format version as a string (e.g. "0.12").
        return file_version_from_class(deref(self.reader).GetFileVersion())

    def software_version(self):
        # Version string of the software that wrote the file.
        return frombytes(deref(self.reader).GetSoftwareVersion())

    def compression(self):
        # Compression kind name (e.g. 'ZLIB'); may raise ValueError for
        # kinds compression_type_from_enum does not know.
        return compression_type_from_enum(
            GetResultValue(deref(self.reader).GetCompression()))

    def compression_size(self):
        # Compression block size in bytes.
        return deref(self.reader).GetCompressionSize()

    def row_index_stride(self):
        # Number of rows between row-index entries.
        return deref(self.reader).GetRowIndexStride()

    def writer(self):
        # Symbolic writer name, or the raw numeric id for unknown writers.
        # NOTE(review): writer_id_from_enum raises ValueError rather than
        # returning 'UNKNOWN', so the fallback branch below looks
        # unreachable as written — confirm intended behavior.
        writer_name = writer_id_from_enum(deref(self.reader).GetWriterId())
        if writer_name == 'UNKNOWN':
            return deref(self.reader).GetWriterIdValue()
        else:
            return writer_name

    def writer_version(self):
        # Symbolic writer version name.
        return writer_version_from_enum(deref(self.reader).GetWriterVersion())

    def nstripe_statistics(self):
        # Number of stripe-level statistics entries.
        return deref(self.reader).GetNumberOfStripeStatistics()

    def content_length(self):
        # Length in bytes of the file's data content section.
        return deref(self.reader).GetContentLength()

    def stripe_statistics_length(self):
        # Length in bytes of the stripe statistics section.
        return deref(self.reader).GetStripeStatisticsLength()

    def file_footer_length(self):
        # Length in bytes of the file footer.
        return deref(self.reader).GetFileFooterLength()

    def file_postscript_length(self):
        # Length in bytes of the file postscript.
        return deref(self.reader).GetFilePostscriptLength()

    def file_length(self):
        # Total file length in bytes.
        return deref(self.reader).GetFileLength()

    def serialized_file_tail(self):
        # Raw serialized file tail (footer + postscript) bytes.
        return deref(self.reader).GetSerializedFileTail()

    def read_stripe(self, n, columns=None):
        # Read stripe `n` as a RecordBatch, optionally restricted to the
        # named columns.
        cdef:
            shared_ptr[CRecordBatch] sp_record_batch
            int64_t stripe
            std_vector[c_string] c_names

        stripe = n

        if columns is None:
            with nogil:
                sp_record_batch = GetResultValue(
                    deref(self.reader).ReadStripe(stripe)
                )
        else:
            # Encode the column names before dropping the GIL.
            c_names = [tobytes(name) for name in columns]
            with nogil:
                sp_record_batch = GetResultValue(
                    deref(self.reader).ReadStripe(stripe, c_names)
                )

        return pyarrow_wrap_batch(sp_record_batch)

    def read(self, columns=None):
        # Read the whole file as a Table, optionally restricted to the
        # named columns.
        cdef:
            shared_ptr[CTable] sp_table
            std_vector[c_string] c_names

        if columns is None:
            with nogil:
                sp_table = GetResultValue(deref(self.reader).Read())
        else:
            c_names = [tobytes(name) for name in columns]
            with nogil:
                sp_table = GetResultValue(deref(self.reader).Read(c_names))

        return pyarrow_wrap_table(sp_table)
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
cdef class ORCWriter(_Weakrefable):
    """Low-level wrapper around the Arrow C++ ORCFileWriter."""
    cdef:
        unique_ptr[ORCFileWriter] writer  # owned C++ writer, set by open()
        shared_ptr[COutputStream] sink    # output stream being written to
        c_bool own_sink                   # True when we opened the sink and must close it

    def open(self, object where, *,
             file_version=None,
             batch_size=None,
             stripe_size=None,
             compression=None,
             compression_block_size=None,
             compression_strategy=None,
             row_index_stride=None,
             padding_tolerance=None,
             dictionary_key_size_threshold=None,
             bloom_filter_columns=None,
             bloom_filter_fpp=None):
        # Open `where` (path-like or file-like) for writing and create the
        # underlying ORCFileWriter with the given options.  Option
        # validation is delegated to _create_write_options.
        cdef:
            shared_ptr[WriteOptions] write_options
            c_string c_where
        try:
            where = _stringify_path(where)
        except TypeError:
            # Not path-like: treat it as an already-open file-like object;
            # the caller retains responsibility for closing it.
            get_writer(where, &self.sink)
            self.own_sink = False
        else:
            # Path-like: open the file ourselves, so close() must close it.
            c_where = tobytes(where)
            with nogil:
                self.sink = GetResultValue(FileOutputStream.Open(c_where))
            self.own_sink = True

        write_options = _create_write_options(
            file_version=file_version,
            batch_size=batch_size,
            stripe_size=stripe_size,
            compression=compression,
            compression_block_size=compression_block_size,
            compression_strategy=compression_strategy,
            row_index_stride=row_index_stride,
            padding_tolerance=padding_tolerance,
            dictionary_key_size_threshold=dictionary_key_size_threshold,
            bloom_filter_columns=bloom_filter_columns,
            bloom_filter_fpp=bloom_filter_fpp
        )

        with nogil:
            # move() transfers ownership of the unique_ptr result.
            self.writer = move(GetResultValue(
                ORCFileWriter.Open(self.sink.get(),
                                   deref(write_options))))

    def write(self, Table table):
        # Append the contents of `table` to the file.
        cdef:
            shared_ptr[CTable] sp_table
        sp_table = pyarrow_unwrap_table(table)
        with nogil:
            check_status(deref(self.writer).Write(deref(sp_table)))

    def close(self):
        # Finalize the ORC file, then close the sink if we own it.
        with nogil:
            check_status(deref(self.writer).Close())
            if self.own_sink:
                check_status(deref(self.sink).Close())
|
parrot/lib/python3.10/site-packages/pyarrow/_parquet.pxd
ADDED
|
@@ -0,0 +1,679 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
# cython: language_level = 3
|
| 20 |
+
|
| 21 |
+
from pyarrow.includes.common cimport *
|
| 22 |
+
from pyarrow.includes.libarrow cimport (CChunkedArray, CScalar, CSchema, CStatus,
|
| 23 |
+
CTable, CMemoryPool, CBuffer,
|
| 24 |
+
CKeyValueMetadata, CRandomAccessFile,
|
| 25 |
+
COutputStream, CCacheOptions,
|
| 26 |
+
TimeUnit, CRecordBatchReader)
|
| 27 |
+
from pyarrow.lib cimport _Weakrefable
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Declarations from the Parquet C++ schema API (parquet::schema namespace).
cdef extern from "parquet/api/schema.h" namespace "parquet::schema" nogil:
    # Opaque schema-tree node types: only passed around, never inspected
    # from Cython, hence the empty bodies.
    cdef cppclass Node:
        pass

    cdef cppclass GroupNode(Node):
        pass

    cdef cppclass PrimitiveNode(Node):
        pass

    cdef cppclass ColumnPath:
        # Dotted-string and path-component renderings of a nested column path.
        c_string ToDotString()
        vector[c_string] ToDotVector()
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# Declarations from the Parquet C++ schema API (parquet namespace).
# The quoted strings after each name are the verbatim C++ spellings that
# Cython pastes into the generated code.
cdef extern from "parquet/api/schema.h" namespace "parquet" nogil:
    # Physical storage types.
    enum ParquetType" parquet::Type::type":
        ParquetType_BOOLEAN" parquet::Type::BOOLEAN"
        ParquetType_INT32" parquet::Type::INT32"
        ParquetType_INT64" parquet::Type::INT64"
        ParquetType_INT96" parquet::Type::INT96"
        ParquetType_FLOAT" parquet::Type::FLOAT"
        ParquetType_DOUBLE" parquet::Type::DOUBLE"
        ParquetType_BYTE_ARRAY" parquet::Type::BYTE_ARRAY"
        ParquetType_FIXED_LEN_BYTE_ARRAY" parquet::Type::FIXED_LEN_BYTE_ARRAY"

    # Logical (annotation) type ids layered on top of the physical types.
    enum ParquetLogicalTypeId" parquet::LogicalType::Type::type":
        ParquetLogicalType_UNDEFINED" parquet::LogicalType::Type::UNDEFINED"
        ParquetLogicalType_STRING" parquet::LogicalType::Type::STRING"
        ParquetLogicalType_MAP" parquet::LogicalType::Type::MAP"
        ParquetLogicalType_LIST" parquet::LogicalType::Type::LIST"
        ParquetLogicalType_ENUM" parquet::LogicalType::Type::ENUM"
        ParquetLogicalType_DECIMAL" parquet::LogicalType::Type::DECIMAL"
        ParquetLogicalType_DATE" parquet::LogicalType::Type::DATE"
        ParquetLogicalType_TIME" parquet::LogicalType::Type::TIME"
        ParquetLogicalType_TIMESTAMP" parquet::LogicalType::Type::TIMESTAMP"
        ParquetLogicalType_INT" parquet::LogicalType::Type::INT"
        ParquetLogicalType_FLOAT16" parquet::LogicalType::Type::FLOAT16"
        ParquetLogicalType_JSON" parquet::LogicalType::Type::JSON"
        ParquetLogicalType_BSON" parquet::LogicalType::Type::BSON"
        ParquetLogicalType_UUID" parquet::LogicalType::Type::UUID"
        ParquetLogicalType_NONE" parquet::LogicalType::Type::NONE"

    # Resolution for TIME/TIMESTAMP logical types.
    enum ParquetTimeUnit" parquet::LogicalType::TimeUnit::unit":
        ParquetTimeUnit_UNKNOWN" parquet::LogicalType::TimeUnit::UNKNOWN"
        ParquetTimeUnit_MILLIS" parquet::LogicalType::TimeUnit::MILLIS"
        ParquetTimeUnit_MICROS" parquet::LogicalType::TimeUnit::MICROS"
        ParquetTimeUnit_NANOS" parquet::LogicalType::TimeUnit::NANOS"

    # Legacy (pre-logical-type) converted-type annotations.
    enum ParquetConvertedType" parquet::ConvertedType::type":
        ParquetConvertedType_NONE" parquet::ConvertedType::NONE"
        ParquetConvertedType_UTF8" parquet::ConvertedType::UTF8"
        ParquetConvertedType_MAP" parquet::ConvertedType::MAP"
        ParquetConvertedType_MAP_KEY_VALUE \
            " parquet::ConvertedType::MAP_KEY_VALUE"
        ParquetConvertedType_LIST" parquet::ConvertedType::LIST"
        ParquetConvertedType_ENUM" parquet::ConvertedType::ENUM"
        ParquetConvertedType_DECIMAL" parquet::ConvertedType::DECIMAL"
        ParquetConvertedType_DATE" parquet::ConvertedType::DATE"
        ParquetConvertedType_TIME_MILLIS" parquet::ConvertedType::TIME_MILLIS"
        ParquetConvertedType_TIME_MICROS" parquet::ConvertedType::TIME_MICROS"
        ParquetConvertedType_TIMESTAMP_MILLIS \
            " parquet::ConvertedType::TIMESTAMP_MILLIS"
        ParquetConvertedType_TIMESTAMP_MICROS \
            " parquet::ConvertedType::TIMESTAMP_MICROS"
        ParquetConvertedType_UINT_8" parquet::ConvertedType::UINT_8"
        ParquetConvertedType_UINT_16" parquet::ConvertedType::UINT_16"
        ParquetConvertedType_UINT_32" parquet::ConvertedType::UINT_32"
        ParquetConvertedType_UINT_64" parquet::ConvertedType::UINT_64"
        ParquetConvertedType_INT_8" parquet::ConvertedType::INT_8"
        ParquetConvertedType_INT_16" parquet::ConvertedType::INT_16"
        ParquetConvertedType_INT_32" parquet::ConvertedType::INT_32"
        ParquetConvertedType_INT_64" parquet::ConvertedType::INT_64"
        ParquetConvertedType_JSON" parquet::ConvertedType::JSON"
        ParquetConvertedType_BSON" parquet::ConvertedType::BSON"
        ParquetConvertedType_INTERVAL" parquet::ConvertedType::INTERVAL"

    # Field repetition (required/optional/repeated).
    # NOTE(review): the verbatim values below use 'parquet::REPETITION::...'
    # while the enum type is 'parquet::Repetition::type' — confirm these
    # spellings against the Parquet C++ headers.
    enum ParquetRepetition" parquet::Repetition::type":
        ParquetRepetition_REQUIRED" parquet::REPETITION::REQUIRED"
        ParquetRepetition_OPTIONAL" parquet::REPETITION::OPTIONAL"
        ParquetRepetition_REPEATED" parquet::REPETITION::REPEATED"

    # Column data-page encodings.
    enum ParquetEncoding" parquet::Encoding::type":
        ParquetEncoding_PLAIN" parquet::Encoding::PLAIN"
        ParquetEncoding_PLAIN_DICTIONARY" parquet::Encoding::PLAIN_DICTIONARY"
        ParquetEncoding_RLE" parquet::Encoding::RLE"
        ParquetEncoding_BIT_PACKED" parquet::Encoding::BIT_PACKED"
        ParquetEncoding_DELTA_BINARY_PACKED \
            " parquet::Encoding::DELTA_BINARY_PACKED"
        ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY \
            " parquet::Encoding::DELTA_LENGTH_BYTE_ARRAY"
        ParquetEncoding_DELTA_BYTE_ARRAY" parquet::Encoding::DELTA_BYTE_ARRAY"
        ParquetEncoding_RLE_DICTIONARY" parquet::Encoding::RLE_DICTIONARY"
        ParquetEncoding_BYTE_STREAM_SPLIT \
            " parquet::Encoding::BYTE_STREAM_SPLIT"

    # Compression codecs.
    enum ParquetCompression" parquet::Compression::type":
        ParquetCompression_UNCOMPRESSED" parquet::Compression::UNCOMPRESSED"
        ParquetCompression_SNAPPY" parquet::Compression::SNAPPY"
        ParquetCompression_GZIP" parquet::Compression::GZIP"
        ParquetCompression_LZO" parquet::Compression::LZO"
        ParquetCompression_BROTLI" parquet::Compression::BROTLI"
        ParquetCompression_LZ4" parquet::Compression::LZ4"
        ParquetCompression_ZSTD" parquet::Compression::ZSTD"

    # Parquet format versions.
    enum ParquetVersion" parquet::ParquetVersion::type":
        ParquetVersion_V1" parquet::ParquetVersion::PARQUET_1_0"
        ParquetVersion_V2_0" parquet::ParquetVersion::PARQUET_2_0"
        ParquetVersion_V2_4" parquet::ParquetVersion::PARQUET_2_4"
        ParquetVersion_V2_6" parquet::ParquetVersion::PARQUET_2_6"

    # Sort order used for min/max statistics.
    enum ParquetSortOrder" parquet::SortOrder::type":
        ParquetSortOrder_SIGNED" parquet::SortOrder::SIGNED"
        ParquetSortOrder_UNSIGNED" parquet::SortOrder::UNSIGNED"
        ParquetSortOrder_UNKNOWN" parquet::SortOrder::UNKNOWN"

    # Base class for logical type objects.
    cdef cppclass CParquetLogicalType" parquet::LogicalType":
        c_string ToString() const
        c_string ToJSON() const
        ParquetLogicalTypeId type() const

    cdef cppclass CParquetDecimalType \
            " parquet::DecimalLogicalType"(CParquetLogicalType):
        int32_t precision() const
        int32_t scale() const

    cdef cppclass CParquetIntType \
            " parquet::IntLogicalType"(CParquetLogicalType):
        int bit_width() const
        c_bool is_signed() const

    cdef cppclass CParquetTimeType \
            " parquet::TimeLogicalType"(CParquetLogicalType):
        c_bool is_adjusted_to_utc() const
        ParquetTimeUnit time_unit() const

    cdef cppclass CParquetTimestampType \
            " parquet::TimestampLogicalType"(CParquetLogicalType):
        c_bool is_adjusted_to_utc() const
        ParquetTimeUnit time_unit() const

    # Per-column schema information (path, levels, physical/logical type).
    cdef cppclass ColumnDescriptor" parquet::ColumnDescriptor":
        c_bool Equals(const ColumnDescriptor& other)

        shared_ptr[ColumnPath] path()
        int16_t max_definition_level()
        int16_t max_repetition_level()

        ParquetType physical_type()
        const shared_ptr[const CParquetLogicalType]& logical_type()
        ParquetConvertedType converted_type()
        const c_string& name()
        int type_length()
        int type_precision()
        int type_scale()

    # Whole-file schema: a tree of Nodes plus flattened column descriptors.
    cdef cppclass SchemaDescriptor:
        const ColumnDescriptor* Column(int i)
        shared_ptr[Node] schema()
        GroupNode* group()
        c_bool Equals(const SchemaDescriptor& other)
        c_string ToString()
        int num_columns()

    # Render an encoded statistics value for display.
    cdef c_string FormatStatValue(ParquetType parquet_type, c_string val)

    # Encryption cipher variants.
    enum ParquetCipher" parquet::ParquetCipher::type":
        ParquetCipher_AES_GCM_V1" parquet::ParquetCipher::AES_GCM_V1"
        ParquetCipher_AES_GCM_CTR_V1" parquet::ParquetCipher::AES_GCM_CTR_V1"

    # Additional-authenticated-data metadata for encryption.
    struct AadMetadata:
        c_string aad_prefix
        c_string aad_file_unique
        c_bool supply_aad_prefix

    struct EncryptionAlgorithm:
        ParquetCipher algorithm
        AadMetadata aad
|
| 208 |
+
|
| 209 |
+
cdef extern from "parquet/api/reader.h" namespace "parquet" nogil:
|
| 210 |
+
cdef cppclass ColumnReader:
|
| 211 |
+
pass
|
| 212 |
+
|
| 213 |
+
cdef cppclass BoolReader(ColumnReader):
|
| 214 |
+
pass
|
| 215 |
+
|
| 216 |
+
cdef cppclass Int32Reader(ColumnReader):
|
| 217 |
+
pass
|
| 218 |
+
|
| 219 |
+
cdef cppclass Int64Reader(ColumnReader):
|
| 220 |
+
pass
|
| 221 |
+
|
| 222 |
+
cdef cppclass Int96Reader(ColumnReader):
|
| 223 |
+
pass
|
| 224 |
+
|
| 225 |
+
cdef cppclass FloatReader(ColumnReader):
|
| 226 |
+
pass
|
| 227 |
+
|
| 228 |
+
cdef cppclass DoubleReader(ColumnReader):
|
| 229 |
+
pass
|
| 230 |
+
|
| 231 |
+
cdef cppclass ByteArrayReader(ColumnReader):
|
| 232 |
+
pass
|
| 233 |
+
|
| 234 |
+
cdef cppclass RowGroupReader:
|
| 235 |
+
pass
|
| 236 |
+
|
| 237 |
+
cdef cppclass CEncodedStatistics" parquet::EncodedStatistics":
|
| 238 |
+
const c_string& max() const
|
| 239 |
+
const c_string& min() const
|
| 240 |
+
int64_t null_count
|
| 241 |
+
int64_t distinct_count
|
| 242 |
+
bint has_min
|
| 243 |
+
bint has_max
|
| 244 |
+
bint has_null_count
|
| 245 |
+
bint has_distinct_count
|
| 246 |
+
|
| 247 |
+
cdef cppclass ParquetByteArray" parquet::ByteArray":
|
| 248 |
+
uint32_t len
|
| 249 |
+
const uint8_t* ptr
|
| 250 |
+
|
| 251 |
+
cdef cppclass ParquetFLBA" parquet::FLBA":
|
| 252 |
+
const uint8_t* ptr
|
| 253 |
+
|
| 254 |
+
cdef cppclass CStatistics" parquet::Statistics":
|
| 255 |
+
int64_t null_count() const
|
| 256 |
+
int64_t distinct_count() const
|
| 257 |
+
int64_t num_values() const
|
| 258 |
+
bint HasMinMax()
|
| 259 |
+
bint HasNullCount()
|
| 260 |
+
bint HasDistinctCount()
|
| 261 |
+
c_bool Equals(const CStatistics&) const
|
| 262 |
+
void Reset()
|
| 263 |
+
c_string EncodeMin()
|
| 264 |
+
c_string EncodeMax()
|
| 265 |
+
CEncodedStatistics Encode()
|
| 266 |
+
void SetComparator()
|
| 267 |
+
ParquetType physical_type() const
|
| 268 |
+
const ColumnDescriptor* descr() const
|
| 269 |
+
|
| 270 |
+
cdef cppclass CBoolStatistics" parquet::BoolStatistics"(CStatistics):
|
| 271 |
+
c_bool min()
|
| 272 |
+
c_bool max()
|
| 273 |
+
|
| 274 |
+
cdef cppclass CInt32Statistics" parquet::Int32Statistics"(CStatistics):
|
| 275 |
+
int32_t min()
|
| 276 |
+
int32_t max()
|
| 277 |
+
|
| 278 |
+
cdef cppclass CInt64Statistics" parquet::Int64Statistics"(CStatistics):
|
| 279 |
+
int64_t min()
|
| 280 |
+
int64_t max()
|
| 281 |
+
|
| 282 |
+
cdef cppclass CFloatStatistics" parquet::FloatStatistics"(CStatistics):
|
| 283 |
+
float min()
|
| 284 |
+
float max()
|
| 285 |
+
|
| 286 |
+
cdef cppclass CDoubleStatistics" parquet::DoubleStatistics"(CStatistics):
|
| 287 |
+
double min()
|
| 288 |
+
double max()
|
| 289 |
+
|
| 290 |
+
cdef cppclass CByteArrayStatistics \
|
| 291 |
+
" parquet::ByteArrayStatistics"(CStatistics):
|
| 292 |
+
ParquetByteArray min()
|
| 293 |
+
ParquetByteArray max()
|
| 294 |
+
|
| 295 |
+
cdef cppclass CFLBAStatistics" parquet::FLBAStatistics"(CStatistics):
|
| 296 |
+
ParquetFLBA min()
|
| 297 |
+
ParquetFLBA max()
|
| 298 |
+
|
| 299 |
+
cdef cppclass CColumnCryptoMetaData" parquet::ColumnCryptoMetaData":
|
| 300 |
+
shared_ptr[ColumnPath] path_in_schema() const
|
| 301 |
+
c_bool encrypted_with_footer_key() const
|
| 302 |
+
const c_string& key_metadata() const
|
| 303 |
+
|
| 304 |
+
cdef cppclass ParquetIndexLocation" parquet::IndexLocation":
|
| 305 |
+
int64_t offset
|
| 306 |
+
int32_t length
|
| 307 |
+
|
| 308 |
+
cdef cppclass CColumnChunkMetaData" parquet::ColumnChunkMetaData":
|
| 309 |
+
int64_t file_offset() const
|
| 310 |
+
const c_string& file_path() const
|
| 311 |
+
|
| 312 |
+
c_bool is_metadata_set() const
|
| 313 |
+
ParquetType type() const
|
| 314 |
+
int64_t num_values() const
|
| 315 |
+
shared_ptr[ColumnPath] path_in_schema() const
|
| 316 |
+
bint is_stats_set() const
|
| 317 |
+
shared_ptr[CStatistics] statistics() const
|
| 318 |
+
ParquetCompression compression() const
|
| 319 |
+
const vector[ParquetEncoding]& encodings() const
|
| 320 |
+
c_bool Equals(const CColumnChunkMetaData&) const
|
| 321 |
+
|
| 322 |
+
int64_t has_dictionary_page() const
|
| 323 |
+
int64_t dictionary_page_offset() const
|
| 324 |
+
int64_t data_page_offset() const
|
| 325 |
+
int64_t index_page_offset() const
|
| 326 |
+
int64_t total_compressed_size() const
|
| 327 |
+
int64_t total_uncompressed_size() const
|
| 328 |
+
unique_ptr[CColumnCryptoMetaData] crypto_metadata() const
|
| 329 |
+
optional[ParquetIndexLocation] GetColumnIndexLocation() const
|
| 330 |
+
optional[ParquetIndexLocation] GetOffsetIndexLocation() const
|
| 331 |
+
|
| 332 |
+
struct CSortingColumn" parquet::SortingColumn":
|
| 333 |
+
int column_idx
|
| 334 |
+
c_bool descending
|
| 335 |
+
c_bool nulls_first
|
| 336 |
+
|
| 337 |
+
cdef cppclass CRowGroupMetaData" parquet::RowGroupMetaData":
|
| 338 |
+
c_bool Equals(const CRowGroupMetaData&) const
|
| 339 |
+
int num_columns() const
|
| 340 |
+
int64_t num_rows() const
|
| 341 |
+
int64_t total_byte_size() const
|
| 342 |
+
vector[CSortingColumn] sorting_columns() const
|
| 343 |
+
unique_ptr[CColumnChunkMetaData] ColumnChunk(int i) const
|
| 344 |
+
|
| 345 |
+
cdef cppclass CFileMetaData" parquet::FileMetaData":
|
| 346 |
+
c_bool Equals(const CFileMetaData&) const
|
| 347 |
+
uint32_t size()
|
| 348 |
+
int num_columns()
|
| 349 |
+
int64_t num_rows()
|
| 350 |
+
int num_row_groups()
|
| 351 |
+
ParquetVersion version()
|
| 352 |
+
const c_string created_by()
|
| 353 |
+
int num_schema_elements()
|
| 354 |
+
|
| 355 |
+
void set_file_path(const c_string& path)
|
| 356 |
+
void AppendRowGroups(const CFileMetaData& other) except +
|
| 357 |
+
|
| 358 |
+
unique_ptr[CRowGroupMetaData] RowGroup(int i)
|
| 359 |
+
const SchemaDescriptor* schema()
|
| 360 |
+
shared_ptr[const CKeyValueMetadata] key_value_metadata() const
|
| 361 |
+
void WriteTo(COutputStream* dst) const
|
| 362 |
+
|
| 363 |
+
inline c_bool is_encryption_algorithm_set() const
|
| 364 |
+
inline EncryptionAlgorithm encryption_algorithm() const
|
| 365 |
+
inline const c_string& footer_signing_key_metadata() const
|
| 366 |
+
|
| 367 |
+
cdef shared_ptr[CFileMetaData] CFileMetaData_Make \
|
| 368 |
+
" parquet::FileMetaData::Make"(const void* serialized_metadata,
|
| 369 |
+
uint32_t* metadata_len)
|
| 370 |
+
|
| 371 |
+
cdef cppclass CReaderProperties" parquet::ReaderProperties":
|
| 372 |
+
c_bool is_buffered_stream_enabled() const
|
| 373 |
+
void enable_buffered_stream()
|
| 374 |
+
void disable_buffered_stream()
|
| 375 |
+
|
| 376 |
+
void set_buffer_size(int64_t buf_size)
|
| 377 |
+
int64_t buffer_size() const
|
| 378 |
+
|
| 379 |
+
void set_thrift_string_size_limit(int32_t size)
|
| 380 |
+
int32_t thrift_string_size_limit() const
|
| 381 |
+
|
| 382 |
+
void set_thrift_container_size_limit(int32_t size)
|
| 383 |
+
int32_t thrift_container_size_limit() const
|
| 384 |
+
|
| 385 |
+
void file_decryption_properties(shared_ptr[CFileDecryptionProperties]
|
| 386 |
+
decryption)
|
| 387 |
+
shared_ptr[CFileDecryptionProperties] file_decryption_properties() \
|
| 388 |
+
const
|
| 389 |
+
|
| 390 |
+
c_bool page_checksum_verification() const
|
| 391 |
+
void set_page_checksum_verification(c_bool check_crc)
|
| 392 |
+
|
| 393 |
+
CReaderProperties default_reader_properties()
|
| 394 |
+
|
| 395 |
+
cdef cppclass ArrowReaderProperties:
|
| 396 |
+
ArrowReaderProperties()
|
| 397 |
+
void set_read_dictionary(int column_index, c_bool read_dict)
|
| 398 |
+
c_bool read_dictionary()
|
| 399 |
+
void set_batch_size(int64_t batch_size)
|
| 400 |
+
int64_t batch_size()
|
| 401 |
+
void set_pre_buffer(c_bool pre_buffer)
|
| 402 |
+
c_bool pre_buffer() const
|
| 403 |
+
void set_cache_options(CCacheOptions options)
|
| 404 |
+
CCacheOptions cache_options() const
|
| 405 |
+
void set_coerce_int96_timestamp_unit(TimeUnit unit)
|
| 406 |
+
TimeUnit coerce_int96_timestamp_unit() const
|
| 407 |
+
|
| 408 |
+
ArrowReaderProperties default_arrow_reader_properties()
|
| 409 |
+
|
| 410 |
+
cdef cppclass ParquetFileReader:
|
| 411 |
+
shared_ptr[CFileMetaData] metadata()
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
cdef extern from "parquet/api/writer.h" namespace "parquet" nogil:
|
| 415 |
+
cdef cppclass WriterProperties:
|
| 416 |
+
cppclass Builder:
|
| 417 |
+
Builder* data_page_version(ParquetDataPageVersion version)
|
| 418 |
+
Builder* version(ParquetVersion version)
|
| 419 |
+
Builder* compression(ParquetCompression codec)
|
| 420 |
+
Builder* compression(const c_string& path,
|
| 421 |
+
ParquetCompression codec)
|
| 422 |
+
Builder* compression_level(int compression_level)
|
| 423 |
+
Builder* compression_level(const c_string& path,
|
| 424 |
+
int compression_level)
|
| 425 |
+
Builder* encryption(
|
| 426 |
+
shared_ptr[CFileEncryptionProperties]
|
| 427 |
+
file_encryption_properties)
|
| 428 |
+
Builder* disable_dictionary()
|
| 429 |
+
Builder* enable_dictionary()
|
| 430 |
+
Builder* enable_dictionary(const c_string& path)
|
| 431 |
+
Builder* set_sorting_columns(vector[CSortingColumn] sorting_columns)
|
| 432 |
+
Builder* disable_statistics()
|
| 433 |
+
Builder* enable_statistics()
|
| 434 |
+
Builder* enable_statistics(const c_string& path)
|
| 435 |
+
Builder* enable_store_decimal_as_integer()
|
| 436 |
+
Builder* disable_store_decimal_as_integer()
|
| 437 |
+
Builder* data_pagesize(int64_t size)
|
| 438 |
+
Builder* encoding(ParquetEncoding encoding)
|
| 439 |
+
Builder* encoding(const c_string& path,
|
| 440 |
+
ParquetEncoding encoding)
|
| 441 |
+
Builder* max_row_group_length(int64_t size)
|
| 442 |
+
Builder* write_batch_size(int64_t batch_size)
|
| 443 |
+
Builder* dictionary_pagesize_limit(int64_t dictionary_pagesize_limit)
|
| 444 |
+
Builder* enable_write_page_index()
|
| 445 |
+
Builder* disable_write_page_index()
|
| 446 |
+
Builder* enable_page_checksum()
|
| 447 |
+
Builder* disable_page_checksum()
|
| 448 |
+
shared_ptr[WriterProperties] build()
|
| 449 |
+
|
| 450 |
+
cdef cppclass ArrowWriterProperties:
|
| 451 |
+
cppclass Builder:
|
| 452 |
+
Builder()
|
| 453 |
+
Builder* disable_deprecated_int96_timestamps()
|
| 454 |
+
Builder* enable_deprecated_int96_timestamps()
|
| 455 |
+
Builder* coerce_timestamps(TimeUnit unit)
|
| 456 |
+
Builder* allow_truncated_timestamps()
|
| 457 |
+
Builder* disallow_truncated_timestamps()
|
| 458 |
+
Builder* store_schema()
|
| 459 |
+
Builder* enable_compliant_nested_types()
|
| 460 |
+
Builder* disable_compliant_nested_types()
|
| 461 |
+
Builder* set_engine_version(ArrowWriterEngineVersion version)
|
| 462 |
+
shared_ptr[ArrowWriterProperties] build()
|
| 463 |
+
c_bool support_deprecated_int96_timestamps()
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
cdef extern from "parquet/arrow/reader.h" namespace "parquet::arrow" nogil:
|
| 467 |
+
cdef cppclass FileReader:
|
| 468 |
+
FileReader(CMemoryPool* pool, unique_ptr[ParquetFileReader] reader)
|
| 469 |
+
|
| 470 |
+
CStatus GetSchema(shared_ptr[CSchema]* out)
|
| 471 |
+
|
| 472 |
+
CStatus ReadColumn(int i, shared_ptr[CChunkedArray]* out)
|
| 473 |
+
CStatus ReadSchemaField(int i, shared_ptr[CChunkedArray]* out)
|
| 474 |
+
|
| 475 |
+
int num_row_groups()
|
| 476 |
+
CStatus ReadRowGroup(int i, shared_ptr[CTable]* out)
|
| 477 |
+
CStatus ReadRowGroup(int i, const vector[int]& column_indices,
|
| 478 |
+
shared_ptr[CTable]* out)
|
| 479 |
+
|
| 480 |
+
CStatus ReadRowGroups(const vector[int]& row_groups,
|
| 481 |
+
shared_ptr[CTable]* out)
|
| 482 |
+
CStatus ReadRowGroups(const vector[int]& row_groups,
|
| 483 |
+
const vector[int]& column_indices,
|
| 484 |
+
shared_ptr[CTable]* out)
|
| 485 |
+
|
| 486 |
+
CStatus GetRecordBatchReader(const vector[int]& row_group_indices,
|
| 487 |
+
const vector[int]& column_indices,
|
| 488 |
+
unique_ptr[CRecordBatchReader]* out)
|
| 489 |
+
CStatus GetRecordBatchReader(const vector[int]& row_group_indices,
|
| 490 |
+
unique_ptr[CRecordBatchReader]* out)
|
| 491 |
+
|
| 492 |
+
CStatus ReadTable(shared_ptr[CTable]* out)
|
| 493 |
+
CStatus ReadTable(const vector[int]& column_indices,
|
| 494 |
+
shared_ptr[CTable]* out)
|
| 495 |
+
|
| 496 |
+
CStatus ScanContents(vector[int] columns, int32_t column_batch_size,
|
| 497 |
+
int64_t* num_rows)
|
| 498 |
+
|
| 499 |
+
const ParquetFileReader* parquet_reader()
|
| 500 |
+
|
| 501 |
+
void set_use_threads(c_bool use_threads)
|
| 502 |
+
|
| 503 |
+
void set_batch_size(int64_t batch_size)
|
| 504 |
+
|
| 505 |
+
cdef cppclass FileReaderBuilder:
|
| 506 |
+
FileReaderBuilder()
|
| 507 |
+
CStatus Open(const shared_ptr[CRandomAccessFile]& file,
|
| 508 |
+
const CReaderProperties& properties,
|
| 509 |
+
const shared_ptr[CFileMetaData]& metadata)
|
| 510 |
+
|
| 511 |
+
ParquetFileReader* raw_reader()
|
| 512 |
+
FileReaderBuilder* memory_pool(CMemoryPool*)
|
| 513 |
+
FileReaderBuilder* properties(const ArrowReaderProperties&)
|
| 514 |
+
CStatus Build(unique_ptr[FileReader]* out)
|
| 515 |
+
|
| 516 |
+
CStatus FromParquetSchema(
|
| 517 |
+
const SchemaDescriptor* parquet_schema,
|
| 518 |
+
const ArrowReaderProperties& properties,
|
| 519 |
+
const shared_ptr[const CKeyValueMetadata]& key_value_metadata,
|
| 520 |
+
shared_ptr[CSchema]* out)
|
| 521 |
+
|
| 522 |
+
CStatus StatisticsAsScalars(const CStatistics& Statistics,
|
| 523 |
+
shared_ptr[CScalar]* min,
|
| 524 |
+
shared_ptr[CScalar]* max)
|
| 525 |
+
|
| 526 |
+
cdef extern from "parquet/arrow/schema.h" namespace "parquet::arrow" nogil:
|
| 527 |
+
|
| 528 |
+
CStatus ToParquetSchema(
|
| 529 |
+
const CSchema* arrow_schema,
|
| 530 |
+
const WriterProperties& properties,
|
| 531 |
+
const ArrowWriterProperties& arrow_properties,
|
| 532 |
+
shared_ptr[SchemaDescriptor]* out)
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
cdef extern from "parquet/properties.h" namespace "parquet" nogil:
|
| 536 |
+
cdef enum ArrowWriterEngineVersion:
|
| 537 |
+
V1 "parquet::ArrowWriterProperties::V1",
|
| 538 |
+
V2 "parquet::ArrowWriterProperties::V2"
|
| 539 |
+
|
| 540 |
+
cdef cppclass ParquetDataPageVersion:
|
| 541 |
+
pass
|
| 542 |
+
|
| 543 |
+
cdef ParquetDataPageVersion ParquetDataPageVersion_V1 \
|
| 544 |
+
" parquet::ParquetDataPageVersion::V1"
|
| 545 |
+
cdef ParquetDataPageVersion ParquetDataPageVersion_V2 \
|
| 546 |
+
" parquet::ParquetDataPageVersion::V2"
|
| 547 |
+
|
| 548 |
+
cdef extern from "parquet/arrow/writer.h" namespace "parquet::arrow" nogil:
|
| 549 |
+
cdef cppclass FileWriter:
|
| 550 |
+
|
| 551 |
+
@staticmethod
|
| 552 |
+
CResult[unique_ptr[FileWriter]] Open(const CSchema& schema, CMemoryPool* pool,
|
| 553 |
+
const shared_ptr[COutputStream]& sink,
|
| 554 |
+
const shared_ptr[WriterProperties]& properties,
|
| 555 |
+
const shared_ptr[ArrowWriterProperties]& arrow_properties)
|
| 556 |
+
|
| 557 |
+
CStatus WriteTable(const CTable& table, int64_t chunk_size)
|
| 558 |
+
CStatus NewRowGroup(int64_t chunk_size)
|
| 559 |
+
CStatus Close()
|
| 560 |
+
CStatus AddKeyValueMetadata(const shared_ptr[const CKeyValueMetadata]& key_value_metadata)
|
| 561 |
+
|
| 562 |
+
const shared_ptr[CFileMetaData] metadata() const
|
| 563 |
+
|
| 564 |
+
CStatus WriteMetaDataFile(
|
| 565 |
+
const CFileMetaData& file_metadata,
|
| 566 |
+
const COutputStream* sink)
|
| 567 |
+
|
| 568 |
+
cdef class FileEncryptionProperties:
|
| 569 |
+
"""File-level encryption properties for the low-level API"""
|
| 570 |
+
cdef:
|
| 571 |
+
shared_ptr[CFileEncryptionProperties] properties
|
| 572 |
+
|
| 573 |
+
@staticmethod
|
| 574 |
+
cdef inline FileEncryptionProperties wrap(
|
| 575 |
+
shared_ptr[CFileEncryptionProperties] properties):
|
| 576 |
+
|
| 577 |
+
result = FileEncryptionProperties()
|
| 578 |
+
result.properties = properties
|
| 579 |
+
return result
|
| 580 |
+
|
| 581 |
+
cdef inline shared_ptr[CFileEncryptionProperties] unwrap(self):
|
| 582 |
+
return self.properties
|
| 583 |
+
|
| 584 |
+
cdef shared_ptr[WriterProperties] _create_writer_properties(
|
| 585 |
+
use_dictionary=*,
|
| 586 |
+
compression=*,
|
| 587 |
+
version=*,
|
| 588 |
+
write_statistics=*,
|
| 589 |
+
data_page_size=*,
|
| 590 |
+
compression_level=*,
|
| 591 |
+
use_byte_stream_split=*,
|
| 592 |
+
column_encoding=*,
|
| 593 |
+
data_page_version=*,
|
| 594 |
+
FileEncryptionProperties encryption_properties=*,
|
| 595 |
+
write_batch_size=*,
|
| 596 |
+
dictionary_pagesize_limit=*,
|
| 597 |
+
write_page_index=*,
|
| 598 |
+
write_page_checksum=*,
|
| 599 |
+
sorting_columns=*,
|
| 600 |
+
store_decimal_as_integer=*,
|
| 601 |
+
) except *
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties(
|
| 605 |
+
use_deprecated_int96_timestamps=*,
|
| 606 |
+
coerce_timestamps=*,
|
| 607 |
+
allow_truncated_timestamps=*,
|
| 608 |
+
writer_engine_version=*,
|
| 609 |
+
use_compliant_nested_type=*,
|
| 610 |
+
store_schema=*,
|
| 611 |
+
) except *
|
| 612 |
+
|
| 613 |
+
cdef class ParquetSchema(_Weakrefable):
|
| 614 |
+
cdef:
|
| 615 |
+
FileMetaData parent # the FileMetaData owning the SchemaDescriptor
|
| 616 |
+
const SchemaDescriptor* schema
|
| 617 |
+
|
| 618 |
+
cdef class FileMetaData(_Weakrefable):
|
| 619 |
+
cdef:
|
| 620 |
+
shared_ptr[CFileMetaData] sp_metadata
|
| 621 |
+
CFileMetaData* _metadata
|
| 622 |
+
ParquetSchema _schema
|
| 623 |
+
|
| 624 |
+
cdef inline init(self, const shared_ptr[CFileMetaData]& metadata):
|
| 625 |
+
self.sp_metadata = metadata
|
| 626 |
+
self._metadata = metadata.get()
|
| 627 |
+
|
| 628 |
+
cdef class RowGroupMetaData(_Weakrefable):
|
| 629 |
+
cdef:
|
| 630 |
+
int index # for pickling support
|
| 631 |
+
unique_ptr[CRowGroupMetaData] up_metadata
|
| 632 |
+
CRowGroupMetaData* metadata
|
| 633 |
+
FileMetaData parent
|
| 634 |
+
|
| 635 |
+
cdef class ColumnChunkMetaData(_Weakrefable):
|
| 636 |
+
cdef:
|
| 637 |
+
unique_ptr[CColumnChunkMetaData] up_metadata
|
| 638 |
+
CColumnChunkMetaData* metadata
|
| 639 |
+
RowGroupMetaData parent
|
| 640 |
+
|
| 641 |
+
cdef inline init(self, RowGroupMetaData parent, int i):
|
| 642 |
+
self.up_metadata = parent.metadata.ColumnChunk(i)
|
| 643 |
+
self.metadata = self.up_metadata.get()
|
| 644 |
+
self.parent = parent
|
| 645 |
+
|
| 646 |
+
cdef class Statistics(_Weakrefable):
|
| 647 |
+
cdef:
|
| 648 |
+
shared_ptr[CStatistics] statistics
|
| 649 |
+
ColumnChunkMetaData parent
|
| 650 |
+
|
| 651 |
+
cdef inline init(self, const shared_ptr[CStatistics]& statistics,
|
| 652 |
+
ColumnChunkMetaData parent):
|
| 653 |
+
self.statistics = statistics
|
| 654 |
+
self.parent = parent
|
| 655 |
+
|
| 656 |
+
cdef extern from "parquet/encryption/encryption.h" namespace "parquet" nogil:
|
| 657 |
+
cdef cppclass CFileDecryptionProperties\
|
| 658 |
+
" parquet::FileDecryptionProperties":
|
| 659 |
+
pass
|
| 660 |
+
|
| 661 |
+
cdef cppclass CFileEncryptionProperties\
|
| 662 |
+
" parquet::FileEncryptionProperties":
|
| 663 |
+
pass
|
| 664 |
+
|
| 665 |
+
cdef class FileDecryptionProperties:
|
| 666 |
+
"""File-level decryption properties for the low-level API"""
|
| 667 |
+
cdef:
|
| 668 |
+
shared_ptr[CFileDecryptionProperties] properties
|
| 669 |
+
|
| 670 |
+
@staticmethod
|
| 671 |
+
cdef inline FileDecryptionProperties wrap(
|
| 672 |
+
shared_ptr[CFileDecryptionProperties] properties):
|
| 673 |
+
|
| 674 |
+
result = FileDecryptionProperties()
|
| 675 |
+
result.properties = properties
|
| 676 |
+
return result
|
| 677 |
+
|
| 678 |
+
cdef inline shared_ptr[CFileDecryptionProperties] unwrap(self):
|
| 679 |
+
return self.properties
|
parrot/lib/python3.10/site-packages/pyarrow/_parquet.pyx
ADDED
|
@@ -0,0 +1,2253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# cython: profile=False
|
| 19 |
+
# distutils: language = c++
|
| 20 |
+
|
| 21 |
+
from collections.abc import Sequence
|
| 22 |
+
from textwrap import indent
|
| 23 |
+
import warnings
|
| 24 |
+
|
| 25 |
+
from cython.operator cimport dereference as deref
|
| 26 |
+
from pyarrow.includes.common cimport *
|
| 27 |
+
from pyarrow.includes.libarrow cimport *
|
| 28 |
+
from pyarrow.includes.libarrow_python cimport *
|
| 29 |
+
from pyarrow.lib cimport (_Weakrefable, Buffer, Schema,
|
| 30 |
+
check_status,
|
| 31 |
+
MemoryPool, maybe_unbox_memory_pool,
|
| 32 |
+
Table, KeyValueMetadata,
|
| 33 |
+
pyarrow_wrap_chunked_array,
|
| 34 |
+
pyarrow_wrap_schema,
|
| 35 |
+
pyarrow_unwrap_metadata,
|
| 36 |
+
pyarrow_unwrap_schema,
|
| 37 |
+
pyarrow_wrap_table,
|
| 38 |
+
pyarrow_wrap_batch,
|
| 39 |
+
pyarrow_wrap_scalar,
|
| 40 |
+
NativeFile, get_reader, get_writer,
|
| 41 |
+
string_to_timeunit)
|
| 42 |
+
|
| 43 |
+
from pyarrow.lib import (ArrowException, NativeFile, BufferOutputStream,
|
| 44 |
+
_stringify_path,
|
| 45 |
+
tobytes, frombytes, is_threading_enabled)
|
| 46 |
+
|
| 47 |
+
cimport cpython as cp
|
| 48 |
+
|
| 49 |
+
_DEFAULT_ROW_GROUP_SIZE = 1024*1024
|
| 50 |
+
_MAX_ROW_GROUP_SIZE = 64*1024*1024
|
| 51 |
+
|
| 52 |
+
cdef class Statistics(_Weakrefable):
    """Statistics for a single column in a single row group.

    Thin wrapper over the C++ ``parquet::Statistics`` object held in
    ``self.statistics`` (a shared_ptr; presumably declared in the companion
    ``.pxd`` file — not visible in this chunk). Instances are produced by
    :attr:`ColumnChunkMetaData.statistics`, not constructed by users.
    """

    def __cinit__(self):
        # No Python-level construction; the C++ statistics pointer is
        # injected after instantiation.
        pass

    def __repr__(self):
        return """{}
  has_min_max: {}
  min: {}
  max: {}
  null_count: {}
  distinct_count: {}
  num_values: {}
  physical_type: {}
  logical_type: {}
  converted_type (legacy): {}""".format(object.__repr__(self),
                                        self.has_min_max,
                                        self.min,
                                        self.max,
                                        self.null_count,
                                        self.distinct_count,
                                        self.num_values,
                                        self.physical_type,
                                        str(self.logical_type),
                                        self.converted_type)

    def to_dict(self):
        """
        Get dictionary representation of statistics.

        Returns
        -------
        dict
            Dictionary with a key for each attribute of this class.

        Notes
        -----
        ``logical_type`` and ``converted_type`` are shown by ``__repr__``
        but are intentionally not part of the dict representation.
        """
        d = dict(
            has_min_max=self.has_min_max,
            min=self.min,
            max=self.max,
            null_count=self.null_count,
            distinct_count=self.distinct_count,
            num_values=self.num_values,
            physical_type=self.physical_type
        )
        return d

    def __eq__(self, other):
        # equals() raises TypeError for a non-Statistics operand (typed
        # parameter); translate that to NotImplemented so Python can try
        # the reflected comparison.
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def equals(self, Statistics other):
        """
        Return whether the two column statistics objects are equal.

        Parameters
        ----------
        other : Statistics
            Statistics to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self.statistics.get().Equals(deref(other.statistics.get()))

    @property
    def has_min_max(self):
        """Whether min and max are present (bool)."""
        return self.statistics.get().HasMinMax()

    @property
    def has_null_count(self):
        """Whether null count is present (bool)."""
        return self.statistics.get().HasNullCount()

    @property
    def has_distinct_count(self):
        """Whether distinct count is present (bool)."""
        return self.statistics.get().HasDistinctCount()

    @property
    def min_raw(self):
        """Min value as physical type (bool, int, float, or bytes)."""
        # Raw (physical) value: no logical-type conversion is applied.
        if self.has_min_max:
            return _cast_statistic_raw_min(self.statistics.get())
        else:
            return None

    @property
    def max_raw(self):
        """Max value as physical type (bool, int, float, or bytes)."""
        if self.has_min_max:
            return _cast_statistic_raw_max(self.statistics.get())
        else:
            return None

    @property
    def min(self):
        """
        Min value as logical type.

        Returned as the Python equivalent of logical type, such as datetime.date
        for dates and decimal.Decimal for decimals.
        """
        # _cast_statistics converts both bounds; only the min is used here.
        if self.has_min_max:
            min_scalar, _ = _cast_statistics(self.statistics.get())
            return min_scalar.as_py()
        else:
            return None

    @property
    def max(self):
        """
        Max value as logical type.

        Returned as the Python equivalent of logical type, such as datetime.date
        for dates and decimal.Decimal for decimals.
        """
        if self.has_min_max:
            _, max_scalar = _cast_statistics(self.statistics.get())
            return max_scalar.as_py()
        else:
            return None

    @property
    def null_count(self):
        """Number of null values in chunk (int), or None if not set."""
        if self.has_null_count:
            return self.statistics.get().null_count()
        else:
            return None

    @property
    def distinct_count(self):
        """Distinct number of values in chunk (int), or None if not set."""
        if self.has_distinct_count:
            return self.statistics.get().distinct_count()
        else:
            return None

    @property
    def num_values(self):
        """Number of non-null values (int)."""
        return self.statistics.get().num_values()

    @property
    def physical_type(self):
        """Physical type of column (str)."""
        raw_physical_type = self.statistics.get().physical_type()
        return physical_type_name_from_enum(raw_physical_type)

    @property
    def logical_type(self):
        """Logical type of column (:class:`ParquetLogicalType`)."""
        return wrap_logical_type(self.statistics.get().descr().logical_type())

    @property
    def converted_type(self):
        """Legacy converted type (str or None)."""
        raw_converted_type = self.statistics.get().descr().converted_type()
        return converted_type_name_from_enum(raw_converted_type)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
cdef class ParquetLogicalType(_Weakrefable):
    """Logical type of parquet type.

    Wraps a C++ ``parquet::LogicalType``. Created via ``wrap_logical_type``;
    not meant to be constructed directly by users.
    """
    cdef:
        # C-level holder for the wrapped logical type. NOTE: the Python
        # property named ``type`` below is what callers see; inside the
        # class, ``self.type`` resolves to this cdef attribute.
        shared_ptr[const CParquetLogicalType] type

    def __cinit__(self):
        pass

    cdef init(self, const shared_ptr[const CParquetLogicalType]& type):
        # Inject the C++ logical type after construction (cdef-only API).
        self.type = type

    def __repr__(self):
        return "{}\n {}".format(object.__repr__(self), str(self))

    def __str__(self):
        # safe=True: tolerate non-UTF-8 bytes in the C++ string.
        return frombytes(self.type.get().ToString(), safe=True)

    def to_json(self):
        """
        Get a JSON string containing type and type parameters.

        Returns
        -------
        json : str
            JSON representation of type, with at least a field called 'Type'
            which contains the type name. If the type is parameterized, such
            as a decimal with scale and precision, will contain those as fields
            as well.
        """
        return frombytes(self.type.get().ToJSON())

    @property
    def type(self):
        """Name of the logical type (str)."""
        return logical_type_name_from_enum(self.type.get().type())
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
cdef wrap_logical_type(const shared_ptr[const CParquetLogicalType]& type):
    # Construct a ParquetLogicalType wrapper around the given C++
    # logical type shared_ptr.
    cdef ParquetLogicalType out = ParquetLogicalType()
    out.init(type)
    return out
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
cdef _cast_statistic_raw_min(CStatistics* statistics):
    # Read the raw (physical-typed) min value: dispatch on the column's
    # physical type and downcast the generic CStatistics pointer to the
    # matching typed statistics subclass.
    cdef ParquetType physical_type = statistics.physical_type()
    # Fixed-len byte arrays carry no length of their own; fetch it from
    # the column descriptor for _box_flba below.
    cdef uint32_t type_length = statistics.descr().type_length()
    if physical_type == ParquetType_BOOLEAN:
        return (<CBoolStatistics*> statistics).min()
    elif physical_type == ParquetType_INT32:
        return (<CInt32Statistics*> statistics).min()
    elif physical_type == ParquetType_INT64:
        return (<CInt64Statistics*> statistics).min()
    elif physical_type == ParquetType_FLOAT:
        return (<CFloatStatistics*> statistics).min()
    elif physical_type == ParquetType_DOUBLE:
        return (<CDoubleStatistics*> statistics).min()
    elif physical_type == ParquetType_BYTE_ARRAY:
        return _box_byte_array((<CByteArrayStatistics*> statistics).min())
    elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY:
        return _box_flba((<CFLBAStatistics*> statistics).min(), type_length)
    # NOTE(review): any other physical type (e.g. INT96) falls through and
    # implicitly returns None — presumably intentional; confirm upstream.
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
cdef _cast_statistic_raw_max(CStatistics* statistics):
    # Mirror of _cast_statistic_raw_min for the max value: downcast the
    # generic CStatistics pointer according to the physical type.
    cdef ParquetType physical_type = statistics.physical_type()
    # Needed only for the FIXED_LEN_BYTE_ARRAY branch.
    cdef uint32_t type_length = statistics.descr().type_length()
    if physical_type == ParquetType_BOOLEAN:
        return (<CBoolStatistics*> statistics).max()
    elif physical_type == ParquetType_INT32:
        return (<CInt32Statistics*> statistics).max()
    elif physical_type == ParquetType_INT64:
        return (<CInt64Statistics*> statistics).max()
    elif physical_type == ParquetType_FLOAT:
        return (<CFloatStatistics*> statistics).max()
    elif physical_type == ParquetType_DOUBLE:
        return (<CDoubleStatistics*> statistics).max()
    elif physical_type == ParquetType_BYTE_ARRAY:
        return _box_byte_array((<CByteArrayStatistics*> statistics).max())
    elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY:
        return _box_flba((<CFLBAStatistics*> statistics).max(), type_length)
    # NOTE(review): unhandled physical types implicitly return None, as in
    # _cast_statistic_raw_min.
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
cdef _cast_statistics(CStatistics* statistics):
    # Convert raw min/max statistics into Arrow scalars of the column's
    # logical type (dates, decimals, ...) via the C++ StatisticsAsScalars
    # helper, which fills the two output shared_ptrs.
    cdef:
        shared_ptr[CScalar] c_min
        shared_ptr[CScalar] c_max
    check_status(StatisticsAsScalars(statistics[0], &c_min, &c_max))
    # Returns a (min, max) pair of pyarrow Scalar objects.
    return (pyarrow_wrap_scalar(c_min), pyarrow_wrap_scalar(c_max))
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
cdef _box_byte_array(ParquetByteArray val):
    # Copy a C-level (ptr, len) ByteArray value into a Python bytes object.
    return cp.PyBytes_FromStringAndSize(<char*> val.ptr, <Py_ssize_t> val.len)
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
cdef _box_flba(ParquetFLBA val, uint32_t len):
    # Copy a fixed-length byte array into Python bytes. FLBA values do not
    # carry a length, so the caller supplies the column's type_length.
    return cp.PyBytes_FromStringAndSize(<char*> val.ptr, <Py_ssize_t> len)
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
cdef class ColumnChunkMetaData(_Weakrefable):
    """Column metadata for a single row group.

    Wraps a C++ ``parquet::ColumnChunkMetaData`` held in ``self.metadata``
    (declared in the companion ``.pxd`` file — not visible in this chunk).
    Instances are created via ``RowGroupMetaData.column``.
    """

    def __cinit__(self):
        # The C++ metadata pointer is injected via a cdef init() after
        # construction; no Python-level arguments.
        pass

    def __repr__(self):
        # Indent the nested Statistics repr so it reads as a sub-section.
        statistics = indent(repr(self.statistics), 4 * ' ')
        return """{0}
  file_offset: {1}
  file_path: {2}
  physical_type: {3}
  num_values: {4}
  path_in_schema: {5}
  is_stats_set: {6}
  statistics:
{7}
  compression: {8}
  encodings: {9}
  has_dictionary_page: {10}
  dictionary_page_offset: {11}
  data_page_offset: {12}
  total_compressed_size: {13}
  total_uncompressed_size: {14}""".format(object.__repr__(self),
                                          self.file_offset,
                                          self.file_path,
                                          self.physical_type,
                                          self.num_values,
                                          self.path_in_schema,
                                          self.is_stats_set,
                                          statistics,
                                          self.compression,
                                          self.encodings,
                                          self.has_dictionary_page,
                                          self.dictionary_page_offset,
                                          self.data_page_offset,
                                          self.total_compressed_size,
                                          self.total_uncompressed_size)

    def to_dict(self):
        """
        Get dictionary representation of the column chunk metadata.

        Returns
        -------
        dict
            Dictionary with a key for each attribute of this class.
        """
        # statistics is None when the writer recorded no stats for this
        # chunk; mirror that in the dict.
        statistics = self.statistics.to_dict() if self.is_stats_set else None
        d = dict(
            file_offset=self.file_offset,
            file_path=self.file_path,
            physical_type=self.physical_type,
            num_values=self.num_values,
            path_in_schema=self.path_in_schema,
            is_stats_set=self.is_stats_set,
            statistics=statistics,
            compression=self.compression,
            encodings=self.encodings,
            has_dictionary_page=self.has_dictionary_page,
            dictionary_page_offset=self.dictionary_page_offset,
            data_page_offset=self.data_page_offset,
            total_compressed_size=self.total_compressed_size,
            total_uncompressed_size=self.total_uncompressed_size
        )
        return d

    def __eq__(self, other):
        # equals() raises TypeError for a non-ColumnChunkMetaData operand;
        # translate to NotImplemented per the comparison protocol.
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def equals(self, ColumnChunkMetaData other):
        """
        Return whether the two column chunk metadata objects are equal.

        Parameters
        ----------
        other : ColumnChunkMetaData
            Metadata to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self.metadata.Equals(deref(other.metadata))

    @property
    def file_offset(self):
        """Offset into file where column chunk is located (int)."""
        return self.metadata.file_offset()

    @property
    def file_path(self):
        """Optional file path if set (str or None)."""
        return frombytes(self.metadata.file_path())

    @property
    def physical_type(self):
        """Physical type of column (str)."""
        return physical_type_name_from_enum(self.metadata.type())

    @property
    def num_values(self):
        """Total number of values (int)."""
        return self.metadata.num_values()

    @property
    def path_in_schema(self):
        """Nested path to field, separated by periods (str)."""
        path = self.metadata.path_in_schema().get().ToDotString()
        return frombytes(path)

    @property
    def is_stats_set(self):
        """Whether or not statistics are present in metadata (bool)."""
        return self.metadata.is_stats_set()

    @property
    def statistics(self):
        """Statistics for column chunk (:class:`Statistics`), or None."""
        if not self.metadata.is_stats_set():
            return None
        statistics = Statistics()
        # Pass self as parent so the Statistics object keeps this chunk's
        # metadata (and its C++ resources) alive.
        statistics.init(self.metadata.statistics(), self)
        return statistics

    @property
    def compression(self):
        """
        Type of compression used for column (str).

        One of 'UNCOMPRESSED', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', 'ZSTD',
        or 'UNKNOWN'.
        """
        return compression_name_from_enum(self.metadata.compression())

    @property
    def encodings(self):
        """
        Encodings used for column (tuple of str).

        One of 'PLAIN', 'BIT_PACKED', 'RLE', 'BYTE_STREAM_SPLIT', 'DELTA_BINARY_PACKED',
        'DELTA_LENGTH_BYTE_ARRAY', 'DELTA_BYTE_ARRAY'.
        """
        return tuple(map(encoding_name_from_enum, self.metadata.encodings()))

    @property
    def has_dictionary_page(self):
        """Whether there is dictionary data present in the column chunk (bool)."""
        return bool(self.metadata.has_dictionary_page())

    @property
    def dictionary_page_offset(self):
        """Offset of dictionary page relative to column chunk offset (int or None)."""
        if self.has_dictionary_page:
            return self.metadata.dictionary_page_offset()
        else:
            return None

    @property
    def data_page_offset(self):
        """Offset of data page relative to column chunk offset (int)."""
        return self.metadata.data_page_offset()

    @property
    def has_index_page(self):
        """Not yet supported."""
        raise NotImplementedError('not supported in parquet-cpp')

    @property
    def index_page_offset(self):
        """Not yet supported."""
        raise NotImplementedError("parquet-cpp doesn't return valid values")

    @property
    def total_compressed_size(self):
        """Compressed size in bytes (int)."""
        return self.metadata.total_compressed_size()

    @property
    def total_uncompressed_size(self):
        """Uncompressed size in bytes (int)."""
        return self.metadata.total_uncompressed_size()

    @property
    def has_offset_index(self):
        """Whether the column chunk has an offset index"""
        return self.metadata.GetOffsetIndexLocation().has_value()

    @property
    def has_column_index(self):
        """Whether the column chunk has a column index"""
        return self.metadata.GetColumnIndexLocation().has_value()
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
cdef class SortingColumn:
    """
    Sorting specification for a single column.

    Returned by :meth:`RowGroupMetaData.sorting_columns` and used in
    :class:`ParquetWriter` to specify the sort order of the data.

    Parameters
    ----------
    column_index : int
        Index of column that data is sorted by.
    descending : bool, default False
        Whether column is sorted in descending order.
    nulls_first : bool, default False
        Whether null values appear before valid values.

    Notes
    -----

    Column indices are zero-based, refer only to leaf fields, and are in
    depth-first order. This may make the column indices for nested schemas
    different from what you expect. In most cases, it will be easier to
    specify the sort order using column names instead of column indices
    and converting using the ``from_ordering`` method.

    Examples
    --------

    In other APIs, sort order is specified by names, such as:

    >>> sort_order = [('id', 'ascending'), ('timestamp', 'descending')]

    For Parquet, the column index must be used instead:

    >>> import pyarrow.parquet as pq
    >>> [pq.SortingColumn(0), pq.SortingColumn(1, descending=True)]
    [SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False)]

    Convert the sort_order into the list of sorting columns with
    ``from_ordering`` (note that the schema must be provided as well):

    >>> import pyarrow as pa
    >>> schema = pa.schema([('id', pa.int64()), ('timestamp', pa.timestamp('ms'))])
    >>> sorting_columns = pq.SortingColumn.from_ordering(schema, sort_order)
    >>> sorting_columns
    (SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False))

    Convert back to the sort order with ``to_ordering``:

    >>> pq.SortingColumn.to_ordering(schema, sorting_columns)
    ((('id', 'ascending'), ('timestamp', 'descending')), 'at_end')

    See Also
    --------
    RowGroupMetaData.sorting_columns
    """
    # C-level storage; exposed read-only through the properties below.
    cdef int column_index
    cdef c_bool descending
    cdef c_bool nulls_first

    def __init__(self, int column_index, c_bool descending=False, c_bool nulls_first=False):
        self.column_index = column_index
        self.descending = descending
        self.nulls_first = nulls_first

    @classmethod
    def from_ordering(cls, Schema schema, sort_keys, null_placement='at_end'):
        """
        Create a tuple of SortingColumn objects from the same arguments as
        :class:`pyarrow.compute.SortOptions`.

        Parameters
        ----------
        schema : Schema
            Schema of the input data.
        sort_keys : Sequence of (name, order) tuples
            Names of field/column keys (str) to sort the input on,
            along with the order each field/column is sorted in.
            Accepted values for `order` are "ascending", "descending".
        null_placement : {'at_start', 'at_end'}, default 'at_end'
            Where null values should appear in the sort order.

        Returns
        -------
        sorting_columns : tuple of SortingColumn
        """
        # Validate placement up front and derive the shared nulls_first flag.
        if null_placement not in ('at_start', 'at_end'):
            raise ValueError('null_placement must be "at_start" or "at_end"')
        nulls_first = null_placement == 'at_start'

        col_map = _name_to_index_map(schema)

        columns = []
        for key in sort_keys:
            # A bare string means "sort ascending on this column".
            if isinstance(key, str):
                name, descending = key, False
            elif (isinstance(key, tuple) and len(key) == 2 and
                  isinstance(key[0], str) and
                  isinstance(key[1], str)):
                name, order = key
                if order not in ("ascending", "descending"):
                    raise ValueError("Invalid sort key direction: {0}"
                                     .format(order))
                descending = order == "descending"
            else:
                raise ValueError("Invalid sort key: {0}".format(key))

            # Translate the column name to its leaf-field index.
            if name not in col_map:
                raise ValueError("Sort key name '{0}' not found in schema:\n{1}"
                                 .format(name, schema))

            columns.append(
                cls(col_map[name], descending=descending, nulls_first=nulls_first)
            )

        return tuple(columns)

    @staticmethod
    def to_ordering(Schema schema, sorting_columns):
        """
        Convert a tuple of SortingColumn objects to the same format as
        :class:`pyarrow.compute.SortOptions`.

        Parameters
        ----------
        schema : Schema
            Schema of the input data.
        sorting_columns : tuple of SortingColumn
            Columns to sort the input on.

        Returns
        -------
        sort_keys : tuple of (name, order) tuples
        null_placement : {'at_start', 'at_end'}
        """
        # Invert the name->index map to recover column names.
        index_to_name = {idx: name
                         for name, idx in _name_to_index_map(schema).items()}

        sort_keys = []
        nulls_first = None

        for col in sorting_columns:
            direction = "descending" if col.descending else "ascending"
            sort_keys.append((index_to_name[col.column_index], direction))
            # All columns must agree on null placement; remember the first
            # and reject any mismatch.
            if nulls_first is None:
                nulls_first = col.nulls_first
            elif nulls_first != col.nulls_first:
                raise ValueError("Sorting columns have inconsistent null placement")

        null_placement = "at_start" if nulls_first else "at_end"
        return tuple(sort_keys), null_placement

    def __repr__(self):
        return ("{}(column_index={}, descending={}, nulls_first={})"
                .format(type(self).__name__, self.column_index,
                        self.descending, self.nulls_first))

    def __eq__(self, SortingColumn other):
        return (self.column_index == other.column_index and
                self.descending == other.descending and
                self.nulls_first == other.nulls_first)

    def __hash__(self):
        return hash((self.column_index, self.descending, self.nulls_first))

    @property
    def column_index(self):
        """Index of column data is sorted by (int)."""
        return self.column_index

    @property
    def descending(self):
        """Whether column is sorted in descending order (bool)."""
        return self.descending

    @property
    def nulls_first(self):
        """Whether null values appear before valid values (bool)."""
        return self.nulls_first

    def to_dict(self):
        """
        Get dictionary representation of the SortingColumn.

        Returns
        -------
        dict
            Dictionary with a key for each attribute of this class.
        """
        return {
            'column_index': self.column_index,
            'descending': self.descending,
            'nulls_first': self.nulls_first,
        }
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
cdef class RowGroupMetaData(_Weakrefable):
    """Metadata for a single row group.

    Wraps a C++ ``parquet::RowGroupMetaData``; attributes (``up_metadata``,
    ``metadata``, ``parent``, ``index``) are declared in the companion
    ``.pxd`` file — not visible in this chunk.
    """

    def __cinit__(self, FileMetaData parent, int index):
        if index < 0 or index >= parent.num_row_groups:
            raise IndexError('{0} out of bounds'.format(index))
        # up_metadata owns the C++ object; metadata is the raw pointer used
        # for access. Keeping parent alive presumably keeps the underlying
        # file metadata valid for this object's lifetime.
        self.up_metadata = parent._metadata.RowGroup(index)
        self.metadata = self.up_metadata.get()
        self.parent = parent
        self.index = index

    def __reduce__(self):
        # Pickle support: rebuild from the (picklable) parent FileMetaData
        # and the row-group index.
        return RowGroupMetaData, (self.parent, self.index)

    def __eq__(self, other):
        # equals() raises TypeError for a non-RowGroupMetaData operand;
        # translate to NotImplemented per the comparison protocol.
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def equals(self, RowGroupMetaData other):
        """
        Return whether the two row group metadata objects are equal.

        Parameters
        ----------
        other : RowGroupMetaData
            Metadata to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self.metadata.Equals(deref(other.metadata))

    def column(self, int i):
        """
        Get column metadata at given index.

        Parameters
        ----------
        i : int
            Index of column to get metadata for.

        Returns
        -------
        ColumnChunkMetaData
            Metadata for column within this chunk.

        Raises
        ------
        IndexError
            If ``i`` is out of bounds.
        """
        if i < 0 or i >= self.num_columns:
            raise IndexError('{0} out of bounds'.format(i))
        chunk = ColumnChunkMetaData()
        chunk.init(self, i)
        return chunk

    def __repr__(self):
        return """{0}
  num_columns: {1}
  num_rows: {2}
  total_byte_size: {3}
  sorting_columns: {4}""".format(object.__repr__(self),
                                 self.num_columns,
                                 self.num_rows,
                                 self.total_byte_size,
                                 self.sorting_columns)

    def to_dict(self):
        """
        Get dictionary representation of the row group metadata.

        Returns
        -------
        dict
            Dictionary with a key for each attribute of this class.
        """
        columns = []
        d = dict(
            num_columns=self.num_columns,
            num_rows=self.num_rows,
            total_byte_size=self.total_byte_size,
            columns=columns,
            sorting_columns=[col.to_dict() for col in self.sorting_columns]
        )
        # Fill the columns list after building d; d holds a reference to it.
        for i in range(self.num_columns):
            columns.append(self.column(i).to_dict())
        return d

    @property
    def num_columns(self):
        """Number of columns in this row group (int)."""
        return self.metadata.num_columns()

    @property
    def num_rows(self):
        """Number of rows in this row group (int)."""
        return self.metadata.num_rows()

    @property
    def total_byte_size(self):
        """Total byte size of all the uncompressed column data in this row group (int)."""
        return self.metadata.total_byte_size()

    @property
    def sorting_columns(self):
        """Columns the row group is sorted by (tuple of :class:`SortingColumn`)."""
        out = []
        # Copy the C++ sorting-column structs into Python SortingColumn
        # objects.
        cdef vector[CSortingColumn] sorting_columns = self.metadata.sorting_columns()
        for sorting_col in sorting_columns:
            out.append(SortingColumn(
                sorting_col.column_idx,
                sorting_col.descending,
                sorting_col.nulls_first
            ))
        return tuple(out)
|
| 840 |
+
|
| 841 |
+
|
| 842 |
+
def _reconstruct_filemetadata(Buffer serialized):
    # Unpickling helper for FileMetaData.__reduce__: rebuild a FileMetaData
    # from its thrift-serialized footer bytes.
    cdef:
        # __new__ bypasses __cinit__/__init__; the C++ object is injected
        # via init() below.
        FileMetaData metadata = FileMetaData.__new__(FileMetaData)
        CBuffer *buffer = serialized.buffer.get()
        # In/out parameter for CFileMetaData_Make: the available byte count.
        uint32_t metadata_len = <uint32_t>buffer.size()

    metadata.init(CFileMetaData_Make(buffer.data(), &metadata_len))

    return metadata
|
| 851 |
+
|
| 852 |
+
|
| 853 |
+
cdef class FileMetaData(_Weakrefable):
    """Parquet metadata for a single file."""

    def __cinit__(self):
        # State is injected via init() (see _reconstruct_filemetadata and
        # ParquetReader.open), not via a Python-level constructor.
        pass

    def __reduce__(self):
        # Pickle support: write the metadata out in its thrift form and
        # reconstruct from those bytes on unpickling.
        cdef:
            NativeFile sink = BufferOutputStream()
            COutputStream* c_sink = sink.get_output_stream().get()
        with nogil:
            self._metadata.WriteTo(c_sink)

        cdef Buffer buffer = sink.getvalue()
        return _reconstruct_filemetadata, (buffer,)

    def __hash__(self):
        # Hash on the same summary attributes shown in __repr__; consistent
        # with equals() for metadata objects that compare equal.
        return hash((self.schema,
                     self.num_rows,
                     self.num_row_groups,
                     self.format_version,
                     self.serialized_size))

    def __repr__(self):
        return """{0}
  created_by: {1}
  num_columns: {2}
  num_rows: {3}
  num_row_groups: {4}
  format_version: {5}
  serialized_size: {6}""".format(object.__repr__(self),
                                 self.created_by, self.num_columns,
                                 self.num_rows, self.num_row_groups,
                                 self.format_version,
                                 self.serialized_size)

    def to_dict(self):
        """
        Get dictionary representation of the file metadata.

        Returns
        -------
        dict
            Dictionary with a key for each attribute of this class.
        """
        row_groups = []
        # row_groups list is referenced by the dict and filled in afterwards.
        d = dict(
            created_by=self.created_by,
            num_columns=self.num_columns,
            num_rows=self.num_rows,
            num_row_groups=self.num_row_groups,
            row_groups=row_groups,
            format_version=self.format_version,
            serialized_size=self.serialized_size
        )
        for i in range(self.num_row_groups):
            row_groups.append(self.row_group(i).to_dict())
        return d

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            # `other` is not a FileMetaData; defer to the other operand.
            return NotImplemented

    def equals(self, FileMetaData other not None):
        """
        Return whether the two file metadata objects are equal.

        Parameters
        ----------
        other : FileMetaData
            Metadata to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self._metadata.Equals(deref(other._metadata))

    @property
    def schema(self):
        """Schema of the file (:class:`ParquetSchema`)."""
        # Lazily constructed and cached on first access.
        if self._schema is None:
            self._schema = ParquetSchema(self)
        return self._schema

    @property
    def serialized_size(self):
        """Size of the original thrift encoded metadata footer (int)."""
        return self._metadata.size()

    @property
    def num_columns(self):
        """Number of columns in file (int)."""
        return self._metadata.num_columns()

    @property
    def num_rows(self):
        """Total number of rows in file (int)."""
        return self._metadata.num_rows()

    @property
    def num_row_groups(self):
        """Number of row groups in file (int)."""
        return self._metadata.num_row_groups()

    @property
    def format_version(self):
        """
        Parquet format version used in file (str, such as '1.0', '2.4').

        If version is missing or unparsable, will default to assuming '2.6'.
        """
        cdef ParquetVersion version = self._metadata.version()
        if version == ParquetVersion_V1:
            return '1.0'
        elif version == ParquetVersion_V2_0:
            return 'pseudo-2.0'
        elif version == ParquetVersion_V2_4:
            return '2.4'
        elif version == ParquetVersion_V2_6:
            return '2.6'
        else:
            warnings.warn('Unrecognized file version, assuming 2.6: {}'
                          .format(version))
            return '2.6'

    @property
    def created_by(self):
        """
        String describing source of the parquet file (str).

        This typically includes library name and version number. For example, Arrow 7.0's
        writer returns 'parquet-cpp-arrow version 7.0.0'.
        """
        return frombytes(self._metadata.created_by())

    @property
    def metadata(self):
        """Additional metadata as key value pairs (dict[bytes, bytes])."""
        cdef:
            unordered_map[c_string, c_string] metadata
            const CKeyValueMetadata* underlying_metadata
        underlying_metadata = self._metadata.key_value_metadata().get()
        # Files without key-value metadata return None rather than {}.
        if underlying_metadata != NULL:
            underlying_metadata.ToUnorderedMap(&metadata)
            return metadata
        else:
            return None

    def row_group(self, int i):
        """
        Get metadata for row group at index i.

        Parameters
        ----------
        i : int
            Row group index to get.

        Returns
        -------
        row_group_metadata : RowGroupMetaData
        """
        return RowGroupMetaData(self, i)

    def set_file_path(self, path):
        """
        Set ColumnChunk file paths to the given value.

        This method modifies the ``file_path`` field of each ColumnChunk
        in the FileMetaData to be a particular value.

        Parameters
        ----------
        path : str
            The file path to set on all ColumnChunks.
        """
        cdef:
            c_string c_path = tobytes(path)
        self._metadata.set_file_path(c_path)

    def append_row_groups(self, FileMetaData other):
        """
        Append row groups from other FileMetaData object.

        Parameters
        ----------
        other : FileMetaData
            Other metadata to append row groups from.
        """
        cdef shared_ptr[CFileMetaData] c_metadata

        c_metadata = other.sp_metadata
        self._metadata.AppendRowGroups(deref(c_metadata))

    def write_metadata_file(self, where):
        """
        Write the metadata to a metadata-only Parquet file.

        Parameters
        ----------
        where : path or file-like object
            Where to write the metadata. Should be a writable path on
            the local filesystem, or a writable file-like object.
        """
        cdef:
            shared_ptr[COutputStream] sink
            c_string c_where

        try:
            where = _stringify_path(where)
        except TypeError:
            # Not path-like: treat `where` as a writable file-like object.
            get_writer(where, &sink)
        else:
            c_where = tobytes(where)
            with nogil:
                sink = GetResultValue(FileOutputStream.Open(c_where))

        with nogil:
            check_status(
                WriteMetaDataFile(deref(self._metadata), sink.get()))
|
| 1075 |
+
|
| 1076 |
+
|
| 1077 |
+
cdef class ParquetSchema(_Weakrefable):
    """A Parquet schema."""

    def __cinit__(self, FileMetaData container):
        # Keep a reference to the owning FileMetaData so the underlying
        # C++ schema pointer stays valid for this object's lifetime.
        self.parent = container
        self.schema = container._metadata.schema()

    def __repr__(self):
        return "{0}\n{1}".format(
            object.__repr__(self),
            frombytes(self.schema.ToString(), safe=True))

    def __reduce__(self):
        # Pickle by re-deriving the schema from the (picklable) parent.
        return ParquetSchema, (self.parent,)

    def __len__(self):
        return self.schema.num_columns()

    def __getitem__(self, i):
        return self.column(i)

    def __hash__(self):
        return hash(self.schema.ToString())

    @property
    def names(self):
        """Name of each field (list of str)."""
        return [self[i].name for i in range(len(self))]

    def to_arrow_schema(self):
        """
        Convert Parquet schema to effective Arrow schema.

        Returns
        -------
        schema : Schema
        """
        cdef shared_ptr[CSchema] sp_arrow_schema

        with nogil:
            check_status(FromParquetSchema(
                self.schema, default_arrow_reader_properties(),
                self.parent._metadata.key_value_metadata(),
                &sp_arrow_schema))

        return pyarrow_wrap_schema(sp_arrow_schema)

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            # `other` is not a ParquetSchema; defer to the other operand.
            return NotImplemented

    def equals(self, ParquetSchema other):
        """
        Return whether the two schemas are equal.

        Parameters
        ----------
        other : ParquetSchema
            Schema to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self.schema.Equals(deref(other.schema))

    def column(self, i):
        """
        Return the schema for a single column.

        Parameters
        ----------
        i : int
            Index of column in schema.

        Returns
        -------
        column_schema : ColumnSchema
        """
        if i < 0 or i >= len(self):
            raise IndexError('{0} out of bounds'.format(i))

        return ColumnSchema(self, i)
|
| 1162 |
+
|
| 1163 |
+
|
| 1164 |
+
cdef class ColumnSchema(_Weakrefable):
    """Schema for a single column."""
    cdef:
        int index                       # column position, kept for pickling
        ParquetSchema parent            # keeps the descriptor pointer alive
        const ColumnDescriptor* descr   # borrowed from parent.schema

    def __cinit__(self, ParquetSchema schema, int index):
        self.parent = schema
        self.index = index  # for pickling support
        self.descr = schema.schema.Column(index)

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            # `other` is not a ColumnSchema; defer to the other operand.
            return NotImplemented

    def __reduce__(self):
        return ColumnSchema, (self.parent, self.index)

    def equals(self, ColumnSchema other):
        """
        Return whether the two column schemas are equal.

        Parameters
        ----------
        other : ColumnSchema
            Schema to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self.descr.Equals(deref(other.descr))

    def __repr__(self):
        physical_type = self.physical_type
        converted_type = self.converted_type
        # Enrich the legacy converted-type display with type parameters
        # where they are meaningful.
        if converted_type == 'DECIMAL':
            converted_type = 'DECIMAL({0}, {1})'.format(self.precision,
                                                        self.scale)
        elif physical_type == 'FIXED_LEN_BYTE_ARRAY':
            converted_type = ('FIXED_LEN_BYTE_ARRAY(length={0})'
                              .format(self.length))

        return """<ParquetColumnSchema>
  name: {0}
  path: {1}
  max_definition_level: {2}
  max_repetition_level: {3}
  physical_type: {4}
  logical_type: {5}
  converted_type (legacy): {6}""".format(self.name, self.path,
                                         self.max_definition_level,
                                         self.max_repetition_level,
                                         physical_type,
                                         str(self.logical_type),
                                         converted_type)

    @property
    def name(self):
        """Name of field (str)."""
        return frombytes(self.descr.name())

    @property
    def path(self):
        """Nested path to field, separated by periods (str)."""
        return frombytes(self.descr.path().get().ToDotString())

    @property
    def max_definition_level(self):
        """Maximum definition level (int)."""
        return self.descr.max_definition_level()

    @property
    def max_repetition_level(self):
        """Maximum repetition level (int)."""
        return self.descr.max_repetition_level()

    @property
    def physical_type(self):
        """Name of physical type (str)."""
        return physical_type_name_from_enum(self.descr.physical_type())

    @property
    def logical_type(self):
        """Logical type of column (:class:`ParquetLogicalType`)."""
        return wrap_logical_type(self.descr.logical_type())

    @property
    def converted_type(self):
        """Legacy converted type (str or None)."""
        return converted_type_name_from_enum(self.descr.converted_type())

    # FIXED_LEN_BYTE_ARRAY attribute
    @property
    def length(self):
        """Array length if fixed length byte array type, None otherwise (int or None)."""
        return self.descr.type_length()

    # Decimal attributes
    @property
    def precision(self):
        """Precision if decimal type, None otherwise (int or None)."""
        return self.descr.type_precision()

    @property
    def scale(self):
        """Scale if decimal type, None otherwise (int or None)."""
        return self.descr.type_scale()
|
| 1275 |
+
|
| 1276 |
+
|
| 1277 |
+
cdef physical_type_name_from_enum(ParquetType type_):
    # Map a Parquet physical-type enum value to its canonical string name;
    # values outside the known set yield 'UNKNOWN'.
    names = {
        ParquetType_BOOLEAN: 'BOOLEAN',
        ParquetType_FLOAT: 'FLOAT',
        ParquetType_DOUBLE: 'DOUBLE',
        ParquetType_INT32: 'INT32',
        ParquetType_INT64: 'INT64',
        ParquetType_INT96: 'INT96',
        ParquetType_BYTE_ARRAY: 'BYTE_ARRAY',
        ParquetType_FIXED_LEN_BYTE_ARRAY: 'FIXED_LEN_BYTE_ARRAY',
    }
    return names.get(type_, 'UNKNOWN')
|
| 1288 |
+
|
| 1289 |
+
|
| 1290 |
+
cdef logical_type_name_from_enum(ParquetLogicalTypeId type_):
    # Map a Parquet logical-type enum value to its canonical string name;
    # values outside the known set yield 'UNKNOWN'.
    names = {
        ParquetLogicalType_UNDEFINED: 'UNDEFINED',
        ParquetLogicalType_NONE: 'NONE',
        ParquetLogicalType_STRING: 'STRING',
        ParquetLogicalType_ENUM: 'ENUM',
        ParquetLogicalType_MAP: 'MAP',
        ParquetLogicalType_LIST: 'LIST',
        ParquetLogicalType_DECIMAL: 'DECIMAL',
        ParquetLogicalType_DATE: 'DATE',
        ParquetLogicalType_TIME: 'TIME',
        ParquetLogicalType_TIMESTAMP: 'TIMESTAMP',
        ParquetLogicalType_INT: 'INT',
        ParquetLogicalType_FLOAT16: 'FLOAT16',
        ParquetLogicalType_JSON: 'JSON',
        ParquetLogicalType_BSON: 'BSON',
        ParquetLogicalType_UUID: 'UUID',
    }
    return names.get(type_, 'UNKNOWN')
|
| 1308 |
+
|
| 1309 |
+
|
| 1310 |
+
cdef converted_type_name_from_enum(ParquetConvertedType type_):
    # Map a legacy Parquet converted-type enum value to its canonical
    # string name; values outside the known set yield 'UNKNOWN'.
    names = {
        ParquetConvertedType_NONE: 'NONE',
        ParquetConvertedType_UTF8: 'UTF8',
        ParquetConvertedType_ENUM: 'ENUM',
        ParquetConvertedType_MAP: 'MAP',
        ParquetConvertedType_MAP_KEY_VALUE: 'MAP_KEY_VALUE',
        ParquetConvertedType_LIST: 'LIST',
        ParquetConvertedType_DECIMAL: 'DECIMAL',
        ParquetConvertedType_DATE: 'DATE',
        ParquetConvertedType_TIME_MILLIS: 'TIME_MILLIS',
        ParquetConvertedType_TIME_MICROS: 'TIME_MICROS',
        ParquetConvertedType_TIMESTAMP_MILLIS: 'TIMESTAMP_MILLIS',
        ParquetConvertedType_TIMESTAMP_MICROS: 'TIMESTAMP_MICROS',
        ParquetConvertedType_UINT_8: 'UINT_8',
        ParquetConvertedType_UINT_16: 'UINT_16',
        ParquetConvertedType_UINT_32: 'UINT_32',
        ParquetConvertedType_UINT_64: 'UINT_64',
        ParquetConvertedType_INT_8: 'INT_8',
        ParquetConvertedType_INT_16: 'INT_16',
        ParquetConvertedType_INT_32: 'INT_32',
        ParquetConvertedType_INT_64: 'INT_64',
        ParquetConvertedType_JSON: 'JSON',
        ParquetConvertedType_BSON: 'BSON',
        ParquetConvertedType_INTERVAL: 'INTERVAL',
    }
    return names.get(type_, 'UNKNOWN')
|
| 1336 |
+
|
| 1337 |
+
|
| 1338 |
+
cdef encoding_name_from_enum(ParquetEncoding encoding_):
    # Map a Parquet encoding enum value to its canonical string name;
    # values outside the known set yield 'UNKNOWN'.
    names = {
        ParquetEncoding_PLAIN: 'PLAIN',
        ParquetEncoding_RLE: 'RLE',
        ParquetEncoding_BIT_PACKED: 'BIT_PACKED',
        ParquetEncoding_PLAIN_DICTIONARY: 'PLAIN_DICTIONARY',
        ParquetEncoding_RLE_DICTIONARY: 'RLE_DICTIONARY',
        ParquetEncoding_DELTA_BINARY_PACKED: 'DELTA_BINARY_PACKED',
        ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY: 'DELTA_LENGTH_BYTE_ARRAY',
        ParquetEncoding_DELTA_BYTE_ARRAY: 'DELTA_BYTE_ARRAY',
        ParquetEncoding_BYTE_STREAM_SPLIT: 'BYTE_STREAM_SPLIT',
    }
    return names.get(encoding_, 'UNKNOWN')
|
| 1350 |
+
|
| 1351 |
+
|
| 1352 |
+
cdef encoding_enum_from_name(str encoding_name):
    # Resolve a user-supplied encoding name to its enum value.
    # Dictionary encodings are applied by the writer automatically, so
    # asking for them explicitly is rejected.
    if encoding_name in ('RLE_DICTIONARY', 'PLAIN_DICTIONARY'):
        raise ValueError(f"{encoding_name!r} is already used by default.")

    supported = {
        'PLAIN': ParquetEncoding_PLAIN,
        'BIT_PACKED': ParquetEncoding_BIT_PACKED,
        'RLE': ParquetEncoding_RLE,
        'BYTE_STREAM_SPLIT': ParquetEncoding_BYTE_STREAM_SPLIT,
        'DELTA_BINARY_PACKED': ParquetEncoding_DELTA_BINARY_PACKED,
        'DELTA_LENGTH_BYTE_ARRAY': ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY,
        'DELTA_BYTE_ARRAY': ParquetEncoding_DELTA_BYTE_ARRAY,
    }
    if encoding_name not in supported:
        raise ValueError(f"Unsupported column encoding: {encoding_name!r}")
    return supported[encoding_name]
|
| 1370 |
+
|
| 1371 |
+
|
| 1372 |
+
cdef compression_name_from_enum(ParquetCompression compression_):
    # Map a Parquet compression enum value to its canonical string name;
    # values outside the known set yield 'UNKNOWN'.
    names = {
        ParquetCompression_UNCOMPRESSED: 'UNCOMPRESSED',
        ParquetCompression_SNAPPY: 'SNAPPY',
        ParquetCompression_GZIP: 'GZIP',
        ParquetCompression_BROTLI: 'BROTLI',
        ParquetCompression_LZO: 'LZO',
        ParquetCompression_LZ4: 'LZ4',
        ParquetCompression_ZSTD: 'ZSTD',
    }
    return names.get(compression_, 'UNKNOWN')
|
| 1382 |
+
|
| 1383 |
+
|
| 1384 |
+
cdef int check_compression_name(name) except -1:
    # Validate a user-supplied compression codec name (case-insensitive).
    # Returns 0 on success; raises ArrowException for unsupported codecs.
    supported = {'NONE', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', 'ZSTD'}
    if name.upper() in supported:
        return 0
    raise ArrowException("Unsupported compression: " + name)
|
| 1389 |
+
|
| 1390 |
+
|
| 1391 |
+
cdef ParquetCompression compression_from_name(name):
    # Convert a codec name (case-insensitive) to the Parquet enum value.
    # Unknown names fall back to UNCOMPRESSED; user input is expected to
    # have been validated by check_compression_name beforehand.
    codecs = {
        'SNAPPY': ParquetCompression_SNAPPY,
        'GZIP': ParquetCompression_GZIP,
        'LZO': ParquetCompression_LZO,
        'BROTLI': ParquetCompression_BROTLI,
        'LZ4': ParquetCompression_LZ4,
        'ZSTD': ParquetCompression_ZSTD,
    }
    return codecs.get(name.upper(), ParquetCompression_UNCOMPRESSED)
|
| 1407 |
+
|
| 1408 |
+
|
| 1409 |
+
cdef class ParquetReader(_Weakrefable):
|
| 1410 |
+
cdef:
|
| 1411 |
+
object source
|
| 1412 |
+
CMemoryPool* pool
|
| 1413 |
+
UniquePtrNoGIL[FileReader] reader
|
| 1414 |
+
FileMetaData _metadata
|
| 1415 |
+
shared_ptr[CRandomAccessFile] rd_handle
|
| 1416 |
+
|
| 1417 |
+
cdef public:
|
| 1418 |
+
_column_idx_map
|
| 1419 |
+
|
| 1420 |
+
def __cinit__(self, MemoryPool memory_pool=None):
|
| 1421 |
+
self.pool = maybe_unbox_memory_pool(memory_pool)
|
| 1422 |
+
self._metadata = None
|
| 1423 |
+
|
| 1424 |
+
def open(self, object source not None, *, bint use_memory_map=False,
|
| 1425 |
+
read_dictionary=None, FileMetaData metadata=None,
|
| 1426 |
+
int buffer_size=0, bint pre_buffer=False,
|
| 1427 |
+
coerce_int96_timestamp_unit=None,
|
| 1428 |
+
FileDecryptionProperties decryption_properties=None,
|
| 1429 |
+
thrift_string_size_limit=None,
|
| 1430 |
+
thrift_container_size_limit=None,
|
| 1431 |
+
page_checksum_verification=False):
|
| 1432 |
+
"""
|
| 1433 |
+
Open a parquet file for reading.
|
| 1434 |
+
|
| 1435 |
+
Parameters
|
| 1436 |
+
----------
|
| 1437 |
+
source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
|
| 1438 |
+
use_memory_map : bool, default False
|
| 1439 |
+
read_dictionary : iterable[int or str], optional
|
| 1440 |
+
metadata : FileMetaData, optional
|
| 1441 |
+
buffer_size : int, default 0
|
| 1442 |
+
pre_buffer : bool, default False
|
| 1443 |
+
coerce_int96_timestamp_unit : str, optional
|
| 1444 |
+
decryption_properties : FileDecryptionProperties, optional
|
| 1445 |
+
thrift_string_size_limit : int, optional
|
| 1446 |
+
thrift_container_size_limit : int, optional
|
| 1447 |
+
page_checksum_verification : bool, default False
|
| 1448 |
+
"""
|
| 1449 |
+
cdef:
|
| 1450 |
+
shared_ptr[CFileMetaData] c_metadata
|
| 1451 |
+
CReaderProperties properties = default_reader_properties()
|
| 1452 |
+
ArrowReaderProperties arrow_props = (
|
| 1453 |
+
default_arrow_reader_properties())
|
| 1454 |
+
FileReaderBuilder builder
|
| 1455 |
+
|
| 1456 |
+
if pre_buffer and not is_threading_enabled():
|
| 1457 |
+
pre_buffer = False
|
| 1458 |
+
|
| 1459 |
+
if metadata is not None:
|
| 1460 |
+
c_metadata = metadata.sp_metadata
|
| 1461 |
+
|
| 1462 |
+
if buffer_size > 0:
|
| 1463 |
+
properties.enable_buffered_stream()
|
| 1464 |
+
properties.set_buffer_size(buffer_size)
|
| 1465 |
+
elif buffer_size == 0:
|
| 1466 |
+
properties.disable_buffered_stream()
|
| 1467 |
+
else:
|
| 1468 |
+
raise ValueError('Buffer size must be larger than zero')
|
| 1469 |
+
|
| 1470 |
+
if thrift_string_size_limit is not None:
|
| 1471 |
+
if thrift_string_size_limit <= 0:
|
| 1472 |
+
raise ValueError("thrift_string_size_limit "
|
| 1473 |
+
"must be larger than zero")
|
| 1474 |
+
properties.set_thrift_string_size_limit(thrift_string_size_limit)
|
| 1475 |
+
if thrift_container_size_limit is not None:
|
| 1476 |
+
if thrift_container_size_limit <= 0:
|
| 1477 |
+
raise ValueError("thrift_container_size_limit "
|
| 1478 |
+
"must be larger than zero")
|
| 1479 |
+
properties.set_thrift_container_size_limit(
|
| 1480 |
+
thrift_container_size_limit)
|
| 1481 |
+
|
| 1482 |
+
if decryption_properties is not None:
|
| 1483 |
+
properties.file_decryption_properties(
|
| 1484 |
+
decryption_properties.unwrap())
|
| 1485 |
+
|
| 1486 |
+
arrow_props.set_pre_buffer(pre_buffer)
|
| 1487 |
+
|
| 1488 |
+
properties.set_page_checksum_verification(page_checksum_verification)
|
| 1489 |
+
|
| 1490 |
+
if coerce_int96_timestamp_unit is None:
|
| 1491 |
+
# use the default defined in default_arrow_reader_properties()
|
| 1492 |
+
pass
|
| 1493 |
+
else:
|
| 1494 |
+
arrow_props.set_coerce_int96_timestamp_unit(
|
| 1495 |
+
string_to_timeunit(coerce_int96_timestamp_unit))
|
| 1496 |
+
|
| 1497 |
+
self.source = source
|
| 1498 |
+
get_reader(source, use_memory_map, &self.rd_handle)
|
| 1499 |
+
|
| 1500 |
+
with nogil:
|
| 1501 |
+
check_status(builder.Open(self.rd_handle, properties, c_metadata))
|
| 1502 |
+
|
| 1503 |
+
# Set up metadata
|
| 1504 |
+
with nogil:
|
| 1505 |
+
c_metadata = builder.raw_reader().metadata()
|
| 1506 |
+
self._metadata = result = FileMetaData()
|
| 1507 |
+
result.init(c_metadata)
|
| 1508 |
+
|
| 1509 |
+
if read_dictionary is not None:
|
| 1510 |
+
self._set_read_dictionary(read_dictionary, &arrow_props)
|
| 1511 |
+
|
| 1512 |
+
with nogil:
|
| 1513 |
+
check_status(builder.memory_pool(self.pool)
|
| 1514 |
+
.properties(arrow_props)
|
| 1515 |
+
.Build(&self.reader))
|
| 1516 |
+
|
| 1517 |
+
cdef _set_read_dictionary(self, read_dictionary,
|
| 1518 |
+
ArrowReaderProperties* props):
|
| 1519 |
+
for column in read_dictionary:
|
| 1520 |
+
if not isinstance(column, int):
|
| 1521 |
+
column = self.column_name_idx(column)
|
| 1522 |
+
props.set_read_dictionary(column, True)
|
| 1523 |
+
|
| 1524 |
+
@property
|
| 1525 |
+
def column_paths(self):
|
| 1526 |
+
cdef:
|
| 1527 |
+
FileMetaData container = self.metadata
|
| 1528 |
+
const CFileMetaData* metadata = container._metadata
|
| 1529 |
+
vector[c_string] path
|
| 1530 |
+
int i = 0
|
| 1531 |
+
|
| 1532 |
+
paths = []
|
| 1533 |
+
for i in range(0, metadata.num_columns()):
|
| 1534 |
+
path = (metadata.schema().Column(i)
|
| 1535 |
+
.path().get().ToDotVector())
|
| 1536 |
+
paths.append([frombytes(x) for x in path])
|
| 1537 |
+
|
| 1538 |
+
return paths
|
| 1539 |
+
|
| 1540 |
+
@property
|
| 1541 |
+
def metadata(self):
|
| 1542 |
+
return self._metadata
|
| 1543 |
+
|
| 1544 |
+
@property
|
| 1545 |
+
def schema_arrow(self):
|
| 1546 |
+
cdef shared_ptr[CSchema] out
|
| 1547 |
+
with nogil:
|
| 1548 |
+
check_status(self.reader.get().GetSchema(&out))
|
| 1549 |
+
return pyarrow_wrap_schema(out)
|
| 1550 |
+
|
| 1551 |
+
@property
|
| 1552 |
+
def num_row_groups(self):
|
| 1553 |
+
return self.reader.get().num_row_groups()
|
| 1554 |
+
|
| 1555 |
+
def set_use_threads(self, bint use_threads):
|
| 1556 |
+
"""
|
| 1557 |
+
Parameters
|
| 1558 |
+
----------
|
| 1559 |
+
use_threads : bool
|
| 1560 |
+
"""
|
| 1561 |
+
if is_threading_enabled():
|
| 1562 |
+
self.reader.get().set_use_threads(use_threads)
|
| 1563 |
+
else:
|
| 1564 |
+
self.reader.get().set_use_threads(False)
|
| 1565 |
+
|
| 1566 |
+
def set_batch_size(self, int64_t batch_size):
|
| 1567 |
+
"""
|
| 1568 |
+
Parameters
|
| 1569 |
+
----------
|
| 1570 |
+
batch_size : int64
|
| 1571 |
+
"""
|
| 1572 |
+
self.reader.get().set_batch_size(batch_size)
|
| 1573 |
+
|
| 1574 |
+
def iter_batches(self, int64_t batch_size, row_groups, column_indices=None,
|
| 1575 |
+
bint use_threads=True):
|
| 1576 |
+
"""
|
| 1577 |
+
Parameters
|
| 1578 |
+
----------
|
| 1579 |
+
batch_size : int64
|
| 1580 |
+
row_groups : list[int]
|
| 1581 |
+
column_indices : list[int], optional
|
| 1582 |
+
use_threads : bool, default True
|
| 1583 |
+
|
| 1584 |
+
Yields
|
| 1585 |
+
------
|
| 1586 |
+
next : RecordBatch
|
| 1587 |
+
"""
|
| 1588 |
+
cdef:
|
| 1589 |
+
vector[int] c_row_groups
|
| 1590 |
+
vector[int] c_column_indices
|
| 1591 |
+
shared_ptr[CRecordBatch] record_batch
|
| 1592 |
+
UniquePtrNoGIL[CRecordBatchReader] recordbatchreader
|
| 1593 |
+
|
| 1594 |
+
self.set_batch_size(batch_size)
|
| 1595 |
+
|
| 1596 |
+
if use_threads:
|
| 1597 |
+
self.set_use_threads(use_threads)
|
| 1598 |
+
|
| 1599 |
+
for row_group in row_groups:
|
| 1600 |
+
c_row_groups.push_back(row_group)
|
| 1601 |
+
|
| 1602 |
+
if column_indices is not None:
|
| 1603 |
+
for index in column_indices:
|
| 1604 |
+
c_column_indices.push_back(index)
|
| 1605 |
+
with nogil:
|
| 1606 |
+
check_status(
|
| 1607 |
+
self.reader.get().GetRecordBatchReader(
|
| 1608 |
+
c_row_groups, c_column_indices, &recordbatchreader
|
| 1609 |
+
)
|
| 1610 |
+
)
|
| 1611 |
+
else:
|
| 1612 |
+
with nogil:
|
| 1613 |
+
check_status(
|
| 1614 |
+
self.reader.get().GetRecordBatchReader(
|
| 1615 |
+
c_row_groups, &recordbatchreader
|
| 1616 |
+
)
|
| 1617 |
+
)
|
| 1618 |
+
|
| 1619 |
+
while True:
|
| 1620 |
+
with nogil:
|
| 1621 |
+
check_status(
|
| 1622 |
+
recordbatchreader.get().ReadNext(&record_batch)
|
| 1623 |
+
)
|
| 1624 |
+
if record_batch.get() == NULL:
|
| 1625 |
+
break
|
| 1626 |
+
|
| 1627 |
+
yield pyarrow_wrap_batch(record_batch)
|
| 1628 |
+
|
| 1629 |
+
def read_row_group(self, int i, column_indices=None,
|
| 1630 |
+
bint use_threads=True):
|
| 1631 |
+
"""
|
| 1632 |
+
Parameters
|
| 1633 |
+
----------
|
| 1634 |
+
i : int
|
| 1635 |
+
column_indices : list[int], optional
|
| 1636 |
+
use_threads : bool, default True
|
| 1637 |
+
|
| 1638 |
+
Returns
|
| 1639 |
+
-------
|
| 1640 |
+
table : pyarrow.Table
|
| 1641 |
+
"""
|
| 1642 |
+
return self.read_row_groups([i], column_indices, use_threads)
|
| 1643 |
+
|
| 1644 |
+
def read_row_groups(self, row_groups not None, column_indices=None,
|
| 1645 |
+
bint use_threads=True):
|
| 1646 |
+
"""
|
| 1647 |
+
Parameters
|
| 1648 |
+
----------
|
| 1649 |
+
row_groups : list[int]
|
| 1650 |
+
column_indices : list[int], optional
|
| 1651 |
+
use_threads : bool, default True
|
| 1652 |
+
|
| 1653 |
+
Returns
|
| 1654 |
+
-------
|
| 1655 |
+
table : pyarrow.Table
|
| 1656 |
+
"""
|
| 1657 |
+
cdef:
|
| 1658 |
+
shared_ptr[CTable] ctable
|
| 1659 |
+
vector[int] c_row_groups
|
| 1660 |
+
vector[int] c_column_indices
|
| 1661 |
+
|
| 1662 |
+
self.set_use_threads(use_threads)
|
| 1663 |
+
|
| 1664 |
+
for row_group in row_groups:
|
| 1665 |
+
c_row_groups.push_back(row_group)
|
| 1666 |
+
|
| 1667 |
+
if column_indices is not None:
|
| 1668 |
+
for index in column_indices:
|
| 1669 |
+
c_column_indices.push_back(index)
|
| 1670 |
+
|
| 1671 |
+
with nogil:
|
| 1672 |
+
check_status(self.reader.get()
|
| 1673 |
+
.ReadRowGroups(c_row_groups, c_column_indices,
|
| 1674 |
+
&ctable))
|
| 1675 |
+
else:
|
| 1676 |
+
# Read all columns
|
| 1677 |
+
with nogil:
|
| 1678 |
+
check_status(self.reader.get()
|
| 1679 |
+
.ReadRowGroups(c_row_groups, &ctable))
|
| 1680 |
+
return pyarrow_wrap_table(ctable)
|
| 1681 |
+
|
| 1682 |
+
def read_all(self, column_indices=None, bint use_threads=True):
|
| 1683 |
+
"""
|
| 1684 |
+
Parameters
|
| 1685 |
+
----------
|
| 1686 |
+
column_indices : list[int], optional
|
| 1687 |
+
use_threads : bool, default True
|
| 1688 |
+
|
| 1689 |
+
Returns
|
| 1690 |
+
-------
|
| 1691 |
+
table : pyarrow.Table
|
| 1692 |
+
"""
|
| 1693 |
+
cdef:
|
| 1694 |
+
shared_ptr[CTable] ctable
|
| 1695 |
+
vector[int] c_column_indices
|
| 1696 |
+
|
| 1697 |
+
self.set_use_threads(use_threads)
|
| 1698 |
+
|
| 1699 |
+
if column_indices is not None:
|
| 1700 |
+
for index in column_indices:
|
| 1701 |
+
c_column_indices.push_back(index)
|
| 1702 |
+
|
| 1703 |
+
with nogil:
|
| 1704 |
+
check_status(self.reader.get()
|
| 1705 |
+
.ReadTable(c_column_indices, &ctable))
|
| 1706 |
+
else:
|
| 1707 |
+
# Read all columns
|
| 1708 |
+
with nogil:
|
| 1709 |
+
check_status(self.reader.get()
|
| 1710 |
+
.ReadTable(&ctable))
|
| 1711 |
+
return pyarrow_wrap_table(ctable)
|
| 1712 |
+
|
| 1713 |
+
def scan_contents(self, column_indices=None, batch_size=65536):
|
| 1714 |
+
"""
|
| 1715 |
+
Parameters
|
| 1716 |
+
----------
|
| 1717 |
+
column_indices : list[int], optional
|
| 1718 |
+
batch_size : int32, default 65536
|
| 1719 |
+
|
| 1720 |
+
Returns
|
| 1721 |
+
-------
|
| 1722 |
+
num_rows : int64
|
| 1723 |
+
"""
|
| 1724 |
+
cdef:
|
| 1725 |
+
vector[int] c_column_indices
|
| 1726 |
+
int32_t c_batch_size
|
| 1727 |
+
int64_t c_num_rows
|
| 1728 |
+
|
| 1729 |
+
if column_indices is not None:
|
| 1730 |
+
for index in column_indices:
|
| 1731 |
+
c_column_indices.push_back(index)
|
| 1732 |
+
|
| 1733 |
+
c_batch_size = batch_size
|
| 1734 |
+
|
| 1735 |
+
with nogil:
|
| 1736 |
+
check_status(self.reader.get()
|
| 1737 |
+
.ScanContents(c_column_indices, c_batch_size,
|
| 1738 |
+
&c_num_rows))
|
| 1739 |
+
|
| 1740 |
+
return c_num_rows
|
| 1741 |
+
|
| 1742 |
+
def column_name_idx(self, column_name):
|
| 1743 |
+
"""
|
| 1744 |
+
Find the index of a column by its name.
|
| 1745 |
+
|
| 1746 |
+
Parameters
|
| 1747 |
+
----------
|
| 1748 |
+
column_name : str
|
| 1749 |
+
Name of the column; separation of nesting levels is done via ".".
|
| 1750 |
+
|
| 1751 |
+
Returns
|
| 1752 |
+
-------
|
| 1753 |
+
column_idx : int
|
| 1754 |
+
Integer index of the column in the schema.
|
| 1755 |
+
"""
|
| 1756 |
+
cdef:
|
| 1757 |
+
FileMetaData container = self.metadata
|
| 1758 |
+
const CFileMetaData* metadata = container._metadata
|
| 1759 |
+
int i = 0
|
| 1760 |
+
|
| 1761 |
+
if self._column_idx_map is None:
|
| 1762 |
+
self._column_idx_map = {}
|
| 1763 |
+
for i in range(0, metadata.num_columns()):
|
| 1764 |
+
col_bytes = tobytes(metadata.schema().Column(i)
|
| 1765 |
+
.path().get().ToDotString())
|
| 1766 |
+
self._column_idx_map[col_bytes] = i
|
| 1767 |
+
|
| 1768 |
+
return self._column_idx_map[tobytes(column_name)]
|
| 1769 |
+
|
| 1770 |
+
def read_column(self, int column_index):
    """
    Read the column at the specified index.

    Parameters
    ----------
    column_index : int
        Index of the column.

    Returns
    -------
    column : pyarrow.ChunkedArray
        The column's values as an Arrow chunked array.
    """
    cdef shared_ptr[CChunkedArray] out
    # Release the GIL for the potentially expensive decode.
    with nogil:
        check_status(self.reader.get()
                     .ReadColumn(column_index, &out))
    return pyarrow_wrap_chunked_array(out)
|
| 1788 |
+
|
| 1789 |
+
def close(self):
    """Close the underlying read handle; no-op if already closed."""
    if not self.closed:
        with nogil:
            check_status(self.rd_handle.get().Close())
|
| 1793 |
+
|
| 1794 |
+
@property
def closed(self):
    """bool: True if the underlying read handle is absent or closed."""
    # A null handle (never opened, or already released) counts as closed.
    if self.rd_handle == NULL:
        return True
    with nogil:
        closed = self.rd_handle.get().closed()
    return closed
|
| 1801 |
+
|
| 1802 |
+
|
| 1803 |
+
cdef CSortingColumn _convert_sorting_column(SortingColumn sorting_column):
    """Convert one Python SortingColumn into its C++ struct equivalent."""
    cdef CSortingColumn c_sorting_column

    c_sorting_column.column_idx = sorting_column.column_index
    c_sorting_column.descending = sorting_column.descending
    c_sorting_column.nulls_first = sorting_column.nulls_first

    return c_sorting_column
|
| 1811 |
+
|
| 1812 |
+
|
| 1813 |
+
cdef vector[CSortingColumn] _convert_sorting_columns(sorting_columns) except *:
    """Validate a sequence of SortingColumn and convert it to a C++ vector."""
    # Guard clauses: must be a Sequence, and every element a SortingColumn.
    if not isinstance(sorting_columns, Sequence):
        raise ValueError(
            "'sorting_columns' must be a list of `SortingColumn`")
    if not all(isinstance(col, SortingColumn) for col in sorting_columns):
        raise ValueError(
            "'sorting_columns' must be a list of `SortingColumn`")

    cdef vector[CSortingColumn] c_sorting_columns = [_convert_sorting_column(col)
                                                     for col in sorting_columns]

    return c_sorting_columns
|
| 1823 |
+
|
| 1824 |
+
|
| 1825 |
+
cdef shared_ptr[WriterProperties] _create_writer_properties(
        use_dictionary=None,
        compression=None,
        version=None,
        write_statistics=None,
        data_page_size=None,
        compression_level=None,
        use_byte_stream_split=False,
        column_encoding=None,
        data_page_version=None,
        FileEncryptionProperties encryption_properties=None,
        write_batch_size=None,
        dictionary_pagesize_limit=None,
        write_page_index=False,
        write_page_checksum=False,
        sorting_columns=None,
        store_decimal_as_integer=False) except *:
    """
    Build a parquet::WriterProperties from user-facing keyword options.

    Options that accept either a scalar or a per-column mapping
    (``compression``, ``compression_level``, ``use_dictionary``,
    ``write_statistics``, ``use_byte_stream_split``, ``column_encoding``)
    are dispatched on their Python type below. Invalid combinations raise
    ValueError/TypeError before anything is applied to the builder.
    """
    cdef:
        shared_ptr[WriterProperties] properties
        WriterProperties.Builder props

    # data_page_version

    if data_page_version is not None:
        if data_page_version == "1.0":
            props.data_page_version(ParquetDataPageVersion_V1)
        elif data_page_version == "2.0":
            props.data_page_version(ParquetDataPageVersion_V2)
        else:
            raise ValueError("Unsupported Parquet data page version: {0}"
                             .format(data_page_version))

    # version

    if version is not None:
        if version == "1.0":
            props.version(ParquetVersion_V1)
        elif version in ("2.0", "pseudo-2.0"):
            warnings.warn(
                "Parquet format '2.0' pseudo version is deprecated, use "
                "'2.4' or '2.6' for fine-grained feature selection",
                FutureWarning, stacklevel=2)
            props.version(ParquetVersion_V2_0)
        elif version == "2.4":
            props.version(ParquetVersion_V2_4)
        elif version == "2.6":
            props.version(ParquetVersion_V2_6)
        else:
            raise ValueError("Unsupported Parquet format version: {0}"
                             .format(version))

    # compression

    if isinstance(compression, basestring):
        check_compression_name(compression)
        props.compression(compression_from_name(compression))
    elif compression is not None:
        # Per-column codecs: mapping of column path -> codec name.
        # BUG FIX: dict.iteritems() does not exist on Python 3 and raised
        # AttributeError for any dict-valued 'compression'; use items().
        for column, codec in compression.items():
            check_compression_name(codec)
            props.compression(tobytes(column), compression_from_name(codec))

    if isinstance(compression_level, int):
        props.compression_level(compression_level)
    elif compression_level is not None:
        # BUG FIX: same Python-2 iteritems() issue as above.
        for column, level in compression_level.items():
            props.compression_level(tobytes(column), level)

    # use_dictionary

    if isinstance(use_dictionary, bool):
        if use_dictionary:
            props.enable_dictionary()
            if column_encoding is not None:
                raise ValueError(
                    "To use 'column_encoding' set 'use_dictionary' to False")
        else:
            props.disable_dictionary()
    elif use_dictionary is not None:
        # Deactivate dictionary encoding by default
        props.disable_dictionary()
        for column in use_dictionary:
            props.enable_dictionary(tobytes(column))
            if (column_encoding is not None and
                    column_encoding.get(column) is not None):
                raise ValueError(
                    "To use 'column_encoding' set 'use_dictionary' to False")

    # write_statistics

    if isinstance(write_statistics, bool):
        if write_statistics:
            props.enable_statistics()
        else:
            props.disable_statistics()
    elif write_statistics is not None:
        # Deactivate statistics by default and enable for specified columns
        props.disable_statistics()
        for column in write_statistics:
            props.enable_statistics(tobytes(column))

    # sorting_columns

    if sorting_columns is not None:
        props.set_sorting_columns(_convert_sorting_columns(sorting_columns))

    # use_byte_stream_split

    if isinstance(use_byte_stream_split, bool):
        if use_byte_stream_split:
            if column_encoding is not None:
                raise ValueError(
                    "'use_byte_stream_split' cannot be passed"
                    "together with 'column_encoding'")
            else:
                props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT)
    elif use_byte_stream_split is not None:
        # List form is folded into column_encoding, which is applied below.
        for column in use_byte_stream_split:
            if column_encoding is None:
                column_encoding = {column: 'BYTE_STREAM_SPLIT'}
            elif column_encoding.get(column, None) is None:
                column_encoding[column] = 'BYTE_STREAM_SPLIT'
            else:
                raise ValueError(
                    "'use_byte_stream_split' cannot be passed"
                    "together with 'column_encoding'")

    # store_decimal_as_integer

    if isinstance(store_decimal_as_integer, bool):
        if store_decimal_as_integer:
            props.enable_store_decimal_as_integer()
        else:
            props.disable_store_decimal_as_integer()
    else:
        raise TypeError("'store_decimal_as_integer' must be a boolean")

    # column_encoding
    # encoding map - encode individual columns

    if column_encoding is not None:
        if isinstance(column_encoding, dict):
            for column, _encoding in column_encoding.items():
                props.encoding(tobytes(column),
                               encoding_enum_from_name(_encoding))
        elif isinstance(column_encoding, str):
            props.encoding(encoding_enum_from_name(column_encoding))
        else:
            raise TypeError(
                "'column_encoding' should be a dictionary or a string")

    if data_page_size is not None:
        props.data_pagesize(data_page_size)

    if write_batch_size is not None:
        props.write_batch_size(write_batch_size)

    if dictionary_pagesize_limit is not None:
        props.dictionary_pagesize_limit(dictionary_pagesize_limit)

    # encryption

    if encryption_properties is not None:
        props.encryption(
            (<FileEncryptionProperties>encryption_properties).unwrap())

    # For backwards compatibility reasons we cap the maximum row group size
    # at 64Mi rows. This could be changed in the future, though it would be
    # a breaking change.
    #
    # The user can always specify a smaller row group size (and the default
    # is smaller) when calling write_table. If the call to write_table uses
    # a size larger than this then it will be latched to this value.
    props.max_row_group_length(_MAX_ROW_GROUP_SIZE)

    # checksum

    if write_page_checksum:
        props.enable_page_checksum()
    else:
        props.disable_page_checksum()

    # page index

    if write_page_index:
        props.enable_write_page_index()
    else:
        props.disable_write_page_index()

    properties = props.build()

    return properties
|
| 2018 |
+
|
| 2019 |
+
|
| 2020 |
+
cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties(
        use_deprecated_int96_timestamps=False,
        coerce_timestamps=None,
        allow_truncated_timestamps=False,
        writer_engine_version=None,
        use_compliant_nested_type=True,
        store_schema=True) except *:
    """
    Arrow writer properties.

    Builds a parquet::ArrowWriterProperties from keyword options.

    NOTE(review): writer_engine_version only accepts "V1" (deprecated,
    no-op) or "V2"; the default None falls through to the ValueError
    below, so callers must pass it explicitly.
    """
    cdef:
        shared_ptr[ArrowWriterProperties] arrow_properties
        ArrowWriterProperties.Builder arrow_props

    # Store the original Arrow schema so things like dictionary types can
    # be automatically reconstructed
    if store_schema:
        arrow_props.store_schema()

    # int96 support

    if use_deprecated_int96_timestamps:
        arrow_props.enable_deprecated_int96_timestamps()
    else:
        arrow_props.disable_deprecated_int96_timestamps()

    # coerce_timestamps

    if coerce_timestamps == 'ms':
        arrow_props.coerce_timestamps(TimeUnit_MILLI)
    elif coerce_timestamps == 'us':
        arrow_props.coerce_timestamps(TimeUnit_MICRO)
    elif coerce_timestamps is not None:
        raise ValueError('Invalid value for coerce_timestamps: {0}'
                         .format(coerce_timestamps))

    # allow_truncated_timestamps

    if allow_truncated_timestamps:
        arrow_props.allow_truncated_timestamps()
    else:
        arrow_props.disallow_truncated_timestamps()

    # use_compliant_nested_type

    if use_compliant_nested_type:
        arrow_props.enable_compliant_nested_types()
    else:
        arrow_props.disable_compliant_nested_types()

    # writer_engine_version

    if writer_engine_version == "V1":
        warnings.warn("V1 parquet writer engine is a no-op. Use V2.")
        arrow_props.set_engine_version(ArrowWriterEngineVersion.V1)
    elif writer_engine_version != "V2":
        raise ValueError("Unsupported Writer Engine Version: {0}"
                         .format(writer_engine_version))

    arrow_properties = arrow_props.build()

    return arrow_properties
|
| 2080 |
+
|
| 2081 |
+
cdef _name_to_index_map(Schema arrow_schema):
    """
    Map each Parquet leaf-column dotted name to its column index.

    Converts the Arrow schema to its Parquet equivalent (using default
    writer properties) and returns a dict of ``ToDotString()`` name -> int.
    """
    cdef:
        shared_ptr[CSchema] sp_arrow_schema
        shared_ptr[SchemaDescriptor] sp_parquet_schema
        shared_ptr[WriterProperties] props = _create_writer_properties()
        # writer_engine_version must be passed explicitly: its default of
        # None would raise in _create_arrow_writer_properties.
        shared_ptr[ArrowWriterProperties] arrow_props = _create_arrow_writer_properties(
            use_deprecated_int96_timestamps=False,
            coerce_timestamps=None,
            allow_truncated_timestamps=False,
            writer_engine_version="V2"
        )

    sp_arrow_schema = pyarrow_unwrap_schema(arrow_schema)

    with nogil:
        check_status(ToParquetSchema(
            sp_arrow_schema.get(), deref(props.get()), deref(arrow_props.get()), &sp_parquet_schema))

    out = dict()

    cdef SchemaDescriptor* parquet_schema = sp_parquet_schema.get()

    for i in range(parquet_schema.num_columns()):
        name = frombytes(parquet_schema.Column(i).path().get().ToDotString())
        out[name] = i

    return out
|
| 2108 |
+
|
| 2109 |
+
|
| 2110 |
+
cdef class ParquetWriter(_Weakrefable):
    """
    Low-level wrapper around parquet::arrow::FileWriter.

    Owns the output sink when it opened the file itself (own_sink=True);
    when writing to a caller-supplied Python file object, the sink is
    borrowed and only closed here if own_sink was set.
    """
    cdef:
        unique_ptr[FileWriter] writer       # underlying C++ writer
        shared_ptr[COutputStream] sink      # output stream being written
        bint own_sink                       # True if we opened the sink

    # Read-only mirrors of the constructor options (exposed to Python).
    cdef readonly:
        object use_dictionary
        object use_deprecated_int96_timestamps
        object use_byte_stream_split
        object column_encoding
        object coerce_timestamps
        object allow_truncated_timestamps
        object compression
        object compression_level
        object data_page_version
        object use_compliant_nested_type
        object version
        object write_statistics
        object writer_engine_version
        int row_group_size
        int64_t data_page_size
        FileEncryptionProperties encryption_properties
        int64_t write_batch_size
        int64_t dictionary_pagesize_limit
        object store_schema
        object store_decimal_as_integer

    def __cinit__(self, where, Schema schema not None, use_dictionary=None,
                  compression=None, version=None,
                  write_statistics=None,
                  MemoryPool memory_pool=None,
                  use_deprecated_int96_timestamps=False,
                  coerce_timestamps=None,
                  data_page_size=None,
                  allow_truncated_timestamps=False,
                  compression_level=None,
                  use_byte_stream_split=False,
                  column_encoding=None,
                  writer_engine_version=None,
                  data_page_version=None,
                  use_compliant_nested_type=True,
                  encryption_properties=None,
                  write_batch_size=None,
                  dictionary_pagesize_limit=None,
                  store_schema=True,
                  write_page_index=False,
                  write_page_checksum=False,
                  sorting_columns=None,
                  store_decimal_as_integer=False):
        cdef:
            shared_ptr[WriterProperties] properties
            shared_ptr[ArrowWriterProperties] arrow_properties
            c_string c_where
            CMemoryPool* pool

        # EAFP: if 'where' is path-like, open the file ourselves (and own
        # the sink); otherwise treat it as a Python file object and borrow.
        try:
            where = _stringify_path(where)
        except TypeError:
            get_writer(where, &self.sink)
            self.own_sink = False
        else:
            c_where = tobytes(where)
            with nogil:
                self.sink = GetResultValue(FileOutputStream.Open(c_where))
            self.own_sink = True

        properties = _create_writer_properties(
            use_dictionary=use_dictionary,
            compression=compression,
            version=version,
            write_statistics=write_statistics,
            data_page_size=data_page_size,
            compression_level=compression_level,
            use_byte_stream_split=use_byte_stream_split,
            column_encoding=column_encoding,
            data_page_version=data_page_version,
            encryption_properties=encryption_properties,
            write_batch_size=write_batch_size,
            dictionary_pagesize_limit=dictionary_pagesize_limit,
            write_page_index=write_page_index,
            write_page_checksum=write_page_checksum,
            sorting_columns=sorting_columns,
            store_decimal_as_integer=store_decimal_as_integer,
        )
        arrow_properties = _create_arrow_writer_properties(
            use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
            coerce_timestamps=coerce_timestamps,
            allow_truncated_timestamps=allow_truncated_timestamps,
            writer_engine_version=writer_engine_version,
            use_compliant_nested_type=use_compliant_nested_type,
            store_schema=store_schema,
        )

        pool = maybe_unbox_memory_pool(memory_pool)
        with nogil:
            self.writer = move(GetResultValue(
                FileWriter.Open(deref(schema.schema), pool,
                                self.sink, properties, arrow_properties)))

    def close(self):
        """Close the writer, then the sink if this writer owns it."""
        with nogil:
            check_status(self.writer.get().Close())
        if self.own_sink:
            check_status(self.sink.get().Close())

    def write_table(self, Table table, row_group_size=None):
        """
        Write an entire Table, splitting it into row groups.

        Parameters
        ----------
        table : pyarrow.Table
            The data to write.
        row_group_size : int, optional
            Maximum rows per row group. None or -1 selects the smaller of
            the table length and _DEFAULT_ROW_GROUP_SIZE; 0 is invalid.

        Raises
        ------
        ValueError
            If row_group_size is 0.
        """
        cdef:
            CTable* ctable = table.table
            int64_t c_row_group_size

        if row_group_size is None or row_group_size == -1:
            c_row_group_size = min(ctable.num_rows(), _DEFAULT_ROW_GROUP_SIZE)
        elif row_group_size == 0:
            raise ValueError('Row group size cannot be 0')
        else:
            c_row_group_size = row_group_size

        with nogil:
            check_status(self.writer.get()
                         .WriteTable(deref(ctable), c_row_group_size))

    def add_key_value_metadata(self, key_value_metadata):
        """Add key-value metadata pairs to the file being written."""
        cdef:
            shared_ptr[const CKeyValueMetadata] c_metadata

        c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(key_value_metadata))
        with nogil:
            check_status(self.writer.get()
                         .AddKeyValueMetadata(c_metadata))

    @property
    def metadata(self):
        """
        FileMetaData of the written file.

        Raises
        ------
        RuntimeError
            If accessed before the writer has been closed (the C++ writer
            only exposes metadata after close).
        """
        cdef:
            shared_ptr[CFileMetaData] metadata
            FileMetaData result
        with nogil:
            metadata = self.writer.get().metadata()
        if metadata:
            result = FileMetaData()
            result.init(metadata)
            return result
        raise RuntimeError(
            'file metadata is only available after writer close')
|
parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (84 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/pyarrow/_substrait.pyx
ADDED
|
@@ -0,0 +1,349 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# cython: language_level = 3
|
| 19 |
+
from cython.operator cimport dereference as deref
|
| 20 |
+
from libcpp.vector cimport vector as std_vector
|
| 21 |
+
|
| 22 |
+
from pyarrow import Buffer, py_buffer
|
| 23 |
+
from pyarrow._compute cimport Expression
|
| 24 |
+
from pyarrow.lib import frombytes, tobytes
|
| 25 |
+
from pyarrow.lib cimport *
|
| 26 |
+
from pyarrow.includes.libarrow cimport *
|
| 27 |
+
from pyarrow.includes.libarrow_substrait cimport *
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# TODO GH-37235: Fix exception handling
|
| 31 |
+
# TODO GH-37235: Fix exception handling
cdef CDeclaration _create_named_table_provider(
    dict named_args, const std_vector[c_string]& names, const CSchema& schema
) noexcept:
    """
    C callback bridging Substrait NamedTable resolution to Python.

    Invokes named_args["provider"](names, schema) (the user's Python
    table_provider) and wraps the returned table in a table_source
    declaration. Declared noexcept: Python exceptions raised here cannot
    propagate to the C++ caller (see TODO above).
    """
    cdef:
        c_string c_name
        shared_ptr[CTable] c_in_table
        shared_ptr[CTableSourceNodeOptions] c_tablesourceopts
        shared_ptr[CExecNodeOptions] c_input_node_opts
        vector[CDeclaration.Input] no_c_inputs

    # Convert the C++ name vector into a Python list of str.
    py_names = []
    for i in range(names.size()):
        c_name = names[i]
        py_names.append(frombytes(c_name))
    py_schema = pyarrow_wrap_schema(make_shared[CSchema](schema))

    py_table = named_args["provider"](py_names, py_schema)
    c_in_table = pyarrow_unwrap_table(py_table)
    c_tablesourceopts = make_shared[CTableSourceNodeOptions](c_in_table)
    c_input_node_opts = static_pointer_cast[CExecNodeOptions, CTableSourceNodeOptions](
        c_tablesourceopts)
    return CDeclaration(tobytes("table_source"),
                        no_c_inputs, c_input_node_opts)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def run_query(plan, *, table_provider=None, use_threads=True):
    """
    Execute a Substrait plan and read the results as a RecordBatchReader.

    Parameters
    ----------
    plan : Union[Buffer, bytes]
        The serialized Substrait plan to execute.
    table_provider : object (optional)
        A function to resolve any NamedTable relation to a table.
        The function will receive two arguments which will be a list
        of strings representing the table name and a pyarrow.Schema representing
        the expected schema and should return a pyarrow.Table.
    use_threads : bool, default True
        If True then multiple threads will be used to run the query. If False then
        all CPU intensive work will be done on the calling thread.

    Returns
    -------
    RecordBatchReader
        A reader containing the result of the executed query

    Examples
    --------
    >>> import pyarrow as pa
    >>> from pyarrow.lib import tobytes
    >>> import pyarrow.substrait as substrait
    >>> test_table_1 = pa.Table.from_pydict({"x": [1, 2, 3]})
    >>> test_table_2 = pa.Table.from_pydict({"x": [4, 5, 6]})
    >>> def table_provider(names, schema):
    ...     if not names:
    ...         raise Exception("No names provided")
    ...     elif names[0] == "t1":
    ...         return test_table_1
    ...     elif names[1] == "t2":
    ...         return test_table_2
    ...     else:
    ...         raise Exception("Unrecognized table name")
    ...
    >>> substrait_query = '''
    ...     {
    ...         "relations": [
    ...         {"rel": {
    ...             "read": {
    ...             "base_schema": {
    ...                 "struct": {
    ...                 "types": [
    ...                             {"i64": {}}
    ...                         ]
    ...                 },
    ...                 "names": [
    ...                         "x"
    ...                         ]
    ...             },
    ...             "namedTable": {
    ...                     "names": ["t1"]
    ...             }
    ...             }
    ...         }}
    ...         ]
    ...     }
    ... '''
    >>> buf = pa._substrait._parse_json_plan(tobytes(substrait_query))
    >>> reader = pa.substrait.run_query(buf, table_provider=table_provider)
    >>> reader.read_all()
    pyarrow.Table
    x: int64
    ----
    x: [[1,2,3]]
    """

    cdef:
        CResult[shared_ptr[CRecordBatchReader]] c_res_reader
        shared_ptr[CRecordBatchReader] c_reader
        RecordBatchReader reader
        shared_ptr[CBuffer] c_buf_plan
        CConversionOptions c_conversion_options
        c_bool c_use_threads

    c_use_threads = use_threads
    # Accept raw bytes (wrapped via py_buffer) or an existing Buffer.
    if isinstance(plan, bytes):
        c_buf_plan = pyarrow_unwrap_buffer(py_buffer(plan))
    elif isinstance(plan, Buffer):
        c_buf_plan = pyarrow_unwrap_buffer(plan)
    else:
        raise TypeError(
            f"Expected 'pyarrow.Buffer' or bytes, got '{type(plan)}'")

    if table_provider is not None:
        # The Python callable travels to the C++ named-table provider as a
        # bound argument of _create_named_table_provider.
        named_table_args = {
            "provider": table_provider
        }
        c_conversion_options.named_table_provider = BindFunction[CNamedTableProvider](
            &_create_named_table_provider, named_table_args)

    # Release the GIL while the engine executes the plan.
    with nogil:
        c_res_reader = ExecuteSerializedPlan(
            deref(c_buf_plan), default_extension_id_registry(),
            GetFunctionRegistry(), c_conversion_options, c_use_threads)

    c_reader = GetResultValue(c_res_reader)

    # __new__ bypasses RecordBatchReader.__init__ (which disallows direct
    # construction); the C++ reader is attached directly.
    reader = RecordBatchReader.__new__(RecordBatchReader)
    reader.reader = c_reader
    return reader
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def _parse_json_plan(plan):
    """
    Parse a JSON plan into equivalent serialized Protobuf.

    Parameters
    ----------
    plan : bytes
        Substrait plan in JSON.

    Returns
    -------
    Buffer
        A buffer containing the serialized Protobuf plan.
    """

    cdef:
        CResult[shared_ptr[CBuffer]] c_res_buffer
        c_string c_str_plan
        shared_ptr[CBuffer] c_buf_plan

    c_str_plan = plan
    # NOTE(review): SerializeJsonPlan runs while still holding the GIL;
    # only the result extraction below is inside the nogil block.
    c_res_buffer = SerializeJsonPlan(c_str_plan)
    with nogil:
        c_buf_plan = GetResultValue(c_res_buffer)
    return pyarrow_wrap_buffer(c_buf_plan)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def serialize_expressions(exprs, names, schema, *, allow_arrow_extensions=False):
    """
    Serialize a collection of expressions into Substrait

    Substrait expressions must be bound to a schema. For example,
    the Substrait expression ``a:i32 + b:i32`` is different from the
    Substrait expression ``a:i64 + b:i64``. Pyarrow expressions are
    typically unbound. For example, both of the above expressions
    would be represented as ``a + b`` in pyarrow.

    This means a schema must be provided when serializing an expression.
    It also means that the serialization may fail if a matching function
    call cannot be found for the expression.

    Parameters
    ----------
    exprs : list of Expression
        The expressions to serialize
    names : list of str
        Names for the expressions
    schema : Schema
        The schema the expressions will be bound to
    allow_arrow_extensions : bool, default False
        If False then only functions that are part of the core Substrait function
        definitions will be allowed. Set this to True to allow pyarrow-specific functions
        and user defined functions but the result may not be accepted by other
        compute libraries.

    Returns
    -------
    Buffer
        An ExtendedExpression message containing the serialized expressions

    Raises
    ------
    ValueError
        If exprs and names have different lengths.
    TypeError
        If any element of exprs is not an Expression or any element of
        names is not a str.
    """
    cdef:
        CResult[shared_ptr[CBuffer]] c_res_buffer
        shared_ptr[CBuffer] c_buffer
        CNamedExpression c_named_expr
        CBoundExpressions c_bound_exprs
        CConversionOptions c_conversion_options

    # Validate inputs eagerly so we fail before touching the C++ layer.
    if len(exprs) != len(names):
        raise ValueError("exprs and names need to have the same length")
    for expr, name in zip(exprs, names):
        if not isinstance(expr, Expression):
            raise TypeError(f"Expected Expression, got '{type(expr)}' in exprs")
        if not isinstance(name, str):
            raise TypeError(f"Expected str, got '{type(name)}' in names")
        # c_named_expr is reused each iteration; push_back copies it.
        c_named_expr.expression = (<Expression> expr).unwrap()
        c_named_expr.name = tobytes(<str> name)
        c_bound_exprs.named_expressions.push_back(c_named_expr)

    c_bound_exprs.schema = (<Schema> schema).sp_schema

    c_conversion_options.allow_arrow_extensions = allow_arrow_extensions

    with nogil:
        c_res_buffer = SerializeExpressions(c_bound_exprs, c_conversion_options)
        c_buffer = GetResultValue(c_res_buffer)
    return pyarrow_wrap_buffer(c_buffer)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
cdef class BoundExpressions(_Weakrefable):
    """
    A collection of named expressions and the schema they are bound to

    This is equivalent to the Substrait ExtendedExpression message
    """

    cdef:
        CBoundExpressions c_bound_exprs  # underlying C++ value (by copy)

    def __init__(self):
        # Direct construction is disallowed; instances are created via the
        # static wrap() factory below.
        msg = 'BoundExpressions is an abstract class thus cannot be initialized.'
        raise TypeError(msg)

    cdef void init(self, CBoundExpressions bound_expressions):
        # Internal initializer used by wrap(); stores a copy of the C++ struct.
        self.c_bound_exprs = bound_expressions

    @property
    def schema(self):
        """
        The common schema that all expressions are bound to
        """
        return pyarrow_wrap_schema(self.c_bound_exprs.schema)

    @property
    def expressions(self):
        """
        A dict from expression name to expression
        """
        expr_dict = {}
        for named_expr in self.c_bound_exprs.named_expressions:
            name = frombytes(named_expr.name)
            expr = Expression.wrap(named_expr.expression)
            expr_dict[name] = expr
        return expr_dict

    @staticmethod
    cdef wrap(const CBoundExpressions& bound_expressions):
        # Factory: __new__ bypasses the raising __init__ above.
        cdef BoundExpressions self = BoundExpressions.__new__(BoundExpressions)
        self.init(bound_expressions)
        return self
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def deserialize_expressions(buf):
    """
    Deserialize an ExtendedExpression Substrait message into a BoundExpressions object

    Parameters
    ----------
    buf : Buffer or bytes
        The message to deserialize

    Returns
    -------
    BoundExpressions
        The deserialized expressions, their names, and the bound schema
    """
    cdef:
        shared_ptr[CBuffer] c_buffer
        CResult[CBoundExpressions] c_res_bound_exprs
        CBoundExpressions c_bound_exprs

    # Accept either raw bytes (wrapped into a pyarrow Buffer first) or an
    # existing Buffer; anything else is rejected up front.
    if isinstance(buf, bytes):
        c_buffer = pyarrow_unwrap_buffer(py_buffer(buf))
    elif isinstance(buf, Buffer):
        c_buffer = pyarrow_unwrap_buffer(buf)
    else:
        raise TypeError(
            f"Expected 'pyarrow.Buffer' or bytes, got '{type(buf)}'")

    # Release the GIL while the C++ deserializer runs.
    with nogil:
        c_res_bound_exprs = DeserializeExpressions(deref(c_buffer))
        c_bound_exprs = GetResultValue(c_res_bound_exprs)

    return BoundExpressions.wrap(c_bound_exprs)
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def get_supported_functions():
    """
    Get a list of Substrait functions that the underlying
    engine currently supports.

    Returns
    -------
    list[str]
        A list of function ids encoded as '{uri}#{name}'
    """

    cdef:
        ExtensionIdRegistry* c_id_registry
        std_vector[c_string] c_ids

    # Query the process-wide default registry for every Substrait function id
    # it knows about.
    c_id_registry = default_extension_id_registry()
    c_ids = c_id_registry.GetSupportedSubstraitFunctions()

    # Convert each C string id to a Python str.
    functions_list = []
    for c_id in c_ids:
        functions_list.append(frombytes(c_id))
    return functions_list
|
parrot/lib/python3.10/site-packages/pyarrow/benchmark.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# flake8: noqa
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
from pyarrow.lib import benchmark_PandasObjectIsNull
|
parrot/lib/python3.10/site-packages/pyarrow/builder.pxi
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
cdef class StringBuilder(_Weakrefable):
    """
    Builder class for UTF8 strings.

    This class exposes facilities for incrementally adding string values and
    building the null bitmap for a pyarrow.Array (type='string').
    """
    cdef:
        # Owned C++ builder; reset by Finish() when an array is produced.
        unique_ptr[CStringBuilder] builder

    def __cinit__(self, MemoryPool memory_pool=None):
        # Fall back to the default memory pool when none is given.
        cdef CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool)
        self.builder.reset(new CStringBuilder(pool))

    def append(self, value):
        """
        Append a single value to the builder.

        The value can either be a string/bytes object or a null value
        (np.nan or None).

        Parameters
        ----------
        value : string/bytes or np.nan/None
            The value to append to the string array builder.
        """
        # `is np.nan` relies on the singleton nan object; other NaN floats
        # are rejected by the TypeError branch below.
        if value is None or value is np.nan:
            self.builder.get().AppendNull()
        elif isinstance(value, (bytes, str)):
            self.builder.get().Append(tobytes(value))
        else:
            raise TypeError('StringBuilder only accepts string objects')

    def append_values(self, values):
        """
        Append all the values from an iterable.

        Parameters
        ----------
        values : iterable of string/bytes or np.nan/None values
            The values to append to the string array builder.
        """
        for value in values:
            self.append(value)

    def finish(self):
        """
        Return result of builder as an Array object; also resets the builder.

        Returns
        -------
        array : pyarrow.Array
        """
        cdef shared_ptr[CArray] out
        with nogil:
            self.builder.get().Finish(&out)
        return pyarrow_wrap_array(out)

    @property
    def null_count(self):
        # Number of nulls appended so far.
        return self.builder.get().null_count()

    def __len__(self):
        # Total number of values (including nulls) appended so far.
        return self.builder.get().length()
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
cdef class StringViewBuilder(_Weakrefable):
    """
    Builder class for UTF8 string views.

    This class exposes facilities for incrementally adding string values and
    building the null bitmap for a pyarrow.Array (type='string_view').
    """
    cdef:
        # Owned C++ builder; reset by Finish() when an array is produced.
        unique_ptr[CStringViewBuilder] builder

    def __cinit__(self, MemoryPool memory_pool=None):
        # Fall back to the default memory pool when none is given.
        cdef CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool)
        self.builder.reset(new CStringViewBuilder(pool))

    def append(self, value):
        """
        Append a single value to the builder.

        The value can either be a string/bytes object or a null value
        (np.nan or None).

        Parameters
        ----------
        value : string/bytes or np.nan/None
            The value to append to the string array builder.
        """
        # `is np.nan` relies on the singleton nan object; other NaN floats
        # are rejected by the TypeError branch below.
        if value is None or value is np.nan:
            self.builder.get().AppendNull()
        elif isinstance(value, (bytes, str)):
            self.builder.get().Append(tobytes(value))
        else:
            raise TypeError('StringViewBuilder only accepts string objects')

    def append_values(self, values):
        """
        Append all the values from an iterable.

        Parameters
        ----------
        values : iterable of string/bytes or np.nan/None values
            The values to append to the string array builder.
        """
        for value in values:
            self.append(value)

    def finish(self):
        """
        Return result of builder as an Array object; also resets the builder.

        Returns
        -------
        array : pyarrow.Array
        """
        cdef shared_ptr[CArray] out
        with nogil:
            self.builder.get().Finish(&out)
        return pyarrow_wrap_array(out)

    @property
    def null_count(self):
        # Number of nulls appended so far.
        return self.builder.get().null_count()

    def __len__(self):
        # Total number of values (including nulls) appended so far.
        return self.builder.get().length()
|
parrot/lib/python3.10/site-packages/pyarrow/cffi.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
from __future__ import absolute_import
|
| 19 |
+
|
| 20 |
+
import cffi
|
| 21 |
+
|
| 22 |
+
# C declarations for the Arrow C data interface (ArrowSchema/ArrowArray),
# the C stream interface (ArrowArrayStream), and the C device interface
# (ArrowDeviceArray), to be parsed by cffi below.
c_source = """
struct ArrowSchema {
  // Array type description
  const char* format;
  const char* name;
  const char* metadata;
  int64_t flags;
  int64_t n_children;
  struct ArrowSchema** children;
  struct ArrowSchema* dictionary;

  // Release callback
  void (*release)(struct ArrowSchema*);
  // Opaque producer-specific data
  void* private_data;
};

struct ArrowArray {
  // Array data description
  int64_t length;
  int64_t null_count;
  int64_t offset;
  int64_t n_buffers;
  int64_t n_children;
  const void** buffers;
  struct ArrowArray** children;
  struct ArrowArray* dictionary;

  // Release callback
  void (*release)(struct ArrowArray*);
  // Opaque producer-specific data
  void* private_data;
};

struct ArrowArrayStream {
  int (*get_schema)(struct ArrowArrayStream*, struct ArrowSchema* out);
  int (*get_next)(struct ArrowArrayStream*, struct ArrowArray* out);

  const char* (*get_last_error)(struct ArrowArrayStream*);

  // Release callback
  void (*release)(struct ArrowArrayStream*);
  // Opaque producer-specific data
  void* private_data;
};

typedef int32_t ArrowDeviceType;

struct ArrowDeviceArray {
  struct ArrowArray array;
  int64_t device_id;
  ArrowDeviceType device_type;
  void* sync_event;
  int64_t reserved[3];
};
"""

# Module-level FFI object: parsing the declarations here makes the struct
# types available to any consumer importing this module.
# TODO use out-of-line mode for faster import and avoid C parsing
ffi = cffi.FFI()
ffi.cdef(c_source)
|
parrot/lib/python3.10/site-packages/pyarrow/config.pxi
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
from pyarrow.includes.libarrow cimport GetBuildInfo
|
| 19 |
+
|
| 20 |
+
from collections import namedtuple
|
| 21 |
+
import os
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# (major, minor, patch) triple of the Arrow C++ library version.
VersionInfo = namedtuple('VersionInfo', ('major', 'minor', 'patch'))

# Compile-time metadata of the Arrow C++ library this module was built
# against; populated by _build_info() below.
BuildInfo = namedtuple(
    'BuildInfo',
    ('version', 'version_info', 'so_version', 'full_so_version',
     'compiler_id', 'compiler_version', 'compiler_flags',
     'git_id', 'git_description', 'package_kind', 'build_type'))

# Runtime SIMD capability report; populated by runtime_info() below.
RuntimeInfo = namedtuple('RuntimeInfo',
                         ('simd_level', 'detected_simd_level'))
|
| 34 |
+
|
| 35 |
+
cdef _build_info():
    # Collect the Arrow C++ library's compile-time build metadata into a
    # BuildInfo namedtuple, converting each C string field to a Python str.
    cdef:
        const CBuildInfo* c_info

    c_info = &GetBuildInfo()

    return BuildInfo(version=frombytes(c_info.version_string),
                     version_info=VersionInfo(c_info.version_major,
                                              c_info.version_minor,
                                              c_info.version_patch),
                     so_version=frombytes(c_info.so_version),
                     full_so_version=frombytes(c_info.full_so_version),
                     compiler_id=frombytes(c_info.compiler_id),
                     compiler_version=frombytes(c_info.compiler_version),
                     compiler_flags=frombytes(c_info.compiler_flags),
                     git_id=frombytes(c_info.git_id),
                     git_description=frombytes(c_info.git_description),
                     package_kind=frombytes(c_info.package_kind),
                     # normalized to lowercase (e.g. "release", "debug")
                     build_type=frombytes(c_info.build_type).lower(),
                     )
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Evaluate once at import time; build info is immutable for the process.
cpp_build_info = _build_info()
cpp_version = cpp_build_info.version
cpp_version_info = cpp_build_info.version_info
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def runtime_info():
    """
    Get runtime information.

    Returns
    -------
    info : pyarrow.RuntimeInfo
    """
    cdef:
        CRuntimeInfo c_info

    # Queried at call time (not cached) since the active SIMD level can be
    # influenced by the environment.
    c_info = GetRuntimeInfo()

    return RuntimeInfo(
        simd_level=frombytes(c_info.simd_level),
        detected_simd_level=frombytes(c_info.detected_simd_level))
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def set_timezone_db_path(path):
    """
    Configure the path to text timezone database on Windows.

    Parameters
    ----------
    path : str
        Path to text timezone database.
    """
    cdef:
        CGlobalOptions options

    # A None path leaves the option unset and re-runs initialization with
    # the current global options.
    if path is not None:
        options.timezone_db_path = <c_string>tobytes(path)

    check_status(Initialize(options))
|
parrot/lib/python3.10/site-packages/pyarrow/conftest.py
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
import pytest
|
| 19 |
+
|
| 20 |
+
import os
|
| 21 |
+
import pyarrow as pa
|
| 22 |
+
from pyarrow import Codec
|
| 23 |
+
from pyarrow import fs
|
| 24 |
+
from pyarrow.lib import is_threading_enabled
|
| 25 |
+
from pyarrow.tests.util import windows_has_tzdata
|
| 26 |
+
import sys
|
| 27 |
+
|
| 28 |
+
import numpy as np
|
| 29 |
+
|
| 30 |
+
# All recognized pytest marker groups; each gets --enable-<group> /
# --disable-<group> semantics with the default taken from `defaults` below.
groups = [
    'acero',
    'azure',
    'brotli',
    'bz2',
    'cython',
    'dataset',
    'hypothesis',
    'fastparquet',
    'flight',
    'gandiva',
    'gcs',
    'gdb',
    'gzip',
    'hdfs',
    'large_memory',
    'lz4',
    'memory_leak',
    'nopandas',
    'orc',
    'pandas',
    'parquet',
    'parquet_encryption',
    'processes',
    'requires_testing_data',
    's3',
    'slow',
    'snappy',
    'sockets',
    'substrait',
    'threading',
    'timezone_data',
    'zstd',
]
|
| 64 |
+
|
| 65 |
+
# Default enabled/disabled state per group. Codec groups probe availability
# directly; optional-component groups start False and are flipped on by the
# import probes further down when the component is actually importable.
defaults = {
    'acero': False,
    'azure': False,
    'brotli': Codec.is_available('brotli'),
    'bz2': Codec.is_available('bz2'),
    'cython': False,
    'dataset': False,
    'fastparquet': False,
    'flight': False,
    'gandiva': False,
    'gcs': False,
    'gdb': True,
    'gzip': Codec.is_available('gzip'),
    'hdfs': False,
    'hypothesis': False,
    'large_memory': False,
    'lz4': Codec.is_available('lz4'),
    'memory_leak': False,
    'nopandas': False,
    'orc': False,
    'pandas': False,
    'parquet': False,
    'parquet_encryption': False,
    'processes': True,
    'requires_testing_data': True,
    's3': False,
    'slow': False,
    'snappy': Codec.is_available('snappy'),
    'sockets': True,
    'substrait': False,
    'threading': is_threading_enabled(),
    'timezone_data': True,
    'zstd': Codec.is_available('zstd'),
}
|
| 99 |
+
|
| 100 |
+
if sys.platform == "emscripten":
    # Emscripten doesn't support subprocess,
    # multiprocessing, gdb or socket based
    # networking
    defaults['gdb'] = False
    defaults['processes'] = False
    defaults['sockets'] = False

# Timezone data may be absent on Windows and Emscripten; detect it so
# timezone-dependent tests can be skipped cleanly.
if sys.platform == "win32":
    defaults['timezone_data'] = windows_has_tzdata()
elif sys.platform == "emscripten":
    defaults['timezone_data'] = os.path.exists("/usr/share/zoneinfo")
|
| 112 |
+
|
| 113 |
+
# Probe each optional component by importing it; flip the corresponding
# default on when the import succeeds, and silently leave it off otherwise.
try:
    import cython  # noqa
    defaults['cython'] = True
except ImportError:
    pass

try:
    import fastparquet  # noqa
    defaults['fastparquet'] = True
except ImportError:
    pass

try:
    import pyarrow.gandiva  # noqa
    defaults['gandiva'] = True
except ImportError:
    pass

try:
    import pyarrow.acero  # noqa
    defaults['acero'] = True
except ImportError:
    pass

try:
    import pyarrow.dataset  # noqa
    defaults['dataset'] = True
except ImportError:
    pass

try:
    import pyarrow.orc  # noqa
    if sys.platform == "win32":
        defaults['orc'] = True
    else:
        # orc tests on non-Windows platforms only work
        # if timezone data exists, so skip them if
        # not.
        defaults['orc'] = defaults['timezone_data']
except ImportError:
    pass

try:
    import pandas  # noqa
    defaults['pandas'] = True
except ImportError:
    # 'nopandas' marks tests that should only run without pandas installed.
    defaults['nopandas'] = True

try:
    import pyarrow.parquet  # noqa
    defaults['parquet'] = True
except ImportError:
    pass

try:
    import pyarrow.parquet.encryption  # noqa
    defaults['parquet_encryption'] = True
except ImportError:
    pass

try:
    import pyarrow.flight  # noqa
    defaults['flight'] = True
except ImportError:
    pass

try:
    from pyarrow.fs import AzureFileSystem  # noqa
    defaults['azure'] = True
except ImportError:
    pass

try:
    from pyarrow.fs import GcsFileSystem  # noqa
    defaults['gcs'] = True
except ImportError:
    pass

try:
    from pyarrow.fs import S3FileSystem  # noqa
    defaults['s3'] = True
except ImportError:
    pass

try:
    from pyarrow.fs import HadoopFileSystem  # noqa
    defaults['hdfs'] = True
except ImportError:
    pass

try:
    import pyarrow.substrait  # noqa
    defaults['substrait'] = True
except ImportError:
    pass
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
# Doctest should ignore files for the modules that are not built
|
| 211 |
+
def pytest_ignore_collect(path, config):
    """
    Tell pytest which files to skip during collection.

    Doctest runs must ignore the tests directory itself as well as any
    module belonging to an optional component that is not built/installed.
    """
    path_str = str(path)

    if config.option.doctestmodules:
        # don't try to run doctests on the /tests directory
        if "/pyarrow/tests/" in path_str:
            return True

        # handle cuda, flight, etc
        for component in ('dataset', 'orc', 'parquet', 'flight', 'substrait'):
            if f'pyarrow/{component}' in path_str and not defaults[component]:
                return True

        if ('pyarrow/parquet/encryption' in path_str
                and not defaults['parquet_encryption']):
            return True

        if 'pyarrow/cuda' in path_str:
            try:
                import pyarrow.cuda  # noqa
                return False
            except ImportError:
                return True

        if 'pyarrow/fs' in path_str:
            try:
                from pyarrow.fs import S3FileSystem  # noqa
                return False
            except ImportError:
                return True

    if getattr(config.option, "doctest_cython", False):
        if "/pyarrow/tests/" in path_str:
            return True
        if "/pyarrow/_parquet_encryption" in path_str:
            return True

    return False
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
# Save output files from doctest examples into temp dir
|
| 259 |
+
# Save output files from doctest examples into temp dir
@pytest.fixture(autouse=True)
def _docdir(request):

    # Trigger ONLY for the doctests
    doctest_m = request.config.option.doctestmodules
    doctest_c = getattr(request.config.option, "doctest_cython", False)

    if doctest_m or doctest_c:

        # Get the fixture dynamically by its name.
        tmpdir = request.getfixturevalue('tmpdir')

        # Chdir only for the duration of the test.
        with tmpdir.as_cwd():
            yield

    else:
        # Not a doctest run: yield without changing the working directory.
        yield
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
# Define doctest_namespace for fs module docstring import
|
| 280 |
+
# Define doctest_namespace for fs module docstring import
@pytest.fixture(autouse=True)
def add_fs(doctest_namespace, request, tmp_path):

    # Trigger ONLY for the doctests
    doctest_m = request.config.option.doctestmodules
    doctest_c = getattr(request.config.option, "doctest_cython", False)

    if doctest_m or doctest_c:
        # fs import
        doctest_namespace["fs"] = fs

        # Creation of an object and file with data
        local = fs.LocalFileSystem()
        path = tmp_path / 'pyarrow-fs-example.dat'
        with local.open_output_stream(str(path)) as stream:
            stream.write(b'data')
        # Expose the filesystem and paths to the doctests.
        doctest_namespace["local"] = local
        doctest_namespace["local_path"] = str(tmp_path)
        doctest_namespace["path"] = str(path)
    yield
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
# Define udf fixture for test_udf.py and test_substrait.py
|
| 303 |
+
# Define udf fixture for test_udf.py and test_substrait.py
@pytest.fixture(scope="session")
def unary_func_fixture():
    """
    Register a unary scalar function.

    Returns the (function, registered name) pair; registration is
    session-scoped since a name can only be registered once.
    """
    from pyarrow import compute as pc

    def unary_function(ctx, x):
        # Scalar UDF implementation: x + 1, using the context's memory pool.
        return pc.call_function("add", [x, 1],
                                memory_pool=ctx.memory_pool)
    func_name = "y=x+1"
    unary_doc = {"summary": "add function",
                 "description": "test add function"}
    pc.register_scalar_function(unary_function,
                                func_name,
                                unary_doc,
                                {"array": pa.int64()},
                                pa.int64())
    return unary_function, func_name
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
@pytest.fixture(scope="session")
def unary_agg_func_fixture():
    """
    Register a unary aggregate function (mean)

    Returns the (function, registered name) pair; registration is
    session-scoped since a name can only be registered once.
    """
    from pyarrow import compute as pc

    def func(ctx, x):
        # Aggregate UDF implementation: NaN-ignoring mean of the input.
        return pa.scalar(np.nanmean(x))

    func_name = "mean_udf"
    func_doc = {"summary": "y=avg(x)",
                "description": "find mean of x"}

    pc.register_aggregate_function(func,
                                   func_name,
                                   func_doc,
                                   {
                                       "x": pa.float64(),
                                   },
                                   pa.float64()
                                   )
    return func, func_name
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
@pytest.fixture(scope="session")
|
| 350 |
+
def varargs_agg_func_fixture():
|
| 351 |
+
"""
|
| 352 |
+
Register a unary aggregate function
|
| 353 |
+
"""
|
| 354 |
+
from pyarrow import compute as pc
|
| 355 |
+
|
| 356 |
+
def func(ctx, *args):
|
| 357 |
+
sum = 0.0
|
| 358 |
+
for arg in args:
|
| 359 |
+
sum += np.nanmean(arg)
|
| 360 |
+
return pa.scalar(sum)
|
| 361 |
+
|
| 362 |
+
func_name = "sum_mean"
|
| 363 |
+
func_doc = {"summary": "Varargs aggregate",
|
| 364 |
+
"description": "Varargs aggregate"}
|
| 365 |
+
|
| 366 |
+
pc.register_aggregate_function(func,
|
| 367 |
+
func_name,
|
| 368 |
+
func_doc,
|
| 369 |
+
{
|
| 370 |
+
"x": pa.int64(),
|
| 371 |
+
"y": pa.float64()
|
| 372 |
+
},
|
| 373 |
+
pa.float64()
|
| 374 |
+
)
|
| 375 |
+
return func, func_name
|
parrot/lib/python3.10/site-packages/pyarrow/device.pxi
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# cython: profile=False
|
| 19 |
+
# distutils: language = c++
|
| 20 |
+
# cython: embedsignature = True
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
cpdef enum DeviceAllocationType:
|
| 24 |
+
CPU = <char> CDeviceAllocationType_kCPU
|
| 25 |
+
CUDA = <char> CDeviceAllocationType_kCUDA
|
| 26 |
+
CUDA_HOST = <char> CDeviceAllocationType_kCUDA_HOST
|
| 27 |
+
OPENCL = <char> CDeviceAllocationType_kOPENCL
|
| 28 |
+
VULKAN = <char> CDeviceAllocationType_kVULKAN
|
| 29 |
+
METAL = <char> CDeviceAllocationType_kMETAL
|
| 30 |
+
VPI = <char> CDeviceAllocationType_kVPI
|
| 31 |
+
ROCM = <char> CDeviceAllocationType_kROCM
|
| 32 |
+
ROCM_HOST = <char> CDeviceAllocationType_kROCM_HOST
|
| 33 |
+
EXT_DEV = <char> CDeviceAllocationType_kEXT_DEV
|
| 34 |
+
CUDA_MANAGED = <char> CDeviceAllocationType_kCUDA_MANAGED
|
| 35 |
+
ONEAPI = <char> CDeviceAllocationType_kONEAPI
|
| 36 |
+
WEBGPU = <char> CDeviceAllocationType_kWEBGPU
|
| 37 |
+
HEXAGON = <char> CDeviceAllocationType_kHEXAGON
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
cdef object _wrap_device_allocation_type(CDeviceAllocationType device_type):
|
| 41 |
+
return DeviceAllocationType(<char> device_type)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
cdef class Device(_Weakrefable):
|
| 45 |
+
"""
|
| 46 |
+
Abstract interface for hardware devices
|
| 47 |
+
|
| 48 |
+
This object represents a device with access to some memory spaces.
|
| 49 |
+
When handling a Buffer or raw memory address, it allows deciding in which
|
| 50 |
+
context the raw memory address should be interpreted
|
| 51 |
+
(e.g. CPU-accessible memory, or embedded memory on some particular GPU).
|
| 52 |
+
"""
|
| 53 |
+
|
| 54 |
+
def __init__(self):
|
| 55 |
+
raise TypeError("Do not call Device's constructor directly, "
|
| 56 |
+
"use the device attribute of the MemoryManager instead.")
|
| 57 |
+
|
| 58 |
+
cdef void init(self, const shared_ptr[CDevice]& device):
|
| 59 |
+
self.device = device
|
| 60 |
+
|
| 61 |
+
@staticmethod
|
| 62 |
+
cdef wrap(const shared_ptr[CDevice]& device):
|
| 63 |
+
cdef Device self = Device.__new__(Device)
|
| 64 |
+
self.init(device)
|
| 65 |
+
return self
|
| 66 |
+
|
| 67 |
+
def __eq__(self, other):
|
| 68 |
+
if not isinstance(other, Device):
|
| 69 |
+
return False
|
| 70 |
+
return self.device.get().Equals(deref((<Device>other).device.get()))
|
| 71 |
+
|
| 72 |
+
def __repr__(self):
|
| 73 |
+
return "<pyarrow.Device: {}>".format(frombytes(self.device.get().ToString()))
|
| 74 |
+
|
| 75 |
+
@property
|
| 76 |
+
def type_name(self):
|
| 77 |
+
"""
|
| 78 |
+
A shorthand for this device's type.
|
| 79 |
+
"""
|
| 80 |
+
return frombytes(self.device.get().type_name())
|
| 81 |
+
|
| 82 |
+
@property
|
| 83 |
+
def device_id(self):
|
| 84 |
+
"""
|
| 85 |
+
A device ID to identify this device if there are multiple of this type.
|
| 86 |
+
|
| 87 |
+
If there is no "device_id" equivalent (such as for the main CPU device on
|
| 88 |
+
non-numa systems) returns -1.
|
| 89 |
+
"""
|
| 90 |
+
return self.device.get().device_id()
|
| 91 |
+
|
| 92 |
+
@property
|
| 93 |
+
def is_cpu(self):
|
| 94 |
+
"""
|
| 95 |
+
Whether this device is the main CPU device.
|
| 96 |
+
|
| 97 |
+
This shorthand method is very useful when deciding whether a memory address
|
| 98 |
+
is CPU-accessible.
|
| 99 |
+
"""
|
| 100 |
+
return self.device.get().is_cpu()
|
| 101 |
+
|
| 102 |
+
@property
|
| 103 |
+
def device_type(self):
|
| 104 |
+
"""
|
| 105 |
+
Return the DeviceAllocationType of this device.
|
| 106 |
+
"""
|
| 107 |
+
return _wrap_device_allocation_type(self.device.get().device_type())
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
cdef class MemoryManager(_Weakrefable):
|
| 111 |
+
"""
|
| 112 |
+
An object that provides memory management primitives.
|
| 113 |
+
|
| 114 |
+
A MemoryManager is always tied to a particular Device instance.
|
| 115 |
+
It can also have additional parameters (such as a MemoryPool to
|
| 116 |
+
allocate CPU memory).
|
| 117 |
+
|
| 118 |
+
"""
|
| 119 |
+
|
| 120 |
+
def __init__(self):
|
| 121 |
+
raise TypeError("Do not call MemoryManager's constructor directly, "
|
| 122 |
+
"use pyarrow.default_cpu_memory_manager() instead.")
|
| 123 |
+
|
| 124 |
+
cdef void init(self, const shared_ptr[CMemoryManager]& mm):
|
| 125 |
+
self.memory_manager = mm
|
| 126 |
+
|
| 127 |
+
@staticmethod
|
| 128 |
+
cdef wrap(const shared_ptr[CMemoryManager]& mm):
|
| 129 |
+
cdef MemoryManager self = MemoryManager.__new__(MemoryManager)
|
| 130 |
+
self.init(mm)
|
| 131 |
+
return self
|
| 132 |
+
|
| 133 |
+
def __repr__(self):
|
| 134 |
+
return "<pyarrow.MemoryManager device: {}>".format(
|
| 135 |
+
frombytes(self.memory_manager.get().device().get().ToString())
|
| 136 |
+
)
|
| 137 |
+
|
| 138 |
+
@property
|
| 139 |
+
def device(self):
|
| 140 |
+
"""
|
| 141 |
+
The device this MemoryManager is tied to.
|
| 142 |
+
"""
|
| 143 |
+
return Device.wrap(self.memory_manager.get().device())
|
| 144 |
+
|
| 145 |
+
@property
|
| 146 |
+
def is_cpu(self):
|
| 147 |
+
"""
|
| 148 |
+
Whether this MemoryManager is tied to the main CPU device.
|
| 149 |
+
|
| 150 |
+
This shorthand method is very useful when deciding whether a memory
|
| 151 |
+
address is CPU-accessible.
|
| 152 |
+
"""
|
| 153 |
+
return self.memory_manager.get().is_cpu()
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def default_cpu_memory_manager():
|
| 157 |
+
"""
|
| 158 |
+
Return the default CPU MemoryManager instance.
|
| 159 |
+
|
| 160 |
+
The returned singleton instance uses the default MemoryPool.
|
| 161 |
+
"""
|
| 162 |
+
return MemoryManager.wrap(c_default_cpu_memory_manager())
|
parrot/lib/python3.10/site-packages/pyarrow/flight.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
try:
|
| 19 |
+
from pyarrow._flight import ( # noqa:F401
|
| 20 |
+
connect,
|
| 21 |
+
Action,
|
| 22 |
+
ActionType,
|
| 23 |
+
BasicAuth,
|
| 24 |
+
CallInfo,
|
| 25 |
+
CertKeyPair,
|
| 26 |
+
ClientAuthHandler,
|
| 27 |
+
ClientMiddleware,
|
| 28 |
+
ClientMiddlewareFactory,
|
| 29 |
+
DescriptorType,
|
| 30 |
+
FlightCallOptions,
|
| 31 |
+
FlightCancelledError,
|
| 32 |
+
FlightClient,
|
| 33 |
+
FlightDataStream,
|
| 34 |
+
FlightDescriptor,
|
| 35 |
+
FlightEndpoint,
|
| 36 |
+
FlightError,
|
| 37 |
+
FlightInfo,
|
| 38 |
+
FlightInternalError,
|
| 39 |
+
FlightMetadataReader,
|
| 40 |
+
FlightMetadataWriter,
|
| 41 |
+
FlightMethod,
|
| 42 |
+
FlightServerBase,
|
| 43 |
+
FlightServerError,
|
| 44 |
+
FlightStreamChunk,
|
| 45 |
+
FlightStreamReader,
|
| 46 |
+
FlightStreamWriter,
|
| 47 |
+
FlightTimedOutError,
|
| 48 |
+
FlightUnauthenticatedError,
|
| 49 |
+
FlightUnauthorizedError,
|
| 50 |
+
FlightUnavailableError,
|
| 51 |
+
FlightWriteSizeExceededError,
|
| 52 |
+
GeneratorStream,
|
| 53 |
+
Location,
|
| 54 |
+
MetadataRecordBatchReader,
|
| 55 |
+
MetadataRecordBatchWriter,
|
| 56 |
+
RecordBatchStream,
|
| 57 |
+
Result,
|
| 58 |
+
SchemaResult,
|
| 59 |
+
ServerAuthHandler,
|
| 60 |
+
ServerCallContext,
|
| 61 |
+
ServerMiddleware,
|
| 62 |
+
ServerMiddlewareFactory,
|
| 63 |
+
Ticket,
|
| 64 |
+
TracingServerMiddlewareFactory,
|
| 65 |
+
)
|
| 66 |
+
except ImportError as exc:
|
| 67 |
+
raise ImportError(
|
| 68 |
+
f"The pyarrow installation is not built with support for 'flight' ({str(exc)})"
|
| 69 |
+
) from None
|
parrot/lib/python3.10/site-packages/pyarrow/includes/__init__.pxd
ADDED
|
File without changes
|
parrot/lib/python3.10/site-packages/pyarrow/includes/common.pxd
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
|
| 20 |
+
from libc.stdint cimport *
|
| 21 |
+
from libcpp cimport bool as c_bool, nullptr
|
| 22 |
+
from libcpp.functional cimport function
|
| 23 |
+
from libcpp.memory cimport shared_ptr, unique_ptr, make_shared
|
| 24 |
+
from libcpp.string cimport string as c_string
|
| 25 |
+
from libcpp.utility cimport pair
|
| 26 |
+
from libcpp.vector cimport vector
|
| 27 |
+
from libcpp.unordered_map cimport unordered_map
|
| 28 |
+
from libcpp.unordered_set cimport unordered_set
|
| 29 |
+
|
| 30 |
+
from cpython cimport PyObject
|
| 31 |
+
from cpython.datetime cimport PyDateTime_DateTime
|
| 32 |
+
cimport cpython
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
cdef extern from * namespace "std" nogil:
|
| 36 |
+
cdef shared_ptr[T] static_pointer_cast[T, U](shared_ptr[U])
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
cdef extern from "<optional>" namespace "std" nogil:
|
| 40 |
+
cdef cppclass optional[T]:
|
| 41 |
+
ctypedef T value_type
|
| 42 |
+
optional()
|
| 43 |
+
optional(nullopt_t)
|
| 44 |
+
optional(optional&) except +
|
| 45 |
+
optional(T&) except +
|
| 46 |
+
c_bool has_value()
|
| 47 |
+
T& value()
|
| 48 |
+
T& value_or[U](U& default_value)
|
| 49 |
+
void swap(optional&)
|
| 50 |
+
void reset()
|
| 51 |
+
T& emplace(...)
|
| 52 |
+
T& operator*()
|
| 53 |
+
# T* operator->() # Not Supported
|
| 54 |
+
optional& operator=(optional&)
|
| 55 |
+
optional& operator=[U](U&)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# vendored from the cymove project https://github.com/ozars/cymove
|
| 59 |
+
cdef extern from * namespace "cymove" nogil:
|
| 60 |
+
"""
|
| 61 |
+
#include <type_traits>
|
| 62 |
+
#include <utility>
|
| 63 |
+
namespace cymove {
|
| 64 |
+
template <typename T>
|
| 65 |
+
inline typename std::remove_reference<T>::type&& cymove(T& t) {
|
| 66 |
+
return std::move(t);
|
| 67 |
+
}
|
| 68 |
+
template <typename T>
|
| 69 |
+
inline typename std::remove_reference<T>::type&& cymove(T&& t) {
|
| 70 |
+
return std::move(t);
|
| 71 |
+
}
|
| 72 |
+
} // namespace cymove
|
| 73 |
+
"""
|
| 74 |
+
cdef T move" cymove::cymove"[T](T)
|
| 75 |
+
|
| 76 |
+
cdef extern from * namespace "arrow::py" nogil:
|
| 77 |
+
"""
|
| 78 |
+
#include <memory>
|
| 79 |
+
#include <utility>
|
| 80 |
+
|
| 81 |
+
namespace arrow {
|
| 82 |
+
namespace py {
|
| 83 |
+
template <typename T>
|
| 84 |
+
std::shared_ptr<T> to_shared(std::unique_ptr<T>& t) {
|
| 85 |
+
return std::move(t);
|
| 86 |
+
}
|
| 87 |
+
template <typename T>
|
| 88 |
+
std::shared_ptr<T> to_shared(std::unique_ptr<T>&& t) {
|
| 89 |
+
return std::move(t);
|
| 90 |
+
}
|
| 91 |
+
} // namespace py
|
| 92 |
+
} // namespace arrow
|
| 93 |
+
"""
|
| 94 |
+
cdef shared_ptr[T] to_shared" arrow::py::to_shared"[T](unique_ptr[T])
|
| 95 |
+
|
| 96 |
+
cdef extern from "arrow/python/platform.h":
|
| 97 |
+
pass
|
| 98 |
+
|
| 99 |
+
cdef extern from "<Python.h>":
|
| 100 |
+
void Py_XDECREF(PyObject* o)
|
| 101 |
+
Py_ssize_t Py_REFCNT(PyObject* o)
|
| 102 |
+
|
| 103 |
+
cdef extern from "numpy/halffloat.h":
|
| 104 |
+
ctypedef uint16_t npy_half
|
| 105 |
+
|
| 106 |
+
cdef extern from "arrow/api.h" namespace "arrow" nogil:
|
| 107 |
+
# We can later add more of the common status factory methods as needed
|
| 108 |
+
cdef CStatus CStatus_OK "arrow::Status::OK"()
|
| 109 |
+
|
| 110 |
+
cdef CStatus CStatus_Invalid "arrow::Status::Invalid"()
|
| 111 |
+
cdef CStatus CStatus_NotImplemented \
|
| 112 |
+
"arrow::Status::NotImplemented"(const c_string& msg)
|
| 113 |
+
cdef CStatus CStatus_UnknownError \
|
| 114 |
+
"arrow::Status::UnknownError"(const c_string& msg)
|
| 115 |
+
|
| 116 |
+
cdef cppclass CStatus "arrow::Status":
|
| 117 |
+
CStatus()
|
| 118 |
+
|
| 119 |
+
c_string ToString()
|
| 120 |
+
c_string message()
|
| 121 |
+
shared_ptr[CStatusDetail] detail()
|
| 122 |
+
|
| 123 |
+
c_bool ok()
|
| 124 |
+
c_bool IsIOError()
|
| 125 |
+
c_bool IsOutOfMemory()
|
| 126 |
+
c_bool IsInvalid()
|
| 127 |
+
c_bool IsKeyError()
|
| 128 |
+
c_bool IsNotImplemented()
|
| 129 |
+
c_bool IsTypeError()
|
| 130 |
+
c_bool IsCapacityError()
|
| 131 |
+
c_bool IsIndexError()
|
| 132 |
+
c_bool IsSerializationError()
|
| 133 |
+
c_bool IsCancelled()
|
| 134 |
+
|
| 135 |
+
void Warn()
|
| 136 |
+
|
| 137 |
+
cdef cppclass CStatusDetail "arrow::StatusDetail":
|
| 138 |
+
c_string ToString()
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
cdef extern from "arrow/result.h" namespace "arrow" nogil:
|
| 142 |
+
cdef cppclass CResult "arrow::Result"[T]:
|
| 143 |
+
CResult()
|
| 144 |
+
CResult(CStatus)
|
| 145 |
+
CResult(T)
|
| 146 |
+
c_bool ok()
|
| 147 |
+
CStatus status()
|
| 148 |
+
CStatus Value(T*)
|
| 149 |
+
T operator*()
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
cdef extern from "arrow/util/future.h" namespace "arrow" nogil:
|
| 153 |
+
cdef cppclass CFuture "arrow::Future"[T]:
|
| 154 |
+
CFuture()
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
cdef extern from "arrow/python/async.h" namespace "arrow::py" nogil:
|
| 158 |
+
# BindFuture's third argument is really a C++ callable with
|
| 159 |
+
# the signature `object(T*)`, but Cython does not allow declaring that.
|
| 160 |
+
# We use an ellipsis as a workaround.
|
| 161 |
+
# Another possibility is to type-erase the argument by making it
|
| 162 |
+
# `object(void*)`, but it would lose compile-time C++ type safety.
|
| 163 |
+
void BindFuture[T](CFuture[T], object cb, ...)
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
cdef extern from "arrow/python/common.h" namespace "arrow::py" nogil:
|
| 167 |
+
T GetResultValue[T](CResult[T]) except *
|
| 168 |
+
cdef function[F] BindFunction[F](void* unbound, object bound, ...)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
cdef inline object PyObject_to_object(PyObject* o):
|
| 172 |
+
# Cast to "object" increments reference count
|
| 173 |
+
cdef object result = <object> o
|
| 174 |
+
cpython.Py_DECREF(result)
|
| 175 |
+
return result
|
parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
|
| 20 |
+
from pyarrow.includes.common cimport *
|
| 21 |
+
from pyarrow.includes.libarrow cimport *
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
cdef extern from "arrow/acero/options.h" namespace "arrow::acero" nogil:
|
| 25 |
+
cdef enum CJoinType "arrow::acero::JoinType":
|
| 26 |
+
CJoinType_LEFT_SEMI "arrow::acero::JoinType::LEFT_SEMI"
|
| 27 |
+
CJoinType_RIGHT_SEMI "arrow::acero::JoinType::RIGHT_SEMI"
|
| 28 |
+
CJoinType_LEFT_ANTI "arrow::acero::JoinType::LEFT_ANTI"
|
| 29 |
+
CJoinType_RIGHT_ANTI "arrow::acero::JoinType::RIGHT_ANTI"
|
| 30 |
+
CJoinType_INNER "arrow::acero::JoinType::INNER"
|
| 31 |
+
CJoinType_LEFT_OUTER "arrow::acero::JoinType::LEFT_OUTER"
|
| 32 |
+
CJoinType_RIGHT_OUTER "arrow::acero::JoinType::RIGHT_OUTER"
|
| 33 |
+
CJoinType_FULL_OUTER "arrow::acero::JoinType::FULL_OUTER"
|
| 34 |
+
|
| 35 |
+
cdef cppclass CExecNodeOptions "arrow::acero::ExecNodeOptions":
|
| 36 |
+
pass
|
| 37 |
+
|
| 38 |
+
cdef cppclass CSourceNodeOptions "arrow::acero::SourceNodeOptions"(CExecNodeOptions):
|
| 39 |
+
pass
|
| 40 |
+
|
| 41 |
+
cdef cppclass CTableSourceNodeOptions "arrow::acero::TableSourceNodeOptions"(CExecNodeOptions):
|
| 42 |
+
CTableSourceNodeOptions(shared_ptr[CTable] table)
|
| 43 |
+
CTableSourceNodeOptions(shared_ptr[CTable] table, int64_t max_batch_size)
|
| 44 |
+
|
| 45 |
+
cdef cppclass CSinkNodeOptions "arrow::acero::SinkNodeOptions"(CExecNodeOptions):
|
| 46 |
+
pass
|
| 47 |
+
|
| 48 |
+
cdef cppclass CFilterNodeOptions "arrow::acero::FilterNodeOptions"(CExecNodeOptions):
|
| 49 |
+
CFilterNodeOptions(CExpression)
|
| 50 |
+
|
| 51 |
+
cdef cppclass CProjectNodeOptions "arrow::acero::ProjectNodeOptions"(CExecNodeOptions):
|
| 52 |
+
CProjectNodeOptions(vector[CExpression] expressions)
|
| 53 |
+
CProjectNodeOptions(vector[CExpression] expressions,
|
| 54 |
+
vector[c_string] names)
|
| 55 |
+
|
| 56 |
+
cdef cppclass CAggregateNodeOptions "arrow::acero::AggregateNodeOptions"(CExecNodeOptions):
|
| 57 |
+
CAggregateNodeOptions(vector[CAggregate] aggregates, vector[CFieldRef] names)
|
| 58 |
+
|
| 59 |
+
cdef cppclass COrderByNodeOptions "arrow::acero::OrderByNodeOptions"(CExecNodeOptions):
|
| 60 |
+
COrderByNodeOptions(COrdering ordering)
|
| 61 |
+
|
| 62 |
+
cdef cppclass CHashJoinNodeOptions "arrow::acero::HashJoinNodeOptions"(CExecNodeOptions):
|
| 63 |
+
CHashJoinNodeOptions(CJoinType, vector[CFieldRef] in_left_keys,
|
| 64 |
+
vector[CFieldRef] in_right_keys)
|
| 65 |
+
CHashJoinNodeOptions(CJoinType, vector[CFieldRef] in_left_keys,
|
| 66 |
+
vector[CFieldRef] in_right_keys,
|
| 67 |
+
CExpression filter,
|
| 68 |
+
c_string output_suffix_for_left,
|
| 69 |
+
c_string output_suffix_for_right)
|
| 70 |
+
CHashJoinNodeOptions(CJoinType join_type,
|
| 71 |
+
vector[CFieldRef] left_keys,
|
| 72 |
+
vector[CFieldRef] right_keys,
|
| 73 |
+
vector[CFieldRef] left_output,
|
| 74 |
+
vector[CFieldRef] right_output,
|
| 75 |
+
CExpression filter,
|
| 76 |
+
c_string output_suffix_for_left,
|
| 77 |
+
c_string output_suffix_for_right)
|
| 78 |
+
|
| 79 |
+
cdef struct CAsofJoinKeys "arrow::acero::AsofJoinNodeOptions::Keys":
|
| 80 |
+
CFieldRef on_key
|
| 81 |
+
vector[CFieldRef] by_key
|
| 82 |
+
|
| 83 |
+
cdef cppclass CAsofJoinNodeOptions "arrow::acero::AsofJoinNodeOptions"(CExecNodeOptions):
|
| 84 |
+
CAsofJoinNodeOptions(vector[CAsofJoinKeys] keys, int64_t tolerance)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
cdef extern from "arrow/acero/exec_plan.h" namespace "arrow::acero" nogil:
|
| 88 |
+
cdef cppclass CDeclaration "arrow::acero::Declaration":
|
| 89 |
+
cppclass Input:
|
| 90 |
+
Input(CExecNode*)
|
| 91 |
+
Input(CDeclaration)
|
| 92 |
+
|
| 93 |
+
c_string label
|
| 94 |
+
vector[Input] inputs
|
| 95 |
+
|
| 96 |
+
CDeclaration()
|
| 97 |
+
CDeclaration(c_string factory_name, CExecNodeOptions options)
|
| 98 |
+
CDeclaration(c_string factory_name, vector[Input] inputs, shared_ptr[CExecNodeOptions] options)
|
| 99 |
+
|
| 100 |
+
@staticmethod
|
| 101 |
+
CDeclaration Sequence(vector[CDeclaration] decls)
|
| 102 |
+
|
| 103 |
+
cdef cppclass CExecNode "arrow::acero::ExecNode":
|
| 104 |
+
const vector[CExecNode*]& inputs() const
|
| 105 |
+
const shared_ptr[CSchema]& output_schema() const
|
| 106 |
+
|
| 107 |
+
CResult[shared_ptr[CTable]] DeclarationToTable(
|
| 108 |
+
CDeclaration declaration, c_bool use_threads
|
| 109 |
+
)
|
| 110 |
+
CResult[shared_ptr[CTable]] DeclarationToTable(
|
| 111 |
+
CDeclaration declaration, c_bool use_threads,
|
| 112 |
+
CMemoryPool* memory_pool, CFunctionRegistry* function_registry
|
| 113 |
+
)
|
| 114 |
+
CResult[unique_ptr[CRecordBatchReader]] DeclarationToReader(
|
| 115 |
+
CDeclaration declaration, c_bool use_threads
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
CResult[c_string] DeclarationToString(const CDeclaration& declaration)
|
parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
|
| 20 |
+
from pyarrow.includes.libarrow cimport *
|
| 21 |
+
|
| 22 |
+
cdef extern from "arrow/gpu/cuda_api.h" namespace "arrow::cuda" nogil:
|
| 23 |
+
|
| 24 |
+
cdef cppclass CCudaDeviceManager" arrow::cuda::CudaDeviceManager":
|
| 25 |
+
@staticmethod
|
| 26 |
+
CResult[CCudaDeviceManager*] Instance()
|
| 27 |
+
CResult[shared_ptr[CCudaContext]] GetContext(int gpu_number)
|
| 28 |
+
CResult[shared_ptr[CCudaContext]] GetSharedContext(int gpu_number,
|
| 29 |
+
void* handle)
|
| 30 |
+
CStatus AllocateHost(int device_number, int64_t nbytes,
|
| 31 |
+
shared_ptr[CCudaHostBuffer]* buffer)
|
| 32 |
+
int num_devices() const
|
| 33 |
+
|
| 34 |
+
cdef cppclass CCudaContext" arrow::cuda::CudaContext":
|
| 35 |
+
CResult[shared_ptr[CCudaBuffer]] Allocate(int64_t nbytes)
|
| 36 |
+
CResult[shared_ptr[CCudaBuffer]] View(uint8_t* data, int64_t nbytes)
|
| 37 |
+
CResult[shared_ptr[CCudaBuffer]] OpenIpcBuffer(
|
| 38 |
+
const CCudaIpcMemHandle& ipc_handle)
|
| 39 |
+
CStatus Synchronize()
|
| 40 |
+
int64_t bytes_allocated() const
|
| 41 |
+
const void* handle() const
|
| 42 |
+
int device_number() const
|
| 43 |
+
CResult[uintptr_t] GetDeviceAddress(uintptr_t addr)
|
| 44 |
+
|
| 45 |
+
cdef cppclass CCudaIpcMemHandle" arrow::cuda::CudaIpcMemHandle":
|
| 46 |
+
@staticmethod
|
| 47 |
+
CResult[shared_ptr[CCudaIpcMemHandle]] FromBuffer(
|
| 48 |
+
const void* opaque_handle)
|
| 49 |
+
CResult[shared_ptr[CBuffer]] Serialize(CMemoryPool* pool) const
|
| 50 |
+
|
| 51 |
+
cdef cppclass CCudaBuffer" arrow::cuda::CudaBuffer"(CBuffer):
|
| 52 |
+
CCudaBuffer(uint8_t* data, int64_t size,
|
| 53 |
+
const shared_ptr[CCudaContext]& context,
|
| 54 |
+
c_bool own_data=false, c_bool is_ipc=false)
|
| 55 |
+
CCudaBuffer(const shared_ptr[CCudaBuffer]& parent,
|
| 56 |
+
const int64_t offset, const int64_t size)
|
| 57 |
+
|
| 58 |
+
@staticmethod
|
| 59 |
+
CResult[shared_ptr[CCudaBuffer]] FromBuffer(shared_ptr[CBuffer] buf)
|
| 60 |
+
|
| 61 |
+
CStatus CopyToHost(const int64_t position, const int64_t nbytes,
|
| 62 |
+
void* out) const
|
| 63 |
+
CStatus CopyFromHost(const int64_t position, const void* data,
|
| 64 |
+
int64_t nbytes)
|
| 65 |
+
CStatus CopyFromDevice(const int64_t position, const void* data,
|
| 66 |
+
int64_t nbytes)
|
| 67 |
+
CStatus CopyFromAnotherDevice(const shared_ptr[CCudaContext]& src_ctx,
|
| 68 |
+
const int64_t position, const void* data,
|
| 69 |
+
int64_t nbytes)
|
| 70 |
+
CResult[shared_ptr[CCudaIpcMemHandle]] ExportForIpc()
|
| 71 |
+
shared_ptr[CCudaContext] context() const
|
| 72 |
+
|
| 73 |
+
cdef cppclass \
|
| 74 |
+
CCudaHostBuffer" arrow::cuda::CudaHostBuffer"(CMutableBuffer):
|
| 75 |
+
pass
|
| 76 |
+
|
| 77 |
+
cdef cppclass \
|
| 78 |
+
CCudaBufferReader" arrow::cuda::CudaBufferReader"(CBufferReader):
|
| 79 |
+
CCudaBufferReader(const shared_ptr[CBuffer]& buffer)
|
| 80 |
+
CResult[int64_t] Read(int64_t nbytes, void* buffer)
|
| 81 |
+
CResult[shared_ptr[CBuffer]] Read(int64_t nbytes)
|
| 82 |
+
|
| 83 |
+
cdef cppclass \
|
| 84 |
+
CCudaBufferWriter" arrow::cuda::CudaBufferWriter"(WritableFile):
|
| 85 |
+
CCudaBufferWriter(const shared_ptr[CCudaBuffer]& buffer)
|
| 86 |
+
CStatus Close()
|
| 87 |
+
CStatus Write(const void* data, int64_t nbytes)
|
| 88 |
+
CStatus WriteAt(int64_t position, const void* data, int64_t nbytes)
|
| 89 |
+
CStatus SetBufferSize(const int64_t buffer_size)
|
| 90 |
+
int64_t buffer_size()
|
| 91 |
+
int64_t num_bytes_buffered() const
|
| 92 |
+
|
| 93 |
+
CResult[shared_ptr[CCudaHostBuffer]] AllocateCudaHostBuffer(
|
| 94 |
+
int device_number, const int64_t size)
|
| 95 |
+
|
| 96 |
+
# Cuda prefix is added to avoid picking up arrow::cuda functions
|
| 97 |
+
# from arrow namespace.
|
| 98 |
+
CResult[shared_ptr[CCudaBuffer]] \
|
| 99 |
+
CudaSerializeRecordBatch" arrow::cuda::SerializeRecordBatch"\
|
| 100 |
+
(const CRecordBatch& batch,
|
| 101 |
+
CCudaContext* ctx)
|
| 102 |
+
CResult[shared_ptr[CRecordBatch]] \
|
| 103 |
+
CudaReadRecordBatch" arrow::cuda::ReadRecordBatch"\
|
| 104 |
+
(const shared_ptr[CSchema]& schema,
|
| 105 |
+
CDictionaryMemo* dictionary_memo,
|
| 106 |
+
const shared_ptr[CCudaBuffer]& buffer,
|
| 107 |
+
CMemoryPool* pool)
|
parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd
ADDED
|
@@ -0,0 +1,413 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
|
| 20 |
+
from libcpp.unordered_map cimport unordered_map
|
| 21 |
+
from libcpp cimport bool as c_bool
|
| 22 |
+
|
| 23 |
+
from pyarrow.includes.common cimport *
|
| 24 |
+
from pyarrow.includes.libarrow cimport *
|
| 25 |
+
from pyarrow.includes.libarrow_acero cimport *
|
| 26 |
+
from pyarrow.includes.libarrow_fs cimport *
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
cdef extern from "arrow/dataset/plan.h" namespace "arrow::dataset::internal" nogil:
|
| 30 |
+
|
| 31 |
+
cdef void Initialize()
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
ctypedef CStatus cb_writer_finish_internal(CFileWriter*)
|
| 35 |
+
ctypedef void cb_writer_finish(dict, CFileWriter*)
|
| 36 |
+
|
| 37 |
+
cdef extern from "arrow/dataset/api.h" namespace "arrow::dataset" nogil:
|
| 38 |
+
|
| 39 |
+
cdef enum ExistingDataBehavior" arrow::dataset::ExistingDataBehavior":
|
| 40 |
+
ExistingDataBehavior_DELETE_MATCHING" \
|
| 41 |
+
arrow::dataset::ExistingDataBehavior::kDeleteMatchingPartitions"
|
| 42 |
+
ExistingDataBehavior_OVERWRITE_OR_IGNORE" \
|
| 43 |
+
arrow::dataset::ExistingDataBehavior::kOverwriteOrIgnore"
|
| 44 |
+
ExistingDataBehavior_ERROR" \
|
| 45 |
+
arrow::dataset::ExistingDataBehavior::kError"
|
| 46 |
+
|
| 47 |
+
cdef cppclass CScanOptions "arrow::dataset::ScanOptions":
|
| 48 |
+
shared_ptr[CSchema] dataset_schema
|
| 49 |
+
shared_ptr[CSchema] projected_schema
|
| 50 |
+
c_bool use_threads
|
| 51 |
+
CExpression filter
|
| 52 |
+
|
| 53 |
+
cdef cppclass CScanNodeOptions "arrow::dataset::ScanNodeOptions"(CExecNodeOptions):
|
| 54 |
+
CScanNodeOptions(shared_ptr[CDataset] dataset, shared_ptr[CScanOptions] scan_options)
|
| 55 |
+
|
| 56 |
+
shared_ptr[CScanOptions] scan_options
|
| 57 |
+
|
| 58 |
+
cdef cppclass CFragmentScanOptions "arrow::dataset::FragmentScanOptions":
|
| 59 |
+
c_string type_name() const
|
| 60 |
+
|
| 61 |
+
ctypedef CIterator[shared_ptr[CScanTask]] CScanTaskIterator \
|
| 62 |
+
"arrow::dataset::ScanTaskIterator"
|
| 63 |
+
|
| 64 |
+
cdef cppclass CScanTask" arrow::dataset::ScanTask":
|
| 65 |
+
CResult[CRecordBatchIterator] Execute()
|
| 66 |
+
|
| 67 |
+
cdef cppclass CFragment "arrow::dataset::Fragment":
|
| 68 |
+
CResult[shared_ptr[CSchema]] ReadPhysicalSchema()
|
| 69 |
+
CResult[CScanTaskIterator] Scan(shared_ptr[CScanOptions] options)
|
| 70 |
+
c_bool splittable() const
|
| 71 |
+
c_string type_name() const
|
| 72 |
+
const CExpression& partition_expression() const
|
| 73 |
+
|
| 74 |
+
ctypedef vector[shared_ptr[CFragment]] CFragmentVector \
|
| 75 |
+
"arrow::dataset::FragmentVector"
|
| 76 |
+
|
| 77 |
+
ctypedef CIterator[shared_ptr[CFragment]] CFragmentIterator \
|
| 78 |
+
"arrow::dataset::FragmentIterator"
|
| 79 |
+
|
| 80 |
+
cdef cppclass CInMemoryFragment "arrow::dataset::InMemoryFragment"(
|
| 81 |
+
CFragment):
|
| 82 |
+
CInMemoryFragment(vector[shared_ptr[CRecordBatch]] record_batches,
|
| 83 |
+
CExpression partition_expression)
|
| 84 |
+
|
| 85 |
+
cdef cppclass CTaggedRecordBatch "arrow::dataset::TaggedRecordBatch":
|
| 86 |
+
shared_ptr[CRecordBatch] record_batch
|
| 87 |
+
shared_ptr[CFragment] fragment
|
| 88 |
+
|
| 89 |
+
ctypedef CIterator[CTaggedRecordBatch] CTaggedRecordBatchIterator \
|
| 90 |
+
"arrow::dataset::TaggedRecordBatchIterator"
|
| 91 |
+
|
| 92 |
+
cdef cppclass CScanner "arrow::dataset::Scanner":
|
| 93 |
+
CScanner(shared_ptr[CDataset], shared_ptr[CScanOptions])
|
| 94 |
+
CScanner(shared_ptr[CFragment], shared_ptr[CScanOptions])
|
| 95 |
+
CResult[CScanTaskIterator] Scan()
|
| 96 |
+
CResult[CTaggedRecordBatchIterator] ScanBatches()
|
| 97 |
+
CResult[shared_ptr[CTable]] ToTable()
|
| 98 |
+
CResult[shared_ptr[CTable]] TakeRows(const CArray& indices)
|
| 99 |
+
CResult[shared_ptr[CTable]] Head(int64_t num_rows)
|
| 100 |
+
CResult[int64_t] CountRows()
|
| 101 |
+
CResult[CFragmentIterator] GetFragments()
|
| 102 |
+
CResult[shared_ptr[CRecordBatchReader]] ToRecordBatchReader()
|
| 103 |
+
const shared_ptr[CScanOptions]& options()
|
| 104 |
+
|
| 105 |
+
cdef cppclass CScannerBuilder "arrow::dataset::ScannerBuilder":
|
| 106 |
+
CScannerBuilder(shared_ptr[CDataset],
|
| 107 |
+
shared_ptr[CScanOptions] scan_options)
|
| 108 |
+
CScannerBuilder(shared_ptr[CSchema], shared_ptr[CFragment],
|
| 109 |
+
shared_ptr[CScanOptions] scan_options)
|
| 110 |
+
|
| 111 |
+
@staticmethod
|
| 112 |
+
shared_ptr[CScannerBuilder] FromRecordBatchReader(
|
| 113 |
+
shared_ptr[CRecordBatchReader] reader)
|
| 114 |
+
CStatus ProjectColumns "Project"(const vector[c_string]& columns)
|
| 115 |
+
CStatus Project(vector[CExpression]& exprs, vector[c_string]& columns)
|
| 116 |
+
CStatus Filter(CExpression filter)
|
| 117 |
+
CStatus UseThreads(c_bool use_threads)
|
| 118 |
+
CStatus Pool(CMemoryPool* pool)
|
| 119 |
+
CStatus BatchSize(int64_t batch_size)
|
| 120 |
+
CStatus BatchReadahead(int32_t batch_readahead)
|
| 121 |
+
CStatus FragmentReadahead(int32_t fragment_readahead)
|
| 122 |
+
CStatus FragmentScanOptions(
|
| 123 |
+
shared_ptr[CFragmentScanOptions] fragment_scan_options)
|
| 124 |
+
CResult[shared_ptr[CScanOptions]] GetScanOptions()
|
| 125 |
+
CResult[shared_ptr[CScanner]] Finish()
|
| 126 |
+
shared_ptr[CSchema] schema() const
|
| 127 |
+
|
| 128 |
+
ctypedef vector[shared_ptr[CDataset]] CDatasetVector \
|
| 129 |
+
"arrow::dataset::DatasetVector"
|
| 130 |
+
|
| 131 |
+
cdef cppclass CDataset "arrow::dataset::Dataset":
|
| 132 |
+
const shared_ptr[CSchema] & schema()
|
| 133 |
+
CResult[CFragmentIterator] GetFragments()
|
| 134 |
+
CResult[CFragmentIterator] GetFragments(CExpression predicate)
|
| 135 |
+
const CExpression & partition_expression()
|
| 136 |
+
c_string type_name()
|
| 137 |
+
|
| 138 |
+
CResult[shared_ptr[CDataset]] ReplaceSchema(shared_ptr[CSchema])
|
| 139 |
+
|
| 140 |
+
CResult[shared_ptr[CScannerBuilder]] NewScan()
|
| 141 |
+
|
| 142 |
+
cdef cppclass CInMemoryDataset "arrow::dataset::InMemoryDataset"(
|
| 143 |
+
CDataset):
|
| 144 |
+
CInMemoryDataset(shared_ptr[CRecordBatchReader])
|
| 145 |
+
CInMemoryDataset(shared_ptr[CTable])
|
| 146 |
+
|
| 147 |
+
cdef cppclass CUnionDataset "arrow::dataset::UnionDataset"(
|
| 148 |
+
CDataset):
|
| 149 |
+
@staticmethod
|
| 150 |
+
CResult[shared_ptr[CUnionDataset]] Make(shared_ptr[CSchema] schema,
|
| 151 |
+
CDatasetVector children)
|
| 152 |
+
|
| 153 |
+
const CDatasetVector& children() const
|
| 154 |
+
|
| 155 |
+
cdef cppclass CInspectOptions "arrow::dataset::InspectOptions":
|
| 156 |
+
int fragments
|
| 157 |
+
|
| 158 |
+
cdef cppclass CFinishOptions "arrow::dataset::FinishOptions":
|
| 159 |
+
shared_ptr[CSchema] schema
|
| 160 |
+
CInspectOptions inspect_options
|
| 161 |
+
c_bool validate_fragments
|
| 162 |
+
|
| 163 |
+
cdef cppclass CDatasetFactory "arrow::dataset::DatasetFactory":
|
| 164 |
+
CResult[vector[shared_ptr[CSchema]]] InspectSchemas(CInspectOptions)
|
| 165 |
+
CResult[shared_ptr[CSchema]] Inspect(CInspectOptions)
|
| 166 |
+
CResult[shared_ptr[CDataset]] FinishWithSchema "Finish"(
|
| 167 |
+
const shared_ptr[CSchema]& schema)
|
| 168 |
+
CResult[shared_ptr[CDataset]] Finish()
|
| 169 |
+
const CExpression& root_partition()
|
| 170 |
+
CStatus SetRootPartition(CExpression partition)
|
| 171 |
+
|
| 172 |
+
cdef cppclass CUnionDatasetFactory "arrow::dataset::UnionDatasetFactory":
|
| 173 |
+
@staticmethod
|
| 174 |
+
CResult[shared_ptr[CDatasetFactory]] Make(
|
| 175 |
+
vector[shared_ptr[CDatasetFactory]] factories)
|
| 176 |
+
|
| 177 |
+
cdef cppclass CFileSource "arrow::dataset::FileSource":
|
| 178 |
+
const c_string& path() const
|
| 179 |
+
const shared_ptr[CFileSystem]& filesystem() const
|
| 180 |
+
const shared_ptr[CBuffer]& buffer() const
|
| 181 |
+
const int64_t size() const
|
| 182 |
+
# HACK: Cython can't handle all the overloads so don't declare them.
|
| 183 |
+
# This means invalid construction of CFileSource won't be caught in
|
| 184 |
+
# the C++ generation phase (though it will still be caught when
|
| 185 |
+
# the generated C++ is compiled).
|
| 186 |
+
CFileSource(...)
|
| 187 |
+
|
| 188 |
+
cdef cppclass CFileWriteOptions \
|
| 189 |
+
"arrow::dataset::FileWriteOptions":
|
| 190 |
+
const shared_ptr[CFileFormat]& format() const
|
| 191 |
+
c_string type_name() const
|
| 192 |
+
|
| 193 |
+
cdef cppclass CFileWriter \
|
| 194 |
+
"arrow::dataset::FileWriter":
|
| 195 |
+
const shared_ptr[CFileFormat]& format() const
|
| 196 |
+
const shared_ptr[CSchema]& schema() const
|
| 197 |
+
const shared_ptr[CFileWriteOptions]& options() const
|
| 198 |
+
const CFileLocator& destination() const
|
| 199 |
+
CResult[int64_t] GetBytesWritten()
|
| 200 |
+
|
| 201 |
+
cdef cppclass CFileFormat "arrow::dataset::FileFormat":
|
| 202 |
+
shared_ptr[CFragmentScanOptions] default_fragment_scan_options
|
| 203 |
+
c_string type_name() const
|
| 204 |
+
CResult[shared_ptr[CSchema]] Inspect(const CFileSource&) const
|
| 205 |
+
CResult[shared_ptr[CFileFragment]] MakeFragment(
|
| 206 |
+
CFileSource source,
|
| 207 |
+
CExpression partition_expression,
|
| 208 |
+
shared_ptr[CSchema] physical_schema)
|
| 209 |
+
shared_ptr[CFileWriteOptions] DefaultWriteOptions()
|
| 210 |
+
|
| 211 |
+
cdef cppclass CFileFragment "arrow::dataset::FileFragment"(
|
| 212 |
+
CFragment):
|
| 213 |
+
const CFileSource& source() const
|
| 214 |
+
const shared_ptr[CFileFormat]& format() const
|
| 215 |
+
|
| 216 |
+
cdef cppclass CFileSystemDatasetWriteOptions \
|
| 217 |
+
"arrow::dataset::FileSystemDatasetWriteOptions":
|
| 218 |
+
shared_ptr[CFileWriteOptions] file_write_options
|
| 219 |
+
shared_ptr[CFileSystem] filesystem
|
| 220 |
+
c_string base_dir
|
| 221 |
+
shared_ptr[CPartitioning] partitioning
|
| 222 |
+
int max_partitions
|
| 223 |
+
c_string basename_template
|
| 224 |
+
function[cb_writer_finish_internal] writer_pre_finish
|
| 225 |
+
function[cb_writer_finish_internal] writer_post_finish
|
| 226 |
+
ExistingDataBehavior existing_data_behavior
|
| 227 |
+
c_bool create_dir
|
| 228 |
+
uint32_t max_open_files
|
| 229 |
+
uint64_t max_rows_per_file
|
| 230 |
+
uint64_t min_rows_per_group
|
| 231 |
+
uint64_t max_rows_per_group
|
| 232 |
+
|
| 233 |
+
cdef cppclass CFileSystemDataset \
|
| 234 |
+
"arrow::dataset::FileSystemDataset"(CDataset):
|
| 235 |
+
@staticmethod
|
| 236 |
+
CResult[shared_ptr[CDataset]] Make(
|
| 237 |
+
shared_ptr[CSchema] schema,
|
| 238 |
+
CExpression source_partition,
|
| 239 |
+
shared_ptr[CFileFormat] format,
|
| 240 |
+
shared_ptr[CFileSystem] filesystem,
|
| 241 |
+
vector[shared_ptr[CFileFragment]] fragments)
|
| 242 |
+
|
| 243 |
+
@staticmethod
|
| 244 |
+
CStatus Write(
|
| 245 |
+
const CFileSystemDatasetWriteOptions& write_options,
|
| 246 |
+
shared_ptr[CScanner] scanner)
|
| 247 |
+
|
| 248 |
+
c_string type()
|
| 249 |
+
vector[c_string] files()
|
| 250 |
+
const shared_ptr[CFileFormat]& format() const
|
| 251 |
+
const shared_ptr[CFileSystem]& filesystem() const
|
| 252 |
+
const shared_ptr[CPartitioning]& partitioning() const
|
| 253 |
+
|
| 254 |
+
cdef cppclass CIpcFileWriteOptions \
|
| 255 |
+
"arrow::dataset::IpcFileWriteOptions"(CFileWriteOptions):
|
| 256 |
+
shared_ptr[CIpcWriteOptions] options
|
| 257 |
+
|
| 258 |
+
cdef cppclass CIpcFileFormat "arrow::dataset::IpcFileFormat"(
|
| 259 |
+
CFileFormat):
|
| 260 |
+
pass
|
| 261 |
+
|
| 262 |
+
cdef cppclass COrcFileFormat "arrow::dataset::OrcFileFormat"(
|
| 263 |
+
CFileFormat):
|
| 264 |
+
pass
|
| 265 |
+
|
| 266 |
+
cdef cppclass CCsvFileWriteOptions \
|
| 267 |
+
"arrow::dataset::CsvFileWriteOptions"(CFileWriteOptions):
|
| 268 |
+
shared_ptr[CCSVWriteOptions] write_options
|
| 269 |
+
CMemoryPool* pool
|
| 270 |
+
|
| 271 |
+
cdef cppclass CCsvFileFormat "arrow::dataset::CsvFileFormat"(
|
| 272 |
+
CFileFormat):
|
| 273 |
+
CCSVParseOptions parse_options
|
| 274 |
+
|
| 275 |
+
cdef cppclass CCsvFragmentScanOptions \
|
| 276 |
+
"arrow::dataset::CsvFragmentScanOptions"(CFragmentScanOptions):
|
| 277 |
+
CCSVConvertOptions convert_options
|
| 278 |
+
CCSVReadOptions read_options
|
| 279 |
+
function[StreamWrapFunc] stream_transform_func
|
| 280 |
+
|
| 281 |
+
cdef cppclass CJsonFileFormat "arrow::dataset::JsonFileFormat"(CFileFormat):
|
| 282 |
+
pass
|
| 283 |
+
|
| 284 |
+
cdef cppclass CJsonFragmentScanOptions "arrow::dataset::JsonFragmentScanOptions"(CFragmentScanOptions):
|
| 285 |
+
CJSONParseOptions parse_options
|
| 286 |
+
CJSONReadOptions read_options
|
| 287 |
+
|
| 288 |
+
cdef cppclass CPartitioning "arrow::dataset::Partitioning":
|
| 289 |
+
c_string type_name() const
|
| 290 |
+
CResult[CExpression] Parse(const c_string & path) const
|
| 291 |
+
const shared_ptr[CSchema] & schema()
|
| 292 |
+
c_bool Equals(const CPartitioning& other) const
|
| 293 |
+
|
| 294 |
+
cdef cppclass CSegmentEncoding" arrow::dataset::SegmentEncoding":
|
| 295 |
+
bint operator==(CSegmentEncoding)
|
| 296 |
+
|
| 297 |
+
CSegmentEncoding CSegmentEncoding_None\
|
| 298 |
+
" arrow::dataset::SegmentEncoding::None"
|
| 299 |
+
CSegmentEncoding CSegmentEncoding_Uri\
|
| 300 |
+
" arrow::dataset::SegmentEncoding::Uri"
|
| 301 |
+
|
| 302 |
+
cdef cppclass CKeyValuePartitioningOptions \
|
| 303 |
+
"arrow::dataset::KeyValuePartitioningOptions":
|
| 304 |
+
CSegmentEncoding segment_encoding
|
| 305 |
+
|
| 306 |
+
cdef cppclass CHivePartitioningOptions \
|
| 307 |
+
"arrow::dataset::HivePartitioningOptions":
|
| 308 |
+
CSegmentEncoding segment_encoding
|
| 309 |
+
c_string null_fallback
|
| 310 |
+
|
| 311 |
+
cdef cppclass CPartitioningFactoryOptions \
|
| 312 |
+
"arrow::dataset::PartitioningFactoryOptions":
|
| 313 |
+
c_bool infer_dictionary
|
| 314 |
+
shared_ptr[CSchema] schema
|
| 315 |
+
CSegmentEncoding segment_encoding
|
| 316 |
+
|
| 317 |
+
cdef cppclass CHivePartitioningFactoryOptions \
|
| 318 |
+
"arrow::dataset::HivePartitioningFactoryOptions":
|
| 319 |
+
c_bool infer_dictionary
|
| 320 |
+
c_string null_fallback
|
| 321 |
+
shared_ptr[CSchema] schema
|
| 322 |
+
CSegmentEncoding segment_encoding
|
| 323 |
+
|
| 324 |
+
cdef cppclass CPartitioningFactory "arrow::dataset::PartitioningFactory":
|
| 325 |
+
c_string type_name() const
|
| 326 |
+
|
| 327 |
+
cdef cppclass CKeyValuePartitioning \
|
| 328 |
+
"arrow::dataset::KeyValuePartitioning"(CPartitioning):
|
| 329 |
+
CKeyValuePartitioning(shared_ptr[CSchema] schema,
|
| 330 |
+
vector[shared_ptr[CArray]] dictionaries,
|
| 331 |
+
CKeyValuePartitioningOptions options)
|
| 332 |
+
|
| 333 |
+
vector[shared_ptr[CArray]] dictionaries() const
|
| 334 |
+
CSegmentEncoding segment_encoding()
|
| 335 |
+
|
| 336 |
+
cdef cppclass CDirectoryPartitioning \
|
| 337 |
+
"arrow::dataset::DirectoryPartitioning"(CPartitioning):
|
| 338 |
+
CDirectoryPartitioning(shared_ptr[CSchema] schema,
|
| 339 |
+
vector[shared_ptr[CArray]] dictionaries)
|
| 340 |
+
|
| 341 |
+
@staticmethod
|
| 342 |
+
shared_ptr[CPartitioningFactory] MakeFactory(
|
| 343 |
+
vector[c_string] field_names, CPartitioningFactoryOptions)
|
| 344 |
+
|
| 345 |
+
vector[shared_ptr[CArray]] dictionaries() const
|
| 346 |
+
|
| 347 |
+
cdef cppclass CHivePartitioning \
|
| 348 |
+
"arrow::dataset::HivePartitioning"(CPartitioning):
|
| 349 |
+
CHivePartitioning(shared_ptr[CSchema] schema,
|
| 350 |
+
vector[shared_ptr[CArray]] dictionaries,
|
| 351 |
+
CHivePartitioningOptions options)
|
| 352 |
+
|
| 353 |
+
@staticmethod
|
| 354 |
+
shared_ptr[CPartitioningFactory] MakeFactory(
|
| 355 |
+
CHivePartitioningFactoryOptions)
|
| 356 |
+
|
| 357 |
+
vector[shared_ptr[CArray]] dictionaries() const
|
| 358 |
+
c_string null_fallback() const
|
| 359 |
+
|
| 360 |
+
cdef cppclass CFilenamePartitioning \
|
| 361 |
+
"arrow::dataset::FilenamePartitioning"(CPartitioning):
|
| 362 |
+
CFilenamePartitioning(shared_ptr[CSchema] schema,
|
| 363 |
+
vector[shared_ptr[CArray]] dictionaries)
|
| 364 |
+
|
| 365 |
+
@staticmethod
|
| 366 |
+
shared_ptr[CPartitioningFactory] MakeFactory(
|
| 367 |
+
vector[c_string] field_names, CPartitioningFactoryOptions)
|
| 368 |
+
|
| 369 |
+
vector[shared_ptr[CArray]] dictionaries() const
|
| 370 |
+
|
| 371 |
+
cdef cppclass CPartitioningOrFactory \
|
| 372 |
+
"arrow::dataset::PartitioningOrFactory":
|
| 373 |
+
CPartitioningOrFactory(shared_ptr[CPartitioning])
|
| 374 |
+
CPartitioningOrFactory(shared_ptr[CPartitioningFactory])
|
| 375 |
+
CPartitioningOrFactory & operator = (shared_ptr[CPartitioning])
|
| 376 |
+
CPartitioningOrFactory & operator = (
|
| 377 |
+
shared_ptr[CPartitioningFactory])
|
| 378 |
+
shared_ptr[CPartitioning] partitioning() const
|
| 379 |
+
shared_ptr[CPartitioningFactory] factory() const
|
| 380 |
+
|
| 381 |
+
cdef cppclass CFileSystemFactoryOptions \
|
| 382 |
+
"arrow::dataset::FileSystemFactoryOptions":
|
| 383 |
+
CPartitioningOrFactory partitioning
|
| 384 |
+
c_string partition_base_dir
|
| 385 |
+
c_bool exclude_invalid_files
|
| 386 |
+
vector[c_string] selector_ignore_prefixes
|
| 387 |
+
|
| 388 |
+
cdef cppclass CFileSystemDatasetFactory \
|
| 389 |
+
"arrow::dataset::FileSystemDatasetFactory"(
|
| 390 |
+
CDatasetFactory):
|
| 391 |
+
@staticmethod
|
| 392 |
+
CResult[shared_ptr[CDatasetFactory]] MakeFromPaths "Make"(
|
| 393 |
+
shared_ptr[CFileSystem] filesystem,
|
| 394 |
+
vector[c_string] paths,
|
| 395 |
+
shared_ptr[CFileFormat] format,
|
| 396 |
+
CFileSystemFactoryOptions options
|
| 397 |
+
)
|
| 398 |
+
|
| 399 |
+
@staticmethod
|
| 400 |
+
CResult[shared_ptr[CDatasetFactory]] MakeFromSelector "Make"(
|
| 401 |
+
shared_ptr[CFileSystem] filesystem,
|
| 402 |
+
CFileSelector,
|
| 403 |
+
shared_ptr[CFileFormat] format,
|
| 404 |
+
CFileSystemFactoryOptions options
|
| 405 |
+
)
|
| 406 |
+
|
| 407 |
+
@staticmethod
|
| 408 |
+
CResult[shared_ptr[CDatasetFactory]] MakeFromFileInfos "Make"(
|
| 409 |
+
shared_ptr[CFileSystem] filesystem,
|
| 410 |
+
vector[CFileInfo] files,
|
| 411 |
+
shared_ptr[CFileFormat] format,
|
| 412 |
+
CFileSystemFactoryOptions options
|
| 413 |
+
)
|
parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
|
| 20 |
+
from pyarrow.includes.libarrow_dataset cimport *
|
| 21 |
+
from pyarrow.includes.libparquet_encryption cimport *
|
| 22 |
+
|
| 23 |
+
from pyarrow._parquet cimport *
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
cdef extern from "arrow/dataset/parquet_encryption_config.h" namespace "arrow::dataset" nogil:
|
| 27 |
+
cdef cppclass CParquetEncryptionConfig "arrow::dataset::ParquetEncryptionConfig":
|
| 28 |
+
shared_ptr[CCryptoFactory] crypto_factory
|
| 29 |
+
shared_ptr[CKmsConnectionConfig] kms_connection_config
|
| 30 |
+
shared_ptr[CEncryptionConfiguration] encryption_config
|
| 31 |
+
|
| 32 |
+
cdef cppclass CParquetDecryptionConfig "arrow::dataset::ParquetDecryptionConfig":
|
| 33 |
+
shared_ptr[CCryptoFactory] crypto_factory
|
| 34 |
+
shared_ptr[CKmsConnectionConfig] kms_connection_config
|
| 35 |
+
shared_ptr[CDecryptionConfiguration] decryption_config
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
cdef extern from "arrow/dataset/api.h" namespace "arrow::dataset" nogil:
|
| 39 |
+
|
| 40 |
+
cdef cppclass CParquetFileWriter \
|
| 41 |
+
"arrow::dataset::ParquetFileWriter"(CFileWriter):
|
| 42 |
+
const shared_ptr[FileWriter]& parquet_writer() const
|
| 43 |
+
|
| 44 |
+
cdef cppclass CParquetFileWriteOptions \
|
| 45 |
+
"arrow::dataset::ParquetFileWriteOptions"(CFileWriteOptions):
|
| 46 |
+
shared_ptr[WriterProperties] writer_properties
|
| 47 |
+
shared_ptr[ArrowWriterProperties] arrow_writer_properties
|
| 48 |
+
shared_ptr[CParquetEncryptionConfig] parquet_encryption_config
|
| 49 |
+
|
| 50 |
+
cdef cppclass CParquetFileFragment "arrow::dataset::ParquetFileFragment"(
|
| 51 |
+
CFileFragment):
|
| 52 |
+
const vector[int]& row_groups() const
|
| 53 |
+
shared_ptr[CFileMetaData] metadata() const
|
| 54 |
+
CResult[vector[shared_ptr[CFragment]]] SplitByRowGroup(
|
| 55 |
+
CExpression predicate)
|
| 56 |
+
CResult[shared_ptr[CFragment]] SubsetWithFilter "Subset"(
|
| 57 |
+
CExpression predicate)
|
| 58 |
+
CResult[shared_ptr[CFragment]] SubsetWithIds "Subset"(
|
| 59 |
+
vector[int] row_group_ids)
|
| 60 |
+
CStatus EnsureCompleteMetadata()
|
| 61 |
+
|
| 62 |
+
cdef cppclass CParquetFileFormatReaderOptions \
|
| 63 |
+
"arrow::dataset::ParquetFileFormat::ReaderOptions":
|
| 64 |
+
unordered_set[c_string] dict_columns
|
| 65 |
+
TimeUnit coerce_int96_timestamp_unit
|
| 66 |
+
|
| 67 |
+
cdef cppclass CParquetFileFormat "arrow::dataset::ParquetFileFormat"(
|
| 68 |
+
CFileFormat):
|
| 69 |
+
CParquetFileFormatReaderOptions reader_options
|
| 70 |
+
CResult[shared_ptr[CFileFragment]] MakeFragment(
|
| 71 |
+
CFileSource source,
|
| 72 |
+
CExpression partition_expression,
|
| 73 |
+
shared_ptr[CSchema] physical_schema,
|
| 74 |
+
vector[int] row_groups)
|
| 75 |
+
|
| 76 |
+
cdef cppclass CParquetFragmentScanOptions \
|
| 77 |
+
"arrow::dataset::ParquetFragmentScanOptions"(CFragmentScanOptions):
|
| 78 |
+
shared_ptr[CReaderProperties] reader_properties
|
| 79 |
+
shared_ptr[ArrowReaderProperties] arrow_reader_properties
|
| 80 |
+
shared_ptr[CParquetDecryptionConfig] parquet_decryption_config
|
| 81 |
+
|
| 82 |
+
cdef cppclass CParquetFactoryOptions \
|
| 83 |
+
"arrow::dataset::ParquetFactoryOptions":
|
| 84 |
+
CPartitioningOrFactory partitioning
|
| 85 |
+
c_string partition_base_dir
|
| 86 |
+
c_bool validate_column_chunk_paths
|
| 87 |
+
|
| 88 |
+
cdef cppclass CParquetDatasetFactory \
|
| 89 |
+
"arrow::dataset::ParquetDatasetFactory"(CDatasetFactory):
|
| 90 |
+
@staticmethod
|
| 91 |
+
CResult[shared_ptr[CDatasetFactory]] MakeFromMetaDataPath "Make"(
|
| 92 |
+
const c_string& metadata_path,
|
| 93 |
+
shared_ptr[CFileSystem] filesystem,
|
| 94 |
+
shared_ptr[CParquetFileFormat] format,
|
| 95 |
+
CParquetFactoryOptions options
|
| 96 |
+
)
|
| 97 |
+
|
| 98 |
+
@staticmethod
|
| 99 |
+
CResult[shared_ptr[CDatasetFactory]] MakeFromMetaDataSource "Make"(
|
| 100 |
+
const CFileSource& metadata_path,
|
| 101 |
+
const c_string& base_path,
|
| 102 |
+
shared_ptr[CFileSystem] filesystem,
|
| 103 |
+
shared_ptr[CParquetFileFormat] format,
|
| 104 |
+
CParquetFactoryOptions options
|
| 105 |
+
)
|
parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
|
| 20 |
+
from pyarrow.includes.libarrow cimport (CCompressionType, CStatus, CTable,
|
| 21 |
+
COutputStream, CResult, shared_ptr,
|
| 22 |
+
vector, CRandomAccessFile, CSchema,
|
| 23 |
+
c_string, CIpcReadOptions)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
cdef extern from "arrow/ipc/api.h" namespace "arrow::ipc" nogil:
|
| 27 |
+
int kFeatherV1Version" arrow::ipc::feather::kFeatherV1Version"
|
| 28 |
+
int kFeatherV2Version" arrow::ipc::feather::kFeatherV2Version"
|
| 29 |
+
|
| 30 |
+
cdef cppclass CFeatherProperties" arrow::ipc::feather::WriteProperties":
|
| 31 |
+
int version
|
| 32 |
+
int chunksize
|
| 33 |
+
CCompressionType compression
|
| 34 |
+
int compression_level
|
| 35 |
+
|
| 36 |
+
CStatus WriteFeather" arrow::ipc::feather::WriteTable" \
|
| 37 |
+
(const CTable& table, COutputStream* out,
|
| 38 |
+
CFeatherProperties properties)
|
| 39 |
+
|
| 40 |
+
cdef cppclass CFeatherReader" arrow::ipc::feather::Reader":
|
| 41 |
+
@staticmethod
|
| 42 |
+
CResult[shared_ptr[CFeatherReader]] Open(
|
| 43 |
+
const shared_ptr[CRandomAccessFile]& file,
|
| 44 |
+
const CIpcReadOptions& options)
|
| 45 |
+
int version()
|
| 46 |
+
shared_ptr[CSchema] schema()
|
| 47 |
+
|
| 48 |
+
CStatus Read(shared_ptr[CTable]* out)
|
| 49 |
+
CStatus Read(const vector[int] indices, shared_ptr[CTable]* out)
|
| 50 |
+
CStatus Read(const vector[c_string] names, shared_ptr[CTable]* out)
|
parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd
ADDED
|
@@ -0,0 +1,622 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
|
| 20 |
+
from pyarrow.includes.common cimport *
|
| 21 |
+
from pyarrow.includes.libarrow cimport *
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
cdef extern from "arrow/flight/api.h" namespace "arrow" nogil:
|
| 25 |
+
cdef char* CTracingServerMiddlewareName\
|
| 26 |
+
" arrow::flight::TracingServerMiddleware::kMiddlewareName"
|
| 27 |
+
|
| 28 |
+
cdef cppclass CActionType" arrow::flight::ActionType":
|
| 29 |
+
c_string type
|
| 30 |
+
c_string description
|
| 31 |
+
bint operator==(CActionType)
|
| 32 |
+
CResult[c_string] SerializeToString()
|
| 33 |
+
|
| 34 |
+
@staticmethod
|
| 35 |
+
CResult[CActionType] Deserialize(const c_string& serialized)
|
| 36 |
+
|
| 37 |
+
cdef cppclass CAction" arrow::flight::Action":
|
| 38 |
+
c_string type
|
| 39 |
+
shared_ptr[CBuffer] body
|
| 40 |
+
bint operator==(CAction)
|
| 41 |
+
CResult[c_string] SerializeToString()
|
| 42 |
+
c_string ToString()
|
| 43 |
+
|
| 44 |
+
@staticmethod
|
| 45 |
+
CResult[CAction] Deserialize(const c_string& serialized)
|
| 46 |
+
|
| 47 |
+
cdef cppclass CFlightResult" arrow::flight::Result":
|
| 48 |
+
CFlightResult()
|
| 49 |
+
CFlightResult(CFlightResult)
|
| 50 |
+
shared_ptr[CBuffer] body
|
| 51 |
+
bint operator==(CFlightResult)
|
| 52 |
+
CResult[c_string] SerializeToString()
|
| 53 |
+
c_string ToString()
|
| 54 |
+
|
| 55 |
+
@staticmethod
|
| 56 |
+
CResult[CFlightResult] Deserialize(const c_string& serialized)
|
| 57 |
+
|
| 58 |
+
cdef cppclass CBasicAuth" arrow::flight::BasicAuth":
|
| 59 |
+
CBasicAuth()
|
| 60 |
+
CBasicAuth(CBuffer)
|
| 61 |
+
CBasicAuth(CBasicAuth)
|
| 62 |
+
c_string username
|
| 63 |
+
c_string password
|
| 64 |
+
bint operator==(CBasicAuth)
|
| 65 |
+
CResult[c_string] SerializeToString()
|
| 66 |
+
c_string ToString()
|
| 67 |
+
|
| 68 |
+
@staticmethod
|
| 69 |
+
CResult[CBasicAuth] Deserialize(const c_string& serialized)
|
| 70 |
+
|
| 71 |
+
cdef cppclass CResultStream" arrow::flight::ResultStream":
|
| 72 |
+
CResult[unique_ptr[CFlightResult]] Next()
|
| 73 |
+
|
| 74 |
+
cdef cppclass CDescriptorType \
|
| 75 |
+
" arrow::flight::FlightDescriptor::DescriptorType":
|
| 76 |
+
bint operator==(CDescriptorType)
|
| 77 |
+
|
| 78 |
+
CDescriptorType CDescriptorTypeUnknown\
|
| 79 |
+
" arrow::flight::FlightDescriptor::UNKNOWN"
|
| 80 |
+
CDescriptorType CDescriptorTypePath\
|
| 81 |
+
" arrow::flight::FlightDescriptor::PATH"
|
| 82 |
+
CDescriptorType CDescriptorTypeCmd\
|
| 83 |
+
" arrow::flight::FlightDescriptor::CMD"
|
| 84 |
+
|
| 85 |
+
cdef cppclass CFlightDescriptor" arrow::flight::FlightDescriptor":
|
| 86 |
+
CDescriptorType type
|
| 87 |
+
c_string cmd
|
| 88 |
+
vector[c_string] path
|
| 89 |
+
bint operator==(CFlightDescriptor)
|
| 90 |
+
CResult[c_string] SerializeToString()
|
| 91 |
+
c_string ToString()
|
| 92 |
+
|
| 93 |
+
@staticmethod
|
| 94 |
+
CResult[CFlightDescriptor] Deserialize(const c_string& serialized)
|
| 95 |
+
|
| 96 |
+
cdef cppclass CTicket" arrow::flight::Ticket":
|
| 97 |
+
CTicket()
|
| 98 |
+
c_string ticket
|
| 99 |
+
bint operator==(CTicket)
|
| 100 |
+
CResult[c_string] SerializeToString()
|
| 101 |
+
c_string ToString()
|
| 102 |
+
|
| 103 |
+
@staticmethod
|
| 104 |
+
CResult[CTicket] Deserialize(const c_string& serialized)
|
| 105 |
+
|
| 106 |
+
cdef cppclass CCriteria" arrow::flight::Criteria":
|
| 107 |
+
CCriteria()
|
| 108 |
+
c_string expression
|
| 109 |
+
bint operator==(CCriteria)
|
| 110 |
+
CResult[c_string] SerializeToString()
|
| 111 |
+
|
| 112 |
+
@staticmethod
|
| 113 |
+
CResult[CCriteria] Deserialize(const c_string& serialized)
|
| 114 |
+
|
| 115 |
+
cdef cppclass CLocation" arrow::flight::Location":
|
| 116 |
+
CLocation()
|
| 117 |
+
c_string ToString()
|
| 118 |
+
c_bool Equals(const CLocation& other)
|
| 119 |
+
|
| 120 |
+
@staticmethod
|
| 121 |
+
CResult[CLocation] Parse(const c_string& uri_string)
|
| 122 |
+
|
| 123 |
+
@staticmethod
|
| 124 |
+
CResult[CLocation] ForGrpcTcp(const c_string& host, int port)
|
| 125 |
+
|
| 126 |
+
@staticmethod
|
| 127 |
+
CResult[CLocation] ForGrpcTls(const c_string& host, int port)
|
| 128 |
+
|
| 129 |
+
@staticmethod
|
| 130 |
+
CResult[CLocation] ForGrpcUnix(const c_string& path)
|
| 131 |
+
|
| 132 |
+
cdef cppclass CFlightEndpoint" arrow::flight::FlightEndpoint":
|
| 133 |
+
CFlightEndpoint()
|
| 134 |
+
|
| 135 |
+
CTicket ticket
|
| 136 |
+
vector[CLocation] locations
|
| 137 |
+
|
| 138 |
+
bint operator==(CFlightEndpoint)
|
| 139 |
+
CResult[c_string] SerializeToString()
|
| 140 |
+
c_string ToString()
|
| 141 |
+
|
| 142 |
+
@staticmethod
|
| 143 |
+
CResult[CFlightEndpoint] Deserialize(const c_string& serialized)
|
| 144 |
+
|
| 145 |
+
cdef cppclass CFlightInfo" arrow::flight::FlightInfo":
|
| 146 |
+
CFlightInfo(CFlightInfo info)
|
| 147 |
+
int64_t total_records()
|
| 148 |
+
int64_t total_bytes()
|
| 149 |
+
CResult[shared_ptr[CSchema]] GetSchema(CDictionaryMemo* memo)
|
| 150 |
+
CFlightDescriptor& descriptor()
|
| 151 |
+
const vector[CFlightEndpoint]& endpoints()
|
| 152 |
+
CResult[c_string] SerializeToString()
|
| 153 |
+
c_string ToString()
|
| 154 |
+
bint operator==(CFlightInfo)
|
| 155 |
+
|
| 156 |
+
@staticmethod
|
| 157 |
+
CResult[unique_ptr[CFlightInfo]] Deserialize(
|
| 158 |
+
const c_string& serialized)
|
| 159 |
+
|
| 160 |
+
cdef cppclass CSchemaResult" arrow::flight::SchemaResult":
|
| 161 |
+
CSchemaResult()
|
| 162 |
+
CSchemaResult(CSchemaResult result)
|
| 163 |
+
CResult[shared_ptr[CSchema]] GetSchema(CDictionaryMemo* memo)
|
| 164 |
+
bint operator==(CSchemaResult)
|
| 165 |
+
CResult[c_string] SerializeToString()
|
| 166 |
+
c_string ToString()
|
| 167 |
+
|
| 168 |
+
@staticmethod
|
| 169 |
+
CResult[CSchemaResult] Deserialize(const c_string& serialized)
|
| 170 |
+
|
| 171 |
+
cdef cppclass CFlightListing" arrow::flight::FlightListing":
|
| 172 |
+
CResult[unique_ptr[CFlightInfo]] Next()
|
| 173 |
+
|
| 174 |
+
cdef cppclass CSimpleFlightListing" arrow::flight::SimpleFlightListing":
|
| 175 |
+
# This doesn't work with Cython >= 3
|
| 176 |
+
# CSimpleFlightListing(vector[CFlightInfo]&& info)
|
| 177 |
+
CSimpleFlightListing(const vector[CFlightInfo]& info)
|
| 178 |
+
|
| 179 |
+
cdef cppclass CFlightPayload" arrow::flight::FlightPayload":
|
| 180 |
+
shared_ptr[CBuffer] descriptor
|
| 181 |
+
shared_ptr[CBuffer] app_metadata
|
| 182 |
+
CIpcPayload ipc_message
|
| 183 |
+
|
| 184 |
+
cdef cppclass CFlightDataStream" arrow::flight::FlightDataStream":
|
| 185 |
+
shared_ptr[CSchema] schema()
|
| 186 |
+
CResult[CFlightPayload] Next()
|
| 187 |
+
|
| 188 |
+
cdef cppclass CFlightStreamChunk" arrow::flight::FlightStreamChunk":
|
| 189 |
+
CFlightStreamChunk()
|
| 190 |
+
shared_ptr[CRecordBatch] data
|
| 191 |
+
shared_ptr[CBuffer] app_metadata
|
| 192 |
+
|
| 193 |
+
cdef cppclass CMetadataRecordBatchReader \
|
| 194 |
+
" arrow::flight::MetadataRecordBatchReader":
|
| 195 |
+
CResult[shared_ptr[CSchema]] GetSchema()
|
| 196 |
+
CResult[CFlightStreamChunk] Next()
|
| 197 |
+
CResult[shared_ptr[CTable]] ToTable()
|
| 198 |
+
|
| 199 |
+
CResult[shared_ptr[CRecordBatchReader]] MakeRecordBatchReader\
|
| 200 |
+
" arrow::flight::MakeRecordBatchReader"(
|
| 201 |
+
shared_ptr[CMetadataRecordBatchReader])
|
| 202 |
+
|
| 203 |
+
cdef cppclass CMetadataRecordBatchWriter \
|
| 204 |
+
" arrow::flight::MetadataRecordBatchWriter"(CRecordBatchWriter):
|
| 205 |
+
CStatus Begin(shared_ptr[CSchema] schema,
|
| 206 |
+
const CIpcWriteOptions& options)
|
| 207 |
+
CStatus WriteMetadata(shared_ptr[CBuffer] app_metadata)
|
| 208 |
+
CStatus WriteWithMetadata(const CRecordBatch& batch,
|
| 209 |
+
shared_ptr[CBuffer] app_metadata)
|
| 210 |
+
|
| 211 |
+
cdef cppclass CFlightStreamReader \
|
| 212 |
+
" arrow::flight::FlightStreamReader"(CMetadataRecordBatchReader):
|
| 213 |
+
void Cancel()
|
| 214 |
+
CResult[shared_ptr[CTable]] ToTableWithStopToken" ToTable"\
|
| 215 |
+
(const CStopToken& stop_token)
|
| 216 |
+
|
| 217 |
+
cdef cppclass CFlightMessageReader \
|
| 218 |
+
" arrow::flight::FlightMessageReader"(CMetadataRecordBatchReader):
|
| 219 |
+
CFlightDescriptor& descriptor()
|
| 220 |
+
|
| 221 |
+
cdef cppclass CFlightMessageWriter \
|
| 222 |
+
" arrow::flight::FlightMessageWriter"(CMetadataRecordBatchWriter):
|
| 223 |
+
pass
|
| 224 |
+
|
| 225 |
+
cdef cppclass CFlightStreamWriter \
|
| 226 |
+
" arrow::flight::FlightStreamWriter"(CMetadataRecordBatchWriter):
|
| 227 |
+
CStatus DoneWriting()
|
| 228 |
+
|
| 229 |
+
cdef cppclass CRecordBatchStream \
|
| 230 |
+
" arrow::flight::RecordBatchStream"(CFlightDataStream):
|
| 231 |
+
CRecordBatchStream(shared_ptr[CRecordBatchReader]& reader,
|
| 232 |
+
const CIpcWriteOptions& options)
|
| 233 |
+
|
| 234 |
+
cdef cppclass CFlightMetadataReader" arrow::flight::FlightMetadataReader":
|
| 235 |
+
CStatus ReadMetadata(shared_ptr[CBuffer]* out)
|
| 236 |
+
|
| 237 |
+
cdef cppclass CFlightMetadataWriter" arrow::flight::FlightMetadataWriter":
|
| 238 |
+
CStatus WriteMetadata(const CBuffer& message)
|
| 239 |
+
|
| 240 |
+
cdef cppclass CServerAuthReader" arrow::flight::ServerAuthReader":
|
| 241 |
+
CStatus Read(c_string* token)
|
| 242 |
+
|
| 243 |
+
cdef cppclass CServerAuthSender" arrow::flight::ServerAuthSender":
|
| 244 |
+
CStatus Write(c_string& token)
|
| 245 |
+
|
| 246 |
+
cdef cppclass CClientAuthReader" arrow::flight::ClientAuthReader":
|
| 247 |
+
CStatus Read(c_string* token)
|
| 248 |
+
|
| 249 |
+
cdef cppclass CClientAuthSender" arrow::flight::ClientAuthSender":
|
| 250 |
+
CStatus Write(c_string& token)
|
| 251 |
+
|
| 252 |
+
cdef cppclass CServerAuthHandler" arrow::flight::ServerAuthHandler":
|
| 253 |
+
pass
|
| 254 |
+
|
| 255 |
+
cdef cppclass CClientAuthHandler" arrow::flight::ClientAuthHandler":
|
| 256 |
+
pass
|
| 257 |
+
|
| 258 |
+
cdef cppclass CServerCallContext" arrow::flight::ServerCallContext":
|
| 259 |
+
c_string& peer_identity()
|
| 260 |
+
c_string& peer()
|
| 261 |
+
c_bool is_cancelled()
|
| 262 |
+
void AddHeader(const c_string& key, const c_string& value)
|
| 263 |
+
void AddTrailer(const c_string& key, const c_string& value)
|
| 264 |
+
CServerMiddleware* GetMiddleware(const c_string& key)
|
| 265 |
+
|
| 266 |
+
cdef cppclass CTimeoutDuration" arrow::flight::TimeoutDuration":
|
| 267 |
+
CTimeoutDuration(double)
|
| 268 |
+
|
| 269 |
+
cdef cppclass CFlightCallOptions" arrow::flight::FlightCallOptions":
|
| 270 |
+
CFlightCallOptions()
|
| 271 |
+
CTimeoutDuration timeout
|
| 272 |
+
CIpcWriteOptions write_options
|
| 273 |
+
CIpcReadOptions read_options
|
| 274 |
+
vector[pair[c_string, c_string]] headers
|
| 275 |
+
CStopToken stop_token
|
| 276 |
+
|
| 277 |
+
cdef cppclass CCertKeyPair" arrow::flight::CertKeyPair":
|
| 278 |
+
CCertKeyPair()
|
| 279 |
+
c_string pem_cert
|
| 280 |
+
c_string pem_key
|
| 281 |
+
|
| 282 |
+
cdef cppclass CFlightMethod" arrow::flight::FlightMethod":
|
| 283 |
+
bint operator==(CFlightMethod)
|
| 284 |
+
|
| 285 |
+
CFlightMethod CFlightMethodInvalid\
|
| 286 |
+
" arrow::flight::FlightMethod::Invalid"
|
| 287 |
+
CFlightMethod CFlightMethodHandshake\
|
| 288 |
+
" arrow::flight::FlightMethod::Handshake"
|
| 289 |
+
CFlightMethod CFlightMethodListFlights\
|
| 290 |
+
" arrow::flight::FlightMethod::ListFlights"
|
| 291 |
+
CFlightMethod CFlightMethodGetFlightInfo\
|
| 292 |
+
" arrow::flight::FlightMethod::GetFlightInfo"
|
| 293 |
+
CFlightMethod CFlightMethodGetSchema\
|
| 294 |
+
" arrow::flight::FlightMethod::GetSchema"
|
| 295 |
+
CFlightMethod CFlightMethodDoGet\
|
| 296 |
+
" arrow::flight::FlightMethod::DoGet"
|
| 297 |
+
CFlightMethod CFlightMethodDoPut\
|
| 298 |
+
" arrow::flight::FlightMethod::DoPut"
|
| 299 |
+
CFlightMethod CFlightMethodDoAction\
|
| 300 |
+
" arrow::flight::FlightMethod::DoAction"
|
| 301 |
+
CFlightMethod CFlightMethodListActions\
|
| 302 |
+
" arrow::flight::FlightMethod::ListActions"
|
| 303 |
+
CFlightMethod CFlightMethodDoExchange\
|
| 304 |
+
" arrow::flight::FlightMethod::DoExchange"
|
| 305 |
+
|
| 306 |
+
cdef cppclass CCallInfo" arrow::flight::CallInfo":
|
| 307 |
+
CFlightMethod method
|
| 308 |
+
|
| 309 |
+
# This is really std::unordered_multimap, but Cython has no
|
| 310 |
+
# bindings for it, so treat it as an opaque class and bind the
|
| 311 |
+
# methods we need
|
| 312 |
+
cdef cppclass CCallHeaders" arrow::flight::CallHeaders":
|
| 313 |
+
cppclass const_iterator:
|
| 314 |
+
pair[c_string, c_string] operator*()
|
| 315 |
+
# For Cython < 3
|
| 316 |
+
const_iterator operator++()
|
| 317 |
+
# For Cython >= 3
|
| 318 |
+
const_iterator operator++(int)
|
| 319 |
+
bint operator==(const_iterator)
|
| 320 |
+
bint operator!=(const_iterator)
|
| 321 |
+
const_iterator cbegin()
|
| 322 |
+
const_iterator cend()
|
| 323 |
+
|
| 324 |
+
cdef cppclass CAddCallHeaders" arrow::flight::AddCallHeaders":
|
| 325 |
+
void AddHeader(const c_string& key, const c_string& value)
|
| 326 |
+
|
| 327 |
+
cdef cppclass CServerMiddleware" arrow::flight::ServerMiddleware":
|
| 328 |
+
c_string name()
|
| 329 |
+
|
| 330 |
+
cdef cppclass CServerMiddlewareFactory\
|
| 331 |
+
" arrow::flight::ServerMiddlewareFactory":
|
| 332 |
+
pass
|
| 333 |
+
|
| 334 |
+
cdef cppclass CClientMiddleware" arrow::flight::ClientMiddleware":
|
| 335 |
+
pass
|
| 336 |
+
|
| 337 |
+
cdef cppclass CClientMiddlewareFactory\
|
| 338 |
+
" arrow::flight::ClientMiddlewareFactory":
|
| 339 |
+
pass
|
| 340 |
+
|
| 341 |
+
cpdef cppclass CTracingServerMiddlewareTraceKey\
|
| 342 |
+
" arrow::flight::TracingServerMiddleware::TraceKey":
|
| 343 |
+
CTracingServerMiddlewareTraceKey()
|
| 344 |
+
c_string key
|
| 345 |
+
c_string value
|
| 346 |
+
|
| 347 |
+
cdef cppclass CTracingServerMiddleware\
|
| 348 |
+
" arrow::flight::TracingServerMiddleware"(CServerMiddleware):
|
| 349 |
+
vector[CTracingServerMiddlewareTraceKey] GetTraceContext()
|
| 350 |
+
|
| 351 |
+
cdef shared_ptr[CServerMiddlewareFactory] \
|
| 352 |
+
MakeTracingServerMiddlewareFactory\
|
| 353 |
+
" arrow::flight::MakeTracingServerMiddlewareFactory"()
|
| 354 |
+
|
| 355 |
+
cdef cppclass CFlightServerOptions" arrow::flight::FlightServerOptions":
|
| 356 |
+
CFlightServerOptions(const CLocation& location)
|
| 357 |
+
CLocation location
|
| 358 |
+
unique_ptr[CServerAuthHandler] auth_handler
|
| 359 |
+
vector[CCertKeyPair] tls_certificates
|
| 360 |
+
c_bool verify_client
|
| 361 |
+
c_string root_certificates
|
| 362 |
+
vector[pair[c_string, shared_ptr[CServerMiddlewareFactory]]] middleware
|
| 363 |
+
|
| 364 |
+
cdef cppclass CFlightClientOptions" arrow::flight::FlightClientOptions":
|
| 365 |
+
c_string tls_root_certs
|
| 366 |
+
c_string cert_chain
|
| 367 |
+
c_string private_key
|
| 368 |
+
c_string override_hostname
|
| 369 |
+
vector[shared_ptr[CClientMiddlewareFactory]] middleware
|
| 370 |
+
int64_t write_size_limit_bytes
|
| 371 |
+
vector[pair[c_string, CIntStringVariant]] generic_options
|
| 372 |
+
c_bool disable_server_verification
|
| 373 |
+
|
| 374 |
+
@staticmethod
|
| 375 |
+
CFlightClientOptions Defaults()
|
| 376 |
+
|
| 377 |
+
cdef cppclass CDoPutResult" arrow::flight::FlightClient::DoPutResult":
|
| 378 |
+
unique_ptr[CFlightStreamWriter] writer
|
| 379 |
+
unique_ptr[CFlightMetadataReader] reader
|
| 380 |
+
|
| 381 |
+
cdef cppclass CDoExchangeResult" arrow::flight::FlightClient::DoExchangeResult":
|
| 382 |
+
unique_ptr[CFlightStreamWriter] writer
|
| 383 |
+
unique_ptr[CFlightStreamReader] reader
|
| 384 |
+
|
| 385 |
+
cdef cppclass CFlightClient" arrow::flight::FlightClient":
|
| 386 |
+
@staticmethod
|
| 387 |
+
CResult[unique_ptr[CFlightClient]] Connect(const CLocation& location,
|
| 388 |
+
const CFlightClientOptions& options)
|
| 389 |
+
|
| 390 |
+
c_bool supports_async()
|
| 391 |
+
CStatus CheckAsyncSupport()
|
| 392 |
+
|
| 393 |
+
CStatus Authenticate(CFlightCallOptions& options,
|
| 394 |
+
unique_ptr[CClientAuthHandler] auth_handler)
|
| 395 |
+
|
| 396 |
+
CResult[pair[c_string, c_string]] AuthenticateBasicToken(
|
| 397 |
+
CFlightCallOptions& options,
|
| 398 |
+
const c_string& username,
|
| 399 |
+
const c_string& password)
|
| 400 |
+
|
| 401 |
+
CResult[unique_ptr[CResultStream]] DoAction(CFlightCallOptions& options, CAction& action)
|
| 402 |
+
CResult[vector[CActionType]] ListActions(CFlightCallOptions& options)
|
| 403 |
+
|
| 404 |
+
CResult[unique_ptr[CFlightListing]] ListFlights(CFlightCallOptions& options, CCriteria criteria)
|
| 405 |
+
CResult[unique_ptr[CFlightInfo]] GetFlightInfo(CFlightCallOptions& options,
|
| 406 |
+
CFlightDescriptor& descriptor)
|
| 407 |
+
CFuture[CFlightInfo] GetFlightInfoAsync(CFlightCallOptions& options,
|
| 408 |
+
CFlightDescriptor& descriptor)
|
| 409 |
+
CResult[unique_ptr[CSchemaResult]] GetSchema(CFlightCallOptions& options,
|
| 410 |
+
CFlightDescriptor& descriptor)
|
| 411 |
+
CResult[unique_ptr[CFlightStreamReader]] DoGet(CFlightCallOptions& options, CTicket& ticket)
|
| 412 |
+
CResult[CDoPutResult] DoPut(CFlightCallOptions& options,
|
| 413 |
+
CFlightDescriptor& descriptor,
|
| 414 |
+
shared_ptr[CSchema]& schema)
|
| 415 |
+
CResult[CDoExchangeResult] DoExchange(CFlightCallOptions& options,
|
| 416 |
+
CFlightDescriptor& descriptor)
|
| 417 |
+
CStatus Close()
|
| 418 |
+
|
| 419 |
+
cdef cppclass CFlightStatusCode" arrow::flight::FlightStatusCode":
|
| 420 |
+
bint operator==(CFlightStatusCode)
|
| 421 |
+
|
| 422 |
+
CFlightStatusCode CFlightStatusInternal \
|
| 423 |
+
" arrow::flight::FlightStatusCode::Internal"
|
| 424 |
+
CFlightStatusCode CFlightStatusTimedOut \
|
| 425 |
+
" arrow::flight::FlightStatusCode::TimedOut"
|
| 426 |
+
CFlightStatusCode CFlightStatusCancelled \
|
| 427 |
+
" arrow::flight::FlightStatusCode::Cancelled"
|
| 428 |
+
CFlightStatusCode CFlightStatusUnauthenticated \
|
| 429 |
+
" arrow::flight::FlightStatusCode::Unauthenticated"
|
| 430 |
+
CFlightStatusCode CFlightStatusUnauthorized \
|
| 431 |
+
" arrow::flight::FlightStatusCode::Unauthorized"
|
| 432 |
+
CFlightStatusCode CFlightStatusUnavailable \
|
| 433 |
+
" arrow::flight::FlightStatusCode::Unavailable"
|
| 434 |
+
CFlightStatusCode CFlightStatusFailed \
|
| 435 |
+
" arrow::flight::FlightStatusCode::Failed"
|
| 436 |
+
|
| 437 |
+
cdef cppclass FlightStatusDetail" arrow::flight::FlightStatusDetail":
|
| 438 |
+
CFlightStatusCode code()
|
| 439 |
+
c_string extra_info()
|
| 440 |
+
|
| 441 |
+
@staticmethod
|
| 442 |
+
shared_ptr[FlightStatusDetail] UnwrapStatus(const CStatus& status)
|
| 443 |
+
|
| 444 |
+
cdef cppclass FlightWriteSizeStatusDetail\
|
| 445 |
+
" arrow::flight::FlightWriteSizeStatusDetail":
|
| 446 |
+
int64_t limit()
|
| 447 |
+
int64_t actual()
|
| 448 |
+
|
| 449 |
+
@staticmethod
|
| 450 |
+
shared_ptr[FlightWriteSizeStatusDetail] UnwrapStatus(
|
| 451 |
+
const CStatus& status)
|
| 452 |
+
|
| 453 |
+
cdef CStatus MakeFlightError" arrow::flight::MakeFlightError" \
|
| 454 |
+
(CFlightStatusCode code, const c_string& message)
|
| 455 |
+
|
| 456 |
+
cdef CStatus MakeFlightError" arrow::flight::MakeFlightError" \
|
| 457 |
+
(CFlightStatusCode code,
|
| 458 |
+
const c_string& message,
|
| 459 |
+
const c_string& extra_info)
|
| 460 |
+
|
| 461 |
+
# Callbacks for implementing Flight servers
|
| 462 |
+
# Use typedef to emulate syntax for std::function<void(..)>
|
| 463 |
+
ctypedef CStatus cb_list_flights(object, const CServerCallContext&,
|
| 464 |
+
const CCriteria*,
|
| 465 |
+
unique_ptr[CFlightListing]*)
|
| 466 |
+
ctypedef CStatus cb_get_flight_info(object, const CServerCallContext&,
|
| 467 |
+
const CFlightDescriptor&,
|
| 468 |
+
unique_ptr[CFlightInfo]*)
|
| 469 |
+
ctypedef CStatus cb_get_schema(object, const CServerCallContext&,
|
| 470 |
+
const CFlightDescriptor&,
|
| 471 |
+
unique_ptr[CSchemaResult]*)
|
| 472 |
+
ctypedef CStatus cb_do_put(object, const CServerCallContext&,
|
| 473 |
+
unique_ptr[CFlightMessageReader],
|
| 474 |
+
unique_ptr[CFlightMetadataWriter])
|
| 475 |
+
ctypedef CStatus cb_do_get(object, const CServerCallContext&,
|
| 476 |
+
const CTicket&,
|
| 477 |
+
unique_ptr[CFlightDataStream]*)
|
| 478 |
+
ctypedef CStatus cb_do_exchange(object, const CServerCallContext&,
|
| 479 |
+
unique_ptr[CFlightMessageReader],
|
| 480 |
+
unique_ptr[CFlightMessageWriter])
|
| 481 |
+
ctypedef CStatus cb_do_action(object, const CServerCallContext&,
|
| 482 |
+
const CAction&,
|
| 483 |
+
unique_ptr[CResultStream]*)
|
| 484 |
+
ctypedef CStatus cb_list_actions(object, const CServerCallContext&,
|
| 485 |
+
vector[CActionType]*)
|
| 486 |
+
ctypedef CStatus cb_result_next(object, unique_ptr[CFlightResult]*)
|
| 487 |
+
ctypedef CStatus cb_data_stream_next(object, CFlightPayload*)
|
| 488 |
+
ctypedef CStatus cb_server_authenticate(object, CServerAuthSender*,
|
| 489 |
+
CServerAuthReader*)
|
| 490 |
+
ctypedef CStatus cb_is_valid(object, const c_string&, c_string*)
|
| 491 |
+
ctypedef CStatus cb_client_authenticate(object, CClientAuthSender*,
|
| 492 |
+
CClientAuthReader*)
|
| 493 |
+
ctypedef CStatus cb_get_token(object, c_string*)
|
| 494 |
+
|
| 495 |
+
ctypedef CStatus cb_middleware_sending_headers(object, CAddCallHeaders*)
|
| 496 |
+
ctypedef CStatus cb_middleware_call_completed(object, const CStatus&)
|
| 497 |
+
ctypedef CStatus cb_client_middleware_received_headers(
|
| 498 |
+
object, const CCallHeaders&)
|
| 499 |
+
ctypedef CStatus cb_server_middleware_start_call(
|
| 500 |
+
object,
|
| 501 |
+
const CCallInfo&,
|
| 502 |
+
const CCallHeaders&,
|
| 503 |
+
shared_ptr[CServerMiddleware]*)
|
| 504 |
+
ctypedef CStatus cb_client_middleware_start_call(
|
| 505 |
+
object,
|
| 506 |
+
const CCallInfo&,
|
| 507 |
+
unique_ptr[CClientMiddleware]*)
|
| 508 |
+
|
| 509 |
+
cdef extern from "arrow/python/flight.h" namespace "arrow::py::flight" nogil:
|
| 510 |
+
cdef char* CPyServerMiddlewareName\
|
| 511 |
+
" arrow::py::flight::kPyServerMiddlewareName"
|
| 512 |
+
|
| 513 |
+
cdef cppclass PyFlightServerVtable:
|
| 514 |
+
PyFlightServerVtable()
|
| 515 |
+
function[cb_list_flights] list_flights
|
| 516 |
+
function[cb_get_flight_info] get_flight_info
|
| 517 |
+
function[cb_get_schema] get_schema
|
| 518 |
+
function[cb_do_put] do_put
|
| 519 |
+
function[cb_do_get] do_get
|
| 520 |
+
function[cb_do_exchange] do_exchange
|
| 521 |
+
function[cb_do_action] do_action
|
| 522 |
+
function[cb_list_actions] list_actions
|
| 523 |
+
|
| 524 |
+
cdef cppclass PyServerAuthHandlerVtable:
|
| 525 |
+
PyServerAuthHandlerVtable()
|
| 526 |
+
function[cb_server_authenticate] authenticate
|
| 527 |
+
function[cb_is_valid] is_valid
|
| 528 |
+
|
| 529 |
+
cdef cppclass PyClientAuthHandlerVtable:
|
| 530 |
+
PyClientAuthHandlerVtable()
|
| 531 |
+
function[cb_client_authenticate] authenticate
|
| 532 |
+
function[cb_get_token] get_token
|
| 533 |
+
|
| 534 |
+
cdef cppclass PyFlightServer:
|
| 535 |
+
PyFlightServer(object server, PyFlightServerVtable vtable)
|
| 536 |
+
|
| 537 |
+
CStatus Init(CFlightServerOptions& options)
|
| 538 |
+
int port()
|
| 539 |
+
CStatus ServeWithSignals() except *
|
| 540 |
+
CStatus Shutdown()
|
| 541 |
+
CStatus Wait()
|
| 542 |
+
|
| 543 |
+
cdef cppclass PyServerAuthHandler\
|
| 544 |
+
" arrow::py::flight::PyServerAuthHandler"(CServerAuthHandler):
|
| 545 |
+
PyServerAuthHandler(object handler, PyServerAuthHandlerVtable vtable)
|
| 546 |
+
|
| 547 |
+
cdef cppclass PyClientAuthHandler\
|
| 548 |
+
" arrow::py::flight::PyClientAuthHandler"(CClientAuthHandler):
|
| 549 |
+
PyClientAuthHandler(object handler, PyClientAuthHandlerVtable vtable)
|
| 550 |
+
|
| 551 |
+
cdef cppclass CPyFlightResultStream\
|
| 552 |
+
" arrow::py::flight::PyFlightResultStream"(CResultStream):
|
| 553 |
+
CPyFlightResultStream(object generator,
|
| 554 |
+
function[cb_result_next] callback)
|
| 555 |
+
|
| 556 |
+
cdef cppclass CPyFlightDataStream\
|
| 557 |
+
" arrow::py::flight::PyFlightDataStream"(CFlightDataStream):
|
| 558 |
+
CPyFlightDataStream(object data_source,
|
| 559 |
+
unique_ptr[CFlightDataStream] stream)
|
| 560 |
+
|
| 561 |
+
cdef cppclass CPyGeneratorFlightDataStream\
|
| 562 |
+
" arrow::py::flight::PyGeneratorFlightDataStream"\
|
| 563 |
+
(CFlightDataStream):
|
| 564 |
+
CPyGeneratorFlightDataStream(object generator,
|
| 565 |
+
shared_ptr[CSchema] schema,
|
| 566 |
+
function[cb_data_stream_next] callback,
|
| 567 |
+
const CIpcWriteOptions& options)
|
| 568 |
+
|
| 569 |
+
cdef cppclass PyServerMiddlewareVtable\
|
| 570 |
+
" arrow::py::flight::PyServerMiddleware::Vtable":
|
| 571 |
+
PyServerMiddlewareVtable()
|
| 572 |
+
function[cb_middleware_sending_headers] sending_headers
|
| 573 |
+
function[cb_middleware_call_completed] call_completed
|
| 574 |
+
|
| 575 |
+
cdef cppclass PyClientMiddlewareVtable\
|
| 576 |
+
" arrow::py::flight::PyClientMiddleware::Vtable":
|
| 577 |
+
PyClientMiddlewareVtable()
|
| 578 |
+
function[cb_middleware_sending_headers] sending_headers
|
| 579 |
+
function[cb_client_middleware_received_headers] received_headers
|
| 580 |
+
function[cb_middleware_call_completed] call_completed
|
| 581 |
+
|
| 582 |
+
cdef cppclass CPyServerMiddleware\
|
| 583 |
+
" arrow::py::flight::PyServerMiddleware"(CServerMiddleware):
|
| 584 |
+
CPyServerMiddleware(object middleware, PyServerMiddlewareVtable vtable)
|
| 585 |
+
void* py_object()
|
| 586 |
+
|
| 587 |
+
cdef cppclass CPyServerMiddlewareFactory\
|
| 588 |
+
" arrow::py::flight::PyServerMiddlewareFactory"\
|
| 589 |
+
(CServerMiddlewareFactory):
|
| 590 |
+
CPyServerMiddlewareFactory(
|
| 591 |
+
object factory,
|
| 592 |
+
function[cb_server_middleware_start_call] start_call)
|
| 593 |
+
|
| 594 |
+
cdef cppclass CPyClientMiddleware\
|
| 595 |
+
" arrow::py::flight::PyClientMiddleware"(CClientMiddleware):
|
| 596 |
+
CPyClientMiddleware(object middleware, PyClientMiddlewareVtable vtable)
|
| 597 |
+
|
| 598 |
+
cdef cppclass CPyClientMiddlewareFactory\
|
| 599 |
+
" arrow::py::flight::PyClientMiddlewareFactory"\
|
| 600 |
+
(CClientMiddlewareFactory):
|
| 601 |
+
CPyClientMiddlewareFactory(
|
| 602 |
+
object factory,
|
| 603 |
+
function[cb_client_middleware_start_call] start_call)
|
| 604 |
+
|
| 605 |
+
cdef CStatus CreateFlightInfo" arrow::py::flight::CreateFlightInfo"(
|
| 606 |
+
shared_ptr[CSchema] schema,
|
| 607 |
+
CFlightDescriptor& descriptor,
|
| 608 |
+
vector[CFlightEndpoint] endpoints,
|
| 609 |
+
int64_t total_records,
|
| 610 |
+
int64_t total_bytes,
|
| 611 |
+
unique_ptr[CFlightInfo]* out)
|
| 612 |
+
|
| 613 |
+
cdef CStatus CreateSchemaResult" arrow::py::flight::CreateSchemaResult"(
|
| 614 |
+
shared_ptr[CSchema] schema,
|
| 615 |
+
unique_ptr[CSchemaResult]* out)
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
cdef extern from "<variant>" namespace "std" nogil:
|
| 619 |
+
cdef cppclass CIntStringVariant" std::variant<int, std::string>":
|
| 620 |
+
CIntStringVariant()
|
| 621 |
+
CIntStringVariant(int)
|
| 622 |
+
CIntStringVariant(c_string)
|
parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd
ADDED
|
@@ -0,0 +1,357 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
|
| 20 |
+
from pyarrow.includes.common cimport *
|
| 21 |
+
from pyarrow.includes.libarrow cimport *
|
| 22 |
+
from pyarrow.includes.libarrow_python cimport CTimePoint
|
| 23 |
+
|
| 24 |
+
cdef extern from "arrow/filesystem/api.h" namespace "arrow::fs" nogil:
|
| 25 |
+
|
| 26 |
+
ctypedef enum CFileType "arrow::fs::FileType":
|
| 27 |
+
CFileType_NotFound "arrow::fs::FileType::NotFound"
|
| 28 |
+
CFileType_Unknown "arrow::fs::FileType::Unknown"
|
| 29 |
+
CFileType_File "arrow::fs::FileType::File"
|
| 30 |
+
CFileType_Directory "arrow::fs::FileType::Directory"
|
| 31 |
+
|
| 32 |
+
cdef cppclass CFileInfo "arrow::fs::FileInfo":
|
| 33 |
+
CFileInfo()
|
| 34 |
+
CFileInfo(CFileInfo)
|
| 35 |
+
CFileInfo& operator=(CFileInfo)
|
| 36 |
+
CFileInfo(const CFileInfo&)
|
| 37 |
+
CFileInfo& operator=(const CFileInfo&)
|
| 38 |
+
|
| 39 |
+
CFileType type()
|
| 40 |
+
void set_type(CFileType type)
|
| 41 |
+
c_string path()
|
| 42 |
+
void set_path(const c_string& path)
|
| 43 |
+
c_string base_name()
|
| 44 |
+
int64_t size()
|
| 45 |
+
void set_size(int64_t size)
|
| 46 |
+
c_string extension()
|
| 47 |
+
CTimePoint mtime()
|
| 48 |
+
void set_mtime(CTimePoint mtime)
|
| 49 |
+
|
| 50 |
+
cdef cppclass CFileSelector "arrow::fs::FileSelector":
|
| 51 |
+
CFileSelector()
|
| 52 |
+
c_string base_dir
|
| 53 |
+
c_bool allow_not_found
|
| 54 |
+
c_bool recursive
|
| 55 |
+
|
| 56 |
+
cdef cppclass CFileLocator "arrow::fs::FileLocator":
|
| 57 |
+
shared_ptr[CFileSystem] filesystem
|
| 58 |
+
c_string path
|
| 59 |
+
|
| 60 |
+
cdef cppclass CFileSystem "arrow::fs::FileSystem":
|
| 61 |
+
shared_ptr[CFileSystem] shared_from_this()
|
| 62 |
+
c_string type_name() const
|
| 63 |
+
CResult[c_string] NormalizePath(c_string path)
|
| 64 |
+
CResult[c_string] MakeUri(c_string path)
|
| 65 |
+
CResult[CFileInfo] GetFileInfo(const c_string& path)
|
| 66 |
+
CResult[vector[CFileInfo]] GetFileInfo(
|
| 67 |
+
const vector[c_string]& paths)
|
| 68 |
+
CResult[vector[CFileInfo]] GetFileInfo(const CFileSelector& select)
|
| 69 |
+
CStatus CreateDir(const c_string& path, c_bool recursive)
|
| 70 |
+
CStatus DeleteDir(const c_string& path)
|
| 71 |
+
CStatus DeleteDirContents(const c_string& path, c_bool missing_dir_ok)
|
| 72 |
+
CStatus DeleteRootDirContents()
|
| 73 |
+
CStatus DeleteFile(const c_string& path)
|
| 74 |
+
CStatus DeleteFiles(const vector[c_string]& paths)
|
| 75 |
+
CStatus Move(const c_string& src, const c_string& dest)
|
| 76 |
+
CStatus CopyFile(const c_string& src, const c_string& dest)
|
| 77 |
+
CResult[shared_ptr[CInputStream]] OpenInputStream(
|
| 78 |
+
const c_string& path)
|
| 79 |
+
CResult[shared_ptr[CRandomAccessFile]] OpenInputFile(
|
| 80 |
+
const c_string& path)
|
| 81 |
+
CResult[shared_ptr[COutputStream]] OpenOutputStream(
|
| 82 |
+
const c_string& path, const shared_ptr[const CKeyValueMetadata]&)
|
| 83 |
+
CResult[shared_ptr[COutputStream]] OpenAppendStream(
|
| 84 |
+
const c_string& path, const shared_ptr[const CKeyValueMetadata]&)
|
| 85 |
+
c_bool Equals(const CFileSystem& other)
|
| 86 |
+
c_bool Equals(shared_ptr[CFileSystem] other)
|
| 87 |
+
|
| 88 |
+
CResult[shared_ptr[CFileSystem]] CFileSystemFromUri \
|
| 89 |
+
"arrow::fs::FileSystemFromUri"(const c_string& uri)
|
| 90 |
+
CResult[shared_ptr[CFileSystem]] CFileSystemFromUri \
|
| 91 |
+
"arrow::fs::FileSystemFromUri"(const c_string& uri, c_string* out_path)
|
| 92 |
+
CResult[shared_ptr[CFileSystem]] CFileSystemFromUriOrPath \
|
| 93 |
+
"arrow::fs::FileSystemFromUriOrPath"(const c_string& uri,
|
| 94 |
+
c_string* out_path)
|
| 95 |
+
|
| 96 |
+
cdef cppclass CFileSystemGlobalOptions \
|
| 97 |
+
"arrow::fs::FileSystemGlobalOptions":
|
| 98 |
+
c_string tls_ca_file_path
|
| 99 |
+
c_string tls_ca_dir_path
|
| 100 |
+
|
| 101 |
+
CStatus CFileSystemsInitialize "arrow::fs::Initialize" \
|
| 102 |
+
(const CFileSystemGlobalOptions& options)
|
| 103 |
+
|
| 104 |
+
cdef cppclass CSubTreeFileSystem \
|
| 105 |
+
"arrow::fs::SubTreeFileSystem"(CFileSystem):
|
| 106 |
+
CSubTreeFileSystem(const c_string& base_path,
|
| 107 |
+
shared_ptr[CFileSystem] base_fs)
|
| 108 |
+
c_string base_path()
|
| 109 |
+
shared_ptr[CFileSystem] base_fs()
|
| 110 |
+
|
| 111 |
+
ctypedef enum CS3LogLevel "arrow::fs::S3LogLevel":
|
| 112 |
+
CS3LogLevel_Off "arrow::fs::S3LogLevel::Off"
|
| 113 |
+
CS3LogLevel_Fatal "arrow::fs::S3LogLevel::Fatal"
|
| 114 |
+
CS3LogLevel_Error "arrow::fs::S3LogLevel::Error"
|
| 115 |
+
CS3LogLevel_Warn "arrow::fs::S3LogLevel::Warn"
|
| 116 |
+
CS3LogLevel_Info "arrow::fs::S3LogLevel::Info"
|
| 117 |
+
CS3LogLevel_Debug "arrow::fs::S3LogLevel::Debug"
|
| 118 |
+
CS3LogLevel_Trace "arrow::fs::S3LogLevel::Trace"
|
| 119 |
+
|
| 120 |
+
cdef struct CS3GlobalOptions "arrow::fs::S3GlobalOptions":
|
| 121 |
+
CS3LogLevel log_level
|
| 122 |
+
int num_event_loop_threads
|
| 123 |
+
|
| 124 |
+
cdef cppclass CS3ProxyOptions "arrow::fs::S3ProxyOptions":
|
| 125 |
+
c_string scheme
|
| 126 |
+
c_string host
|
| 127 |
+
int port
|
| 128 |
+
c_string username
|
| 129 |
+
c_string password
|
| 130 |
+
c_bool Equals(const CS3ProxyOptions& other)
|
| 131 |
+
|
| 132 |
+
@staticmethod
|
| 133 |
+
CResult[CS3ProxyOptions] FromUriString "FromUri"(
|
| 134 |
+
const c_string& uri_string)
|
| 135 |
+
|
| 136 |
+
ctypedef enum CS3CredentialsKind "arrow::fs::S3CredentialsKind":
|
| 137 |
+
CS3CredentialsKind_Anonymous "arrow::fs::S3CredentialsKind::Anonymous"
|
| 138 |
+
CS3CredentialsKind_Default "arrow::fs::S3CredentialsKind::Default"
|
| 139 |
+
CS3CredentialsKind_Explicit "arrow::fs::S3CredentialsKind::Explicit"
|
| 140 |
+
CS3CredentialsKind_Role "arrow::fs::S3CredentialsKind::Role"
|
| 141 |
+
CS3CredentialsKind_WebIdentity \
|
| 142 |
+
"arrow::fs::S3CredentialsKind::WebIdentity"
|
| 143 |
+
|
| 144 |
+
cdef cppclass CS3RetryStrategy "arrow::fs::S3RetryStrategy":
|
| 145 |
+
@staticmethod
|
| 146 |
+
shared_ptr[CS3RetryStrategy] GetAwsDefaultRetryStrategy(int64_t max_attempts)
|
| 147 |
+
|
| 148 |
+
@staticmethod
|
| 149 |
+
shared_ptr[CS3RetryStrategy] GetAwsStandardRetryStrategy(int64_t max_attempts)
|
| 150 |
+
|
| 151 |
+
cdef cppclass CS3Options "arrow::fs::S3Options":
|
| 152 |
+
c_string region
|
| 153 |
+
double connect_timeout
|
| 154 |
+
double request_timeout
|
| 155 |
+
c_string endpoint_override
|
| 156 |
+
c_string scheme
|
| 157 |
+
c_bool background_writes
|
| 158 |
+
c_bool allow_bucket_creation
|
| 159 |
+
c_bool allow_bucket_deletion
|
| 160 |
+
c_bool check_directory_existence_before_creation
|
| 161 |
+
c_bool force_virtual_addressing
|
| 162 |
+
shared_ptr[const CKeyValueMetadata] default_metadata
|
| 163 |
+
c_string role_arn
|
| 164 |
+
c_string session_name
|
| 165 |
+
c_string external_id
|
| 166 |
+
int load_frequency
|
| 167 |
+
CS3ProxyOptions proxy_options
|
| 168 |
+
CS3CredentialsKind credentials_kind
|
| 169 |
+
shared_ptr[CS3RetryStrategy] retry_strategy
|
| 170 |
+
void ConfigureDefaultCredentials()
|
| 171 |
+
void ConfigureAccessKey(const c_string& access_key,
|
| 172 |
+
const c_string& secret_key,
|
| 173 |
+
const c_string& session_token)
|
| 174 |
+
c_string GetAccessKey()
|
| 175 |
+
c_string GetSecretKey()
|
| 176 |
+
c_string GetSessionToken()
|
| 177 |
+
c_bool Equals(const CS3Options& other)
|
| 178 |
+
|
| 179 |
+
@staticmethod
|
| 180 |
+
CS3Options Defaults()
|
| 181 |
+
|
| 182 |
+
@staticmethod
|
| 183 |
+
CS3Options Anonymous()
|
| 184 |
+
|
| 185 |
+
@staticmethod
|
| 186 |
+
CS3Options FromAccessKey(const c_string& access_key,
|
| 187 |
+
const c_string& secret_key,
|
| 188 |
+
const c_string& session_token)
|
| 189 |
+
|
| 190 |
+
@staticmethod
|
| 191 |
+
CS3Options FromAssumeRole(const c_string& role_arn,
|
| 192 |
+
const c_string& session_name,
|
| 193 |
+
const c_string& external_id,
|
| 194 |
+
const int load_frequency)
|
| 195 |
+
|
| 196 |
+
cdef cppclass CS3FileSystem "arrow::fs::S3FileSystem"(CFileSystem):
|
| 197 |
+
@staticmethod
|
| 198 |
+
CResult[shared_ptr[CS3FileSystem]] Make(const CS3Options& options)
|
| 199 |
+
CS3Options options()
|
| 200 |
+
c_string region()
|
| 201 |
+
|
| 202 |
+
cdef CStatus CInitializeS3 "arrow::fs::InitializeS3"(
|
| 203 |
+
const CS3GlobalOptions& options)
|
| 204 |
+
cdef CStatus CEnsureS3Initialized "arrow::fs::EnsureS3Initialized"()
|
| 205 |
+
cdef CStatus CFinalizeS3 "arrow::fs::FinalizeS3"()
|
| 206 |
+
cdef CStatus CEnsureS3Finalized "arrow::fs::EnsureS3Finalized"()
|
| 207 |
+
|
| 208 |
+
cdef CResult[c_string] ResolveS3BucketRegion(const c_string& bucket)
|
| 209 |
+
|
| 210 |
+
cdef cppclass CGcsCredentials "arrow::fs::GcsCredentials":
|
| 211 |
+
c_bool anonymous()
|
| 212 |
+
CTimePoint expiration()
|
| 213 |
+
c_string access_token()
|
| 214 |
+
c_string target_service_account()
|
| 215 |
+
|
| 216 |
+
cdef cppclass CGcsOptions "arrow::fs::GcsOptions":
|
| 217 |
+
CGcsCredentials credentials
|
| 218 |
+
c_string endpoint_override
|
| 219 |
+
c_string scheme
|
| 220 |
+
c_string default_bucket_location
|
| 221 |
+
optional[c_string] project_id
|
| 222 |
+
optional[double] retry_limit_seconds
|
| 223 |
+
shared_ptr[const CKeyValueMetadata] default_metadata
|
| 224 |
+
c_bool Equals(const CS3Options& other)
|
| 225 |
+
|
| 226 |
+
@staticmethod
|
| 227 |
+
CGcsOptions Defaults()
|
| 228 |
+
|
| 229 |
+
@staticmethod
|
| 230 |
+
CGcsOptions Anonymous()
|
| 231 |
+
|
| 232 |
+
@staticmethod
|
| 233 |
+
CGcsOptions FromAccessToken(const c_string& access_token,
|
| 234 |
+
CTimePoint expiration)
|
| 235 |
+
|
| 236 |
+
@staticmethod
|
| 237 |
+
CGcsOptions FromImpersonatedServiceAccount(const CGcsCredentials& base_credentials,
|
| 238 |
+
c_string& target_service_account)
|
| 239 |
+
|
| 240 |
+
cdef cppclass CGcsFileSystem "arrow::fs::GcsFileSystem":
|
| 241 |
+
@staticmethod
|
| 242 |
+
CResult[shared_ptr[CGcsFileSystem]] Make(const CGcsOptions& options)
|
| 243 |
+
CGcsOptions options()
|
| 244 |
+
|
| 245 |
+
cdef cppclass CAzureOptions "arrow::fs::AzureOptions":
|
| 246 |
+
c_string account_name
|
| 247 |
+
c_string blob_storage_authority
|
| 248 |
+
c_string dfs_storage_authority
|
| 249 |
+
c_string blob_storage_scheme
|
| 250 |
+
c_string dfs_storage_scheme
|
| 251 |
+
|
| 252 |
+
c_bool Equals(const CAzureOptions& other)
|
| 253 |
+
CStatus ConfigureDefaultCredential()
|
| 254 |
+
CStatus ConfigureAccountKeyCredential(c_string account_key)
|
| 255 |
+
|
| 256 |
+
cdef cppclass CAzureFileSystem "arrow::fs::AzureFileSystem":
|
| 257 |
+
@staticmethod
|
| 258 |
+
CResult[shared_ptr[CAzureFileSystem]] Make(const CAzureOptions& options)
|
| 259 |
+
CAzureOptions options()
|
| 260 |
+
|
| 261 |
+
cdef cppclass CHdfsOptions "arrow::fs::HdfsOptions":
|
| 262 |
+
HdfsConnectionConfig connection_config
|
| 263 |
+
int32_t buffer_size
|
| 264 |
+
int16_t replication
|
| 265 |
+
int64_t default_block_size
|
| 266 |
+
|
| 267 |
+
@staticmethod
|
| 268 |
+
CResult[CHdfsOptions] FromUriString "FromUri"(
|
| 269 |
+
const c_string& uri_string)
|
| 270 |
+
void ConfigureEndPoint(c_string host, int port)
|
| 271 |
+
void ConfigureDriver(c_bool use_hdfs3)
|
| 272 |
+
void ConfigureReplication(int16_t replication)
|
| 273 |
+
void ConfigureUser(c_string user_name)
|
| 274 |
+
void ConfigureBufferSize(int32_t buffer_size)
|
| 275 |
+
void ConfigureBlockSize(int64_t default_block_size)
|
| 276 |
+
void ConfigureKerberosTicketCachePath(c_string path)
|
| 277 |
+
void ConfigureExtraConf(c_string key, c_string value)
|
| 278 |
+
|
| 279 |
+
cdef cppclass CHadoopFileSystem "arrow::fs::HadoopFileSystem"(CFileSystem):
|
| 280 |
+
@staticmethod
|
| 281 |
+
CResult[shared_ptr[CHadoopFileSystem]] Make(
|
| 282 |
+
const CHdfsOptions& options)
|
| 283 |
+
CHdfsOptions options()
|
| 284 |
+
|
| 285 |
+
cdef cppclass CMockFileSystem "arrow::fs::internal::MockFileSystem"(
|
| 286 |
+
CFileSystem):
|
| 287 |
+
CMockFileSystem(CTimePoint current_time)
|
| 288 |
+
|
| 289 |
+
CStatus CCopyFiles "arrow::fs::CopyFiles"(
|
| 290 |
+
const vector[CFileLocator]& sources,
|
| 291 |
+
const vector[CFileLocator]& destinations,
|
| 292 |
+
const CIOContext& io_context,
|
| 293 |
+
int64_t chunk_size, c_bool use_threads)
|
| 294 |
+
CStatus CCopyFilesWithSelector "arrow::fs::CopyFiles"(
|
| 295 |
+
const shared_ptr[CFileSystem]& source_fs,
|
| 296 |
+
const CFileSelector& source_sel,
|
| 297 |
+
const shared_ptr[CFileSystem]& destination_fs,
|
| 298 |
+
const c_string& destination_base_dir,
|
| 299 |
+
const CIOContext& io_context,
|
| 300 |
+
int64_t chunk_size, c_bool use_threads)
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
# Callbacks for implementing Python filesystems
|
| 304 |
+
# Use typedef to emulate syntax for std::function<void(..)>
|
| 305 |
+
ctypedef void CallbackGetTypeName(object, c_string*)
|
| 306 |
+
ctypedef c_bool CallbackEquals(object, const CFileSystem&)
|
| 307 |
+
|
| 308 |
+
ctypedef void CallbackGetFileInfo(object, const c_string&, CFileInfo*)
|
| 309 |
+
ctypedef void CallbackGetFileInfoVector(object, const vector[c_string]&,
|
| 310 |
+
vector[CFileInfo]*)
|
| 311 |
+
ctypedef void CallbackGetFileInfoSelector(object, const CFileSelector&,
|
| 312 |
+
vector[CFileInfo]*)
|
| 313 |
+
ctypedef void CallbackCreateDir(object, const c_string&, c_bool)
|
| 314 |
+
ctypedef void CallbackDeleteDir(object, const c_string&)
|
| 315 |
+
ctypedef void CallbackDeleteDirContents(object, const c_string&, c_bool)
|
| 316 |
+
ctypedef void CallbackDeleteRootDirContents(object)
|
| 317 |
+
ctypedef void CallbackDeleteFile(object, const c_string&)
|
| 318 |
+
ctypedef void CallbackMove(object, const c_string&, const c_string&)
|
| 319 |
+
ctypedef void CallbackCopyFile(object, const c_string&, const c_string&)
|
| 320 |
+
|
| 321 |
+
ctypedef void CallbackOpenInputStream(object, const c_string&,
|
| 322 |
+
shared_ptr[CInputStream]*)
|
| 323 |
+
ctypedef void CallbackOpenInputFile(object, const c_string&,
|
| 324 |
+
shared_ptr[CRandomAccessFile]*)
|
| 325 |
+
ctypedef void CallbackOpenOutputStream(
|
| 326 |
+
object, const c_string&, const shared_ptr[const CKeyValueMetadata]&,
|
| 327 |
+
shared_ptr[COutputStream]*)
|
| 328 |
+
ctypedef void CallbackNormalizePath(object, const c_string&, c_string*)
|
| 329 |
+
|
| 330 |
+
cdef extern from "arrow/python/filesystem.h" namespace "arrow::py::fs" nogil:
|
| 331 |
+
|
| 332 |
+
cdef cppclass CPyFileSystemVtable "arrow::py::fs::PyFileSystemVtable":
|
| 333 |
+
PyFileSystemVtable()
|
| 334 |
+
function[CallbackGetTypeName] get_type_name
|
| 335 |
+
function[CallbackEquals] equals
|
| 336 |
+
function[CallbackGetFileInfo] get_file_info
|
| 337 |
+
function[CallbackGetFileInfoVector] get_file_info_vector
|
| 338 |
+
function[CallbackGetFileInfoSelector] get_file_info_selector
|
| 339 |
+
function[CallbackCreateDir] create_dir
|
| 340 |
+
function[CallbackDeleteDir] delete_dir
|
| 341 |
+
function[CallbackDeleteDirContents] delete_dir_contents
|
| 342 |
+
function[CallbackDeleteRootDirContents] delete_root_dir_contents
|
| 343 |
+
function[CallbackDeleteFile] delete_file
|
| 344 |
+
function[CallbackMove] move
|
| 345 |
+
function[CallbackCopyFile] copy_file
|
| 346 |
+
function[CallbackOpenInputStream] open_input_stream
|
| 347 |
+
function[CallbackOpenInputFile] open_input_file
|
| 348 |
+
function[CallbackOpenOutputStream] open_output_stream
|
| 349 |
+
function[CallbackOpenOutputStream] open_append_stream
|
| 350 |
+
function[CallbackNormalizePath] normalize_path
|
| 351 |
+
|
| 352 |
+
cdef cppclass CPyFileSystem "arrow::py::fs::PyFileSystem":
|
| 353 |
+
@staticmethod
|
| 354 |
+
shared_ptr[CPyFileSystem] Make(object handler,
|
| 355 |
+
CPyFileSystemVtable vtable)
|
| 356 |
+
|
| 357 |
+
PyObject* handler()
|
parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd
ADDED
|
@@ -0,0 +1,322 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
|
| 20 |
+
from pyarrow.includes.common cimport *
|
| 21 |
+
from pyarrow.includes.libarrow cimport *
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
ctypedef CInvalidRowResult PyInvalidRowCallback(object,
|
| 25 |
+
const CCSVInvalidRow&)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
cdef extern from "arrow/python/csv.h" namespace "arrow::py::csv":
|
| 29 |
+
|
| 30 |
+
function[CInvalidRowHandler] MakeInvalidRowHandler(
|
| 31 |
+
function[PyInvalidRowCallback], object handler)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
cdef extern from "arrow/python/api.h" namespace "arrow::py":
|
| 35 |
+
# Requires GIL
|
| 36 |
+
CResult[shared_ptr[CDataType]] InferArrowType(
|
| 37 |
+
object obj, object mask, c_bool pandas_null_sentinels)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
cdef extern from "arrow/python/api.h" namespace "arrow::py::internal":
|
| 41 |
+
object NewMonthDayNanoTupleType()
|
| 42 |
+
CResult[PyObject*] MonthDayNanoIntervalArrayToPyList(
|
| 43 |
+
const CMonthDayNanoIntervalArray& array)
|
| 44 |
+
CResult[PyObject*] MonthDayNanoIntervalScalarToPyObject(
|
| 45 |
+
const CMonthDayNanoIntervalScalar& scalar)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
cdef extern from "arrow/python/arrow_to_pandas.h" namespace "arrow::py::MapConversionType":
|
| 49 |
+
cdef enum MapConversionType "arrow::py::MapConversionType":
|
| 50 |
+
DEFAULT,
|
| 51 |
+
LOSSY,
|
| 52 |
+
STRICT_
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
cdef extern from "arrow/python/api.h" namespace "arrow::py" nogil:
|
| 56 |
+
shared_ptr[CDataType] GetPrimitiveType(Type type)
|
| 57 |
+
|
| 58 |
+
object PyHalf_FromHalf(npy_half value)
|
| 59 |
+
|
| 60 |
+
cdef cppclass PyConversionOptions:
|
| 61 |
+
PyConversionOptions()
|
| 62 |
+
|
| 63 |
+
shared_ptr[CDataType] type
|
| 64 |
+
int64_t size
|
| 65 |
+
CMemoryPool* pool
|
| 66 |
+
c_bool from_pandas
|
| 67 |
+
c_bool ignore_timezone
|
| 68 |
+
c_bool strict
|
| 69 |
+
|
| 70 |
+
# TODO Some functions below are not actually "nogil"
|
| 71 |
+
|
| 72 |
+
CResult[shared_ptr[CChunkedArray]] ConvertPySequence(
|
| 73 |
+
object obj, object mask, const PyConversionOptions& options,
|
| 74 |
+
CMemoryPool* pool)
|
| 75 |
+
|
| 76 |
+
CResult[shared_ptr[CDataType]] NumPyDtypeToArrow(object dtype)
|
| 77 |
+
|
| 78 |
+
CStatus NdarrayToArrow(CMemoryPool* pool, object ao, object mo,
|
| 79 |
+
c_bool from_pandas,
|
| 80 |
+
const shared_ptr[CDataType]& type,
|
| 81 |
+
shared_ptr[CChunkedArray]* out)
|
| 82 |
+
|
| 83 |
+
CStatus NdarrayToArrow(CMemoryPool* pool, object ao, object mo,
|
| 84 |
+
c_bool from_pandas,
|
| 85 |
+
const shared_ptr[CDataType]& type,
|
| 86 |
+
const CCastOptions& cast_options,
|
| 87 |
+
shared_ptr[CChunkedArray]* out)
|
| 88 |
+
|
| 89 |
+
CStatus NdarrayToTensor(CMemoryPool* pool, object ao,
|
| 90 |
+
const vector[c_string]& dim_names,
|
| 91 |
+
shared_ptr[CTensor]* out)
|
| 92 |
+
|
| 93 |
+
CStatus TensorToNdarray(const shared_ptr[CTensor]& tensor, object base,
|
| 94 |
+
PyObject** out)
|
| 95 |
+
|
| 96 |
+
CStatus SparseCOOTensorToNdarray(
|
| 97 |
+
const shared_ptr[CSparseCOOTensor]& sparse_tensor, object base,
|
| 98 |
+
PyObject** out_data, PyObject** out_coords)
|
| 99 |
+
|
| 100 |
+
CStatus SparseCSRMatrixToNdarray(
|
| 101 |
+
const shared_ptr[CSparseCSRMatrix]& sparse_tensor, object base,
|
| 102 |
+
PyObject** out_data, PyObject** out_indptr, PyObject** out_indices)
|
| 103 |
+
|
| 104 |
+
CStatus SparseCSCMatrixToNdarray(
|
| 105 |
+
const shared_ptr[CSparseCSCMatrix]& sparse_tensor, object base,
|
| 106 |
+
PyObject** out_data, PyObject** out_indptr, PyObject** out_indices)
|
| 107 |
+
|
| 108 |
+
CStatus SparseCSFTensorToNdarray(
|
| 109 |
+
const shared_ptr[CSparseCSFTensor]& sparse_tensor, object base,
|
| 110 |
+
PyObject** out_data, PyObject** out_indptr, PyObject** out_indices)
|
| 111 |
+
|
| 112 |
+
CStatus NdarraysToSparseCOOTensor(CMemoryPool* pool, object data_ao,
|
| 113 |
+
object coords_ao,
|
| 114 |
+
const vector[int64_t]& shape,
|
| 115 |
+
const vector[c_string]& dim_names,
|
| 116 |
+
shared_ptr[CSparseCOOTensor]* out)
|
| 117 |
+
|
| 118 |
+
CStatus NdarraysToSparseCSRMatrix(CMemoryPool* pool, object data_ao,
|
| 119 |
+
object indptr_ao, object indices_ao,
|
| 120 |
+
const vector[int64_t]& shape,
|
| 121 |
+
const vector[c_string]& dim_names,
|
| 122 |
+
shared_ptr[CSparseCSRMatrix]* out)
|
| 123 |
+
|
| 124 |
+
CStatus NdarraysToSparseCSCMatrix(CMemoryPool* pool, object data_ao,
|
| 125 |
+
object indptr_ao, object indices_ao,
|
| 126 |
+
const vector[int64_t]& shape,
|
| 127 |
+
const vector[c_string]& dim_names,
|
| 128 |
+
shared_ptr[CSparseCSCMatrix]* out)
|
| 129 |
+
|
| 130 |
+
CStatus NdarraysToSparseCSFTensor(CMemoryPool* pool, object data_ao,
|
| 131 |
+
object indptr_ao, object indices_ao,
|
| 132 |
+
const vector[int64_t]& shape,
|
| 133 |
+
const vector[int64_t]& axis_order,
|
| 134 |
+
const vector[c_string]& dim_names,
|
| 135 |
+
shared_ptr[CSparseCSFTensor]* out)
|
| 136 |
+
|
| 137 |
+
CStatus TensorToSparseCOOTensor(shared_ptr[CTensor],
|
| 138 |
+
shared_ptr[CSparseCOOTensor]* out)
|
| 139 |
+
|
| 140 |
+
CStatus TensorToSparseCSRMatrix(shared_ptr[CTensor],
|
| 141 |
+
shared_ptr[CSparseCSRMatrix]* out)
|
| 142 |
+
|
| 143 |
+
CStatus TensorToSparseCSCMatrix(shared_ptr[CTensor],
|
| 144 |
+
shared_ptr[CSparseCSCMatrix]* out)
|
| 145 |
+
|
| 146 |
+
CStatus TensorToSparseCSFTensor(shared_ptr[CTensor],
|
| 147 |
+
shared_ptr[CSparseCSFTensor]* out)
|
| 148 |
+
|
| 149 |
+
CStatus ConvertArrayToPandas(const PandasOptions& options,
|
| 150 |
+
shared_ptr[CArray] arr,
|
| 151 |
+
object py_ref, PyObject** out)
|
| 152 |
+
|
| 153 |
+
CStatus ConvertChunkedArrayToPandas(const PandasOptions& options,
|
| 154 |
+
shared_ptr[CChunkedArray] arr,
|
| 155 |
+
object py_ref, PyObject** out)
|
| 156 |
+
|
| 157 |
+
CStatus ConvertTableToPandas(const PandasOptions& options,
|
| 158 |
+
shared_ptr[CTable] table,
|
| 159 |
+
PyObject** out)
|
| 160 |
+
|
| 161 |
+
void c_set_default_memory_pool \
|
| 162 |
+
" arrow::py::set_default_memory_pool"(CMemoryPool* pool)\
|
| 163 |
+
|
| 164 |
+
CMemoryPool* c_get_memory_pool \
|
| 165 |
+
" arrow::py::get_memory_pool"()
|
| 166 |
+
|
| 167 |
+
cdef cppclass PyBuffer(CBuffer):
|
| 168 |
+
@staticmethod
|
| 169 |
+
CResult[shared_ptr[CBuffer]] FromPyObject(object obj)
|
| 170 |
+
|
| 171 |
+
cdef cppclass PyForeignBuffer(CBuffer):
|
| 172 |
+
@staticmethod
|
| 173 |
+
CStatus Make(const uint8_t* data, int64_t size, object base,
|
| 174 |
+
shared_ptr[CBuffer]* out)
|
| 175 |
+
|
| 176 |
+
cdef cppclass PyReadableFile(CRandomAccessFile):
|
| 177 |
+
PyReadableFile(object fo)
|
| 178 |
+
|
| 179 |
+
cdef cppclass PyOutputStream(COutputStream):
|
| 180 |
+
PyOutputStream(object fo)
|
| 181 |
+
|
| 182 |
+
cdef cppclass PandasOptions:
|
| 183 |
+
CMemoryPool* pool
|
| 184 |
+
c_bool strings_to_categorical
|
| 185 |
+
c_bool zero_copy_only
|
| 186 |
+
c_bool integer_object_nulls
|
| 187 |
+
c_bool date_as_object
|
| 188 |
+
c_bool timestamp_as_object
|
| 189 |
+
c_bool use_threads
|
| 190 |
+
c_bool coerce_temporal_nanoseconds
|
| 191 |
+
c_bool ignore_timezone
|
| 192 |
+
c_bool deduplicate_objects
|
| 193 |
+
c_bool safe_cast
|
| 194 |
+
c_bool split_blocks
|
| 195 |
+
c_bool self_destruct
|
| 196 |
+
MapConversionType maps_as_pydicts
|
| 197 |
+
c_bool decode_dictionaries
|
| 198 |
+
unordered_set[c_string] categorical_columns
|
| 199 |
+
unordered_set[c_string] extension_columns
|
| 200 |
+
c_bool to_numpy
|
| 201 |
+
|
| 202 |
+
cdef cppclass CSerializedPyObject" arrow::py::SerializedPyObject":
|
| 203 |
+
shared_ptr[CRecordBatch] batch
|
| 204 |
+
vector[shared_ptr[CTensor]] tensors
|
| 205 |
+
|
| 206 |
+
CStatus WriteTo(COutputStream* dst)
|
| 207 |
+
CStatus GetComponents(CMemoryPool* pool, PyObject** dst)
|
| 208 |
+
|
| 209 |
+
CStatus SerializeObject(object context, object sequence,
|
| 210 |
+
CSerializedPyObject* out)
|
| 211 |
+
|
| 212 |
+
CStatus DeserializeObject(object context,
|
| 213 |
+
const CSerializedPyObject& obj,
|
| 214 |
+
PyObject* base, PyObject** out)
|
| 215 |
+
|
| 216 |
+
CStatus ReadSerializedObject(CRandomAccessFile* src,
|
| 217 |
+
CSerializedPyObject* out)
|
| 218 |
+
|
| 219 |
+
cdef cppclass SparseTensorCounts:
|
| 220 |
+
SparseTensorCounts()
|
| 221 |
+
int coo
|
| 222 |
+
int csr
|
| 223 |
+
int csc
|
| 224 |
+
int csf
|
| 225 |
+
int ndim_csf
|
| 226 |
+
int num_total_tensors() const
|
| 227 |
+
int num_total_buffers() const
|
| 228 |
+
|
| 229 |
+
CStatus GetSerializedFromComponents(
|
| 230 |
+
int num_tensors,
|
| 231 |
+
const SparseTensorCounts& num_sparse_tensors,
|
| 232 |
+
int num_ndarrays,
|
| 233 |
+
int num_buffers,
|
| 234 |
+
object buffers,
|
| 235 |
+
CSerializedPyObject* out)
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
cdef extern from "arrow/python/api.h" namespace "arrow::py::internal" nogil:
|
| 239 |
+
cdef cppclass CTimePoint "arrow::py::internal::TimePoint":
|
| 240 |
+
pass
|
| 241 |
+
|
| 242 |
+
CTimePoint PyDateTime_to_TimePoint(PyDateTime_DateTime* pydatetime)
|
| 243 |
+
int64_t TimePoint_to_ns(CTimePoint val)
|
| 244 |
+
CTimePoint TimePoint_from_s(double val)
|
| 245 |
+
CTimePoint TimePoint_from_ns(int64_t val)
|
| 246 |
+
|
| 247 |
+
CResult[c_string] TzinfoToString(PyObject* pytzinfo)
|
| 248 |
+
CResult[PyObject*] StringToTzinfo(c_string)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
cdef extern from "arrow/python/init.h":
|
| 252 |
+
int arrow_init_numpy() except -1
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
cdef extern from "arrow/python/pyarrow.h" namespace "arrow::py":
|
| 256 |
+
int import_pyarrow() except -1
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
cdef extern from "arrow/python/common.h" namespace "arrow::py":
|
| 260 |
+
c_bool IsPyError(const CStatus& status)
|
| 261 |
+
void RestorePyError(const CStatus& status) except *
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
cdef extern from "arrow/python/common.h" namespace "arrow::py" nogil:
|
| 265 |
+
cdef cppclass SharedPtrNoGIL[T](shared_ptr[T]):
|
| 266 |
+
# This looks like the only way to satisfy both Cython 2 and Cython 3
|
| 267 |
+
SharedPtrNoGIL& operator=(...)
|
| 268 |
+
cdef cppclass UniquePtrNoGIL[T, DELETER=*](unique_ptr[T, DELETER]):
|
| 269 |
+
UniquePtrNoGIL& operator=(...)
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
cdef extern from "arrow/python/inference.h" namespace "arrow::py":
|
| 273 |
+
c_bool IsPyBool(object o)
|
| 274 |
+
c_bool IsPyInt(object o)
|
| 275 |
+
c_bool IsPyFloat(object o)
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
cdef extern from "arrow/python/ipc.h" namespace "arrow::py":
|
| 279 |
+
cdef cppclass CPyRecordBatchReader" arrow::py::PyRecordBatchReader" \
|
| 280 |
+
(CRecordBatchReader):
|
| 281 |
+
@staticmethod
|
| 282 |
+
CResult[shared_ptr[CRecordBatchReader]] Make(shared_ptr[CSchema],
|
| 283 |
+
object)
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
cdef extern from "arrow/python/ipc.h" namespace "arrow::py" nogil:
|
| 287 |
+
cdef cppclass CCastingRecordBatchReader" arrow::py::CastingRecordBatchReader" \
|
| 288 |
+
(CRecordBatchReader):
|
| 289 |
+
@staticmethod
|
| 290 |
+
CResult[shared_ptr[CRecordBatchReader]] Make(shared_ptr[CRecordBatchReader],
|
| 291 |
+
shared_ptr[CSchema])
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
cdef extern from "arrow/python/extension_type.h" namespace "arrow::py":
|
| 295 |
+
cdef cppclass CPyExtensionType \
|
| 296 |
+
" arrow::py::PyExtensionType"(CExtensionType):
|
| 297 |
+
@staticmethod
|
| 298 |
+
CStatus FromClass(const shared_ptr[CDataType] storage_type,
|
| 299 |
+
const c_string extension_name, object typ,
|
| 300 |
+
shared_ptr[CExtensionType]* out)
|
| 301 |
+
|
| 302 |
+
@staticmethod
|
| 303 |
+
CStatus FromInstance(shared_ptr[CDataType] storage_type,
|
| 304 |
+
object inst, shared_ptr[CExtensionType]* out)
|
| 305 |
+
|
| 306 |
+
object GetInstance()
|
| 307 |
+
CStatus SetInstance(object)
|
| 308 |
+
|
| 309 |
+
c_string PyExtensionName()
|
| 310 |
+
CStatus RegisterPyExtensionType(shared_ptr[CDataType])
|
| 311 |
+
CStatus UnregisterPyExtensionType(c_string type_name)
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
cdef extern from "arrow/python/benchmark.h" namespace "arrow::py::benchmark":
|
| 315 |
+
void Benchmark_PandasObjectIsNull(object lst) except *
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
cdef extern from "arrow/python/gdb.h" namespace "arrow::gdb" nogil:
|
| 319 |
+
void GdbTestSession "arrow::gdb::TestSession"()
|
| 320 |
+
|
| 321 |
+
cdef extern from "arrow/python/helpers.h" namespace "arrow::py::internal":
|
| 322 |
+
c_bool IsThreadingEnabled()
|
parrot/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
|
| 20 |
+
from libcpp.vector cimport vector as std_vector
|
| 21 |
+
|
| 22 |
+
from pyarrow.includes.common cimport *
|
| 23 |
+
from pyarrow.includes.libarrow cimport *
|
| 24 |
+
from pyarrow.includes.libarrow_acero cimport *
|
| 25 |
+
|
| 26 |
+
ctypedef CResult[CDeclaration] CNamedTableProvider(const std_vector[c_string]&, const CSchema&)
|
| 27 |
+
|
| 28 |
+
cdef extern from "arrow/engine/substrait/options.h" namespace "arrow::engine" nogil:
|
| 29 |
+
cdef enum ConversionStrictness \
|
| 30 |
+
"arrow::engine::ConversionStrictness":
|
| 31 |
+
EXACT_ROUNDTRIP \
|
| 32 |
+
"arrow::engine::ConversionStrictness::EXACT_ROUNDTRIP"
|
| 33 |
+
PRESERVE_STRUCTURE \
|
| 34 |
+
"arrow::engine::ConversionStrictness::PRESERVE_STRUCTURE"
|
| 35 |
+
BEST_EFFORT \
|
| 36 |
+
"arrow::engine::ConversionStrictness::BEST_EFFORT"
|
| 37 |
+
|
| 38 |
+
cdef cppclass CConversionOptions \
|
| 39 |
+
"arrow::engine::ConversionOptions":
|
| 40 |
+
CConversionOptions()
|
| 41 |
+
ConversionStrictness strictness
|
| 42 |
+
function[CNamedTableProvider] named_table_provider
|
| 43 |
+
c_bool allow_arrow_extensions
|
| 44 |
+
|
| 45 |
+
cdef extern from "arrow/engine/substrait/extension_set.h" \
|
| 46 |
+
namespace "arrow::engine" nogil:
|
| 47 |
+
|
| 48 |
+
cdef cppclass ExtensionIdRegistry:
|
| 49 |
+
std_vector[c_string] GetSupportedSubstraitFunctions()
|
| 50 |
+
|
| 51 |
+
ExtensionIdRegistry* default_extension_id_registry()
|
| 52 |
+
|
| 53 |
+
cdef extern from "arrow/engine/substrait/relation.h" namespace "arrow::engine" nogil:
|
| 54 |
+
|
| 55 |
+
cdef cppclass CNamedExpression "arrow::engine::NamedExpression":
|
| 56 |
+
CExpression expression
|
| 57 |
+
c_string name
|
| 58 |
+
|
| 59 |
+
cdef cppclass CBoundExpressions "arrow::engine::BoundExpressions":
|
| 60 |
+
std_vector[CNamedExpression] named_expressions
|
| 61 |
+
shared_ptr[CSchema] schema
|
| 62 |
+
|
| 63 |
+
cdef extern from "arrow/engine/substrait/serde.h" namespace "arrow::engine" nogil:
|
| 64 |
+
|
| 65 |
+
CResult[shared_ptr[CBuffer]] SerializeExpressions(
|
| 66 |
+
const CBoundExpressions& bound_expressions, const CConversionOptions& conversion_options)
|
| 67 |
+
|
| 68 |
+
CResult[CBoundExpressions] DeserializeExpressions(
|
| 69 |
+
const CBuffer& serialized_expressions)
|
| 70 |
+
|
| 71 |
+
cdef extern from "arrow/engine/substrait/util.h" namespace "arrow::engine" nogil:
|
| 72 |
+
CResult[shared_ptr[CRecordBatchReader]] ExecuteSerializedPlan(
|
| 73 |
+
const CBuffer& substrait_buffer, const ExtensionIdRegistry* registry,
|
| 74 |
+
CFunctionRegistry* func_registry, const CConversionOptions& conversion_options,
|
| 75 |
+
c_bool use_threads)
|
| 76 |
+
|
| 77 |
+
CResult[shared_ptr[CBuffer]] SerializeJsonPlan(const c_string& substrait_json)
|
parrot/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd
ADDED
|
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
|
| 20 |
+
from libcpp.string cimport string as c_string
|
| 21 |
+
from libcpp.unordered_set cimport unordered_set as c_unordered_set
|
| 22 |
+
from libc.stdint cimport int64_t, int32_t, uint8_t, uintptr_t
|
| 23 |
+
|
| 24 |
+
from pyarrow.includes.common cimport *
|
| 25 |
+
from pyarrow.includes.libarrow cimport *
|
| 26 |
+
|
| 27 |
+
cdef extern from "gandiva/node.h" namespace "gandiva" nogil:
|
| 28 |
+
|
| 29 |
+
cdef cppclass CNode" gandiva::Node":
|
| 30 |
+
c_string ToString()
|
| 31 |
+
shared_ptr[CDataType] return_type()
|
| 32 |
+
|
| 33 |
+
cdef cppclass CGandivaExpression" gandiva::Expression":
|
| 34 |
+
c_string ToString()
|
| 35 |
+
shared_ptr[CNode] root()
|
| 36 |
+
shared_ptr[CField] result()
|
| 37 |
+
|
| 38 |
+
ctypedef vector[shared_ptr[CNode]] CNodeVector" gandiva::NodeVector"
|
| 39 |
+
|
| 40 |
+
ctypedef vector[shared_ptr[CGandivaExpression]] \
|
| 41 |
+
CExpressionVector" gandiva::ExpressionVector"
|
| 42 |
+
|
| 43 |
+
cdef extern from "gandiva/selection_vector.h" namespace "gandiva" nogil:
|
| 44 |
+
|
| 45 |
+
cdef cppclass CSelectionVector" gandiva::SelectionVector":
|
| 46 |
+
|
| 47 |
+
shared_ptr[CArray] ToArray()
|
| 48 |
+
|
| 49 |
+
enum CSelectionVector_Mode" gandiva::SelectionVector::Mode":
|
| 50 |
+
CSelectionVector_Mode_NONE" gandiva::SelectionVector::Mode::MODE_NONE"
|
| 51 |
+
CSelectionVector_Mode_UINT16" \
|
| 52 |
+
gandiva::SelectionVector::Mode::MODE_UINT16"
|
| 53 |
+
CSelectionVector_Mode_UINT32" \
|
| 54 |
+
gandiva::SelectionVector::Mode::MODE_UINT32"
|
| 55 |
+
CSelectionVector_Mode_UINT64" \
|
| 56 |
+
gandiva::SelectionVector::Mode::MODE_UINT64"
|
| 57 |
+
|
| 58 |
+
cdef CStatus SelectionVector_MakeInt16\
|
| 59 |
+
"gandiva::SelectionVector::MakeInt16"(
|
| 60 |
+
int64_t max_slots, CMemoryPool* pool,
|
| 61 |
+
shared_ptr[CSelectionVector]* selection_vector)
|
| 62 |
+
|
| 63 |
+
cdef CStatus SelectionVector_MakeInt32\
|
| 64 |
+
"gandiva::SelectionVector::MakeInt32"(
|
| 65 |
+
int64_t max_slots, CMemoryPool* pool,
|
| 66 |
+
shared_ptr[CSelectionVector]* selection_vector)
|
| 67 |
+
|
| 68 |
+
cdef CStatus SelectionVector_MakeInt64\
|
| 69 |
+
"gandiva::SelectionVector::MakeInt64"(
|
| 70 |
+
int64_t max_slots, CMemoryPool* pool,
|
| 71 |
+
shared_ptr[CSelectionVector]* selection_vector)
|
| 72 |
+
|
| 73 |
+
cdef inline CSelectionVector_Mode _ensure_selection_mode(str name) except *:
|
| 74 |
+
uppercase = name.upper()
|
| 75 |
+
if uppercase == 'NONE':
|
| 76 |
+
return CSelectionVector_Mode_NONE
|
| 77 |
+
elif uppercase == 'UINT16':
|
| 78 |
+
return CSelectionVector_Mode_UINT16
|
| 79 |
+
elif uppercase == 'UINT32':
|
| 80 |
+
return CSelectionVector_Mode_UINT32
|
| 81 |
+
elif uppercase == 'UINT64':
|
| 82 |
+
return CSelectionVector_Mode_UINT64
|
| 83 |
+
else:
|
| 84 |
+
raise ValueError('Invalid value for Selection Mode: {!r}'.format(name))
|
| 85 |
+
|
| 86 |
+
cdef inline str _selection_mode_name(CSelectionVector_Mode ctype):
|
| 87 |
+
if ctype == CSelectionVector_Mode_NONE:
|
| 88 |
+
return 'NONE'
|
| 89 |
+
elif ctype == CSelectionVector_Mode_UINT16:
|
| 90 |
+
return 'UINT16'
|
| 91 |
+
elif ctype == CSelectionVector_Mode_UINT32:
|
| 92 |
+
return 'UINT32'
|
| 93 |
+
elif ctype == CSelectionVector_Mode_UINT64:
|
| 94 |
+
return 'UINT64'
|
| 95 |
+
else:
|
| 96 |
+
raise RuntimeError('Unexpected CSelectionVector_Mode value')
|
| 97 |
+
|
| 98 |
+
cdef extern from "gandiva/condition.h" namespace "gandiva" nogil:
|
| 99 |
+
|
| 100 |
+
cdef cppclass CCondition" gandiva::Condition":
|
| 101 |
+
c_string ToString()
|
| 102 |
+
shared_ptr[CNode] root()
|
| 103 |
+
shared_ptr[CField] result()
|
| 104 |
+
|
| 105 |
+
cdef extern from "gandiva/arrow.h" namespace "gandiva" nogil:
|
| 106 |
+
|
| 107 |
+
ctypedef vector[shared_ptr[CArray]] CArrayVector" gandiva::ArrayVector"
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
cdef extern from "gandiva/tree_expr_builder.h" namespace "gandiva" nogil:
|
| 111 |
+
|
| 112 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeBoolLiteral \
|
| 113 |
+
"gandiva::TreeExprBuilder::MakeLiteral"(c_bool value)
|
| 114 |
+
|
| 115 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt8Literal \
|
| 116 |
+
"gandiva::TreeExprBuilder::MakeLiteral"(uint8_t value)
|
| 117 |
+
|
| 118 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt16Literal \
|
| 119 |
+
"gandiva::TreeExprBuilder::MakeLiteral"(uint16_t value)
|
| 120 |
+
|
| 121 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt32Literal \
|
| 122 |
+
"gandiva::TreeExprBuilder::MakeLiteral"(uint32_t value)
|
| 123 |
+
|
| 124 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt64Literal \
|
| 125 |
+
"gandiva::TreeExprBuilder::MakeLiteral"(uint64_t value)
|
| 126 |
+
|
| 127 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInt8Literal \
|
| 128 |
+
"gandiva::TreeExprBuilder::MakeLiteral"(int8_t value)
|
| 129 |
+
|
| 130 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInt16Literal \
|
| 131 |
+
"gandiva::TreeExprBuilder::MakeLiteral"(int16_t value)
|
| 132 |
+
|
| 133 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInt32Literal \
|
| 134 |
+
"gandiva::TreeExprBuilder::MakeLiteral"(int32_t value)
|
| 135 |
+
|
| 136 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInt64Literal \
|
| 137 |
+
"gandiva::TreeExprBuilder::MakeLiteral"(int64_t value)
|
| 138 |
+
|
| 139 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeFloatLiteral \
|
| 140 |
+
"gandiva::TreeExprBuilder::MakeLiteral"(float value)
|
| 141 |
+
|
| 142 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeDoubleLiteral \
|
| 143 |
+
"gandiva::TreeExprBuilder::MakeLiteral"(double value)
|
| 144 |
+
|
| 145 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeStringLiteral \
|
| 146 |
+
"gandiva::TreeExprBuilder::MakeStringLiteral"(const c_string& value)
|
| 147 |
+
|
| 148 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeBinaryLiteral \
|
| 149 |
+
"gandiva::TreeExprBuilder::MakeBinaryLiteral"(const c_string& value)
|
| 150 |
+
|
| 151 |
+
cdef shared_ptr[CGandivaExpression] TreeExprBuilder_MakeExpression\
|
| 152 |
+
"gandiva::TreeExprBuilder::MakeExpression"(
|
| 153 |
+
shared_ptr[CNode] root_node, shared_ptr[CField] result_field)
|
| 154 |
+
|
| 155 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeFunction \
|
| 156 |
+
"gandiva::TreeExprBuilder::MakeFunction"(
|
| 157 |
+
const c_string& name, const CNodeVector& children,
|
| 158 |
+
shared_ptr[CDataType] return_type)
|
| 159 |
+
|
| 160 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeField \
|
| 161 |
+
"gandiva::TreeExprBuilder::MakeField"(shared_ptr[CField] field)
|
| 162 |
+
|
| 163 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeIf \
|
| 164 |
+
"gandiva::TreeExprBuilder::MakeIf"(
|
| 165 |
+
shared_ptr[CNode] condition, shared_ptr[CNode] this_node,
|
| 166 |
+
shared_ptr[CNode] else_node, shared_ptr[CDataType] return_type)
|
| 167 |
+
|
| 168 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeAnd \
|
| 169 |
+
"gandiva::TreeExprBuilder::MakeAnd"(const CNodeVector& children)
|
| 170 |
+
|
| 171 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeOr \
|
| 172 |
+
"gandiva::TreeExprBuilder::MakeOr"(const CNodeVector& children)
|
| 173 |
+
|
| 174 |
+
cdef shared_ptr[CCondition] TreeExprBuilder_MakeCondition \
|
| 175 |
+
"gandiva::TreeExprBuilder::MakeCondition"(
|
| 176 |
+
shared_ptr[CNode] condition)
|
| 177 |
+
|
| 178 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionInt32 \
|
| 179 |
+
"gandiva::TreeExprBuilder::MakeInExpressionInt32"(
|
| 180 |
+
shared_ptr[CNode] node, const c_unordered_set[int32_t]& values)
|
| 181 |
+
|
| 182 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionInt64 \
|
| 183 |
+
"gandiva::TreeExprBuilder::MakeInExpressionInt64"(
|
| 184 |
+
shared_ptr[CNode] node, const c_unordered_set[int64_t]& values)
|
| 185 |
+
|
| 186 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTime32 \
|
| 187 |
+
"gandiva::TreeExprBuilder::MakeInExpressionTime32"(
|
| 188 |
+
shared_ptr[CNode] node, const c_unordered_set[int32_t]& values)
|
| 189 |
+
|
| 190 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTime64 \
|
| 191 |
+
"gandiva::TreeExprBuilder::MakeInExpressionTime64"(
|
| 192 |
+
shared_ptr[CNode] node, const c_unordered_set[int64_t]& values)
|
| 193 |
+
|
| 194 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionDate32 \
|
| 195 |
+
"gandiva::TreeExprBuilder::MakeInExpressionDate32"(
|
| 196 |
+
shared_ptr[CNode] node, const c_unordered_set[int32_t]& values)
|
| 197 |
+
|
| 198 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionDate64 \
|
| 199 |
+
"gandiva::TreeExprBuilder::MakeInExpressionDate64"(
|
| 200 |
+
shared_ptr[CNode] node, const c_unordered_set[int64_t]& values)
|
| 201 |
+
|
| 202 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTimeStamp \
|
| 203 |
+
"gandiva::TreeExprBuilder::MakeInExpressionTimeStamp"(
|
| 204 |
+
shared_ptr[CNode] node, const c_unordered_set[int64_t]& values)
|
| 205 |
+
|
| 206 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionString \
|
| 207 |
+
"gandiva::TreeExprBuilder::MakeInExpressionString"(
|
| 208 |
+
shared_ptr[CNode] node, const c_unordered_set[c_string]& values)
|
| 209 |
+
|
| 210 |
+
cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionBinary \
|
| 211 |
+
"gandiva::TreeExprBuilder::MakeInExpressionBinary"(
|
| 212 |
+
shared_ptr[CNode] node, const c_unordered_set[c_string]& values)
|
| 213 |
+
|
| 214 |
+
cdef extern from "gandiva/projector.h" namespace "gandiva" nogil:
|
| 215 |
+
|
| 216 |
+
cdef cppclass CProjector" gandiva::Projector":
|
| 217 |
+
|
| 218 |
+
CStatus Evaluate(
|
| 219 |
+
const CRecordBatch& batch, CMemoryPool* pool,
|
| 220 |
+
const CArrayVector* output)
|
| 221 |
+
|
| 222 |
+
CStatus Evaluate(
|
| 223 |
+
const CRecordBatch& batch,
|
| 224 |
+
const CSelectionVector* selection,
|
| 225 |
+
CMemoryPool* pool,
|
| 226 |
+
const CArrayVector* output)
|
| 227 |
+
|
| 228 |
+
c_string DumpIR()
|
| 229 |
+
|
| 230 |
+
cdef CStatus Projector_Make \
|
| 231 |
+
"gandiva::Projector::Make"(
|
| 232 |
+
shared_ptr[CSchema] schema, const CExpressionVector& children,
|
| 233 |
+
shared_ptr[CProjector]* projector)
|
| 234 |
+
|
| 235 |
+
cdef CStatus Projector_Make \
|
| 236 |
+
"gandiva::Projector::Make"(
|
| 237 |
+
shared_ptr[CSchema] schema, const CExpressionVector& children,
|
| 238 |
+
CSelectionVector_Mode mode,
|
| 239 |
+
shared_ptr[CConfiguration] configuration,
|
| 240 |
+
shared_ptr[CProjector]* projector)
|
| 241 |
+
|
| 242 |
+
cdef extern from "gandiva/filter.h" namespace "gandiva" nogil:
|
| 243 |
+
|
| 244 |
+
cdef cppclass CFilter" gandiva::Filter":
|
| 245 |
+
|
| 246 |
+
CStatus Evaluate(
|
| 247 |
+
const CRecordBatch& batch,
|
| 248 |
+
shared_ptr[CSelectionVector] out_selection)
|
| 249 |
+
|
| 250 |
+
c_string DumpIR()
|
| 251 |
+
|
| 252 |
+
cdef CStatus Filter_Make \
|
| 253 |
+
"gandiva::Filter::Make"(
|
| 254 |
+
shared_ptr[CSchema] schema, shared_ptr[CCondition] condition,
|
| 255 |
+
shared_ptr[CConfiguration] configuration,
|
| 256 |
+
shared_ptr[CFilter]* filter)
|
| 257 |
+
|
| 258 |
+
cdef extern from "gandiva/function_signature.h" namespace "gandiva" nogil:
|
| 259 |
+
|
| 260 |
+
cdef cppclass CFunctionSignature" gandiva::FunctionSignature":
|
| 261 |
+
|
| 262 |
+
CFunctionSignature(const c_string& base_name,
|
| 263 |
+
vector[shared_ptr[CDataType]] param_types,
|
| 264 |
+
shared_ptr[CDataType] ret_type)
|
| 265 |
+
|
| 266 |
+
shared_ptr[CDataType] ret_type() const
|
| 267 |
+
|
| 268 |
+
const c_string& base_name() const
|
| 269 |
+
|
| 270 |
+
vector[shared_ptr[CDataType]] param_types() const
|
| 271 |
+
|
| 272 |
+
c_string ToString() const
|
| 273 |
+
|
| 274 |
+
cdef extern from "gandiva/expression_registry.h" namespace "gandiva" nogil:
|
| 275 |
+
|
| 276 |
+
cdef vector[shared_ptr[CFunctionSignature]] \
|
| 277 |
+
GetRegisteredFunctionSignatures()
|
| 278 |
+
|
| 279 |
+
cdef extern from "gandiva/configuration.h" namespace "gandiva" nogil:
|
| 280 |
+
|
| 281 |
+
cdef cppclass CConfiguration" gandiva::Configuration":
|
| 282 |
+
|
| 283 |
+
CConfiguration()
|
| 284 |
+
|
| 285 |
+
CConfiguration(bint optimize, bint dump_ir)
|
| 286 |
+
|
| 287 |
+
void set_optimize(bint optimize)
|
| 288 |
+
|
| 289 |
+
void set_dump_ir(bint dump_ir)
|
| 290 |
+
|
| 291 |
+
cdef cppclass CConfigurationBuilder \
|
| 292 |
+
" gandiva::ConfigurationBuilder":
|
| 293 |
+
@staticmethod
|
| 294 |
+
shared_ptr[CConfiguration] DefaultConfiguration()
|
| 295 |
+
|
| 296 |
+
CConfigurationBuilder()
|
| 297 |
+
|
| 298 |
+
shared_ptr[CConfiguration] build()
|
parrot/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# distutils: language = c++
|
| 19 |
+
|
| 20 |
+
from pyarrow.includes.common cimport *
|
| 21 |
+
from pyarrow._parquet cimport (ParquetCipher,
|
| 22 |
+
CFileEncryptionProperties,
|
| 23 |
+
CFileDecryptionProperties,
|
| 24 |
+
ParquetCipher_AES_GCM_V1,
|
| 25 |
+
ParquetCipher_AES_GCM_CTR_V1)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
cdef extern from "parquet/encryption/kms_client.h" \
|
| 29 |
+
namespace "parquet::encryption" nogil:
|
| 30 |
+
cdef cppclass CKmsClient" parquet::encryption::KmsClient":
|
| 31 |
+
c_string WrapKey(const c_string& key_bytes,
|
| 32 |
+
const c_string& master_key_identifier) except +
|
| 33 |
+
c_string UnwrapKey(const c_string& wrapped_key,
|
| 34 |
+
const c_string& master_key_identifier) except +
|
| 35 |
+
|
| 36 |
+
cdef cppclass CKeyAccessToken" parquet::encryption::KeyAccessToken":
|
| 37 |
+
CKeyAccessToken(const c_string value)
|
| 38 |
+
void Refresh(const c_string& new_value)
|
| 39 |
+
const c_string& value() const
|
| 40 |
+
|
| 41 |
+
cdef cppclass CKmsConnectionConfig \
|
| 42 |
+
" parquet::encryption::KmsConnectionConfig":
|
| 43 |
+
CKmsConnectionConfig()
|
| 44 |
+
c_string kms_instance_id
|
| 45 |
+
c_string kms_instance_url
|
| 46 |
+
shared_ptr[CKeyAccessToken] refreshable_key_access_token
|
| 47 |
+
unordered_map[c_string, c_string] custom_kms_conf
|
| 48 |
+
|
| 49 |
+
# Callbacks for implementing Python kms clients
|
| 50 |
+
# Use typedef to emulate syntax for std::function<void(..)>
|
| 51 |
+
ctypedef void CallbackWrapKey(
|
| 52 |
+
object, const c_string&, const c_string&, c_string*)
|
| 53 |
+
ctypedef void CallbackUnwrapKey(
|
| 54 |
+
object, const c_string&, const c_string&, c_string*)
|
| 55 |
+
|
| 56 |
+
cdef extern from "parquet/encryption/kms_client_factory.h" \
|
| 57 |
+
namespace "parquet::encryption" nogil:
|
| 58 |
+
cdef cppclass CKmsClientFactory" parquet::encryption::KmsClientFactory":
|
| 59 |
+
shared_ptr[CKmsClient] CreateKmsClient(
|
| 60 |
+
const CKmsConnectionConfig& kms_connection_config) except +
|
| 61 |
+
|
| 62 |
+
# Callbacks for implementing Python kms client factories
|
| 63 |
+
# Use typedef to emulate syntax for std::function<void(..)>
|
| 64 |
+
ctypedef void CallbackCreateKmsClient(
|
| 65 |
+
object,
|
| 66 |
+
const CKmsConnectionConfig&, shared_ptr[CKmsClient]*)
|
| 67 |
+
|
| 68 |
+
cdef extern from "parquet/encryption/crypto_factory.h" \
|
| 69 |
+
namespace "parquet::encryption" nogil:
|
| 70 |
+
cdef cppclass CEncryptionConfiguration\
|
| 71 |
+
" parquet::encryption::EncryptionConfiguration":
|
| 72 |
+
CEncryptionConfiguration(const c_string& footer_key) except +
|
| 73 |
+
c_string footer_key
|
| 74 |
+
c_string column_keys
|
| 75 |
+
ParquetCipher encryption_algorithm
|
| 76 |
+
c_bool plaintext_footer
|
| 77 |
+
c_bool double_wrapping
|
| 78 |
+
double cache_lifetime_seconds
|
| 79 |
+
c_bool internal_key_material
|
| 80 |
+
int32_t data_key_length_bits
|
| 81 |
+
|
| 82 |
+
cdef cppclass CDecryptionConfiguration\
|
| 83 |
+
" parquet::encryption::DecryptionConfiguration":
|
| 84 |
+
CDecryptionConfiguration() except +
|
| 85 |
+
double cache_lifetime_seconds
|
| 86 |
+
|
| 87 |
+
cdef cppclass CCryptoFactory" parquet::encryption::CryptoFactory":
|
| 88 |
+
void RegisterKmsClientFactory(
|
| 89 |
+
shared_ptr[CKmsClientFactory] kms_client_factory) except +
|
| 90 |
+
shared_ptr[CFileEncryptionProperties] GetFileEncryptionProperties(
|
| 91 |
+
const CKmsConnectionConfig& kms_connection_config,
|
| 92 |
+
const CEncryptionConfiguration& encryption_config) except +*
|
| 93 |
+
shared_ptr[CFileDecryptionProperties] GetFileDecryptionProperties(
|
| 94 |
+
const CKmsConnectionConfig& kms_connection_config,
|
| 95 |
+
const CDecryptionConfiguration& decryption_config) except +*
|
| 96 |
+
void RemoveCacheEntriesForToken(const c_string& access_token) except +
|
| 97 |
+
void RemoveCacheEntriesForAllTokens() except +
|
| 98 |
+
|
| 99 |
+
cdef extern from "arrow/python/parquet_encryption.h" \
|
| 100 |
+
namespace "arrow::py::parquet::encryption" nogil:
|
| 101 |
+
cdef cppclass CPyKmsClientVtable \
|
| 102 |
+
" arrow::py::parquet::encryption::PyKmsClientVtable":
|
| 103 |
+
CPyKmsClientVtable()
|
| 104 |
+
function[CallbackWrapKey] wrap_key
|
| 105 |
+
function[CallbackUnwrapKey] unwrap_key
|
| 106 |
+
|
| 107 |
+
cdef cppclass CPyKmsClient\
|
| 108 |
+
" arrow::py::parquet::encryption::PyKmsClient"(CKmsClient):
|
| 109 |
+
CPyKmsClient(object handler, CPyKmsClientVtable vtable)
|
| 110 |
+
|
| 111 |
+
cdef cppclass CPyKmsClientFactoryVtable\
|
| 112 |
+
" arrow::py::parquet::encryption::PyKmsClientFactoryVtable":
|
| 113 |
+
CPyKmsClientFactoryVtable()
|
| 114 |
+
function[CallbackCreateKmsClient] create_kms_client
|
| 115 |
+
|
| 116 |
+
cdef cppclass CPyKmsClientFactory\
|
| 117 |
+
" arrow::py::parquet::encryption::PyKmsClientFactory"(
|
| 118 |
+
CKmsClientFactory):
|
| 119 |
+
CPyKmsClientFactory(object handler, CPyKmsClientFactoryVtable vtable)
|
| 120 |
+
|
| 121 |
+
cdef cppclass CPyCryptoFactory\
|
| 122 |
+
" arrow::py::parquet::encryption::PyCryptoFactory"(CCryptoFactory):
|
| 123 |
+
CResult[shared_ptr[CFileEncryptionProperties]] \
|
| 124 |
+
SafeGetFileEncryptionProperties(
|
| 125 |
+
const CKmsConnectionConfig& kms_connection_config,
|
| 126 |
+
const CEncryptionConfiguration& encryption_config)
|
| 127 |
+
CResult[shared_ptr[CFileDecryptionProperties]] \
|
| 128 |
+
SafeGetFileDecryptionProperties(
|
| 129 |
+
const CKmsConnectionConfig& kms_connection_config,
|
| 130 |
+
const CDecryptionConfiguration& decryption_config)
|
parrot/lib/python3.10/site-packages/pyarrow/interchange/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
# flake8: noqa
|
| 19 |
+
|
| 20 |
+
from .from_dataframe import from_dataframe
|
parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (206 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/buffer.cpython-310.pyc
ADDED
|
Binary file (3.32 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/column.cpython-310.pyc
ADDED
|
Binary file (16.7 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc
ADDED
|
Binary file (7.43 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/from_dataframe.cpython-310.pyc
ADDED
|
Binary file (13.1 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/pyarrow/interchange/buffer.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
| 2 |
+
# or more contributor license agreements. See the NOTICE file
|
| 3 |
+
# distributed with this work for additional information
|
| 4 |
+
# regarding copyright ownership. The ASF licenses this file
|
| 5 |
+
# to you under the Apache License, Version 2.0 (the
|
| 6 |
+
# "License"); you may not use this file except in compliance
|
| 7 |
+
# with the License. You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing,
|
| 12 |
+
# software distributed under the License is distributed on an
|
| 13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 14 |
+
# KIND, either express or implied. See the License for the
|
| 15 |
+
# specific language governing permissions and limitations
|
| 16 |
+
# under the License.
|
| 17 |
+
|
| 18 |
+
from __future__ import annotations
|
| 19 |
+
import enum
|
| 20 |
+
|
| 21 |
+
import pyarrow as pa
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class DlpackDeviceType(enum.IntEnum):
    """Integer enum for device type codes matching DLPack."""

    # Member values mirror DLPack's DLDeviceType codes; codes 5 and 6 are
    # simply not defined in this subset (presumably unused by the
    # interchange protocol — confirm against the DLPack spec).
    CPU = 1
    CUDA = 2
    CPU_PINNED = 3
    OPENCL = 4
    VULKAN = 7
    METAL = 8
    VPI = 9
    ROCM = 10
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class _PyArrowBuffer:
|
| 38 |
+
"""
|
| 39 |
+
Data in the buffer is guaranteed to be contiguous in memory.
|
| 40 |
+
|
| 41 |
+
Note that there is no dtype attribute present, a buffer can be thought of
|
| 42 |
+
as simply a block of memory. However, if the column that the buffer is
|
| 43 |
+
attached to has a dtype that's supported by DLPack and ``__dlpack__`` is
|
| 44 |
+
implemented, then that dtype information will be contained in the return
|
| 45 |
+
value from ``__dlpack__``.
|
| 46 |
+
|
| 47 |
+
This distinction is useful to support both data exchange via DLPack on a
|
| 48 |
+
buffer and (b) dtypes like variable-length strings which do not have a
|
| 49 |
+
fixed number of bytes per element.
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
def __init__(self, x: pa.Buffer, allow_copy: bool = True) -> None:
|
| 53 |
+
"""
|
| 54 |
+
Handle PyArrow Buffers.
|
| 55 |
+
"""
|
| 56 |
+
self._x = x
|
| 57 |
+
|
| 58 |
+
@property
|
| 59 |
+
def bufsize(self) -> int:
|
| 60 |
+
"""
|
| 61 |
+
Buffer size in bytes.
|
| 62 |
+
"""
|
| 63 |
+
return self._x.size
|
| 64 |
+
|
| 65 |
+
@property
|
| 66 |
+
def ptr(self) -> int:
|
| 67 |
+
"""
|
| 68 |
+
Pointer to start of the buffer as an integer.
|
| 69 |
+
"""
|
| 70 |
+
return self._x.address
|
| 71 |
+
|
| 72 |
+
def __dlpack__(self):
|
| 73 |
+
"""
|
| 74 |
+
Produce DLPack capsule (see array API standard).
|
| 75 |
+
|
| 76 |
+
Raises:
|
| 77 |
+
- TypeError : if the buffer contains unsupported dtypes.
|
| 78 |
+
- NotImplementedError : if DLPack support is not implemented
|
| 79 |
+
|
| 80 |
+
Useful to have to connect to array libraries. Support optional because
|
| 81 |
+
it's not completely trivial to implement for a Python-only library.
|
| 82 |
+
"""
|
| 83 |
+
raise NotImplementedError("__dlpack__")
|
| 84 |
+
|
| 85 |
+
def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
|
| 86 |
+
"""
|
| 87 |
+
Device type and device ID for where the data in the buffer resides.
|
| 88 |
+
Uses device type codes matching DLPack.
|
| 89 |
+
Note: must be implemented even if ``__dlpack__`` is not.
|
| 90 |
+
"""
|
| 91 |
+
if self._x.is_cpu:
|
| 92 |
+
return (DlpackDeviceType.CPU, None)
|
| 93 |
+
else:
|
| 94 |
+
raise NotImplementedError("__dlpack_device__")
|
| 95 |
+
|
| 96 |
+
def __repr__(self) -> str:
|
| 97 |
+
return (
|
| 98 |
+
"PyArrowBuffer(" +
|
| 99 |
+
str(
|
| 100 |
+
{
|
| 101 |
+
"bufsize": self.bufsize,
|
| 102 |
+
"ptr": self.ptr,
|
| 103 |
+
"device": self.__dlpack_device__()[0].name,
|
| 104 |
+
}
|
| 105 |
+
) +
|
| 106 |
+
")"
|
| 107 |
+
)
|