diff --git a/.gitattributes b/.gitattributes index dbfead2c5755bc73154422357f558eab886e1ad2..13bb8b0726ff3b236c052b361e1725a8953f0081 100644 --- a/.gitattributes +++ b/.gitattributes @@ -102,3 +102,4 @@ parrot/lib/libncursesw.so.6.4 filter=lfs diff=lfs merge=lfs -text parrot/lib/libtinfow.so filter=lfs diff=lfs merge=lfs -text parrot/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/google/protobuf/__pycache__/descriptor_pb2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/libtinfo.so.6 filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/libtinfo.so.6 b/parrot/lib/libtinfo.so.6 new file mode 100644 index 0000000000000000000000000000000000000000..cc8cf878bbd7930738828148050b77fb02c2de4e --- /dev/null +++ b/parrot/lib/libtinfo.so.6 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ff9b333bc4b796b31c188c2dadd7840788cb963dbf4f34567deb3f326326b02 +size 287080 diff --git a/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/RECORD b/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ece0d069014427cbfd83c80c5f8803dfaad0f158 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/RECORD @@ -0,0 +1,119 @@ +../../../bin/pygrun,sha256=zdiJT5vo3EKhHl5thuM5QFtN0Dn5DetmpY_ZBW8J4XA,6133 +antlr4/BufferedTokenStream.py,sha256=_BwmzOH1TO6yL2yC_ZaUzkghq8wzc0UPHfI3UpnZUwM,10780 +antlr4/CommonTokenFactory.py,sha256=Tv16zg_pWD1Dv3IphsxFu8nwWdLeXYcqJ8CC5yHwjH8,2110 +antlr4/CommonTokenStream.py,sha256=NNJHXwRg2_Zn46ZhJyDxZtvZzsPWhb6JjXa7BjM45eg,2770 +antlr4/FileStream.py,sha256=-ZR_-jl_If9IIBYLINIwlQrlTSmu5k1VUKDc3ie7WR4,868 +antlr4/InputStream.py,sha256=sggjE2jEGvSgQmxFvqeeuT3aOVgcH5tS7mMybW8wKS4,2334 +antlr4/IntervalSet.py,sha256=Cd0WKhd_kYbiLYKkDNncgSM19GAuS7OaTOC4-5Yubs4,5965 
+antlr4/LL1Analyzer.py,sha256=oJBvO7_S8cAlb_D4qWNxd2IlK0qP4ka-oeoDxx16CZ4,7752 +antlr4/Lexer.py,sha256=C72hqayfkympxb46AcSnhPD9kVZ0quWgboGxa6gcIcg,11542 +antlr4/ListTokenSource.py,sha256=IffLMo7YQnD_CjKryrrgNWSk0q5QSYd7puZyyUk7vOk,5356 +antlr4/Parser.py,sha256=F2Q25z0-__KHfa354KQhDu3ZOVzLFfag3s2ixJ4dl_o,22883 +antlr4/ParserInterpreter.py,sha256=-QU9kn4x3WCQ-LSA99R231HoicTqakiHZ5KM72l-hIo,7206 +antlr4/ParserRuleContext.py,sha256=wHAVdOxMAO5jkUqloTXVzn_xYnJhiHbvvuhZpth0ZF8,6762 +antlr4/PredictionContext.py,sha256=cb4KI6EGpS7sRzJ8UvPEkxphINZuWhyiZ95752g3prI,22977 +antlr4/Recognizer.py,sha256=vmKAtSjIgR9LQr5YzuK5OmPZWMJ3x69OuVZQ_FTzQHE,5383 +antlr4/RuleContext.py,sha256=GiviRv2k_al1IBgdJOEEoD0ohJaVd-_h5T_CPG_Bsmg,8099 +antlr4/StdinStream.py,sha256=MMSH4zN8T6i_nu-3_TlN-3E4nPM4b5KgK4GT6n_FUQA,303 +antlr4/Token.py,sha256=OtWCab4Ut52X_nLLAA-8x4Zl6xaF6TEN-0033uaoaEo,5206 +antlr4/TokenStreamRewriter.py,sha256=cuErQTrXwC_0kqVv3MsTWGZSm-E1Vy1yzA-3SOhKd_s,10324 +antlr4/Utils.py,sha256=Oyg8CJCRL1TrF_QSB_LLlVdWOB4loVcKOgFNT-icO7c,931 +antlr4/__init__.py,sha256=g8UGpflnlMWcAyLtihejzrgAP1Uo3b9GhwfI8QnZjtw,1125 +antlr4/__pycache__/BufferedTokenStream.cpython-310.pyc,, +antlr4/__pycache__/CommonTokenFactory.cpython-310.pyc,, +antlr4/__pycache__/CommonTokenStream.cpython-310.pyc,, +antlr4/__pycache__/FileStream.cpython-310.pyc,, +antlr4/__pycache__/InputStream.cpython-310.pyc,, +antlr4/__pycache__/IntervalSet.cpython-310.pyc,, +antlr4/__pycache__/LL1Analyzer.cpython-310.pyc,, +antlr4/__pycache__/Lexer.cpython-310.pyc,, +antlr4/__pycache__/ListTokenSource.cpython-310.pyc,, +antlr4/__pycache__/Parser.cpython-310.pyc,, +antlr4/__pycache__/ParserInterpreter.cpython-310.pyc,, +antlr4/__pycache__/ParserRuleContext.cpython-310.pyc,, +antlr4/__pycache__/PredictionContext.cpython-310.pyc,, +antlr4/__pycache__/Recognizer.cpython-310.pyc,, +antlr4/__pycache__/RuleContext.cpython-310.pyc,, +antlr4/__pycache__/StdinStream.cpython-310.pyc,, +antlr4/__pycache__/Token.cpython-310.pyc,, 
+antlr4/__pycache__/TokenStreamRewriter.cpython-310.pyc,, +antlr4/__pycache__/Utils.cpython-310.pyc,, +antlr4/__pycache__/__init__.cpython-310.pyc,, +antlr4/atn/ATN.py,sha256=LYE8kT-D8FpUd5fpOtyOLqvXLFkUSa83TVFowhCWAiY,5789 +antlr4/atn/ATNConfig.py,sha256=tNdIC6_GrxXllHBx3npAWyDh6KrohLZDV_XyPrydRMY,6565 +antlr4/atn/ATNConfigSet.py,sha256=qRzVsBeMqk2txjG3DrGptwF6Vb2hHC5w3umkSL0GNJw,8312 +antlr4/atn/ATNDeserializationOptions.py,sha256=lUV_bGW6mxj7t20esda5Yv-X9m-U_x1-0xaLifhXIPo,1010 +antlr4/atn/ATNDeserializer.py,sha256=aYLDDtQ-wyo3gId6A-wD1E3QmpfrPZlXxj4_IDm-mUY,22252 +antlr4/atn/ATNSimulator.py,sha256=mDc-G3GF3kSeqpfGDabUOLJ0WLVTqibxZlkvXQYmBRk,2298 +antlr4/atn/ATNState.py,sha256=NbndISWUwFDF_vuBfbTiZZ8GPHoQa6UXdqbD-yjJE7c,7663 +antlr4/atn/ATNType.py,sha256=xgv8AMVU7tc07U73_hRTm1AiZ7MvGhoaP5fTiOrrCGg,422 +antlr4/atn/LexerATNSimulator.py,sha256=kYXRwUvHptSRU8T_K9pSrGlCk9YypWeHlAcjgry1VVo,25465 +antlr4/atn/LexerAction.py,sha256=KUeJwKekBch0m1poSPskHIh-15dcKAG4lR7zlq98tzc,10014 +antlr4/atn/LexerActionExecutor.py,sha256=7rlg17THcwLsuTmh7NsLrTbRH4DTrm8qIdW9_235CEc,6420 +antlr4/atn/ParserATNSimulator.py,sha256=IKCzsDLcznROSVojU-daAygKr3svl0DmK5DhkUllASY,80365 +antlr4/atn/PredictionMode.py,sha256=i8B7MULA7v-qbXeCY_xp6sgi21kHM6kybqIrG6rSrro,22486 +antlr4/atn/SemanticContext.py,sha256=ds0TmM4qenb0LN-rl2Fp_N_xB959abN67I19EF6rs8o,10495 +antlr4/atn/Transition.py,sha256=ZAsEFpa5I_n-zxD6U-DauM5_33jFK65x3PWu6-NW0RA,8762 +antlr4/atn/__init__.py,sha256=gsnQdtTH8IUgCiVUpQfzhxx2pFRvksW76SjwIk3fYSk,28 +antlr4/atn/__pycache__/ATN.cpython-310.pyc,, +antlr4/atn/__pycache__/ATNConfig.cpython-310.pyc,, +antlr4/atn/__pycache__/ATNConfigSet.cpython-310.pyc,, +antlr4/atn/__pycache__/ATNDeserializationOptions.cpython-310.pyc,, +antlr4/atn/__pycache__/ATNDeserializer.cpython-310.pyc,, +antlr4/atn/__pycache__/ATNSimulator.cpython-310.pyc,, +antlr4/atn/__pycache__/ATNState.cpython-310.pyc,, +antlr4/atn/__pycache__/ATNType.cpython-310.pyc,, 
+antlr4/atn/__pycache__/LexerATNSimulator.cpython-310.pyc,, +antlr4/atn/__pycache__/LexerAction.cpython-310.pyc,, +antlr4/atn/__pycache__/LexerActionExecutor.cpython-310.pyc,, +antlr4/atn/__pycache__/ParserATNSimulator.cpython-310.pyc,, +antlr4/atn/__pycache__/PredictionMode.cpython-310.pyc,, +antlr4/atn/__pycache__/SemanticContext.cpython-310.pyc,, +antlr4/atn/__pycache__/Transition.cpython-310.pyc,, +antlr4/atn/__pycache__/__init__.cpython-310.pyc,, +antlr4/dfa/DFA.py,sha256=weIh0uaRfakP12mFvHo7U0tqO3GONV3-nHFkc2xk-ZE,5388 +antlr4/dfa/DFASerializer.py,sha256=1st_HO85yXLYy7gInTEnkztgA6am4CT-yReh-mazp9E,2518 +antlr4/dfa/DFAState.py,sha256=R7JwKf0GtAEs9J_MD_Y0WKcuzdt0BVX1sow-uv9yFYc,5583 +antlr4/dfa/__init__.py,sha256=gsnQdtTH8IUgCiVUpQfzhxx2pFRvksW76SjwIk3fYSk,28 +antlr4/dfa/__pycache__/DFA.cpython-310.pyc,, +antlr4/dfa/__pycache__/DFASerializer.cpython-310.pyc,, +antlr4/dfa/__pycache__/DFAState.cpython-310.pyc,, +antlr4/dfa/__pycache__/__init__.cpython-310.pyc,, +antlr4/error/DiagnosticErrorListener.py,sha256=EwS2D_Ox6CmvCa16NPJ9ud4QYPHmlPXt6-Wdn1h5Kg8,4430 +antlr4/error/ErrorListener.py,sha256=yP_MDguol4Cj0_pEPyNzeH3v4ZvUjW5iwDjhYTVAHbE,2722 +antlr4/error/ErrorStrategy.py,sha256=0mhzFL57ZVnjKkGrtadta93Zm3NXdF-HW10DVD07VXs,30391 +antlr4/error/Errors.py,sha256=hlKngclBfXdkDiAymhYsvh2OCXlvmHM2kTl_A1vgp-w,6759 +antlr4/error/__init__.py,sha256=gsnQdtTH8IUgCiVUpQfzhxx2pFRvksW76SjwIk3fYSk,28 +antlr4/error/__pycache__/DiagnosticErrorListener.cpython-310.pyc,, +antlr4/error/__pycache__/ErrorListener.cpython-310.pyc,, +antlr4/error/__pycache__/ErrorStrategy.cpython-310.pyc,, +antlr4/error/__pycache__/Errors.cpython-310.pyc,, +antlr4/error/__pycache__/__init__.cpython-310.pyc,, +antlr4/tree/Chunk.py,sha256=oCIZjolLq9xkxtVDROEDxfUGgndcEnsDW0eUmLM7Gpk,695 +antlr4/tree/ParseTreeMatch.py,sha256=Dc6GVWSUqoIAFXUaUZqUwCUlZfTcgUbGLGzNf6QxQvE,4485 +antlr4/tree/ParseTreePattern.py,sha256=ASBNaQORh3f7f8KnFeZJC2yWFFx4uQlxvC2Y55ifhY0,2825 
+antlr4/tree/ParseTreePatternMatcher.py,sha256=HtE9yi1Urr2QPLGLJDBvr0lxv6bjuj9CHl-4clahSe8,16388 +antlr4/tree/RuleTagToken.py,sha256=n4zXcmrrfsGyl91pj5ZYcc_CeKMhPrvYkUdppgMBpbY,2022 +antlr4/tree/TokenTagToken.py,sha256=S3o3DJhfzL5kpClxsKyI-Il-xvuuZQiBAIsLCKFjRHo,1576 +antlr4/tree/Tree.py,sha256=ZI7U_5IxBLm_IrnfJOtb12BCPIWyzfeZtLnhHKVVZIw,5572 +antlr4/tree/Trees.py,sha256=JtQ7cYWmKwI9TIBP6y9XIgjlNS4mYjv3ARwOfwWc5Vg,3968 +antlr4/tree/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +antlr4/tree/__pycache__/Chunk.cpython-310.pyc,, +antlr4/tree/__pycache__/ParseTreeMatch.cpython-310.pyc,, +antlr4/tree/__pycache__/ParseTreePattern.cpython-310.pyc,, +antlr4/tree/__pycache__/ParseTreePatternMatcher.cpython-310.pyc,, +antlr4/tree/__pycache__/RuleTagToken.cpython-310.pyc,, +antlr4/tree/__pycache__/TokenTagToken.cpython-310.pyc,, +antlr4/tree/__pycache__/Tree.cpython-310.pyc,, +antlr4/tree/__pycache__/Trees.cpython-310.pyc,, +antlr4/tree/__pycache__/__init__.cpython-310.pyc,, +antlr4/xpath/XPath.py,sha256=O9s4-EDvLbAbYytP_bae9Z2khLl0iAtRzPAtVbuWUM4,13015 +antlr4/xpath/__init__.py,sha256=gsnQdtTH8IUgCiVUpQfzhxx2pFRvksW76SjwIk3fYSk,28 +antlr4/xpath/__pycache__/XPath.cpython-310.pyc,, +antlr4/xpath/__pycache__/__init__.cpython-310.pyc,, +antlr4_python3_runtime-4.9.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +antlr4_python3_runtime-4.9.3.dist-info/METADATA,sha256=huEAVSqtQkF-nXL3LBE0uElH1B5hGyPPX_41eQ03Cik,403 +antlr4_python3_runtime-4.9.3.dist-info/RECORD,, +antlr4_python3_runtime-4.9.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +antlr4_python3_runtime-4.9.3.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91 +antlr4_python3_runtime-4.9.3.dist-info/top_level.txt,sha256=OsoZsh9bb30wgXb2zBUjdDwYg46MfV-RVZA6Pk8pcB0,7 diff --git a/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/REQUESTED 
b/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/top_level.txt b/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..3dee19f3e13c2cbbb49b818e6ac67c63f77b865f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/top_level.txt @@ -0,0 +1 @@ +antlr4 diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/anchors/__init__.py b/parrot/lib/python3.10/site-packages/mdit_py_plugins/anchors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d9c4f054bae09ac38f7758c4a499d8e3b64bc7c4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/mdit_py_plugins/anchors/__init__.py @@ -0,0 +1 @@ +from .index import anchors_plugin # noqa F401 diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__init__.py b/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9359cf8a0d289b1fe786abd11cac24353baee7ac --- /dev/null +++ b/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__init__.py @@ -0,0 +1 @@ +from .index import attrs_plugin # noqa: F401 diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3f7fa2be217a8b814dec987d48e09a8b0badd27 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/index.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/index.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95cb9024094f1ed08d4b31bd0df677e42cc4a20c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/index.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/parse.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/parse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c014137950d38d496bf147e9595772eb98fa64ca Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/parse.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/index.py b/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/index.py new file mode 100644 index 0000000000000000000000000000000000000000..1adea3f34fc837b86e2d7b900f669b8abe377653 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/index.py @@ -0,0 +1,123 @@ +from typing import List, Optional + +from markdown_it import MarkdownIt +from markdown_it.rules_inline import StateInline +from markdown_it.token import Token + +from .parse import ParseError, parse + + +def attrs_plugin( + md: MarkdownIt, + *, + after=("image", "code_inline", "link_close", "span_close"), + spans=False, + span_after="link", +): + """Parse inline attributes that immediately follow certain inline elements:: + + ![alt](https://image.com){#id .a b=c} + + This syntax is inspired by + `Djot spans + `_. + + Inside the curly braces, the following syntax is possible: + + - `.foo` specifies foo as a class. + Multiple classes may be given in this way; they will be combined. + - `#foo` specifies foo as an identifier. 
+ An element may have only one identifier; + if multiple identifiers are given, the last one is used. + - `key="value"` or `key=value` specifies a key-value attribute. + Quotes are not needed when the value consists entirely of + ASCII alphanumeric characters or `_` or `:` or `-`. + Backslash escapes may be used inside quoted values. + - `%` begins a comment, which ends with the next `%` or the end of the attribute (`}`). + + Multiple attribute blocks are merged. + + :param md: The MarkdownIt instance to modify. + :param after: The names of inline elements after which attributes may be specified. + This plugin does not support attributes after emphasis, strikethrough or text elements, + which all require post-parse processing. + :param spans: If True, also parse attributes after spans of text, encapsulated by `[]`. + Note Markdown link references take precedence over this syntax. + :param span_after: The name of an inline rule after which spans may be specified. + """ + + def _attr_rule(state: StateInline, silent: bool): + if state.pending or not state.tokens: + return False + token = state.tokens[-1] + if token.type not in after: + return False + try: + new_pos, attrs = parse(state.src[state.pos :]) + except ParseError: + return False + token_index = _find_opening(state.tokens, len(state.tokens) - 1) + if token_index is None: + return False + state.pos += new_pos + 1 + if not silent: + attr_token = state.tokens[token_index] + if "class" in attrs and "class" in token.attrs: + attrs["class"] = f"{attr_token.attrs['class']} {attrs['class']}" + attr_token.attrs.update(attrs) + return True + + if spans: + md.inline.ruler.after(span_after, "span", _span_rule) + md.inline.ruler.push("attr", _attr_rule) + + +def _find_opening(tokens: List[Token], index: int) -> Optional[int]: + """Find the opening token index, if the token is closing.""" + if tokens[index].nesting != -1: + return index + level = 0 + while index >= 0: + level += tokens[index].nesting + if level == 0: + 
return index + index -= 1 + return None + + +def _span_rule(state: StateInline, silent: bool): + if state.srcCharCode[state.pos] != 0x5B: # /* [ */ + return False + + maximum = state.posMax + labelStart = state.pos + 1 + labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, False) + + # parser failed to find ']', so it's not a valid span + if labelEnd < 0: + return False + + pos = labelEnd + 1 + + # check not at end of inline + if pos >= maximum: + return False + + try: + new_pos, attrs = parse(state.src[pos:]) + except ParseError: + return False + + pos += new_pos + 1 + + if not silent: + state.pos = labelStart + state.posMax = labelEnd + token = state.push("span_open", "span", 1) + token.attrs = attrs + state.md.inline.tokenize(state) + token = state.push("span_close", "span", -1) + + state.pos = pos + state.posMax = maximum + return True diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/deflist/__pycache__/index.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mdit_py_plugins/deflist/__pycache__/index.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c70cbe1f339a0c9718f2f1606cc81125b1626a68 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mdit_py_plugins/deflist/__pycache__/index.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/LICENSE b/parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2fd4e3dc74c1a292521480b8b652e70654003213 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2014-2015 Vitaly Puzrin, Alex Kocharin. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b7934250536ac52500f205906636cf336e93424 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/index.py b/parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/index.py new file mode 100644 index 0000000000000000000000000000000000000000..119fb7139359c82317b0891a1a9dc4b33d9dbcda --- /dev/null +++ b/parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/index.py @@ -0,0 +1,430 @@ +# Process footnotes +# + +from typing import List, Optional + +from markdown_it import MarkdownIt +from markdown_it.common.utils import isSpace +from markdown_it.helpers import parseLinkLabel +from markdown_it.rules_block import StateBlock +from markdown_it.rules_inline import StateInline +from markdown_it.token import Token + + +def footnote_plugin(md: MarkdownIt): + """Plugin ported from + `markdown-it-footnote `__. + + It is based on the + `pandoc definition `__: + + .. code-block:: md + + Normal footnote: + + Here is a footnote reference,[^1] and another.[^longnote] + + [^1]: Here is the footnote. + + [^longnote]: Here's one with multiple blocks. + + Subsequent paragraphs are indented to show that they + belong to the previous footnote. 
+ + """ + md.block.ruler.before( + "reference", "footnote_def", footnote_def, {"alt": ["paragraph", "reference"]} + ) + md.inline.ruler.after("image", "footnote_inline", footnote_inline) + md.inline.ruler.after("footnote_inline", "footnote_ref", footnote_ref) + md.core.ruler.after("inline", "footnote_tail", footnote_tail) + + md.add_render_rule("footnote_ref", render_footnote_ref) + md.add_render_rule("footnote_block_open", render_footnote_block_open) + md.add_render_rule("footnote_block_close", render_footnote_block_close) + md.add_render_rule("footnote_open", render_footnote_open) + md.add_render_rule("footnote_close", render_footnote_close) + md.add_render_rule("footnote_anchor", render_footnote_anchor) + + # helpers (only used in other rules, no tokens are attached to those) + md.add_render_rule("footnote_caption", render_footnote_caption) + md.add_render_rule("footnote_anchor_name", render_footnote_anchor_name) + + +# ## RULES ## + + +def footnote_def(state: StateBlock, startLine: int, endLine: int, silent: bool): + """Process footnote block definition""" + + start = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + + # line should be at least 5 chars - "[^x]:" + if start + 4 > maximum: + return False + + if state.srcCharCode[start] != 0x5B: # /* [ */ + return False + if state.srcCharCode[start + 1] != 0x5E: # /* ^ */ + return False + + pos = start + 2 + while pos < maximum: + if state.srcCharCode[pos] == 0x20: + return False + if state.srcCharCode[pos] == 0x5D: # /* ] */ + break + pos += 1 + + if pos == start + 2: # no empty footnote labels + return False + pos += 1 + if pos >= maximum or state.srcCharCode[pos] != 0x3A: # /* : */ + return False + if silent: + return True + pos += 1 + + label = state.src[start + 2 : pos - 2] + state.env.setdefault("footnotes", {}).setdefault("refs", {})[":" + label] = -1 + + open_token = Token("footnote_reference_open", "", 1) + open_token.meta = {"label": label} + open_token.level = 
state.level + state.level += 1 + state.tokens.append(open_token) + + oldBMark = state.bMarks[startLine] + oldTShift = state.tShift[startLine] + oldSCount = state.sCount[startLine] + oldParentType = state.parentType + + posAfterColon = pos + initial = offset = ( + state.sCount[startLine] + + pos + - (state.bMarks[startLine] + state.tShift[startLine]) + ) + + while pos < maximum: + ch = state.srcCharCode[pos] + + if isSpace(ch): + if ch == 0x09: + offset += 4 - offset % 4 + else: + offset += 1 + + else: + break + + pos += 1 + + state.tShift[startLine] = pos - posAfterColon + state.sCount[startLine] = offset - initial + + state.bMarks[startLine] = posAfterColon + state.blkIndent += 4 + state.parentType = "footnote" + + if state.sCount[startLine] < state.blkIndent: + state.sCount[startLine] += state.blkIndent + + state.md.block.tokenize(state, startLine, endLine, True) + + state.parentType = oldParentType + state.blkIndent -= 4 + state.tShift[startLine] = oldTShift + state.sCount[startLine] = oldSCount + state.bMarks[startLine] = oldBMark + + open_token.map = [startLine, state.line] + + token = Token("footnote_reference_close", "", -1) + state.level -= 1 + token.level = state.level + state.tokens.append(token) + + return True + + +def footnote_inline(state: StateInline, silent: bool): + """Process inline footnotes (^[...])""" + + maximum = state.posMax + start = state.pos + + if start + 2 >= maximum: + return False + if state.srcCharCode[start] != 0x5E: # /* ^ */ + return False + if state.srcCharCode[start + 1] != 0x5B: # /* [ */ + return False + + labelStart = start + 2 + labelEnd = parseLinkLabel(state, start + 1) + + # parser failed to find ']', so it's not a valid note + if labelEnd < 0: + return False + + # We found the end of the link, and know for a fact it's a valid link + # so all that's left to do is to call tokenizer. 
+ # + if not silent: + refs = state.env.setdefault("footnotes", {}).setdefault("list", {}) + footnoteId = len(refs) + + tokens: List[Token] = [] + state.md.inline.parse( + state.src[labelStart:labelEnd], state.md, state.env, tokens + ) + + token = state.push("footnote_ref", "", 0) + token.meta = {"id": footnoteId} + + refs[footnoteId] = {"content": state.src[labelStart:labelEnd], "tokens": tokens} + + state.pos = labelEnd + 1 + state.posMax = maximum + return True + + +def footnote_ref(state: StateInline, silent: bool): + """Process footnote references ([^...])""" + + maximum = state.posMax + start = state.pos + + # should be at least 4 chars - "[^x]" + if start + 3 > maximum: + return False + + if "footnotes" not in state.env or "refs" not in state.env["footnotes"]: + return False + if state.srcCharCode[start] != 0x5B: # /* [ */ + return False + if state.srcCharCode[start + 1] != 0x5E: # /* ^ */ + return False + + pos = start + 2 + while pos < maximum: + if state.srcCharCode[pos] == 0x20: + return False + if state.srcCharCode[pos] == 0x0A: + return False + if state.srcCharCode[pos] == 0x5D: # /* ] */ + break + pos += 1 + + if pos == start + 2: # no empty footnote labels + return False + if pos >= maximum: + return False + pos += 1 + + label = state.src[start + 2 : pos - 1] + if (":" + label) not in state.env["footnotes"]["refs"]: + return False + + if not silent: + if "list" not in state.env["footnotes"]: + state.env["footnotes"]["list"] = {} + + if state.env["footnotes"]["refs"][":" + label] < 0: + footnoteId = len(state.env["footnotes"]["list"]) + state.env["footnotes"]["list"][footnoteId] = {"label": label, "count": 0} + state.env["footnotes"]["refs"][":" + label] = footnoteId + else: + footnoteId = state.env["footnotes"]["refs"][":" + label] + + footnoteSubId = state.env["footnotes"]["list"][footnoteId]["count"] + state.env["footnotes"]["list"][footnoteId]["count"] += 1 + + token = state.push("footnote_ref", "", 0) + token.meta = {"id": footnoteId, "subId": 
footnoteSubId, "label": label} + + state.pos = pos + state.posMax = maximum + return True + + +def footnote_tail(state: StateBlock, *args, **kwargs): + """Post-processing step, to move footnote tokens to end of the token stream. + + Also removes un-referenced tokens. + """ + + insideRef = False + refTokens = {} + + if "footnotes" not in state.env: + return + + current: List[Token] = [] + tok_filter = [] + for tok in state.tokens: + + if tok.type == "footnote_reference_open": + insideRef = True + current = [] + currentLabel = tok.meta["label"] + tok_filter.append(False) + continue + + if tok.type == "footnote_reference_close": + insideRef = False + # prepend ':' to avoid conflict with Object.prototype members + refTokens[":" + currentLabel] = current + tok_filter.append(False) + continue + + if insideRef: + current.append(tok) + + tok_filter.append((not insideRef)) + + state.tokens = [t for t, f in zip(state.tokens, tok_filter) if f] + + if "list" not in state.env.get("footnotes", {}): + return + foot_list = state.env["footnotes"]["list"] + + token = Token("footnote_block_open", "", 1) + state.tokens.append(token) + + for i, foot_note in foot_list.items(): + token = Token("footnote_open", "", 1) + token.meta = {"id": i, "label": foot_note.get("label", None)} + # TODO propagate line positions of original foot note + # (but don't store in token.map, because this is used for scroll syncing) + state.tokens.append(token) + + if "tokens" in foot_note: + tokens = [] + + token = Token("paragraph_open", "p", 1) + token.block = True + tokens.append(token) + + token = Token("inline", "", 0) + token.children = foot_note["tokens"] + token.content = foot_note["content"] + tokens.append(token) + + token = Token("paragraph_close", "p", -1) + token.block = True + tokens.append(token) + + elif "label" in foot_note: + tokens = refTokens[":" + foot_note["label"]] + + state.tokens.extend(tokens) + if state.tokens[len(state.tokens) - 1].type == "paragraph_close": + lastParagraph: 
Optional[Token] = state.tokens.pop() + else: + lastParagraph = None + + t = ( + foot_note["count"] + if (("count" in foot_note) and (foot_note["count"] > 0)) + else 1 + ) + j = 0 + while j < t: + token = Token("footnote_anchor", "", 0) + token.meta = {"id": i, "subId": j, "label": foot_note.get("label", None)} + state.tokens.append(token) + j += 1 + + if lastParagraph: + state.tokens.append(lastParagraph) + + token = Token("footnote_close", "", -1) + state.tokens.append(token) + + token = Token("footnote_block_close", "", -1) + state.tokens.append(token) + + +######################################## +# Renderer partials + + +def render_footnote_anchor_name(self, tokens, idx, options, env): + n = str(tokens[idx].meta["id"] + 1) + prefix = "" + + doc_id = env.get("docId", None) + if isinstance(doc_id, str): + prefix = f"-{doc_id}-" + + return prefix + n + + +def render_footnote_caption(self, tokens, idx, options, env): + n = str(tokens[idx].meta["id"] + 1) + + if tokens[idx].meta.get("subId", -1) > 0: + n += ":" + str(tokens[idx].meta["subId"]) + + return "[" + n + "]" + + +def render_footnote_ref(self, tokens, idx, options, env): + ident = self.rules["footnote_anchor_name"](tokens, idx, options, env) + caption = self.rules["footnote_caption"](tokens, idx, options, env) + refid = ident + + if tokens[idx].meta.get("subId", -1) > 0: + refid += ":" + str(tokens[idx].meta["subId"]) + + return ( + '' + + caption + + "" + ) + + +def render_footnote_block_open(self, tokens, idx, options, env): + return ( + ( + '
\n' + if options.xhtmlOut + else '
\n' + ) + + '
\n' + + '
    \n' + ) + + +def render_footnote_block_close(self, tokens, idx, options, env): + return "
\n
\n" + + +def render_footnote_open(self, tokens, idx, options, env): + ident = self.rules["footnote_anchor_name"](tokens, idx, options, env) + + if tokens[idx].meta.get("subId", -1) > 0: + ident += ":" + tokens[idx].meta["subId"] + + return '
  • ' + + +def render_footnote_close(self, tokens, idx, options, env): + return "
  • \n" + + +def render_footnote_anchor(self, tokens, idx, options, env): + ident = self.rules["footnote_anchor_name"](tokens, idx, options, env) + + if tokens[idx].meta["subId"] > 0: + ident += ":" + str(tokens[idx].meta["subId"]) + + # ↩ with escape code to prevent display as Apple Emoji on iOS + return ' \u21a9\uFE0E' diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/front_matter/port.yaml b/parrot/lib/python3.10/site-packages/mdit_py_plugins/front_matter/port.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f7d145f10d3acef1650d43b94d3bf0415a8af530 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/mdit_py_plugins/front_matter/port.yaml @@ -0,0 +1,4 @@ +- package: markdown-it-front-matter + commit: b404f5d8fd536e7e9ddb276267ae0b6f76e9cf9d + date: Feb 7, 2020 + version: 0.2.1 diff --git a/parrot/lib/python3.10/site-packages/pyarrow/__init__.pxd b/parrot/lib/python3.10/site-packages/pyarrow/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..8cc54b4c6bfdaa0e347b3927d7932934916a1ade --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/__init__.pxd @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from libcpp.memory cimport shared_ptr +from pyarrow.includes.libarrow cimport (CArray, CBuffer, CDataType, + CField, CRecordBatch, CSchema, + CTable, CTensor, CSparseCOOTensor, + CSparseCSRMatrix, CSparseCSCMatrix, + CSparseCSFTensor) + +cdef extern from "arrow/python/pyarrow.h" namespace "arrow::py": + cdef int import_pyarrow() except -1 + cdef object wrap_buffer(const shared_ptr[CBuffer]& buffer) + cdef object wrap_data_type(const shared_ptr[CDataType]& type) + cdef object wrap_field(const shared_ptr[CField]& field) + cdef object wrap_schema(const shared_ptr[CSchema]& schema) + cdef object wrap_array(const shared_ptr[CArray]& sp_array) + cdef object wrap_tensor(const shared_ptr[CTensor]& sp_tensor) + cdef object wrap_sparse_tensor_coo( + const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor) + cdef object wrap_sparse_tensor_csr( + const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor) + cdef object wrap_sparse_tensor_csc( + const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor) + cdef object wrap_sparse_tensor_csf( + const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor) + cdef object wrap_table(const shared_ptr[CTable]& ctable) + cdef object wrap_batch(const shared_ptr[CRecordBatch]& cbatch) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_acero.pxd b/parrot/lib/python3.10/site-packages/pyarrow/_acero.pxd new file mode 100644 index 0000000000000000000000000000000000000000..4553aee9d6f16c391340aa45489471bdcfe0cb76 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_acero.pxd @@ -0,0 +1,44 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from pyarrow.lib cimport * +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_acero cimport * + + +cdef class ExecNodeOptions(_Weakrefable): + cdef: + shared_ptr[CExecNodeOptions] wrapped + + cdef void init(self, const shared_ptr[CExecNodeOptions]& sp) + cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil + + +cdef class Declaration(_Weakrefable): + + cdef: + CDeclaration decl + + cdef void init(self, const CDeclaration& c_decl) + + @staticmethod + cdef wrap(const CDeclaration& c_decl) + + cdef inline CDeclaration unwrap(self) nogil diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_compute.pxd b/parrot/lib/python3.10/site-packages/pyarrow/_compute.pxd new file mode 100644 index 0000000000000000000000000000000000000000..29b37da3ac4ef36106b10a09d7583bdba8d1a260 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_compute.pxd @@ -0,0 +1,70 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from pyarrow.lib cimport * +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + +cdef class UdfContext(_Weakrefable): + cdef: + CUdfContext c_context + + cdef void init(self, const CUdfContext& c_context) + + +cdef class FunctionOptions(_Weakrefable): + cdef: + shared_ptr[CFunctionOptions] wrapped + + cdef const CFunctionOptions* get_options(self) except NULL + cdef void init(self, const shared_ptr[CFunctionOptions]& sp) + + cdef inline shared_ptr[CFunctionOptions] unwrap(self) + + +cdef class _SortOptions(FunctionOptions): + pass + + +cdef CExpression _bind(Expression filter, Schema schema) except * + + +cdef class Expression(_Weakrefable): + + cdef: + CExpression expr + + cdef void init(self, const CExpression& sp) + + @staticmethod + cdef wrap(const CExpression& sp) + + cdef inline CExpression unwrap(self) + + @staticmethod + cdef Expression _expr_or_scalar(object expr) + + +cdef CExpression _true + +cdef CFieldRef _ensure_field_ref(value) except * + +cdef CSortOrder unwrap_sort_order(order) except * + +cdef CNullPlacement unwrap_null_placement(null_placement) except * diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py b/parrot/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py new file mode 100644 index 0000000000000000000000000000000000000000..150dbdb1175803e3c40a1bd2469a4df34ea57e4e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# 
or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +Custom documentation additions for compute functions. +""" + +function_doc_additions = {} + +function_doc_additions["filter"] = """ + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array(["a", "b", "c", None, "e"]) + >>> mask = pa.array([True, False, None, False, True]) + >>> arr.filter(mask) + + [ + "a", + "e" + ] + >>> arr.filter(mask, null_selection_behavior='emit_null') + + [ + "a", + null, + "e" + ] + """ + +function_doc_additions["mode"] = """ + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> arr = pa.array([1, 1, 2, 2, 3, 2, 2, 2]) + >>> modes = pc.mode(arr, 2) + >>> modes[0] + + >>> modes[1] + + """ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_csv.pxd b/parrot/lib/python3.10/site-packages/pyarrow/_csv.pxd new file mode 100644 index 0000000000000000000000000000000000000000..dcc562a41c795896d12fc7cdd3baebf0122bedc9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_csv.pxd @@ -0,0 +1,55 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport _Weakrefable + + +cdef class ConvertOptions(_Weakrefable): + cdef: + unique_ptr[CCSVConvertOptions] options + + @staticmethod + cdef ConvertOptions wrap(CCSVConvertOptions options) + + +cdef class ParseOptions(_Weakrefable): + cdef: + unique_ptr[CCSVParseOptions] options + object _invalid_row_handler + + @staticmethod + cdef ParseOptions wrap(CCSVParseOptions options) + + +cdef class ReadOptions(_Weakrefable): + cdef: + unique_ptr[CCSVReadOptions] options + public object encoding + + @staticmethod + cdef ReadOptions wrap(CCSVReadOptions options) + + +cdef class WriteOptions(_Weakrefable): + cdef: + unique_ptr[CCSVWriteOptions] options + + @staticmethod + cdef WriteOptions wrap(CCSVWriteOptions options) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx new file mode 100644 index 0000000000000000000000000000000000000000..a8cce3362225adcfd7e70b51e521f26d43d9a102 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx @@ -0,0 +1,51 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset support for ORC file format.""" + +from pyarrow.lib cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_dataset cimport * + +from pyarrow._dataset cimport FileFormat + + +cdef class OrcFileFormat(FileFormat): + + def __init__(self): + self.init(shared_ptr[CFileFormat](new COrcFileFormat())) + + def equals(self, OrcFileFormat other): + """ + Parameters + ---------- + other : pyarrow.dataset.OrcFileFormat + + Returns + ------- + True + """ + return True + + @property + def default_extname(self): + return "orc" + + def __reduce__(self): + return OrcFileFormat, tuple() diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx new file mode 100644 index 0000000000000000000000000000000000000000..c8f5e5b01bf81f32d641d70341fe74bf6bfbbc80 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx @@ -0,0 +1,178 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset support for Parquet encryption.""" + +from pyarrow.includes.libarrow_dataset_parquet cimport * +from pyarrow._parquet_encryption cimport * +from pyarrow._dataset_parquet cimport ParquetFragmentScanOptions, ParquetFileWriteOptions + + +cdef class ParquetEncryptionConfig(_Weakrefable): + """ + Core configuration class encapsulating parameters for high-level encryption + within the Parquet framework. + + The ParquetEncryptionConfig class serves as a bridge for passing encryption-related + parameters to the appropriate components within the Parquet library. It maintains references + to objects that define the encryption strategy, Key Management Service (KMS) configuration, + and specific encryption configurations for Parquet data. + + Parameters + ---------- + crypto_factory : pyarrow.parquet.encryption.CryptoFactory + Shared pointer to a `CryptoFactory` object. The `CryptoFactory` is responsible for + creating cryptographic components, such as encryptors and decryptors. + kms_connection_config : pyarrow.parquet.encryption.KmsConnectionConfig + Shared pointer to a `KmsConnectionConfig` object. This object holds the configuration + parameters necessary for connecting to a Key Management Service (KMS). + encryption_config : pyarrow.parquet.encryption.EncryptionConfiguration + Shared pointer to an `EncryptionConfiguration` object. This object defines specific + encryption settings for Parquet data, including the keys assigned to different columns. 
+ + Raises + ------ + ValueError + Raised if `encryption_config` is None. + """ + cdef: + shared_ptr[CParquetEncryptionConfig] c_config + + # Avoid mistakenly creating attributes + __slots__ = () + + def __cinit__(self, CryptoFactory crypto_factory, KmsConnectionConfig kms_connection_config, + EncryptionConfiguration encryption_config): + + cdef shared_ptr[CEncryptionConfiguration] c_encryption_config + + if crypto_factory is None: + raise ValueError("crypto_factory cannot be None") + + if kms_connection_config is None: + raise ValueError("kms_connection_config cannot be None") + + if encryption_config is None: + raise ValueError("encryption_config cannot be None") + + self.c_config.reset(new CParquetEncryptionConfig()) + + c_encryption_config = pyarrow_unwrap_encryptionconfig( + encryption_config) + + self.c_config.get().crypto_factory = pyarrow_unwrap_cryptofactory(crypto_factory) + self.c_config.get().kms_connection_config = pyarrow_unwrap_kmsconnectionconfig( + kms_connection_config) + self.c_config.get().encryption_config = c_encryption_config + + @staticmethod + cdef wrap(shared_ptr[CParquetEncryptionConfig] c_config): + cdef ParquetEncryptionConfig python_config = ParquetEncryptionConfig.__new__(ParquetEncryptionConfig) + python_config.c_config = c_config + return python_config + + cdef shared_ptr[CParquetEncryptionConfig] unwrap(self): + return self.c_config + + +cdef class ParquetDecryptionConfig(_Weakrefable): + """ + Core configuration class encapsulating parameters for high-level decryption + within the Parquet framework. + + ParquetDecryptionConfig is designed to pass decryption-related parameters to + the appropriate decryption components within the Parquet library. It holds references to + objects that define the decryption strategy, Key Management Service (KMS) configuration, + and specific decryption configurations for reading encrypted Parquet data. 
+ + Parameters + ---------- + crypto_factory : pyarrow.parquet.encryption.CryptoFactory + Shared pointer to a `CryptoFactory` object, pivotal in creating cryptographic + components for the decryption process. + kms_connection_config : pyarrow.parquet.encryption.KmsConnectionConfig + Shared pointer to a `KmsConnectionConfig` object, containing parameters necessary + for connecting to a Key Management Service (KMS) during decryption. + decryption_config : pyarrow.parquet.encryption.DecryptionConfiguration + Shared pointer to a `DecryptionConfiguration` object, specifying decryption settings + for reading encrypted Parquet data. + + Raises + ------ + ValueError + Raised if `decryption_config` is None. + """ + + cdef: + shared_ptr[CParquetDecryptionConfig] c_config + + # Avoid mistakingly creating attributes + __slots__ = () + + def __cinit__(self, CryptoFactory crypto_factory, KmsConnectionConfig kms_connection_config, + DecryptionConfiguration decryption_config): + + cdef shared_ptr[CDecryptionConfiguration] c_decryption_config + + if decryption_config is None: + raise ValueError( + "decryption_config cannot be None") + + self.c_config.reset(new CParquetDecryptionConfig()) + + c_decryption_config = pyarrow_unwrap_decryptionconfig( + decryption_config) + + self.c_config.get().crypto_factory = pyarrow_unwrap_cryptofactory(crypto_factory) + self.c_config.get().kms_connection_config = pyarrow_unwrap_kmsconnectionconfig( + kms_connection_config) + self.c_config.get().decryption_config = c_decryption_config + + @staticmethod + cdef wrap(shared_ptr[CParquetDecryptionConfig] c_config): + cdef ParquetDecryptionConfig python_config = ParquetDecryptionConfig.__new__(ParquetDecryptionConfig) + python_config.c_config = c_config + return python_config + + cdef shared_ptr[CParquetDecryptionConfig] unwrap(self): + return self.c_config + + +def set_encryption_config( + ParquetFileWriteOptions opts not None, + ParquetEncryptionConfig config not None +): + cdef 
shared_ptr[CParquetEncryptionConfig] c_config = config.unwrap() + opts.parquet_options.parquet_encryption_config = c_config + + +def set_decryption_properties( + ParquetFragmentScanOptions opts not None, + FileDecryptionProperties config not None +): + cdef CReaderProperties* reader_props = opts.reader_properties() + reader_props.file_decryption_properties(config.unwrap()) + + +def set_decryption_config( + ParquetFragmentScanOptions opts not None, + ParquetDecryptionConfig config not None +): + cdef shared_ptr[CParquetDecryptionConfig] c_config = config.unwrap() + opts.parquet_options.parquet_decryption_config = c_config diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_dlpack.pxi b/parrot/lib/python3.10/site-packages/pyarrow/_dlpack.pxi new file mode 100644 index 0000000000000000000000000000000000000000..c2f4cff64069195ad70f2ea271a842dfd166058c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_dlpack.pxi @@ -0,0 +1,46 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +cimport cpython +from cpython.pycapsule cimport PyCapsule_New + + +cdef void dlpack_pycapsule_deleter(object dltensor) noexcept: + cdef DLManagedTensor* dlm_tensor + cdef PyObject* err_type + cdef PyObject* err_value + cdef PyObject* err_traceback + + # Do nothing if the capsule has been consumed + if cpython.PyCapsule_IsValid(dltensor, "used_dltensor"): + return + + # An exception may be in-flight, we must save it in case + # we create another one + cpython.PyErr_Fetch(&err_type, &err_value, &err_traceback) + + dlm_tensor = cpython.PyCapsule_GetPointer(dltensor, 'dltensor') + if dlm_tensor == NULL: + cpython.PyErr_WriteUnraisable(dltensor) + # The deleter can be NULL if there is no way for the caller + # to provide a reasonable destructor + elif dlm_tensor.deleter: + dlm_tensor.deleter(dlm_tensor) + assert (not cpython.PyErr_Occurred()) + + # Set the error indicator from err_type, err_value, err_traceback + cpython.PyErr_Restore(err_type, err_value, err_traceback) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_feather.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_feather.pyx new file mode 100644 index 0000000000000000000000000000000000000000..7dd61c9a986ff1044fb7b5c22a2f24725710afd7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_feather.pyx @@ -0,0 +1,117 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# --------------------------------------------------------------------- +# Implement Feather file format + +# cython: profile=False +# distutils: language = c++ +# cython: language_level=3 + +from cython.operator cimport dereference as deref +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_feather cimport * +from pyarrow.lib cimport (check_status, Table, _Weakrefable, + get_writer, get_reader, pyarrow_wrap_table) +from pyarrow.lib import tobytes + + +class FeatherError(Exception): + pass + + +def write_feather(Table table, object dest, compression=None, + compression_level=None, chunksize=None, version=2): + cdef shared_ptr[COutputStream] sink + get_writer(dest, &sink) + + cdef CFeatherProperties properties + if version == 2: + properties.version = kFeatherV2Version + else: + properties.version = kFeatherV1Version + + if compression == 'zstd': + properties.compression = CCompressionType_ZSTD + elif compression == 'lz4': + properties.compression = CCompressionType_LZ4_FRAME + else: + properties.compression = CCompressionType_UNCOMPRESSED + + if chunksize is not None: + properties.chunksize = chunksize + + if compression_level is not None: + properties.compression_level = compression_level + + with nogil: + check_status(WriteFeather(deref(table.table), sink.get(), + properties)) + + +cdef class FeatherReader(_Weakrefable): + cdef: + shared_ptr[CFeatherReader] reader + + def __cinit__(self, source, c_bool use_memory_map, c_bool use_threads): + cdef: + shared_ptr[CRandomAccessFile] reader + 
CIpcReadOptions options = CIpcReadOptions.Defaults() + options.use_threads = use_threads + + get_reader(source, use_memory_map, &reader) + with nogil: + self.reader = GetResultValue(CFeatherReader.Open(reader, options)) + + @property + def version(self): + return self.reader.get().version() + + def read(self): + cdef shared_ptr[CTable] sp_table + with nogil: + check_status(self.reader.get() + .Read(&sp_table)) + + return pyarrow_wrap_table(sp_table) + + def read_indices(self, indices): + cdef: + shared_ptr[CTable] sp_table + vector[int] c_indices + + for index in indices: + c_indices.push_back(index) + with nogil: + check_status(self.reader.get() + .Read(c_indices, &sp_table)) + + return pyarrow_wrap_table(sp_table) + + def read_names(self, names): + cdef: + shared_ptr[CTable] sp_table + vector[c_string] c_names + + for name in names: + c_names.push_back(tobytes(name)) + with nogil: + check_status(self.reader.get() + .Read(c_names, &sp_table)) + + return pyarrow_wrap_table(sp_table) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_fs.pxd b/parrot/lib/python3.10/site-packages/pyarrow/_fs.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0df75530bbd6ec3552131e11acc5b0406627fe65 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_fs.pxd @@ -0,0 +1,91 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow_fs cimport * +from pyarrow.lib import _detect_compression, frombytes, tobytes +from pyarrow.lib cimport * + + +cpdef enum FileType: + NotFound = CFileType_NotFound + Unknown = CFileType_Unknown + File = CFileType_File + Directory = CFileType_Directory + + +cdef class FileInfo(_Weakrefable): + cdef: + CFileInfo info + + @staticmethod + cdef wrap(CFileInfo info) + + cdef inline CFileInfo unwrap(self) nogil + + @staticmethod + cdef CFileInfo unwrap_safe(obj) + + +cdef class FileSelector(_Weakrefable): + cdef: + CFileSelector selector + + @staticmethod + cdef FileSelector wrap(CFileSelector selector) + + cdef inline CFileSelector unwrap(self) nogil + + +cdef class FileSystem(_Weakrefable): + cdef: + shared_ptr[CFileSystem] wrapped + CFileSystem* fs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) + + @staticmethod + cdef wrap(const shared_ptr[CFileSystem]& sp) + + cdef inline shared_ptr[CFileSystem] unwrap(self) nogil + + +cdef class LocalFileSystem(FileSystem): + cdef init(self, const shared_ptr[CFileSystem]& wrapped) + + +cdef class SubTreeFileSystem(FileSystem): + cdef: + CSubTreeFileSystem* subtreefs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) + + +cdef class _MockFileSystem(FileSystem): + cdef: + CMockFileSystem* mockfs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) + + +cdef class PyFileSystem(FileSystem): + cdef: + CPyFileSystem* pyfs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) diff --git 
a/parrot/lib/python3.10/site-packages/pyarrow/_gcsfs.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_gcsfs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..5e69413cea953639e36ba5485cb383b88193748b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_gcsfs.pyx @@ -0,0 +1,212 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from cython cimport binding + +from pyarrow.lib cimport (pyarrow_wrap_metadata, + pyarrow_unwrap_metadata) +from pyarrow.lib import frombytes, tobytes, ensure_metadata +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_fs cimport * +from pyarrow._fs cimport FileSystem, TimePoint_to_ns, PyDateTime_to_TimePoint + +from datetime import datetime, timedelta, timezone + + +cdef class GcsFileSystem(FileSystem): + """ + Google Cloud Storage (GCS) backed FileSystem implementation + + By default uses the process described in https://google.aip.dev/auth/4110 + to resolve credentials. If not running on Google Cloud Platform (GCP), + this generally requires the environment variable + GOOGLE_APPLICATION_CREDENTIALS to point to a JSON file + containing credentials. 
+ + Note: GCS buckets are special and the operations available on them may be + limited or more expensive than expected compared to local file systems. + + Note: When pickling a GcsFileSystem that uses default credentials, resolution + credentials are not stored in the serialized data. Therefore, when unpickling + it is assumed that the necessary credentials are in place for the target + process. + + Parameters + ---------- + anonymous : boolean, default False + Whether to connect anonymously. + If true, will not attempt to look up credentials using standard GCP + configuration methods. + access_token : str, default None + GCP access token. If provided, temporary credentials will be fetched by + assuming this role; also, a `credential_token_expiration` must be + specified as well. + target_service_account : str, default None + An optional service account to try to impersonate when accessing GCS. This + requires the specified credential user or service account to have the necessary + permissions. + credential_token_expiration : datetime, default None + Expiration for credential generated with an access token. Must be specified + if `access_token` is specified. + default_bucket_location : str, default 'US' + GCP region to create buckets in. + scheme : str, default 'https' + GCS connection transport scheme. + endpoint_override : str, default None + Override endpoint with a connect string such as "localhost:9000" + default_metadata : mapping or pyarrow.KeyValueMetadata, default None + Default metadata for `open_output_stream`. This will be ignored if + non-empty metadata is passed to `open_output_stream`. + retry_time_limit : timedelta, default None + Set the maximum amount of time the GCS client will attempt to retry + transient errors. Subsecond granularity is ignored. + project_id : str, default None + The GCP project identifier to use for creating buckets. + If not set, the library uses the GOOGLE_CLOUD_PROJECT environment + variable. 
Most I/O operations do not need a project id, only applications + that create new buckets need a project id. + """ + + cdef: + CGcsFileSystem* gcsfs + + def __init__(self, *, bint anonymous=False, access_token=None, + target_service_account=None, credential_token_expiration=None, + default_bucket_location='US', + scheme=None, + endpoint_override=None, + default_metadata=None, + retry_time_limit=None, + project_id=None): + cdef: + CGcsOptions options + shared_ptr[CGcsFileSystem] wrapped + double time_limit_seconds + + # Intentional use of truthiness because empty strings aren't valid and + # for reconstruction from pickling will give empty strings. + if anonymous and (target_service_account or access_token): + raise ValueError( + 'anonymous option is not compatible with target_service_account and ' + 'access_token' + ) + elif bool(access_token) != bool(credential_token_expiration): + raise ValueError( + 'access_token and credential_token_expiration must be ' + 'specified together' + ) + + elif anonymous: + options = CGcsOptions.Anonymous() + elif access_token: + if not isinstance(credential_token_expiration, datetime): + raise ValueError( + "credential_token_expiration must be a datetime") + options = CGcsOptions.FromAccessToken( + tobytes(access_token), + PyDateTime_to_TimePoint(credential_token_expiration)) + else: + options = CGcsOptions.Defaults() + + # Target service account requires base credentials so + # it is not part of the if/else chain above which only + # handles base credentials. 
+ if target_service_account: + options = CGcsOptions.FromImpersonatedServiceAccount( + options.credentials, tobytes(target_service_account)) + + options.default_bucket_location = tobytes(default_bucket_location) + + if scheme is not None: + options.scheme = tobytes(scheme) + if endpoint_override is not None: + options.endpoint_override = tobytes(endpoint_override) + if default_metadata is not None: + options.default_metadata = pyarrow_unwrap_metadata( + ensure_metadata(default_metadata)) + if retry_time_limit is not None: + time_limit_seconds = retry_time_limit.total_seconds() + options.retry_limit_seconds = time_limit_seconds + if project_id is not None: + options.project_id = tobytes(project_id) + + with nogil: + wrapped = GetResultValue(CGcsFileSystem.Make(options)) + + self.init( wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.gcsfs = wrapped.get() + + def _expiration_datetime_from_options(self): + expiration_ns = TimePoint_to_ns( + self.gcsfs.options().credentials.expiration()) + if expiration_ns == 0: + return None + return datetime.fromtimestamp(expiration_ns / 1.0e9, timezone.utc) + + @staticmethod + @binding(True) # Required for cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. 
+ return GcsFileSystem(**kwargs) + + def __reduce__(self): + cdef CGcsOptions opts = self.gcsfs.options() + service_account = frombytes(opts.credentials.target_service_account()) + expiration_dt = self._expiration_datetime_from_options() + retry_time_limit = None + if opts.retry_limit_seconds.has_value(): + retry_time_limit = timedelta( + seconds=opts.retry_limit_seconds.value()) + project_id = None + if opts.project_id.has_value(): + project_id = frombytes(opts.project_id.value()) + return ( + GcsFileSystem._reconstruct, (dict( + access_token=frombytes(opts.credentials.access_token()), + anonymous=opts.credentials.anonymous(), + credential_token_expiration=expiration_dt, + target_service_account=service_account, + scheme=frombytes(opts.scheme), + endpoint_override=frombytes(opts.endpoint_override), + default_bucket_location=frombytes( + opts.default_bucket_location), + default_metadata=pyarrow_wrap_metadata(opts.default_metadata), + retry_time_limit=retry_time_limit, + project_id=project_id + ),)) + + @property + def default_bucket_location(self): + """ + The GCP location this filesystem will write to. + """ + return frombytes(self.gcsfs.options().default_bucket_location) + + @property + def project_id(self): + """ + The GCP project id this filesystem will use. + """ + if self.gcsfs.options().project_id.has_value(): + return frombytes(self.gcsfs.options().project_id.value()) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_generated_version.py b/parrot/lib/python3.10/site-packages/pyarrow/_generated_version.py new file mode 100644 index 0000000000000000000000000000000000000000..db7664fbc2d7887d421b45d627eeb4cb83045d3e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_generated_version.py @@ -0,0 +1,16 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple, Union + VERSION_TUPLE = Tuple[Union[int, str], ...] 
+else: + VERSION_TUPLE = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE + +__version__ = version = '17.0.0' +__version_tuple__ = version_tuple = (17, 0, 0) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_hdfs.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_hdfs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..c426337a12ec184feb2d699e1e685228c249466e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_hdfs.pyx @@ -0,0 +1,160 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from cython cimport binding + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_fs cimport * +from pyarrow._fs cimport FileSystem + +from pyarrow.lib import frombytes, tobytes +from pyarrow.util import _stringify_path + + +cdef class HadoopFileSystem(FileSystem): + """ + HDFS backed FileSystem implementation + + Parameters + ---------- + host : str + HDFS host to connect to. Set to "default" for fs.defaultFS from + core-site.xml. + port : int, default 8020 + HDFS port to connect to. Set to 0 for default or logical (HA) nodes. 
+ user : str, default None + Username when connecting to HDFS; None implies login user. + replication : int, default 3 + Number of copies each block will have. + buffer_size : int, default 0 + If 0, no buffering will happen otherwise the size of the temporary read + and write buffer. + default_block_size : int, default None + None means the default configuration for HDFS, a typical block size is + 128 MB. + kerb_ticket : string or path, default None + If not None, the path to the Kerberos ticket cache. + extra_conf : dict, default None + Extra key/value pairs for configuration; will override any + hdfs-site.xml properties. + + Examples + -------- + >>> from pyarrow import fs + >>> hdfs = fs.HadoopFileSystem(host, port, user=user, kerb_ticket=ticket_cache_path) # doctest: +SKIP + + For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`. + """ + + cdef: + CHadoopFileSystem* hdfs + + def __init__(self, str host, int port=8020, *, str user=None, + int replication=3, int buffer_size=0, + default_block_size=None, kerb_ticket=None, + extra_conf=None): + cdef: + CHdfsOptions options + shared_ptr[CHadoopFileSystem] wrapped + + if not host.startswith(('hdfs://', 'viewfs://')) and host != "default": + # TODO(kszucs): do more sanitization + host = 'hdfs://{}'.format(host) + + options.ConfigureEndPoint(tobytes(host), int(port)) + options.ConfigureReplication(replication) + options.ConfigureBufferSize(buffer_size) + + if user is not None: + options.ConfigureUser(tobytes(user)) + if default_block_size is not None: + options.ConfigureBlockSize(default_block_size) + if kerb_ticket is not None: + options.ConfigureKerberosTicketCachePath( + tobytes(_stringify_path(kerb_ticket))) + if extra_conf is not None: + for k, v in extra_conf.items(): + options.ConfigureExtraConf(tobytes(k), tobytes(v)) + + with nogil: + wrapped = GetResultValue(CHadoopFileSystem.Make(options)) + self.init( wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + 
FileSystem.init(self, wrapped) + self.hdfs = wrapped.get() + + @staticmethod + def from_uri(uri): + """ + Instantiate HadoopFileSystem object from an URI string. + + The following two calls are equivalent + + * ``HadoopFileSystem.from_uri('hdfs://localhost:8020/?user=test\ +&replication=1')`` + * ``HadoopFileSystem('localhost', port=8020, user='test', \ +replication=1)`` + + Parameters + ---------- + uri : str + A string URI describing the connection to HDFS. + In order to change the user, replication, buffer_size or + default_block_size pass the values as query parts. + + Returns + ------- + HadoopFileSystem + """ + cdef: + HadoopFileSystem self = HadoopFileSystem.__new__(HadoopFileSystem) + shared_ptr[CHadoopFileSystem] wrapped + CHdfsOptions options + + options = GetResultValue(CHdfsOptions.FromUriString(tobytes(uri))) + with nogil: + wrapped = GetResultValue(CHadoopFileSystem.Make(options)) + + self.init( wrapped) + return self + + @staticmethod + @binding(True) # Required for cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. 
+ return HadoopFileSystem(**kwargs) + + def __reduce__(self): + cdef CHdfsOptions opts = self.hdfs.options() + return ( + HadoopFileSystem._reconstruct, (dict( + host=frombytes(opts.connection_config.host), + port=opts.connection_config.port, + user=frombytes(opts.connection_config.user), + replication=opts.replication, + buffer_size=opts.buffer_size, + default_block_size=opts.default_block_size, + kerb_ticket=frombytes(opts.connection_config.kerb_ticket), + extra_conf={frombytes(k): frombytes(v) + for k, v in opts.connection_config.extra_conf}, + ),) + ) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_json.pxd b/parrot/lib/python3.10/site-packages/pyarrow/_json.pxd new file mode 100644 index 0000000000000000000000000000000000000000..42a0a678a9b6a543c657c905f3eb4fa4490b6edf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_json.pxd @@ -0,0 +1,36 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# cython: language_level = 3 + +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport _Weakrefable + + +cdef class ParseOptions(_Weakrefable): + cdef: + CJSONParseOptions options + + @staticmethod + cdef ParseOptions wrap(CJSONParseOptions options) + +cdef class ReadOptions(_Weakrefable): + cdef: + CJSONReadOptions options + + @staticmethod + cdef ReadOptions wrap(CJSONReadOptions options) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_json.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_json.pyx new file mode 100644 index 0000000000000000000000000000000000000000..d36dad67abbaa575d8963273c884dd9e8f047b13 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_json.pyx @@ -0,0 +1,310 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport (_Weakrefable, MemoryPool, + maybe_unbox_memory_pool, + get_input_stream, pyarrow_wrap_table, + pyarrow_wrap_schema, pyarrow_unwrap_schema) + + +cdef class ReadOptions(_Weakrefable): + """ + Options for reading JSON files. 
+ + Parameters + ---------- + use_threads : bool, optional (default True) + Whether to use multiple threads to accelerate reading + block_size : int, optional + How much bytes to process at a time from the input stream. + This will determine multi-threading granularity as well as + the size of individual chunks in the Table. + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, use_threads=None, block_size=None): + self.options = CJSONReadOptions.Defaults() + if use_threads is not None: + self.use_threads = use_threads + if block_size is not None: + self.block_size = block_size + + @property + def use_threads(self): + """ + Whether to use multiple threads to accelerate reading. + """ + return self.options.use_threads + + @use_threads.setter + def use_threads(self, value): + self.options.use_threads = value + + @property + def block_size(self): + """ + How much bytes to process at a time from the input stream. + + This will determine multi-threading granularity as well as the size of + individual chunks in the Table. + """ + return self.options.block_size + + @block_size.setter + def block_size(self, value): + self.options.block_size = value + + def __reduce__(self): + return ReadOptions, ( + self.use_threads, + self.block_size + ) + + def equals(self, ReadOptions other): + """ + Parameters + ---------- + other : pyarrow.json.ReadOptions + + Returns + ------- + bool + """ + return ( + self.use_threads == other.use_threads and + self.block_size == other.block_size + ) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + @staticmethod + cdef ReadOptions wrap(CJSONReadOptions options): + out = ReadOptions() + out.options = options # shallow copy + return out + + +cdef class ParseOptions(_Weakrefable): + """ + Options for parsing JSON files. 
+ + Parameters + ---------- + explicit_schema : Schema, optional (default None) + Optional explicit schema (no type inference, ignores other fields). + newlines_in_values : bool, optional (default False) + Whether objects may be printed across multiple lines (for example + pretty printed). If false, input must end with an empty line. + unexpected_field_behavior : str, default "infer" + How JSON fields outside of explicit_schema (if given) are treated. + + Possible behaviors: + + - "ignore": unexpected JSON fields are ignored + - "error": error out on unexpected JSON fields + - "infer": unexpected JSON fields are type-inferred and included in + the output + """ + + __slots__ = () + + def __init__(self, explicit_schema=None, newlines_in_values=None, + unexpected_field_behavior=None): + self.options = CJSONParseOptions.Defaults() + if explicit_schema is not None: + self.explicit_schema = explicit_schema + if newlines_in_values is not None: + self.newlines_in_values = newlines_in_values + if unexpected_field_behavior is not None: + self.unexpected_field_behavior = unexpected_field_behavior + + def __reduce__(self): + return ParseOptions, ( + self.explicit_schema, + self.newlines_in_values, + self.unexpected_field_behavior + ) + + @property + def explicit_schema(self): + """ + Optional explicit schema (no type inference, ignores other fields) + """ + if self.options.explicit_schema.get() == NULL: + return None + else: + return pyarrow_wrap_schema(self.options.explicit_schema) + + @explicit_schema.setter + def explicit_schema(self, value): + self.options.explicit_schema = pyarrow_unwrap_schema(value) + + @property + def newlines_in_values(self): + """ + Whether newline characters are allowed in JSON values. + Setting this to True reduces the performance of multi-threaded + JSON reading. 
+ """ + return self.options.newlines_in_values + + @newlines_in_values.setter + def newlines_in_values(self, value): + self.options.newlines_in_values = value + + @property + def unexpected_field_behavior(self): + """ + How JSON fields outside of explicit_schema (if given) are treated. + + Possible behaviors: + + - "ignore": unexpected JSON fields are ignored + - "error": error out on unexpected JSON fields + - "infer": unexpected JSON fields are type-inferred and included in + the output + + Set to "infer" by default. + """ + v = self.options.unexpected_field_behavior + if v == CUnexpectedFieldBehavior_Ignore: + return "ignore" + elif v == CUnexpectedFieldBehavior_Error: + return "error" + elif v == CUnexpectedFieldBehavior_InferType: + return "infer" + else: + raise ValueError('Unexpected value for unexpected_field_behavior') + + @unexpected_field_behavior.setter + def unexpected_field_behavior(self, value): + cdef CUnexpectedFieldBehavior v + + if value == "ignore": + v = CUnexpectedFieldBehavior_Ignore + elif value == "error": + v = CUnexpectedFieldBehavior_Error + elif value == "infer": + v = CUnexpectedFieldBehavior_InferType + else: + raise ValueError( + "Unexpected value `{}` for `unexpected_field_behavior`, pass " + "either `ignore`, `error` or `infer`.".format(value) + ) + + self.options.unexpected_field_behavior = v + + def equals(self, ParseOptions other): + """ + Parameters + ---------- + other : pyarrow.json.ParseOptions + + Returns + ------- + bool + """ + return ( + self.explicit_schema == other.explicit_schema and + self.newlines_in_values == other.newlines_in_values and + self.unexpected_field_behavior == other.unexpected_field_behavior + ) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + @staticmethod + cdef ParseOptions wrap(CJSONParseOptions options): + out = ParseOptions() + out.options = options # shallow copy + return out + + +cdef _get_reader(input_file, shared_ptr[CInputStream]* out): + 
use_memory_map = False + get_input_stream(input_file, use_memory_map, out) + +cdef _get_read_options(ReadOptions read_options, CJSONReadOptions* out): + if read_options is None: + out[0] = CJSONReadOptions.Defaults() + else: + out[0] = read_options.options + +cdef _get_parse_options(ParseOptions parse_options, CJSONParseOptions* out): + if parse_options is None: + out[0] = CJSONParseOptions.Defaults() + else: + out[0] = parse_options.options + + +def read_json(input_file, read_options=None, parse_options=None, + MemoryPool memory_pool=None): + """ + Read a Table from a stream of JSON data. + + Parameters + ---------- + input_file : str, path or file-like object + The location of JSON data. Currently only the line-delimited JSON + format is supported. + read_options : pyarrow.json.ReadOptions, optional + Options for the JSON reader (see ReadOptions constructor for defaults). + parse_options : pyarrow.json.ParseOptions, optional + Options for the JSON parser + (see ParseOptions constructor for defaults). + memory_pool : MemoryPool, optional + Pool to allocate Table memory from. + + Returns + ------- + :class:`pyarrow.Table` + Contents of the JSON file as a in-memory table. 
+ """ + cdef: + shared_ptr[CInputStream] stream + CJSONReadOptions c_read_options + CJSONParseOptions c_parse_options + shared_ptr[CJSONReader] reader + shared_ptr[CTable] table + + _get_reader(input_file, &stream) + _get_read_options(read_options, &c_read_options) + _get_parse_options(parse_options, &c_parse_options) + + reader = GetResultValue( + CJSONReader.Make(maybe_unbox_memory_pool(memory_pool), + stream, c_read_options, c_parse_options)) + + with nogil: + table = GetResultValue(reader.get().Read()) + + return pyarrow_wrap_table(table) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_orc.pxd b/parrot/lib/python3.10/site-packages/pyarrow/_orc.pxd new file mode 100644 index 0000000000000000000000000000000000000000..aecbba317aecd1b331261ca600058e30e0c4f184 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_orc.pxd @@ -0,0 +1,134 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# distutils: language = c++ +# cython: language_level = 3 + +from libcpp cimport bool as c_bool +from libc.string cimport const_char +from libcpp.vector cimport vector as std_vector +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport (CArray, CSchema, CStatus, + CResult, CTable, CMemoryPool, + CKeyValueMetadata, + CRecordBatch, + CTable, CCompressionType, + CRandomAccessFile, COutputStream, + TimeUnit) + +cdef extern from "arrow/adapters/orc/options.h" \ + namespace "arrow::adapters::orc" nogil: + cdef enum CompressionStrategy \ + " arrow::adapters::orc::CompressionStrategy": + _CompressionStrategy_SPEED \ + " arrow::adapters::orc::CompressionStrategy::kSpeed" + _CompressionStrategy_COMPRESSION \ + " arrow::adapters::orc::CompressionStrategy::kCompression" + + cdef enum WriterId" arrow::adapters::orc::WriterId": + _WriterId_ORC_JAVA_WRITER" arrow::adapters::orc::WriterId::kOrcJava" + _WriterId_ORC_CPP_WRITER" arrow::adapters::orc::WriterId::kOrcCpp" + _WriterId_PRESTO_WRITER" arrow::adapters::orc::WriterId::kPresto" + _WriterId_SCRITCHLEY_GO \ + " arrow::adapters::orc::WriterId::kScritchleyGo" + _WriterId_TRINO_WRITER" arrow::adapters::orc::WriterId::kTrino" + _WriterId_UNKNOWN_WRITER" arrow::adapters::orc::WriterId::kUnknown" + + cdef enum WriterVersion" arrow::adapters::orc::WriterVersion": + _WriterVersion_ORIGINAL \ + " arrow::adapters::orc::WriterVersion::kOriginal" + _WriterVersion_HIVE_8732 \ + " arrow::adapters::orc::WriterVersion::kHive8732" + _WriterVersion_HIVE_4243 \ + " arrow::adapters::orc::WriterVersion::kHive4243" + _WriterVersion_HIVE_12055 \ + " arrow::adapters::orc::WriterVersion::kHive12055" + _WriterVersion_HIVE_13083 \ + " arrow::adapters::orc::WriterVersion::kHive13083" + _WriterVersion_ORC_101" arrow::adapters::orc::WriterVersion::kOrc101" + _WriterVersion_ORC_135" arrow::adapters::orc::WriterVersion::kOrc135" + _WriterVersion_ORC_517" arrow::adapters::orc::WriterVersion::kOrc517" + _WriterVersion_ORC_203" 
arrow::adapters::orc::WriterVersion::kOrc203" + _WriterVersion_ORC_14" arrow::adapters::orc::WriterVersion::kOrc14" + _WriterVersion_MAX" arrow::adapters::orc::WriterVersion::kMax" + + cdef cppclass FileVersion" arrow::adapters::orc::FileVersion": + FileVersion(uint32_t major_version, uint32_t minor_version) + uint32_t major_version() + uint32_t minor_version() + c_string ToString() + + cdef struct WriteOptions" arrow::adapters::orc::WriteOptions": + int64_t batch_size + FileVersion file_version + int64_t stripe_size + CCompressionType compression + int64_t compression_block_size + CompressionStrategy compression_strategy + int64_t row_index_stride + double padding_tolerance + double dictionary_key_size_threshold + std_vector[int64_t] bloom_filter_columns + double bloom_filter_fpp + + +cdef extern from "arrow/adapters/orc/adapter.h" \ + namespace "arrow::adapters::orc" nogil: + + cdef cppclass ORCFileReader: + @staticmethod + CResult[unique_ptr[ORCFileReader]] Open( + const shared_ptr[CRandomAccessFile]& file, + CMemoryPool* pool) + + CResult[shared_ptr[const CKeyValueMetadata]] ReadMetadata() + + CResult[shared_ptr[CSchema]] ReadSchema() + + CResult[shared_ptr[CRecordBatch]] ReadStripe(int64_t stripe) + CResult[shared_ptr[CRecordBatch]] ReadStripe( + int64_t stripe, std_vector[c_string]) + + CResult[shared_ptr[CTable]] Read() + CResult[shared_ptr[CTable]] Read(std_vector[c_string]) + + int64_t NumberOfStripes() + int64_t NumberOfRows() + FileVersion GetFileVersion() + c_string GetSoftwareVersion() + CResult[CCompressionType] GetCompression() + int64_t GetCompressionSize() + int64_t GetRowIndexStride() + WriterId GetWriterId() + int32_t GetWriterIdValue() + WriterVersion GetWriterVersion() + int64_t GetNumberOfStripeStatistics() + int64_t GetContentLength() + int64_t GetStripeStatisticsLength() + int64_t GetFileFooterLength() + int64_t GetFilePostscriptLength() + int64_t GetFileLength() + c_string GetSerializedFileTail() + + cdef cppclass ORCFileWriter: + 
@staticmethod + CResult[unique_ptr[ORCFileWriter]] Open( + COutputStream* output_stream, const WriteOptions& writer_options) + + CStatus Write(const CTable& table) + + CStatus Close() diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd b/parrot/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd new file mode 100644 index 0000000000000000000000000000000000000000..d52669501a4044838e576d3dac8f8a422874eaa6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libparquet_encryption cimport * +from pyarrow._parquet cimport (ParquetCipher, + CFileEncryptionProperties, + CFileDecryptionProperties, + FileEncryptionProperties, + FileDecryptionProperties, + ParquetCipher_AES_GCM_V1, + ParquetCipher_AES_GCM_CTR_V1) +from pyarrow.lib cimport _Weakrefable + +cdef class CryptoFactory(_Weakrefable): + cdef shared_ptr[CPyCryptoFactory] factory + cdef init(self, callable_client_factory) + cdef inline shared_ptr[CPyCryptoFactory] unwrap(self) + +cdef class EncryptionConfiguration(_Weakrefable): + cdef shared_ptr[CEncryptionConfiguration] configuration + cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil + +cdef class DecryptionConfiguration(_Weakrefable): + cdef shared_ptr[CDecryptionConfiguration] configuration + cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil + +cdef class KmsConnectionConfig(_Weakrefable): + cdef shared_ptr[CKmsConnectionConfig] configuration + cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil + + @staticmethod + cdef wrap(const CKmsConnectionConfig& config) + + +cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except * +cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except * +cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except * +cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except * diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx new file mode 100644 index 0000000000000000000000000000000000000000..d0a9a6612328c547bc724d6fcf2d37ae5e7badd3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx @@ -0,0 +1,484 @@ +# 
Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ + +from datetime import timedelta + +from cython.operator cimport dereference as deref +from libcpp.memory cimport shared_ptr +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport _Weakrefable +from pyarrow.lib import tobytes, frombytes + + +cdef ParquetCipher cipher_from_name(name): + name = name.upper() + if name == 'AES_GCM_V1': + return ParquetCipher_AES_GCM_V1 + elif name == 'AES_GCM_CTR_V1': + return ParquetCipher_AES_GCM_CTR_V1 + else: + raise ValueError(f'Invalid cipher name: {name!r}') + + +cdef cipher_to_name(ParquetCipher cipher): + if ParquetCipher_AES_GCM_V1 == cipher: + return 'AES_GCM_V1' + elif ParquetCipher_AES_GCM_CTR_V1 == cipher: + return 'AES_GCM_CTR_V1' + else: + raise ValueError('Invalid cipher value: {0}'.format(cipher)) + +cdef class EncryptionConfiguration(_Weakrefable): + """Configuration of the encryption, such as which columns to encrypt""" + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, footer_key, *, column_keys=None, + encryption_algorithm=None, + plaintext_footer=None, double_wrapping=None, + cache_lifetime=None, 
internal_key_material=None, + data_key_length_bits=None): + self.configuration.reset( + new CEncryptionConfiguration(tobytes(footer_key))) + if column_keys is not None: + self.column_keys = column_keys + if encryption_algorithm is not None: + self.encryption_algorithm = encryption_algorithm + if plaintext_footer is not None: + self.plaintext_footer = plaintext_footer + if double_wrapping is not None: + self.double_wrapping = double_wrapping + if cache_lifetime is not None: + self.cache_lifetime = cache_lifetime + if internal_key_material is not None: + self.internal_key_material = internal_key_material + if data_key_length_bits is not None: + self.data_key_length_bits = data_key_length_bits + + @property + def footer_key(self): + """ID of the master key for footer encryption/signing""" + return frombytes(self.configuration.get().footer_key) + + @property + def column_keys(self): + """ + List of columns to encrypt, with master key IDs. + """ + column_keys_str = frombytes(self.configuration.get().column_keys) + # Convert from "masterKeyID:colName,colName;masterKeyID:colName..." + # (see HIVE-21848) to dictionary of master key ID to column name lists + column_keys_to_key_list_str = dict(subString.replace(" ", "").split( + ":") for subString in column_keys_str.split(";")) + column_keys_dict = {k: v.split( + ",") for k, v in column_keys_to_key_list_str.items()} + return column_keys_dict + + @column_keys.setter + def column_keys(self, dict value): + if value is not None: + # convert a dictionary such as + # '{"key1": ["col1 ", "col2"], "key2": ["col3 ", "col4"]}'' + # to the string defined by the spec + # 'key1: col1 , col2; key2: col3 , col4' + column_keys = "; ".join( + ["{}: {}".format(k, ", ".join(v)) for k, v in value.items()]) + self.configuration.get().column_keys = tobytes(column_keys) + + @property + def encryption_algorithm(self): + """Parquet encryption algorithm. 
+ Can be "AES_GCM_V1" (default), or "AES_GCM_CTR_V1".""" + return cipher_to_name(self.configuration.get().encryption_algorithm) + + @encryption_algorithm.setter + def encryption_algorithm(self, value): + cipher = cipher_from_name(value) + self.configuration.get().encryption_algorithm = cipher + + @property + def plaintext_footer(self): + """Write files with plaintext footer.""" + return self.configuration.get().plaintext_footer + + @plaintext_footer.setter + def plaintext_footer(self, value): + self.configuration.get().plaintext_footer = value + + @property + def double_wrapping(self): + """Use double wrapping - where data encryption keys (DEKs) are + encrypted with key encryption keys (KEKs), which in turn are + encrypted with master keys. + If set to false, use single wrapping - where DEKs are + encrypted directly with master keys.""" + return self.configuration.get().double_wrapping + + @double_wrapping.setter + def double_wrapping(self, value): + self.configuration.get().double_wrapping = value + + @property + def cache_lifetime(self): + """Lifetime of cached entities (key encryption keys, + local wrapping keys, KMS client objects).""" + return timedelta( + seconds=self.configuration.get().cache_lifetime_seconds) + + @cache_lifetime.setter + def cache_lifetime(self, value): + if not isinstance(value, timedelta): + raise TypeError("cache_lifetime should be a timedelta") + self.configuration.get().cache_lifetime_seconds = value.total_seconds() + + @property + def internal_key_material(self): + """Store key material inside Parquet file footers; this mode doesn’t + produce additional files. 
If set to false, key material is stored in + separate files in the same folder, which enables key rotation for + immutable Parquet files.""" + return self.configuration.get().internal_key_material + + @internal_key_material.setter + def internal_key_material(self, value): + self.configuration.get().internal_key_material = value + + @property + def data_key_length_bits(self): + """Length of data encryption keys (DEKs), randomly generated by parquet key + management tools. Can be 128, 192 or 256 bits.""" + return self.configuration.get().data_key_length_bits + + @data_key_length_bits.setter + def data_key_length_bits(self, value): + self.configuration.get().data_key_length_bits = value + + cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil: + return self.configuration + + +cdef class DecryptionConfiguration(_Weakrefable): + """Configuration of the decryption, such as cache timeout.""" + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, *, cache_lifetime=None): + self.configuration.reset(new CDecryptionConfiguration()) + + @property + def cache_lifetime(self): + """Lifetime of cached entities (key encryption keys, + local wrapping keys, KMS client objects).""" + return timedelta( + seconds=self.configuration.get().cache_lifetime_seconds) + + @cache_lifetime.setter + def cache_lifetime(self, value): + self.configuration.get().cache_lifetime_seconds = value.total_seconds() + + cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil: + return self.configuration + + +cdef class KmsConnectionConfig(_Weakrefable): + """Configuration of the connection to the Key Management Service (KMS)""" + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, *, kms_instance_id=None, kms_instance_url=None, + key_access_token=None, custom_kms_conf=None): + self.configuration.reset(new CKmsConnectionConfig()) + if kms_instance_id is not None: + self.kms_instance_id = kms_instance_id + if kms_instance_url is 
not None: + self.kms_instance_url = kms_instance_url + if key_access_token is None: + self.key_access_token = b'DEFAULT' + else: + self.key_access_token = key_access_token + if custom_kms_conf is not None: + self.custom_kms_conf = custom_kms_conf + + @property + def kms_instance_id(self): + """ID of the KMS instance that will be used for encryption + (if multiple KMS instances are available).""" + return frombytes(self.configuration.get().kms_instance_id) + + @kms_instance_id.setter + def kms_instance_id(self, value): + self.configuration.get().kms_instance_id = tobytes(value) + + @property + def kms_instance_url(self): + """URL of the KMS instance.""" + return frombytes(self.configuration.get().kms_instance_url) + + @kms_instance_url.setter + def kms_instance_url(self, value): + self.configuration.get().kms_instance_url = tobytes(value) + + @property + def key_access_token(self): + """Authorization token that will be passed to KMS.""" + return frombytes(self.configuration.get() + .refreshable_key_access_token.get().value()) + + @key_access_token.setter + def key_access_token(self, value): + self.refresh_key_access_token(value) + + @property + def custom_kms_conf(self): + """A dictionary with KMS-type-specific configuration""" + custom_kms_conf = { + frombytes(k): frombytes(v) + for k, v in self.configuration.get().custom_kms_conf + } + return custom_kms_conf + + @custom_kms_conf.setter + def custom_kms_conf(self, dict value): + if value is not None: + for k, v in value.items(): + if isinstance(k, str) and isinstance(v, str): + self.configuration.get().custom_kms_conf[tobytes(k)] = \ + tobytes(v) + else: + raise TypeError("Expected custom_kms_conf to be " + + "a dictionary of strings") + + def refresh_key_access_token(self, value): + cdef: + shared_ptr[CKeyAccessToken] c_key_access_token = \ + self.configuration.get().refreshable_key_access_token + + c_key_access_token.get().Refresh(tobytes(value)) + + cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) 
nogil: + return self.configuration + + @staticmethod + cdef wrap(const CKmsConnectionConfig& config): + result = KmsConnectionConfig() + result.configuration = make_shared[CKmsConnectionConfig](move(config)) + return result + + +# Callback definitions for CPyKmsClientVtable +cdef void _cb_wrap_key( + handler, const c_string& key_bytes, + const c_string& master_key_identifier, c_string* out) except *: + mkid_str = frombytes(master_key_identifier) + wrapped_key = handler.wrap_key(key_bytes, mkid_str) + out[0] = tobytes(wrapped_key) + + +cdef void _cb_unwrap_key( + handler, const c_string& wrapped_key, + const c_string& master_key_identifier, c_string* out) except *: + mkid_str = frombytes(master_key_identifier) + wk_str = frombytes(wrapped_key) + key = handler.unwrap_key(wk_str, mkid_str) + out[0] = tobytes(key) + + +cdef class KmsClient(_Weakrefable): + """The abstract base class for KmsClient implementations.""" + cdef: + shared_ptr[CKmsClient] client + + def __init__(self): + self.init() + + cdef init(self): + cdef: + CPyKmsClientVtable vtable = CPyKmsClientVtable() + + vtable.wrap_key = _cb_wrap_key + vtable.unwrap_key = _cb_unwrap_key + + self.client.reset(new CPyKmsClient(self, vtable)) + + def wrap_key(self, key_bytes, master_key_identifier): + """Wrap a key - encrypt it with the master key.""" + raise NotImplementedError() + + def unwrap_key(self, wrapped_key, master_key_identifier): + """Unwrap a key - decrypt it with the master key.""" + raise NotImplementedError() + + cdef inline shared_ptr[CKmsClient] unwrap(self) nogil: + return self.client + + +# Callback definition for CPyKmsClientFactoryVtable +cdef void _cb_create_kms_client( + handler, + const CKmsConnectionConfig& kms_connection_config, + shared_ptr[CKmsClient]* out) except *: + connection_config = KmsConnectionConfig.wrap(kms_connection_config) + + result = handler(connection_config) + if not isinstance(result, KmsClient): + raise TypeError( + "callable must return KmsClient instances, but got 
{}".format( + type(result))) + + out[0] = ( result).unwrap() + + +cdef class CryptoFactory(_Weakrefable): + """ A factory that produces the low-level FileEncryptionProperties and + FileDecryptionProperties objects, from the high-level parameters.""" + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, kms_client_factory): + """Create CryptoFactory. + + Parameters + ---------- + kms_client_factory : a callable that accepts KmsConnectionConfig + and returns a KmsClient + """ + self.factory.reset(new CPyCryptoFactory()) + + if callable(kms_client_factory): + self.init(kms_client_factory) + else: + raise TypeError("Parameter kms_client_factory must be a callable") + + cdef init(self, callable_client_factory): + cdef: + CPyKmsClientFactoryVtable vtable + shared_ptr[CPyKmsClientFactory] kms_client_factory + + vtable.create_kms_client = _cb_create_kms_client + kms_client_factory.reset( + new CPyKmsClientFactory(callable_client_factory, vtable)) + # A KmsClientFactory object must be registered + # via this method before calling any of + # file_encryption_properties()/file_decryption_properties() methods. + self.factory.get().RegisterKmsClientFactory( + static_pointer_cast[CKmsClientFactory, CPyKmsClientFactory]( + kms_client_factory)) + + def file_encryption_properties(self, + KmsConnectionConfig kms_connection_config, + EncryptionConfiguration encryption_config): + """Create file encryption properties. + + Parameters + ---------- + kms_connection_config : KmsConnectionConfig + Configuration of connection to KMS + + encryption_config : EncryptionConfiguration + Configuration of the encryption, such as which columns to encrypt + + Returns + ------- + file_encryption_properties : FileEncryptionProperties + File encryption properties. 
+ """ + cdef: + CResult[shared_ptr[CFileEncryptionProperties]] \ + file_encryption_properties_result + with nogil: + file_encryption_properties_result = \ + self.factory.get().SafeGetFileEncryptionProperties( + deref(kms_connection_config.unwrap().get()), + deref(encryption_config.unwrap().get())) + file_encryption_properties = GetResultValue( + file_encryption_properties_result) + return FileEncryptionProperties.wrap(file_encryption_properties) + + def file_decryption_properties( + self, + KmsConnectionConfig kms_connection_config, + DecryptionConfiguration decryption_config=None): + """Create file decryption properties. + + Parameters + ---------- + kms_connection_config : KmsConnectionConfig + Configuration of connection to KMS + + decryption_config : DecryptionConfiguration, default None + Configuration of the decryption, such as cache timeout. + Can be None. + + Returns + ------- + file_decryption_properties : FileDecryptionProperties + File decryption properties. + """ + cdef: + CDecryptionConfiguration c_decryption_config + CResult[shared_ptr[CFileDecryptionProperties]] \ + c_file_decryption_properties + if decryption_config is None: + c_decryption_config = CDecryptionConfiguration() + else: + c_decryption_config = deref(decryption_config.unwrap().get()) + with nogil: + c_file_decryption_properties = \ + self.factory.get().SafeGetFileDecryptionProperties( + deref(kms_connection_config.unwrap().get()), + c_decryption_config) + file_decryption_properties = GetResultValue( + c_file_decryption_properties) + return FileDecryptionProperties.wrap(file_decryption_properties) + + def remove_cache_entries_for_token(self, access_token): + self.factory.get().RemoveCacheEntriesForToken(tobytes(access_token)) + + def remove_cache_entries_for_all_tokens(self): + self.factory.get().RemoveCacheEntriesForAllTokens() + + cdef inline shared_ptr[CPyCryptoFactory] unwrap(self): + return self.factory + + +cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object 
crypto_factory) except *: + if isinstance(crypto_factory, CryptoFactory): + pycf = ( crypto_factory).unwrap() + return static_pointer_cast[CCryptoFactory, CPyCryptoFactory](pycf) + raise TypeError("Expected CryptoFactory, got %s" % type(crypto_factory)) + + +cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except *: + if isinstance(kmsconnectionconfig, KmsConnectionConfig): + return ( kmsconnectionconfig).unwrap() + raise TypeError("Expected KmsConnectionConfig, got %s" % type(kmsconnectionconfig)) + + +cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except *: + if isinstance(encryptionconfig, EncryptionConfiguration): + return ( encryptionconfig).unwrap() + raise TypeError("Expected EncryptionConfiguration, got %s" % type(encryptionconfig)) + + +cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except *: + if isinstance(decryptionconfig, DecryptionConfiguration): + return ( decryptionconfig).unwrap() + raise TypeError("Expected DecryptionConfiguration, got %s" % type(decryptionconfig)) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd b/parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd new file mode 100644 index 0000000000000000000000000000000000000000..91c0220d7310870a7803ecceb2c32b8b32f8c11d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport CStatus + + +ctypedef CStatus cb_test_func() + +cdef extern from "arrow/python/python_test.h" namespace "arrow::py::testing" nogil: + + cdef cppclass CTestCase "arrow::py::testing::TestCase": + c_string name + cb_test_func func + + vector[CTestCase] GetCppTestCases() diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx new file mode 100644 index 0000000000000000000000000000000000000000..adb148351306c02667346b3750c08f2efd8a6625 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx @@ -0,0 +1,62 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# cython: profile=False, binding=True +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport check_status + +from pyarrow.lib import frombytes + + +cdef class CppTestCase: + """ + A simple wrapper for a C++ test case. + """ + cdef: + CTestCase c_case + + @staticmethod + cdef wrap(CTestCase c_case): + cdef: + CppTestCase obj + obj = CppTestCase.__new__(CppTestCase) + obj.c_case = c_case + return obj + + @property + def name(self): + return frombytes(self.c_case.name) + + def __repr__(self): + return f"<{self.__class__.__name__} {self.name!r}>" + + def __call__(self): + check_status(self.c_case.func()) + + +def get_cpp_tests(): + """ + Get a list of C++ test cases. + """ + cases = [] + c_cases = GetCppTestCases() + for c_case in c_cases: + cases.append(CppTestCase.wrap(c_case)) + return cases diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_s3fs.pyx b/parrot/lib/python3.10/site-packages/pyarrow/_s3fs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..ba6603322838dd14400ecc0cc71ac20340a4a83a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_s3fs.pyx @@ -0,0 +1,479 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from cython cimport binding + +from pyarrow.lib cimport (check_status, pyarrow_wrap_metadata, + pyarrow_unwrap_metadata) +from pyarrow.lib import frombytes, tobytes, KeyValueMetadata +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_fs cimport * +from pyarrow._fs cimport FileSystem + + +cpdef enum S3LogLevel: + Off = CS3LogLevel_Off + Fatal = CS3LogLevel_Fatal + Error = CS3LogLevel_Error + Warn = CS3LogLevel_Warn + Info = CS3LogLevel_Info + Debug = CS3LogLevel_Debug + Trace = CS3LogLevel_Trace + + +def initialize_s3(S3LogLevel log_level=S3LogLevel.Fatal, int num_event_loop_threads=1): + """ + Initialize S3 support + + Parameters + ---------- + log_level : S3LogLevel + level of logging + num_event_loop_threads : int, default 1 + how many threads to use for the AWS SDK's I/O event loop + + Examples + -------- + >>> fs.initialize_s3(fs.S3LogLevel.Error) # doctest: +SKIP + """ + cdef CS3GlobalOptions options + options.log_level = log_level + options.num_event_loop_threads = num_event_loop_threads + check_status(CInitializeS3(options)) + + +def ensure_s3_initialized(): + """ + Initialize S3 (with default options) if not already initialized + """ + check_status(CEnsureS3Initialized()) + + +def finalize_s3(): + check_status(CFinalizeS3()) + + +def ensure_s3_finalized(): + """ + Finalize S3 if already initialized + """ + check_status(CEnsureS3Finalized()) + + +def resolve_s3_region(bucket): + """ + Resolve the S3 region of a bucket. 
+ + Parameters + ---------- + bucket : str + A S3 bucket name + + Returns + ------- + region : str + A S3 region name + + Examples + -------- + >>> fs.resolve_s3_region('voltrondata-labs-datasets') + 'us-east-2' + """ + cdef: + c_string c_bucket + c_string c_region + + ensure_s3_initialized() + + c_bucket = tobytes(bucket) + with nogil: + c_region = GetResultValue(ResolveS3BucketRegion(c_bucket)) + + return frombytes(c_region) + + +class S3RetryStrategy: + """ + Base class for AWS retry strategies for use with S3. + + Parameters + ---------- + max_attempts : int, default 3 + The maximum number of retry attempts to attempt before failing. + """ + + def __init__(self, max_attempts=3): + self.max_attempts = max_attempts + + +class AwsStandardS3RetryStrategy(S3RetryStrategy): + """ + Represents an AWS Standard retry strategy for use with S3. + + Parameters + ---------- + max_attempts : int, default 3 + The maximum number of retry attempts to attempt before failing. + """ + pass + + +class AwsDefaultS3RetryStrategy(S3RetryStrategy): + """ + Represents an AWS Default retry strategy for use with S3. + + Parameters + ---------- + max_attempts : int, default 3 + The maximum number of retry attempts to attempt before failing. + """ + pass + + +cdef class S3FileSystem(FileSystem): + """ + S3-backed FileSystem implementation + + AWS access_key and secret_key can be provided explicitly. + + If role_arn is provided instead of access_key and secret_key, temporary + credentials will be fetched by issuing a request to STS to assume the + specified role. + + If neither access_key nor secret_key are provided, and role_arn is also not + provided, then attempts to establish the credentials automatically. 
+ S3FileSystem will try the following methods, in order: + + * ``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, and ``AWS_SESSION_TOKEN`` environment variables + * configuration files such as ``~/.aws/credentials`` and ``~/.aws/config`` + * for nodes on Amazon EC2, the EC2 Instance Metadata Service + + Note: S3 buckets are special and the operations available on them may be + limited or more expensive than desired. + + When S3FileSystem creates new buckets (assuming allow_bucket_creation is + True), it does not pass any non-default settings. In AWS S3, the bucket and + all objects will be not publicly visible, and will have no bucket policies + and no resource tags. To have more control over how buckets are created, + use a different API to create them. + + Parameters + ---------- + access_key : str, default None + AWS Access Key ID. Pass None to use the standard AWS environment + variables and/or configuration file. + secret_key : str, default None + AWS Secret Access key. Pass None to use the standard AWS environment + variables and/or configuration file. + session_token : str, default None + AWS Session Token. An optional session token, required if access_key + and secret_key are temporary credentials from STS. + anonymous : bool, default False + Whether to connect anonymously if access_key and secret_key are None. + If true, will not attempt to look up credentials using standard AWS + configuration methods. + role_arn : str, default None + AWS Role ARN. If provided instead of access_key and secret_key, + temporary credentials will be fetched by assuming this role. + session_name : str, default None + An optional identifier for the assumed role session. + external_id : str, default None + An optional unique identifier that might be required when you assume + a role in another account. + load_frequency : int, default 900 + The frequency (in seconds) with which temporary credentials from an + assumed role session will be refreshed. 
+ region : str, default None + AWS region to connect to. If not set, the AWS SDK will attempt to + determine the region using heuristics such as environment variables, + configuration profile, EC2 metadata, or default to 'us-east-1' when SDK + version <1.8. One can also use :func:`pyarrow.fs.resolve_s3_region` to + automatically resolve the region from a bucket name. + request_timeout : double, default None + Socket read timeouts on Windows and macOS, in seconds. + If omitted, the AWS SDK default value is used (typically 3 seconds). + This option is ignored on non-Windows, non-macOS systems. + connect_timeout : double, default None + Socket connection timeout, in seconds. + If omitted, the AWS SDK default value is used (typically 1 second). + scheme : str, default 'https' + S3 connection transport scheme. + endpoint_override : str, default None + Override region with a connect string such as "localhost:9000" + background_writes : bool, default True + Whether file writes will be issued in the background, without + blocking. + default_metadata : mapping or pyarrow.KeyValueMetadata, default None + Default metadata for open_output_stream. This will be ignored if + non-empty metadata is passed to open_output_stream. + proxy_options : dict or str, default None + If a proxy is used, provide the options here. Supported options are: + 'scheme' (str: 'http' or 'https'; required), 'host' (str; required), + 'port' (int; required), 'username' (str; optional), + 'password' (str; optional). + A proxy URI (str) can also be provided, in which case these options + will be derived from the provided URI. + The following are equivalent:: + + S3FileSystem(proxy_options='http://username:password@localhost:8020') + S3FileSystem(proxy_options={'scheme': 'http', 'host': 'localhost', + 'port': 8020, 'username': 'username', + 'password': 'password'}) + allow_bucket_creation : bool, default False + Whether to allow directory creation at the bucket-level. 
This option may also be + passed in a URI query parameter. + allow_bucket_deletion : bool, default False + Whether to allow directory deletion at the bucket-level. This option may also be + passed in a URI query parameter. + check_directory_existence_before_creation : bool, default false + Whether to check the directory existence before creating it. + If false, when creating a directory the code will not check if it already + exists or not. It's an optimization to try directory creation and catch the error, + rather than issue two dependent I/O calls. + If true, when creating a directory the code will only create the directory when necessary + at the cost of extra I/O calls. This can be used for key/value cloud storage which has + a hard rate limit to number of object mutation operations or scenerios such as + the directories already exist and you do not have creation access. + retry_strategy : S3RetryStrategy, default AwsStandardS3RetryStrategy(max_attempts=3) + The retry strategy to use with S3; fail after max_attempts. Available + strategies are AwsStandardS3RetryStrategy, AwsDefaultS3RetryStrategy. + force_virtual_addressing : bool, default False + Whether to use virtual addressing of buckets. + If true, then virtual addressing is always enabled. + If false, then virtual addressing is only enabled if `endpoint_override` is empty. + This can be used for non-AWS backends that only support virtual hosted-style access. + + Examples + -------- + >>> from pyarrow import fs + >>> s3 = fs.S3FileSystem(region='us-west-2') + >>> s3.get_file_info(fs.FileSelector( + ... 'power-analysis-ready-datastore/power_901_constants.zarr/FROCEAN', recursive=True + ... 
)) + [ wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.s3fs = wrapped.get() + + @staticmethod + @binding(True) # Required for cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. + return S3FileSystem(**kwargs) + + def __reduce__(self): + cdef CS3Options opts = self.s3fs.options() + + # if creds were explicitly provided, then use them + # else obtain them as they were last time. + if opts.credentials_kind == CS3CredentialsKind_Explicit: + access_key = frombytes(opts.GetAccessKey()) + secret_key = frombytes(opts.GetSecretKey()) + session_token = frombytes(opts.GetSessionToken()) + else: + access_key = None + secret_key = None + session_token = None + + return ( + S3FileSystem._reconstruct, (dict( + access_key=access_key, + secret_key=secret_key, + session_token=session_token, + anonymous=(opts.credentials_kind == + CS3CredentialsKind_Anonymous), + region=frombytes(opts.region), + scheme=frombytes(opts.scheme), + connect_timeout=opts.connect_timeout, + request_timeout=opts.request_timeout, + endpoint_override=frombytes(opts.endpoint_override), + role_arn=frombytes(opts.role_arn), + session_name=frombytes(opts.session_name), + external_id=frombytes(opts.external_id), + load_frequency=opts.load_frequency, + background_writes=opts.background_writes, + allow_bucket_creation=opts.allow_bucket_creation, + allow_bucket_deletion=opts.allow_bucket_deletion, + check_directory_existence_before_creation=opts.check_directory_existence_before_creation, + default_metadata=pyarrow_wrap_metadata(opts.default_metadata), + proxy_options={'scheme': frombytes(opts.proxy_options.scheme), + 'host': frombytes(opts.proxy_options.host), + 'port': opts.proxy_options.port, + 'username': frombytes( + opts.proxy_options.username), + 'password': frombytes( + opts.proxy_options.password)}, + 
force_virtual_addressing=opts.force_virtual_addressing, + ),) + ) + + @property + def region(self): + """ + The AWS region this filesystem connects to. + """ + return frombytes(self.s3fs.region()) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/acero.py b/parrot/lib/python3.10/site-packages/pyarrow/acero.py new file mode 100644 index 0000000000000000000000000000000000000000..77ba3ab1ce85ddba5b50e2370928fc611ee00478 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/acero.py @@ -0,0 +1,403 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# --------------------------------------------------------------------- +# Implement Internal ExecPlan bindings + +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.lib import Table, RecordBatch +from pyarrow.compute import Expression, field + +try: + from pyarrow._acero import ( # noqa + Declaration, + ExecNodeOptions, + TableSourceNodeOptions, + FilterNodeOptions, + ProjectNodeOptions, + AggregateNodeOptions, + OrderByNodeOptions, + HashJoinNodeOptions, + AsofJoinNodeOptions, + ) +except ImportError as exc: + raise ImportError( + f"The pyarrow installation is not built with support for 'acero' ({str(exc)})" + ) from None + + +try: + import pyarrow.dataset as ds + from pyarrow._dataset import ScanNodeOptions +except ImportError: + class DatasetModuleStub: + class Dataset: + pass + + class InMemoryDataset: + pass + ds = DatasetModuleStub + + +def _dataset_to_decl(dataset, use_threads=True): + decl = Declaration("scan", ScanNodeOptions(dataset, use_threads=use_threads)) + + # Get rid of special dataset columns + # "__fragment_index", "__batch_index", "__last_in_fragment", "__filename" + projections = [field(f) for f in dataset.schema.names] + decl = Declaration.from_sequence( + [decl, Declaration("project", ProjectNodeOptions(projections))] + ) + + filter_expr = dataset._scan_options.get("filter") + if filter_expr is not None: + # Filters applied in CScanNodeOptions are "best effort" for the scan node itself + # so we always need to inject an additional Filter node to apply them for real. + decl = Declaration.from_sequence( + [decl, Declaration("filter", FilterNodeOptions(filter_expr))] + ) + + return decl + + +def _perform_join(join_type, left_operand, left_keys, + right_operand, right_keys, + left_suffix=None, right_suffix=None, + use_threads=True, coalesce_keys=False, + output_type=Table): + """ + Perform join of two tables or datasets. 
+ + The result will be an output table with the result of the join operation + + Parameters + ---------- + join_type : str + One of supported join types. + left_operand : Table or Dataset + The left operand for the join operation. + left_keys : str or list[str] + The left key (or keys) on which the join operation should be performed. + right_operand : Table or Dataset + The right operand for the join operation. + right_keys : str or list[str] + The right key (or keys) on which the join operation should be performed. + left_suffix : str, default None + Which suffix to add to left column names. This prevents confusion + when the columns in left and right operands have colliding names. + right_suffix : str, default None + Which suffix to add to the right column names. This prevents confusion + when the columns in left and right operands have colliding names. + use_threads : bool, default True + Whether to use multithreading or not. + coalesce_keys : bool, default False + If the duplicated keys should be omitted from one of the sides + in the join result. + output_type: Table or InMemoryDataset + The output type for the exec plan result. 

    Returns
    -------
    result_table : Table or InMemoryDataset
    """
    if not isinstance(left_operand, (Table, ds.Dataset)):
        raise TypeError(f"Expected Table or Dataset, got {type(left_operand)}")
    if not isinstance(right_operand, (Table, ds.Dataset)):
        raise TypeError(f"Expected Table or Dataset, got {type(right_operand)}")

    # Prepare left and right tables Keys to send them to the C++ function
    left_keys_order = {}
    if not isinstance(left_keys, (tuple, list)):
        left_keys = [left_keys]
    for idx, key in enumerate(left_keys):
        left_keys_order[key] = idx

    right_keys_order = {}
    if not isinstance(right_keys, (list, tuple)):
        right_keys = [right_keys]
    for idx, key in enumerate(right_keys):
        right_keys_order[key] = idx

    # By default expose all columns on both left and right table
    left_columns = left_operand.schema.names
    right_columns = right_operand.schema.names

    # Pick the join type
    # (semi/anti joins emit columns from one side only; inner/left-outer
    # drop the right key columns, right-outer drops the left key columns)
    if join_type == "left semi" or join_type == "left anti":
        right_columns = []
    elif join_type == "right semi" or join_type == "right anti":
        left_columns = []
    elif join_type == "inner" or join_type == "left outer":
        right_columns = [
            col for col in right_columns if col not in right_keys_order
        ]
    elif join_type == "right outer":
        left_columns = [
            col for col in left_columns if col not in left_keys_order
        ]

    # Turn the columns to vectors of FieldRefs
    # and set aside indices of keys.
    left_column_keys_indices = {}
    for idx, colname in enumerate(left_columns):
        if colname in left_keys:
            left_column_keys_indices[colname] = idx
    right_column_keys_indices = {}
    for idx, colname in enumerate(right_columns):
        if colname in right_keys:
            right_column_keys_indices[colname] = idx

    # Add the join node to the execplan
    if isinstance(left_operand, ds.Dataset):
        left_source = _dataset_to_decl(left_operand, use_threads=use_threads)
    else:
        left_source = Declaration("table_source", TableSourceNodeOptions(left_operand))
    if isinstance(right_operand, ds.Dataset):
        right_source = _dataset_to_decl(right_operand, use_threads=use_threads)
    else:
        right_source = Declaration(
            "table_source", TableSourceNodeOptions(right_operand)
        )

    # When coalescing keys, pass the explicit output column selections built
    # above; otherwise the node's default output columns are used.
    if coalesce_keys:
        join_opts = HashJoinNodeOptions(
            join_type, left_keys, right_keys, left_columns, right_columns,
            output_suffix_for_left=left_suffix or "",
            output_suffix_for_right=right_suffix or "",
        )
    else:
        join_opts = HashJoinNodeOptions(
            join_type, left_keys, right_keys,
            output_suffix_for_left=left_suffix or "",
            output_suffix_for_right=right_suffix or "",
        )
    decl = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source]
    )

    if coalesce_keys and join_type == "full outer":
        # In case of full outer joins, the join operation will output all columns
        # so that we can coalesce the keys and exclude duplicates in a subsequent
        # projection.
        left_columns_set = set(left_columns)
        right_columns_set = set(right_columns)
        # Where the right table columns start.
        right_operand_index = len(left_columns)
        projected_col_names = []
        projections = []
        for idx, col in enumerate(left_columns + right_columns):
            if idx < len(left_columns) and col in left_column_keys_indices:
                # Include keys only once and coalesce left+right table keys.
                projected_col_names.append(col)
                # Get the index of the right key that is being paired
                # with this left key. We do so by retrieving the name
                # of the right key that is in the same position in the provided keys
                # and then looking up the index for that name in the right table.
                right_key_index = right_column_keys_indices[
                    right_keys[left_keys_order[col]]]
                projections.append(
                    Expression._call("coalesce", [
                        Expression._field(idx), Expression._field(
                            right_operand_index+right_key_index)
                    ])
                )
            elif idx >= right_operand_index and col in right_column_keys_indices:
                # Do not include right table keys. As they would lead to duplicated keys
                continue
            else:
                # For all the other columns include them as they are.
                # Just recompute the suffixes that the join produced as the projection
                # would lose them otherwise.
                if (
                    left_suffix and idx < right_operand_index
                    and col in right_columns_set
                ):
                    col += left_suffix
                if (
                    right_suffix and idx >= right_operand_index
                    and col in left_columns_set
                ):
                    col += right_suffix
                projected_col_names.append(col)
                projections.append(
                    Expression._field(idx)
                )
        projection = Declaration(
            "project", ProjectNodeOptions(projections, projected_col_names)
        )
        decl = Declaration.from_sequence([decl, projection])

    result_table = decl.to_table(use_threads=use_threads)

    if output_type == Table:
        return result_table
    elif output_type == ds.InMemoryDataset:
        return ds.InMemoryDataset(result_table)
    else:
        raise TypeError("Unsupported output type")


def _perform_join_asof(left_operand, left_on, left_by,
                       right_operand, right_on, right_by,
                       tolerance, use_threads=True,
                       output_type=Table):
    """
    Perform asof join of two tables or datasets.

    The result will be an output table with the result of the join operation

    Parameters
    ----------
    left_operand : Table or Dataset
        The left operand for the join operation.
    left_on : str
        The left key (or keys) on which the join operation should be performed.
    left_by: str or list[str]
        The left key (or keys) on which the join operation should be performed.
    right_operand : Table or Dataset
        The right operand for the join operation.
    right_on : str or list[str]
        The right key (or keys) on which the join operation should be performed.
    right_by: str or list[str]
        The right key (or keys) on which the join operation should be performed.
    tolerance : int
        The tolerance to use for the asof join. The tolerance is interpreted in
        the same units as the "on" key.
    output_type: Table or InMemoryDataset
        The output type for the exec plan result.

    Returns
    -------
    result_table : Table or InMemoryDataset
    """
    if not isinstance(left_operand, (Table, ds.Dataset)):
        raise TypeError(f"Expected Table or Dataset, got {type(left_operand)}")
    if not isinstance(right_operand, (Table, ds.Dataset)):
        raise TypeError(f"Expected Table or Dataset, got {type(right_operand)}")

    # Normalize the "by" keys to lists so they can be concatenated below.
    if not isinstance(left_by, (tuple, list)):
        left_by = [left_by]
    if not isinstance(right_by, (tuple, list)):
        right_by = [right_by]

    # AsofJoin does not return on or by columns for right_operand.
    right_columns = [
        col for col in right_operand.schema.names
        if col not in [right_on] + right_by
    ]
    # Unlike the hash join above, no suffixing is applied here, so any
    # remaining overlap in column names must be rejected up front.
    columns_collisions = set(left_operand.schema.names) & set(right_columns)
    if columns_collisions:
        raise ValueError(
            "Columns {} present in both tables. AsofJoin does not support "
            "column collisions.".format(columns_collisions),
        )

    # Add the join node to the execplan
    if isinstance(left_operand, ds.Dataset):
        left_source = _dataset_to_decl(left_operand, use_threads=use_threads)
    else:
        left_source = Declaration(
            "table_source", TableSourceNodeOptions(left_operand),
        )
    if isinstance(right_operand, ds.Dataset):
        right_source = _dataset_to_decl(right_operand, use_threads=use_threads)
    else:
        right_source = Declaration(
            "table_source", TableSourceNodeOptions(right_operand)
        )

    join_opts = AsofJoinNodeOptions(
        left_on, left_by, right_on, right_by, tolerance
    )
    decl = Declaration(
        "asofjoin", options=join_opts, inputs=[left_source, right_source]
    )

    result_table = decl.to_table(use_threads=use_threads)

    if output_type == Table:
        return result_table
    elif output_type == ds.InMemoryDataset:
        return ds.InMemoryDataset(result_table)
    else:
        raise TypeError("Unsupported output type")


def _filter_table(table, expression):
    """Filter rows of a table based on the provided expression.

    The result will be an output table with only the rows matching
    the provided expression.

    Parameters
    ----------
    table : Table or RecordBatch
        Table that should be filtered.
    expression : Expression
        The expression on which rows should be filtered.

    Returns
    -------
    Table
    """
    # A RecordBatch input is round-tripped through a single-batch Table and
    # converted back at the end so callers get the same kind of object in.
    is_batch = False
    if isinstance(table, RecordBatch):
        table = Table.from_batches([table])
        is_batch = True

    decl = Declaration.from_sequence([
        Declaration("table_source", options=TableSourceNodeOptions(table)),
        Declaration("filter", options=FilterNodeOptions(expression))
    ])
    result = decl.to_table(use_threads=True)
    if is_batch:
        result = result.combine_chunks().to_batches()[0]
    return result


def _sort_source(table_or_dataset, sort_keys, output_type=Table, **kwargs):
    """Sort a Table or Dataset by `sort_keys` through an Acero order_by node.

    Extra keyword arguments are forwarded to OrderByNodeOptions.
    """
    if isinstance(table_or_dataset, ds.Dataset):
        data_source = _dataset_to_decl(table_or_dataset, use_threads=True)
    else:
        data_source = Declaration(
            "table_source", TableSourceNodeOptions(table_or_dataset)
        )

    order_by = Declaration("order_by", OrderByNodeOptions(sort_keys, **kwargs))

    decl = Declaration.from_sequence([data_source, order_by])
    result_table = decl.to_table(use_threads=True)

    if output_type == Table:
        return result_table
    elif output_type == ds.InMemoryDataset:
        return ds.InMemoryDataset(result_table)
    else:
        raise TypeError("Unsupported output type")


def _group_by(table, aggregates, keys, use_threads=True):
    """Aggregate `table` with `aggregates`, grouped by `keys`, via Acero."""
    decl = Declaration.from_sequence([
        Declaration("table_source", TableSourceNodeOptions(table)),
        Declaration("aggregate", AggregateNodeOptions(aggregates, keys=keys))
    ])
    return decl.to_table(use_threads=use_threads)
diff --git a/parrot/lib/python3.10/site-packages/pyarrow/array.pxi b/parrot/lib/python3.10/site-packages/pyarrow/array.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..b1f90cd16537b2697e2e87354ab6c550f030d79b
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/pyarrow/array.pxi
@@ -0,0 +1,4594 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from cpython.pycapsule cimport PyCapsule_CheckExact, PyCapsule_GetPointer, PyCapsule_New

import os
import warnings
from cython import sizeof


cdef _sequence_to_array(object sequence, object mask, object size,
                        DataType type, CMemoryPool* pool, c_bool from_pandas):
    # Convert a generic Python sequence to an Arrow array via the C++
    # ConvertPySequence path; unwraps to a plain Array when the conversion
    # produced a single chunk.
    cdef:
        int64_t c_size
        PyConversionOptions options
        shared_ptr[CChunkedArray] chunked

    if type is not None:
        options.type = type.sp_type

    if size is not None:
        options.size = size

    options.from_pandas = from_pandas
    options.ignore_timezone = os.environ.get('PYARROW_IGNORE_TIMEZONE', False)

    with nogil:
        chunked = GetResultValue(
            ConvertPySequence(sequence, mask, options, pool)
        )

    if chunked.get().num_chunks() == 1:
        return pyarrow_wrap_array(chunked.get().chunk(0))
    else:
        return pyarrow_wrap_chunked_array(chunked)


cdef inline _is_array_like(obj):
    # True for numpy arrays and, when pandas is available, pandas array-likes.
    if isinstance(obj, np.ndarray):
        return True
    return pandas_api._have_pandas_internal() and pandas_api.is_array_like(obj)


def _ndarray_to_arrow_type(object values, DataType type):
    return pyarrow_wrap_data_type(_ndarray_to_type(values, type))


cdef shared_ptr[CDataType] _ndarray_to_type(object values,
                                            DataType type) except *:
    # Infer the Arrow type from the ndarray dtype unless an explicit type
    # was passed, in which case the explicit type wins.
    cdef shared_ptr[CDataType] c_type

    dtype = values.dtype

    if type is None and dtype != object:
        c_type = GetResultValue(NumPyDtypeToArrow(dtype))

    if type is not None:
        c_type = type.sp_type

return c_type + + +cdef _ndarray_to_array(object values, object mask, DataType type, + c_bool from_pandas, c_bool safe, CMemoryPool* pool): + cdef: + shared_ptr[CChunkedArray] chunked_out + shared_ptr[CDataType] c_type = _ndarray_to_type(values, type) + CCastOptions cast_options = CCastOptions(safe) + + with nogil: + check_status(NdarrayToArrow(pool, values, mask, from_pandas, + c_type, cast_options, &chunked_out)) + + if chunked_out.get().num_chunks() > 1: + return pyarrow_wrap_chunked_array(chunked_out) + else: + return pyarrow_wrap_array(chunked_out.get().chunk(0)) + + +cdef _codes_to_indices(object codes, object mask, DataType type, + MemoryPool memory_pool): + """ + Convert the codes of a pandas Categorical to indices for a pyarrow + DictionaryArray, taking into account missing values + mask + """ + if mask is None: + mask = codes == -1 + else: + mask = mask | (codes == -1) + return array(codes, mask=mask, type=type, memory_pool=memory_pool) + + +def _handle_arrow_array_protocol(obj, type, mask, size): + if mask is not None or size is not None: + raise ValueError( + "Cannot specify a mask or a size when passing an object that is " + "converted with the __arrow_array__ protocol.") + res = obj.__arrow_array__(type=type) + if not isinstance(res, (Array, ChunkedArray)): + raise TypeError("The object's __arrow_array__ method does not " + "return a pyarrow Array or ChunkedArray.") + if isinstance(res, ChunkedArray) and res.num_chunks==1: + res = res.chunk(0) + return res + + +def array(object obj, type=None, mask=None, size=None, from_pandas=None, + bint safe=True, MemoryPool memory_pool=None): + """ + Create pyarrow.Array instance from a Python object. + + Parameters + ---------- + obj : sequence, iterable, ndarray, pandas.Series, Arrow-compatible array + If both type and size are specified may be a single use iterable. If + not strongly-typed, Arrow type will be inferred for resulting array. 
+ Any Arrow-compatible array that implements the Arrow PyCapsule Protocol + (has an ``__arrow_c_array__`` or ``__arrow_c_device_array__`` method) + can be passed as well. + type : pyarrow.DataType + Explicit type to attempt to coerce to, otherwise will be inferred from + the data. + mask : array[bool], optional + Indicate which values are null (True) or not null (False). + size : int64, optional + Size of the elements. If the input is larger than size bail at this + length. For iterators, if size is larger than the input iterator this + will be treated as a "max size", but will involve an initial allocation + of size followed by a resize to the actual size (so if you know the + exact size specifying it correctly will give you better performance). + from_pandas : bool, default None + Use pandas's semantics for inferring nulls from values in + ndarray-like data. If passed, the mask tasks precedence, but + if a value is unmasked (not-null), but still null according to + pandas semantics, then it is null. Defaults to False if not + passed explicitly by user, or True if a pandas object is + passed in. + safe : bool, default True + Check for overflows or other unsafe conversions. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the currently-set default + memory pool. + + Returns + ------- + array : pyarrow.Array or pyarrow.ChunkedArray + A ChunkedArray instead of an Array is returned if: + + - the object data overflowed binary storage. + - the object's ``__arrow_array__`` protocol method returned a chunked + array. + + Notes + ----- + Timezone will be preserved in the returned array for timezone-aware data, + else no timezone will be returned for naive timestamps. + Internally, UTC values are stored for timezone-aware data with the + timezone set in the data type. + + Pandas's DateOffsets and dateutil.relativedelta.relativedelta are by + default converted as MonthDayNanoIntervalArray. 
relativedelta leapdays + are ignored as are all absolute fields on both objects. datetime.timedelta + can also be converted to MonthDayNanoIntervalArray but this requires + passing MonthDayNanoIntervalType explicitly. + + Converting to dictionary array will promote to a wider integer type for + indices if the number of distinct values cannot be represented, even if + the index type was explicitly set. This means that if there are more than + 127 values the returned dictionary array's index type will be at least + pa.int16() even if pa.int8() was passed to the function. Note that an + explicit index type will not be demoted even if it is wider than required. + + Examples + -------- + >>> import pandas as pd + >>> import pyarrow as pa + >>> pa.array(pd.Series([1, 2])) + + [ + 1, + 2 + ] + + >>> pa.array(["a", "b", "a"], type=pa.dictionary(pa.int8(), pa.string())) + + ... + -- dictionary: + [ + "a", + "b" + ] + -- indices: + [ + 0, + 1, + 0 + ] + + >>> import numpy as np + >>> pa.array(pd.Series([1, 2]), mask=np.array([0, 1], dtype=bool)) + + [ + 1, + null + ] + + >>> arr = pa.array(range(1024), type=pa.dictionary(pa.int8(), pa.int64())) + >>> arr.type.index_type + DataType(int16) + """ + cdef: + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + bint is_pandas_object = False + bint c_from_pandas + + type = ensure_type(type, allow_none=True) + + extension_type = None + if type is not None and type.id == _Type_EXTENSION: + extension_type = type + type = type.storage_type + + if from_pandas is None: + c_from_pandas = False + else: + c_from_pandas = from_pandas + + if isinstance(obj, Array): + if type is not None and not obj.type.equals(type): + obj = obj.cast(type, safe=safe, memory_pool=memory_pool) + return obj + + if hasattr(obj, '__arrow_array__'): + return _handle_arrow_array_protocol(obj, type, mask, size) + elif hasattr(obj, '__arrow_c_device_array__'): + if type is not None: + requested_type = type.__arrow_c_schema__() + else: + requested_type = None + 
schema_capsule, array_capsule = obj.__arrow_c_device_array__(requested_type) + out_array = Array._import_from_c_device_capsule(schema_capsule, array_capsule) + if type is not None and out_array.type != type: + # PyCapsule interface type coercion is best effort, so we need to + # check the type of the returned array and cast if necessary + out_array = array.cast(type, safe=safe, memory_pool=memory_pool) + return out_array + elif hasattr(obj, '__arrow_c_array__'): + if type is not None: + requested_type = type.__arrow_c_schema__() + else: + requested_type = None + schema_capsule, array_capsule = obj.__arrow_c_array__(requested_type) + out_array = Array._import_from_c_capsule(schema_capsule, array_capsule) + if type is not None and out_array.type != type: + # PyCapsule interface type coercion is best effort, so we need to + # check the type of the returned array and cast if necessary + out_array = array.cast(type, safe=safe, memory_pool=memory_pool) + return out_array + elif _is_array_like(obj): + if mask is not None: + if _is_array_like(mask): + mask = get_values(mask, &is_pandas_object) + else: + raise TypeError("Mask must be a numpy array " + "when converting numpy arrays") + + values = get_values(obj, &is_pandas_object) + if is_pandas_object and from_pandas is None: + c_from_pandas = True + + if isinstance(values, np.ma.MaskedArray): + if mask is not None: + raise ValueError("Cannot pass a numpy masked array and " + "specify a mask at the same time") + else: + # don't use shrunken masks + mask = None if values.mask is np.ma.nomask else values.mask + values = values.data + + if mask is not None: + if mask.dtype != np.bool_: + raise TypeError("Mask must be boolean dtype") + if mask.ndim != 1: + raise ValueError("Mask must be 1D array") + if len(values) != len(mask): + raise ValueError( + "Mask is a different length from sequence being converted") + + if hasattr(values, '__arrow_array__'): + return _handle_arrow_array_protocol(values, type, mask, size) + elif 
(pandas_api.is_categorical(values) and + type is not None and type.id != Type_DICTIONARY): + result = _ndarray_to_array( + np.asarray(values), mask, type, c_from_pandas, safe, pool + ) + elif pandas_api.is_categorical(values): + if type is not None: + index_type = type.index_type + value_type = type.value_type + if values.ordered != type.ordered: + raise ValueError( + "The 'ordered' flag of the passed categorical values " + "does not match the 'ordered' of the specified type. ") + else: + index_type = None + value_type = None + + indices = _codes_to_indices( + values.codes, mask, index_type, memory_pool) + try: + dictionary = array( + values.categories.values, type=value_type, + memory_pool=memory_pool) + except TypeError: + # TODO when removing the deprecation warning, this whole + # try/except can be removed (to bubble the TypeError of + # the first array(..) call) + if value_type is not None: + warnings.warn( + "The dtype of the 'categories' of the passed " + "categorical values ({0}) does not match the " + "specified type ({1}). 
For now ignoring the specified " + "type, but in the future this mismatch will raise a " + "TypeError".format( + values.categories.dtype, value_type), + FutureWarning, stacklevel=2) + dictionary = array( + values.categories.values, memory_pool=memory_pool) + else: + raise + + return DictionaryArray.from_arrays( + indices, dictionary, ordered=values.ordered, safe=safe) + else: + if pandas_api.have_pandas: + values, type = pandas_api.compat.get_datetimetz_type( + values, obj.dtype, type) + if type and type.id == _Type_RUN_END_ENCODED: + arr = _ndarray_to_array( + values, mask, type.value_type, c_from_pandas, safe, pool) + result = _pc().run_end_encode(arr, run_end_type=type.run_end_type, + memory_pool=memory_pool) + else: + result = _ndarray_to_array(values, mask, type, c_from_pandas, safe, + pool) + else: + if type and type.id == _Type_RUN_END_ENCODED: + arr = _sequence_to_array( + obj, mask, size, type.value_type, pool, from_pandas) + result = _pc().run_end_encode(arr, run_end_type=type.run_end_type, + memory_pool=memory_pool) + # ConvertPySequence does strict conversion if type is explicitly passed + else: + result = _sequence_to_array(obj, mask, size, type, pool, c_from_pandas) + + if extension_type is not None: + result = ExtensionArray.from_storage(extension_type, result) + return result + + +def asarray(values, type=None): + """ + Convert to pyarrow.Array, inferring type if not provided. + + Parameters + ---------- + values : array-like + This can be a sequence, numpy.ndarray, pyarrow.Array or + pyarrow.ChunkedArray. If a ChunkedArray is passed, the output will be + a ChunkedArray, otherwise the output will be a Array. + type : string or DataType + Explicitly construct the array with this type. Attempt to cast if + indicated type is different. 
+ + Returns + ------- + arr : Array or ChunkedArray + """ + if isinstance(values, (Array, ChunkedArray)): + if type is not None and not values.type.equals(type): + values = values.cast(type) + return values + else: + return array(values, type=type) + + +def nulls(size, type=None, MemoryPool memory_pool=None): + """ + Create a strongly-typed Array instance with all elements null. + + Parameters + ---------- + size : int + Array length. + type : pyarrow.DataType, default None + Explicit type for the array. By default use NullType. + memory_pool : MemoryPool, default None + Arrow MemoryPool to use for allocations. Uses the default memory + pool if not passed. + + Returns + ------- + arr : Array + + Examples + -------- + >>> import pyarrow as pa + >>> pa.nulls(10) + + 10 nulls + + >>> pa.nulls(3, pa.uint32()) + + [ + null, + null, + null + ] + """ + cdef: + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + int64_t length = size + shared_ptr[CDataType] ty + shared_ptr[CArray] arr + + type = ensure_type(type, allow_none=True) + if type is None: + type = null() + + ty = pyarrow_unwrap_data_type(type) + with nogil: + arr = GetResultValue(MakeArrayOfNull(ty, length, pool)) + + return pyarrow_wrap_array(arr) + + +def repeat(value, size, MemoryPool memory_pool=None): + """ + Create an Array instance whose slots are the given scalar. + + Parameters + ---------- + value : Scalar-like object + Either a pyarrow.Scalar or any python object coercible to a Scalar. + size : int + Number of times to repeat the scalar in the output Array. + memory_pool : MemoryPool, default None + Arrow MemoryPool to use for allocations. Uses the default memory + pool if not passed. 
+ + Returns + ------- + arr : Array + + Examples + -------- + >>> import pyarrow as pa + >>> pa.repeat(10, 3) + + [ + 10, + 10, + 10 + ] + + >>> pa.repeat([1, 2], 2) + + [ + [ + 1, + 2 + ], + [ + 1, + 2 + ] + ] + + >>> pa.repeat("string", 3) + + [ + "string", + "string", + "string" + ] + + >>> pa.repeat(pa.scalar({'a': 1, 'b': [1, 2]}), 2) + + -- is_valid: all not null + -- child 0 type: int64 + [ + 1, + 1 + ] + -- child 1 type: list + [ + [ + 1, + 2 + ], + [ + 1, + 2 + ] + ] + """ + cdef: + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + int64_t length = size + shared_ptr[CArray] c_array + shared_ptr[CScalar] c_scalar + + if not isinstance(value, Scalar): + value = scalar(value, memory_pool=memory_pool) + + c_scalar = ( value).unwrap() + with nogil: + c_array = GetResultValue( + MakeArrayFromScalar(deref(c_scalar), length, pool) + ) + + return pyarrow_wrap_array(c_array) + + +def infer_type(values, mask=None, from_pandas=False): + """ + Attempt to infer Arrow data type that can hold the passed Python + sequence type in an Array object + + Parameters + ---------- + values : array-like + Sequence to infer type from. + mask : ndarray (bool type), optional + Optional exclusion mask where True marks null, False non-null. + from_pandas : bool, default False + Use pandas's NA/null sentinel values for type inference. 

    Returns
    -------
    type : DataType
    """
    cdef:
        shared_ptr[CDataType] out
        c_bool use_pandas_sentinels = from_pandas

    if mask is not None and not isinstance(mask, np.ndarray):
        mask = np.array(mask, dtype=bool)

    out = GetResultValue(InferArrowType(values, mask, use_pandas_sentinels))
    return pyarrow_wrap_data_type(out)


def _normalize_slice(object arrow_obj, slice key):
    """
    Slices with step not equal to 1 (or None) will produce a copy
    rather than a zero-copy view
    """
    cdef:
        Py_ssize_t start, stop, step
        Py_ssize_t n = len(arrow_obj)

    start, stop, step = key.indices(n)

    if step != 1:
        # Non-unit step: materialize the selected indices with take() (copy).
        indices = np.arange(start, stop, step)
        return arrow_obj.take(indices)
    else:
        # Unit step: zero-copy slice of the underlying buffers.
        length = max(stop - start, 0)
        return arrow_obj.slice(start, length)


cdef Py_ssize_t _normalize_index(Py_ssize_t index,
                                 Py_ssize_t length) except -1:
    # Negative indices count from the end, as in standard Python indexing.
    if index < 0:
        index += length
        if index < 0:
            raise IndexError("index out of bounds")
    elif index >= length:
        raise IndexError("index out of bounds")
    return index


cdef wrap_datum(const CDatum& datum):
    # Wrap a C++ Datum in the matching Python object based on its kind.
    if datum.kind() == DatumType_ARRAY:
        return pyarrow_wrap_array(MakeArray(datum.array()))
    elif datum.kind() == DatumType_CHUNKED_ARRAY:
        return pyarrow_wrap_chunked_array(datum.chunked_array())
    elif datum.kind() == DatumType_RECORD_BATCH:
        return pyarrow_wrap_batch(datum.record_batch())
    elif datum.kind() == DatumType_TABLE:
        return pyarrow_wrap_table(datum.table())
    elif datum.kind() == DatumType_SCALAR:
        return pyarrow_wrap_scalar(datum.scalar())
    else:
        raise ValueError("Unable to wrap Datum in a Python object")


cdef _append_array_buffers(const CArrayData* ad, list res):
    """
    Recursively append Buffer wrappers from *ad* and its children.
    """
    cdef size_t i, n
    assert ad != NULL
    n = ad.buffers.size()
    for i in range(n):
        buf = ad.buffers[i]
        res.append(pyarrow_wrap_buffer(buf)
                   if buf.get() != NULL else None)
    n = ad.child_data.size()
    for i in range(n):
        _append_array_buffers(ad.child_data[i].get(), res)


cdef _reduce_array_data(const CArrayData* ad):
    """
    Recursively dissect ArrayData to (pickable) tuples.
    """
    cdef size_t i, n
    assert ad != NULL

    n = ad.buffers.size()
    buffers = []
    for i in range(n):
        buf = ad.buffers[i]
        buffers.append(pyarrow_wrap_buffer(buf)
                       if buf.get() != NULL else None)

    children = []
    n = ad.child_data.size()
    for i in range(n):
        children.append(_reduce_array_data(ad.child_data[i].get()))

    if ad.dictionary.get() != NULL:
        dictionary = _reduce_array_data(ad.dictionary.get())
    else:
        dictionary = None

    return pyarrow_wrap_data_type(ad.type), ad.length, ad.null_count, \
        ad.offset, buffers, children, dictionary


cdef shared_ptr[CArrayData] _reconstruct_array_data(data):
    """
    Reconstruct CArrayData objects from the tuple structure generated
    by _reduce_array_data.
    """
    cdef:
        int64_t length, null_count, offset, i
        DataType dtype
        Buffer buf
        vector[shared_ptr[CBuffer]] c_buffers
        vector[shared_ptr[CArrayData]] c_children
        shared_ptr[CArrayData] c_dictionary

    dtype, length, null_count, offset, buffers, children, dictionary = data

    for i in range(len(buffers)):
        buf = buffers[i]
        if buf is None:
            # Absent buffer (e.g. no validity bitmap) round-trips as a null
            # shared_ptr.
            c_buffers.push_back(shared_ptr[CBuffer]())
        else:
            c_buffers.push_back(buf.buffer)

    for i in range(len(children)):
        c_children.push_back(_reconstruct_array_data(children[i]))

    if dictionary is not None:
        c_dictionary = _reconstruct_array_data(dictionary)

    return CArrayData.MakeWithChildrenAndDictionary(
        dtype.sp_type,
        length,
        c_buffers,
        c_children,
        c_dictionary,
        null_count,
        offset)


def _restore_array(data):
    """
    Reconstruct an Array from pickled ArrayData.
+ """ + cdef shared_ptr[CArrayData] ad = _reconstruct_array_data(data) + return pyarrow_wrap_array(MakeArray(ad)) + + +cdef class _PandasConvertible(_Weakrefable): + + def to_pandas( + self, + memory_pool=None, + categories=None, + bint strings_to_categorical=False, + bint zero_copy_only=False, + bint integer_object_nulls=False, + bint date_as_object=True, + bint timestamp_as_object=False, + bint use_threads=True, + bint deduplicate_objects=True, + bint ignore_metadata=False, + bint safe=True, + bint split_blocks=False, + bint self_destruct=False, + str maps_as_pydicts=None, + types_mapper=None, + bint coerce_temporal_nanoseconds=False + ): + """ + Convert to a pandas-compatible NumPy array or DataFrame, as appropriate + + Parameters + ---------- + memory_pool : MemoryPool, default None + Arrow MemoryPool to use for allocations. Uses the default memory + pool if not passed. + categories : list, default empty + List of fields that should be returned as pandas.Categorical. Only + applies to table-like data structures. + strings_to_categorical : bool, default False + Encode string (UTF8) and binary types to pandas.Categorical. + zero_copy_only : bool, default False + Raise an ArrowException if this function call would require copying + the underlying data. + integer_object_nulls : bool, default False + Cast integers with nulls to objects + date_as_object : bool, default True + Cast dates to objects. If False, convert to datetime64 dtype with + the equivalent time unit (if supported). Note: in pandas version + < 2.0, only datetime64[ns] conversion is supported. + timestamp_as_object : bool, default False + Cast non-nanosecond timestamps (np.datetime64) to objects. This is + useful in pandas version 1.x if you have timestamps that don't fit + in the normal date range of nanosecond timestamps (1678 CE-2262 CE). + Non-nanosecond timestamps are supported in pandas version 2.0. + If False, all timestamps are converted to datetime64 dtype. 
+ use_threads : bool, default True + Whether to parallelize the conversion using multiple threads. + deduplicate_objects : bool, default True + Do not create multiple copies Python objects when created, to save + on memory use. Conversion will be slower. + ignore_metadata : bool, default False + If True, do not use the 'pandas' metadata to reconstruct the + DataFrame index, if present + safe : bool, default True + For certain data types, a cast is needed in order to store the + data in a pandas DataFrame or Series (e.g. timestamps are always + stored as nanoseconds in pandas). This option controls whether it + is a safe cast or not. + split_blocks : bool, default False + If True, generate one internal "block" for each column when + creating a pandas.DataFrame from a RecordBatch or Table. While this + can temporarily reduce memory note that various pandas operations + can trigger "consolidation" which may balloon memory use. + self_destruct : bool, default False + EXPERIMENTAL: If True, attempt to deallocate the originating Arrow + memory while converting the Arrow object to pandas. If you use the + object after calling to_pandas with this option it will crash your + program. + + Note that you may not see always memory usage improvements. For + example, if multiple columns share an underlying allocation, + memory can't be freed until all columns are converted. + maps_as_pydicts : str, optional, default `None` + Valid values are `None`, 'lossy', or 'strict'. + The default behavior (`None`), is to convert Arrow Map arrays to + Python association lists (list-of-tuples) in the same order as the + Arrow Map, as in [(key1, value1), (key2, value2), ...]. + + If 'lossy' or 'strict', convert Arrow Map arrays to native Python dicts. + This can change the ordering of (key, value) pairs, and will + deduplicate multiple keys, resulting in a possible loss of data. + + If 'lossy', this key deduplication results in a warning printed + when detected. 
If 'strict', this instead results in an exception + being raised when detected. + types_mapper : function, default None + A function mapping a pyarrow DataType to a pandas ExtensionDtype. + This can be used to override the default pandas type for conversion + of built-in pyarrow types or in absence of pandas_metadata in the + Table schema. The function receives a pyarrow DataType and is + expected to return a pandas ExtensionDtype or ``None`` if the + default conversion should be used for that type. If you have + a dictionary mapping, you can pass ``dict.get`` as function. + coerce_temporal_nanoseconds : bool, default False + Only applicable to pandas version >= 2.0. + A legacy option to coerce date32, date64, duration, and timestamp + time units to nanoseconds when converting to pandas. This is the + default behavior in pandas version 1.x. Set this option to True if + you'd like to use this coercion when using pandas version >= 2.0 + for backwards compatibility (not recommended otherwise). + + Returns + ------- + pandas.Series or pandas.DataFrame depending on type of object + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + + Convert a Table to pandas DataFrame: + + >>> table = pa.table([ + ... pa.array([2, 4, 5, 100]), + ... pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + ... ], names=['n_legs', 'animals']) + >>> table.to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Horse + 2 5 Brittle stars + 3 100 Centipede + >>> isinstance(table.to_pandas(), pd.DataFrame) + True + + Convert a RecordBatch to pandas DataFrame: + + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.record_batch([n_legs, animals], + ... 
names=["n_legs", "animals"]) + >>> batch + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + >>> batch.to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Horse + 2 5 Brittle stars + 3 100 Centipede + >>> isinstance(batch.to_pandas(), pd.DataFrame) + True + + Convert a Chunked Array to pandas Series: + + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs.to_pandas() + 0 2 + 1 2 + 2 4 + 3 4 + 4 5 + 5 100 + dtype: int64 + >>> isinstance(n_legs.to_pandas(), pd.Series) + True + """ + options = dict( + pool=memory_pool, + strings_to_categorical=strings_to_categorical, + zero_copy_only=zero_copy_only, + integer_object_nulls=integer_object_nulls, + date_as_object=date_as_object, + timestamp_as_object=timestamp_as_object, + use_threads=use_threads, + deduplicate_objects=deduplicate_objects, + safe=safe, + split_blocks=split_blocks, + self_destruct=self_destruct, + maps_as_pydicts=maps_as_pydicts, + coerce_temporal_nanoseconds=coerce_temporal_nanoseconds + ) + return self._to_pandas(options, categories=categories, + ignore_metadata=ignore_metadata, + types_mapper=types_mapper) + + +cdef PandasOptions _convert_pandas_options(dict options): + cdef PandasOptions result + result.pool = maybe_unbox_memory_pool(options['pool']) + result.strings_to_categorical = options['strings_to_categorical'] + result.zero_copy_only = options['zero_copy_only'] + result.integer_object_nulls = options['integer_object_nulls'] + result.date_as_object = options['date_as_object'] + result.timestamp_as_object = options['timestamp_as_object'] + result.use_threads = options['use_threads'] + result.deduplicate_objects = options['deduplicate_objects'] + result.safe_cast = options['safe'] + result.split_blocks = options['split_blocks'] + result.self_destruct = options['self_destruct'] + result.coerce_temporal_nanoseconds = options['coerce_temporal_nanoseconds'] + 
result.ignore_timezone = os.environ.get('PYARROW_IGNORE_TIMEZONE', False) + + maps_as_pydicts = options['maps_as_pydicts'] + if maps_as_pydicts is None: + result.maps_as_pydicts = MapConversionType.DEFAULT + elif maps_as_pydicts == "lossy": + result.maps_as_pydicts = MapConversionType.LOSSY + elif maps_as_pydicts == "strict": + result.maps_as_pydicts = MapConversionType.STRICT_ + else: + raise ValueError( + "Invalid value for 'maps_as_pydicts': " + + "valid values are 'lossy', 'strict' or `None` (default). " + + f"Received '{maps_as_pydicts}'." + ) + return result + + +cdef class Array(_PandasConvertible): + """ + The base class for all Arrow arrays. + """ + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use one of " + "the `pyarrow.Array.from_*` functions instead." + .format(self.__class__.__name__)) + + cdef void init(self, const shared_ptr[CArray]& sp_array) except *: + self.sp_array = sp_array + self.ap = sp_array.get() + self.type = pyarrow_wrap_data_type(self.sp_array.get().type()) + + def _debug_print(self): + with nogil: + check_status(DebugPrint(deref(self.ap), 0)) + + def diff(self, Array other): + """ + Compare contents of this array against another one. + + Return a string containing the result of diffing this array + (on the left side) against the other array (on the right side). + + Parameters + ---------- + other : Array + The other array to compare this array with. + + Returns + ------- + diff : str + A human-readable printout of the differences. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> left = pa.array(["one", "two", "three"]) + >>> right = pa.array(["two", None, "two-and-a-half", "three"]) + >>> print(left.diff(right)) # doctest: +SKIP + + @@ -0, +0 @@ + -"one" + @@ -2, +1 @@ + +null + +"two-and-a-half" + + """ + self._assert_cpu() + cdef c_string result + with nogil: + result = self.ap.Diff(deref(other.ap)) + return frombytes(result, safe=True) + + def cast(self, object target_type=None, safe=None, options=None, memory_pool=None): + """ + Cast array values to another data type + + See :func:`pyarrow.compute.cast` for usage. + + Parameters + ---------- + target_type : DataType, default None + Type to cast array to. + safe : boolean, default True + Whether to check for conversion errors such as overflow. + options : CastOptions, default None + Additional checks pass by CastOptions + memory_pool : MemoryPool, optional + memory pool to use for allocations during function execution. + + Returns + ------- + cast : Array + """ + self._assert_cpu() + return _pc().cast(self, target_type, safe=safe, + options=options, memory_pool=memory_pool) + + def view(self, object target_type): + """ + Return zero-copy "view" of array as another data type. + + The data types must have compatible columnar buffer layouts + + Parameters + ---------- + target_type : DataType + Type to construct view as. + + Returns + ------- + view : Array + """ + self._assert_cpu() + cdef DataType type = ensure_type(target_type) + cdef shared_ptr[CArray] result + with nogil: + result = GetResultValue(self.ap.View(type.sp_type)) + return pyarrow_wrap_array(result) + + def sum(self, **kwargs): + """ + Sum the values in a numerical array. + + See :func:`pyarrow.compute.sum` for full usage. + + Parameters + ---------- + **kwargs : dict, optional + Options to pass to :func:`pyarrow.compute.sum`. + + Returns + ------- + sum : Scalar + A scalar containing the sum value. 
+ """ + self._assert_cpu() + options = _pc().ScalarAggregateOptions(**kwargs) + return _pc().call_function('sum', [self], options) + + def unique(self): + """ + Compute distinct elements in array. + + Returns + ------- + unique : Array + An array of the same data type, with deduplicated elements. + """ + self._assert_cpu() + return _pc().call_function('unique', [self]) + + def dictionary_encode(self, null_encoding='mask'): + """ + Compute dictionary-encoded representation of array. + + See :func:`pyarrow.compute.dictionary_encode` for full usage. + + Parameters + ---------- + null_encoding : str, default "mask" + How to handle null entries. + + Returns + ------- + encoded : DictionaryArray + A dictionary-encoded version of this array. + """ + self._assert_cpu() + options = _pc().DictionaryEncodeOptions(null_encoding) + return _pc().call_function('dictionary_encode', [self], options) + + def value_counts(self): + """ + Compute counts of unique elements in array. + + Returns + ------- + StructArray + An array of structs + """ + self._assert_cpu() + return _pc().call_function('value_counts', [self]) + + @staticmethod + def from_pandas(obj, mask=None, type=None, bint safe=True, + MemoryPool memory_pool=None): + """ + Convert pandas.Series to an Arrow Array. + + This method uses Pandas semantics about what values indicate + nulls. See pyarrow.array for more general conversion from arrays or + sequences to Arrow arrays. + + Parameters + ---------- + obj : ndarray, pandas.Series, array-like + mask : array (boolean), optional + Indicate which values are null (True) or not null (False). + type : pyarrow.DataType + Explicit type to attempt to coerce to, otherwise will be inferred + from the data. + safe : bool, default True + Check for overflows or other unsafe conversions. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the currently-set default + memory pool. 
+ + Notes + ----- + Localized timestamps will currently be returned as UTC (pandas's native + representation). Timezone-naive data will be implicitly interpreted as + UTC. + + Returns + ------- + array : pyarrow.Array or pyarrow.ChunkedArray + ChunkedArray is returned if object data overflows binary buffer. + """ + return array(obj, mask=mask, type=type, safe=safe, from_pandas=True, + memory_pool=memory_pool) + + def __reduce__(self): + self._assert_cpu() + return _restore_array, \ + (_reduce_array_data(self.sp_array.get().data().get()),) + + @staticmethod + def from_buffers(DataType type, length, buffers, null_count=-1, offset=0, + children=None): + """ + Construct an Array from a sequence of buffers. + + The concrete type returned depends on the datatype. + + Parameters + ---------- + type : DataType + The value type of the array. + length : int + The number of values in the array. + buffers : List[Buffer] + The buffers backing this array. + null_count : int, default -1 + The number of null entries in the array. Negative value means that + the null count is not known. + offset : int, default 0 + The array's logical offset (in values, not in bytes) from the + start of each buffer. + children : List[Array], default None + Nested type children with length matching type.num_fields. 
+ + Returns + ------- + array : Array + """ + cdef: + Buffer buf + Array child + vector[shared_ptr[CBuffer]] c_buffers + vector[shared_ptr[CArrayData]] c_child_data + shared_ptr[CArrayData] array_data + + children = children or [] + + if type.num_fields != len(children): + raise ValueError("Type's expected number of children " + "({0}) did not match the passed number " + "({1}).".format(type.num_fields, len(children))) + + if type.num_buffers != len(buffers): + raise ValueError("Type's expected number of buffers " + "({0}) did not match the passed number " + "({1}).".format(type.num_buffers, len(buffers))) + + for buf in buffers: + # None will produce a null buffer pointer + c_buffers.push_back(pyarrow_unwrap_buffer(buf)) + + for child in children: + c_child_data.push_back(child.ap.data()) + + array_data = CArrayData.MakeWithChildren(type.sp_type, length, + c_buffers, c_child_data, + null_count, offset) + cdef Array result = pyarrow_wrap_array(MakeArray(array_data)) + result.validate() + return result + + @property + def null_count(self): + self._assert_cpu() + return self.sp_array.get().null_count() + + @property + def nbytes(self): + """ + Total number of bytes consumed by the elements of the array. + + In other words, the sum of bytes from all buffer + ranges referenced. + + Unlike `get_total_buffer_size` this method will account for array + offsets. + + If buffers are shared between arrays then the shared + portion will be counted multiple times. + + The dictionary of dictionary arrays will always be counted in their + entirety even if the array only references a portion of the dictionary. + """ + self._assert_cpu() + cdef CResult[int64_t] c_size_res + with nogil: + c_size_res = ReferencedBufferSize(deref(self.ap)) + size = GetResultValue(c_size_res) + return size + + def get_total_buffer_size(self): + """ + The sum of bytes in each buffer referenced by the array. + + An array may only reference a portion of a buffer. 
+ This method will overestimate in this case and return the + byte size of the entire buffer. + + If a buffer is referenced multiple times then it will + only be counted once. + """ + self._assert_cpu() + cdef int64_t total_buffer_size + total_buffer_size = TotalBufferSize(deref(self.ap)) + return total_buffer_size + + def __sizeof__(self): + self._assert_cpu() + return super(Array, self).__sizeof__() + self.nbytes + + def __iter__(self): + self._assert_cpu() + for i in range(len(self)): + yield self.getitem(i) + + def __repr__(self): + type_format = object.__repr__(self) + return '{0}\n{1}'.format(type_format, str(self)) + + def to_string(self, *, int indent=2, int top_level_indent=0, int window=10, + int container_window=2, c_bool skip_new_lines=False): + """ + Render a "pretty-printed" string representation of the Array. + + Note: for data on a non-CPU device, the full array is copied to CPU + memory. + + Parameters + ---------- + indent : int, default 2 + How much to indent the internal items in the string to + the right, by default ``2``. + top_level_indent : int, default 0 + How much to indent right the entire content of the array, + by default ``0``. + window : int + How many primitive items to preview at the begin and end + of the array when the array is bigger than the window. + The other items will be ellipsed. + container_window : int + How many container items (such as a list in a list array) + to preview at the begin and end of the array when the array + is bigger than the window. + skip_new_lines : bool + If the array should be rendered as a single line of text + or if each element should be on its own line. 
+ """ + cdef: + c_string result + PrettyPrintOptions options + + with nogil: + options = PrettyPrintOptions(top_level_indent, window) + options.skip_new_lines = skip_new_lines + options.indent_size = indent + check_status( + PrettyPrint( + deref(self.ap), + options, + &result + ) + ) + + return frombytes(result, safe=True) + + def format(self, **kwargs): + """ + DEPRECATED, use pyarrow.Array.to_string + + Parameters + ---------- + **kwargs : dict + + Returns + ------- + str + """ + import warnings + warnings.warn('Array.format is deprecated, use Array.to_string') + return self.to_string(**kwargs) + + def __str__(self): + return self.to_string() + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + # This also handles comparing with None + # as Array.equals(None) raises a TypeError. + return NotImplemented + + def equals(Array self, Array other not None): + """ + Parameters + ---------- + other : pyarrow.Array + + Returns + ------- + bool + """ + self._assert_cpu() + other._assert_cpu() + return self.ap.Equals(deref(other.ap)) + + def __len__(self): + return self.length() + + cdef int64_t length(self): + if self.sp_array.get(): + return self.sp_array.get().length() + else: + return 0 + + def is_null(self, *, nan_is_null=False): + """ + Return BooleanArray indicating the null values. + + Parameters + ---------- + nan_is_null : bool (optional, default False) + Whether floating-point NaN values should also be considered null. + + Returns + ------- + array : boolean Array + """ + self._assert_cpu() + options = _pc().NullOptions(nan_is_null=nan_is_null) + return _pc().call_function('is_null', [self], options) + + def is_nan(self): + """ + Return BooleanArray indicating the NaN values. + + Returns + ------- + array : boolean Array + """ + self._assert_cpu() + return _pc().call_function('is_nan', [self]) + + def is_valid(self): + """ + Return BooleanArray indicating the non-null values. 
+ """ + self._assert_cpu() + return _pc().is_valid(self) + + def fill_null(self, fill_value): + """ + See :func:`pyarrow.compute.fill_null` for usage. + + Parameters + ---------- + fill_value : any + The replacement value for null entries. + + Returns + ------- + result : Array + A new array with nulls replaced by the given value. + """ + self._assert_cpu() + return _pc().fill_null(self, fill_value) + + def __getitem__(self, key): + """ + Slice or return value at given index + + Parameters + ---------- + key : integer or slice + Slices with step not equal to 1 (or None) will produce a copy + rather than a zero-copy view + + Returns + ------- + value : Scalar (index) or Array (slice) + """ + self._assert_cpu() + if isinstance(key, slice): + return _normalize_slice(self, key) + + return self.getitem(_normalize_index(key, self.length())) + + cdef getitem(self, int64_t i): + self._assert_cpu() + return Scalar.wrap(GetResultValue(self.ap.GetScalar(i))) + + def slice(self, offset=0, length=None): + """ + Compute zero-copy slice of this array. + + Parameters + ---------- + offset : int, default 0 + Offset from start of array to slice. + length : int, default None + Length of slice (default is until end of Array starting from + offset). + + Returns + ------- + sliced : RecordBatch + """ + cdef shared_ptr[CArray] result + + if offset < 0: + raise IndexError('Offset must be non-negative') + + offset = min(len(self), offset) + if length is None: + result = self.ap.Slice(offset) + else: + if length < 0: + raise ValueError('Length must be non-negative') + result = self.ap.Slice(offset, length) + + return pyarrow_wrap_array(result) + + def take(self, object indices): + """ + Select values from an array. + + See :func:`pyarrow.compute.take` for full usage. + + Parameters + ---------- + indices : Array or array-like + The indices in the array whose values will be returned. + + Returns + ------- + taken : Array + An array with the same datatype, containing the taken values. 
+ """ + self._assert_cpu() + return _pc().take(self, indices) + + def drop_null(self): + """ + Remove missing values from an array. + """ + self._assert_cpu() + return _pc().drop_null(self) + + def filter(self, object mask, *, null_selection_behavior='drop'): + """ + Select values from an array. + + See :func:`pyarrow.compute.filter` for full usage. + + Parameters + ---------- + mask : Array or array-like + The boolean mask to filter the array with. + null_selection_behavior : str, default "drop" + How nulls in the mask should be handled. + + Returns + ------- + filtered : Array + An array of the same type, with only the elements selected by + the boolean mask. + """ + self._assert_cpu() + return _pc().filter(self, mask, + null_selection_behavior=null_selection_behavior) + + def index(self, value, start=None, end=None, *, memory_pool=None): + """ + Find the first index of a value. + + See :func:`pyarrow.compute.index` for full usage. + + Parameters + ---------- + value : Scalar or object + The value to look for in the array. + start : int, optional + The start index where to look for `value`. + end : int, optional + The end index where to look for `value`. + memory_pool : MemoryPool, optional + A memory pool for potential memory allocations. + + Returns + ------- + index : Int64Scalar + The index of the value in the array (-1 if not found). + """ + self._assert_cpu() + return _pc().index(self, value, start, end, memory_pool=memory_pool) + + def sort(self, order="ascending", **kwargs): + """ + Sort the Array + + Parameters + ---------- + order : str, default "ascending" + Which order to sort values in. + Accepted values are "ascending", "descending". + **kwargs : dict, optional + Additional sorting options. 
+ As allowed by :class:`SortOptions` + + Returns + ------- + result : Array + """ + self._assert_cpu() + indices = _pc().sort_indices( + self, + options=_pc().SortOptions(sort_keys=[("", order)], **kwargs) + ) + return self.take(indices) + + def _to_pandas(self, options, types_mapper=None, **kwargs): + self._assert_cpu() + return _array_like_to_pandas(self, options, types_mapper=types_mapper) + + def __array__(self, dtype=None, copy=None): + self._assert_cpu() + + if copy is False: + try: + values = self.to_numpy(zero_copy_only=True) + except ArrowInvalid: + raise ValueError( + "Unable to avoid a copy while creating a numpy array as requested.\n" + "If using `np.array(obj, copy=False)` replace it with " + "`np.asarray(obj)` to allow a copy when needed" + ) + # values is already a numpy array at this point, but calling np.array(..) + # again to handle the `dtype` keyword with a no-copy guarantee + return np.array(values, dtype=dtype, copy=False) + + values = self.to_numpy(zero_copy_only=False) + if copy is True and is_numeric(self.type.id) and self.null_count == 0: + # to_numpy did not yet make a copy (is_numeric = integer/floats, no decimal) + return np.array(values, dtype=dtype, copy=True) + + if dtype is None: + return values + return np.asarray(values, dtype=dtype) + + def to_numpy(self, zero_copy_only=True, writable=False): + """ + Return a NumPy view or copy of this array (experimental). + + By default, tries to return a view of this array. This is only + supported for primitive arrays with the same memory layout as NumPy + (i.e. integers, floating point, ..) and without any nulls. + + For the extension arrays, this method simply delegates to the + underlying storage array. + + Parameters + ---------- + zero_copy_only : bool, default True + If True, an exception will be raised if the conversion to a numpy + array would require copying the underlying data (e.g. in presence + of nulls, or for non-primitive types). 
+ writable : bool, default False + For numpy arrays created with zero copy (view on the Arrow data), + the resulting array is not writable (Arrow data is immutable). + By setting this to True, a copy of the array is made to ensure + it is writable. + + Returns + ------- + array : numpy.ndarray + """ + self._assert_cpu() + + cdef: + PyObject* out + PandasOptions c_options + object values + + if zero_copy_only and writable: + raise ValueError( + "Cannot return a writable array if asking for zero-copy") + + # If there are nulls and the array is a DictionaryArray + # decoding the dictionary will make sure nulls are correctly handled. + # Decoding a dictionary does imply a copy by the way, + # so it can't be done if the user requested a zero_copy. + c_options.decode_dictionaries = True + c_options.zero_copy_only = zero_copy_only + c_options.to_numpy = True + + with nogil: + check_status(ConvertArrayToPandas(c_options, self.sp_array, + self, &out)) + + # wrap_array_output uses pandas to convert to Categorical, here + # always convert to numpy array without pandas dependency + array = PyObject_to_object(out) + + if writable and not array.flags.writeable: + # if the conversion already needed to a copy, writeable is True + array = array.copy() + return array + + def to_pylist(self): + """ + Convert to a list of native Python objects. + + Returns + ------- + lst : list + """ + self._assert_cpu() + return [x.as_py() for x in self] + + def tolist(self): + """ + Alias of to_pylist for compatibility with NumPy. + """ + return self.to_pylist() + + def validate(self, *, full=False): + """ + Perform validation checks. An exception is raised if validation fails. + + By default only cheap validation checks are run. Pass `full=True` + for thorough validation checks (potentially O(n)). + + Parameters + ---------- + full : bool, default False + If True, run expensive checks, otherwise cheap checks only. 
+ + Raises + ------ + ArrowInvalid + """ + if full: + self._assert_cpu() + with nogil: + check_status(self.ap.ValidateFull()) + else: + with nogil: + check_status(self.ap.Validate()) + + @property + def offset(self): + """ + A relative position into another array's data. + + The purpose is to enable zero-copy slicing. This value defaults to zero + but must be applied on all operations with the physical storage + buffers. + """ + return self.sp_array.get().offset() + + def buffers(self): + """ + Return a list of Buffer objects pointing to this array's physical + storage. + + To correctly interpret these buffers, you need to also apply the offset + multiplied with the size of the stored data type. + """ + res = [] + _append_array_buffers(self.sp_array.get().data().get(), res) + return res + + def _export_to_c(self, out_ptr, out_schema_ptr=0): + """ + Export to a C ArrowArray struct, given its pointer. + + If a C ArrowSchema struct pointer is also given, the array type + is exported to it at the same time. + + Parameters + ---------- + out_ptr: int + The raw pointer to a C ArrowArray struct. + out_schema_ptr: int (optional) + The raw pointer to a C ArrowSchema struct. + + Be careful: if you don't pass the ArrowArray struct to a consumer, + array memory will leak. This is a low-level function intended for + expert users. + """ + cdef: + void* c_ptr = _as_c_pointer(out_ptr) + void* c_schema_ptr = _as_c_pointer(out_schema_ptr, + allow_null=True) + with nogil: + check_status(ExportArray(deref(self.sp_array), + c_ptr, + c_schema_ptr)) + + @staticmethod + def _import_from_c(in_ptr, type): + """ + Import Array from a C ArrowArray struct, given its pointer + and the imported array type. + + Parameters + ---------- + in_ptr: int + The raw pointer to a C ArrowArray struct. + type: DataType or int + Either a DataType object, or the raw pointer to a C ArrowSchema + struct. + + This is a low-level function intended for expert users. 
+ """ + cdef: + void* c_ptr = _as_c_pointer(in_ptr) + void* c_type_ptr + shared_ptr[CArray] c_array + + c_type = pyarrow_unwrap_data_type(type) + if c_type == nullptr: + # Not a DataType object, perhaps a raw ArrowSchema pointer + c_type_ptr = _as_c_pointer(type) + with nogil: + c_array = GetResultValue(ImportArray( + c_ptr, c_type_ptr)) + else: + with nogil: + c_array = GetResultValue(ImportArray( c_ptr, + c_type)) + return pyarrow_wrap_array(c_array) + + def __arrow_c_array__(self, requested_schema=None): + """ + Get a pair of PyCapsules containing a C ArrowArray representation of the object. + + Parameters + ---------- + requested_schema : PyCapsule | None + A PyCapsule containing a C ArrowSchema representation of a requested + schema. PyArrow will attempt to cast the array to this data type. + If None, the array will be returned as-is, with a type matching the + one returned by :meth:`__arrow_c_schema__()`. + + Returns + ------- + Tuple[PyCapsule, PyCapsule] + A pair of PyCapsules containing a C ArrowSchema and ArrowArray, + respectively. 
+ """ + self._assert_cpu() + + cdef: + ArrowArray* c_array + ArrowSchema* c_schema + shared_ptr[CArray] inner_array + + if requested_schema is not None: + target_type = DataType._import_from_c_capsule(requested_schema) + + if target_type != self.type: + try: + casted_array = _pc().cast(self, target_type, safe=True) + inner_array = pyarrow_unwrap_array(casted_array) + except ArrowInvalid as e: + raise ValueError( + f"Could not cast {self.type} to requested type {target_type}: {e}" + ) + else: + inner_array = self.sp_array + else: + inner_array = self.sp_array + + schema_capsule = alloc_c_schema(&c_schema) + array_capsule = alloc_c_array(&c_array) + + with nogil: + check_status(ExportArray(deref(inner_array), c_array, c_schema)) + + return schema_capsule, array_capsule + + @staticmethod + def _import_from_c_capsule(schema_capsule, array_capsule): + cdef: + ArrowSchema* c_schema + ArrowArray* c_array + shared_ptr[CArray] array + + c_schema = PyCapsule_GetPointer(schema_capsule, 'arrow_schema') + c_array = PyCapsule_GetPointer(array_capsule, 'arrow_array') + + with nogil: + array = GetResultValue(ImportArray(c_array, c_schema)) + + return pyarrow_wrap_array(array) + + def _export_to_c_device(self, out_ptr, out_schema_ptr=0): + """ + Export to a C ArrowDeviceArray struct, given its pointer. + + If a C ArrowSchema struct pointer is also given, the array type + is exported to it at the same time. + + Parameters + ---------- + out_ptr: int + The raw pointer to a C ArrowDeviceArray struct. + out_schema_ptr: int (optional) + The raw pointer to a C ArrowSchema struct. + + Be careful: if you don't pass the ArrowDeviceArray struct to a consumer, + array memory will leak. This is a low-level function intended for + expert users. 
+ """ + cdef: + void* c_ptr = _as_c_pointer(out_ptr) + void* c_schema_ptr = _as_c_pointer(out_schema_ptr, + allow_null=True) + with nogil: + check_status(ExportDeviceArray( + deref(self.sp_array), NULL, + c_ptr, c_schema_ptr)) + + @staticmethod + def _import_from_c_device(in_ptr, type): + """ + Import Array from a C ArrowDeviceArray struct, given its pointer + and the imported array type. + + Parameters + ---------- + in_ptr: int + The raw pointer to a C ArrowDeviceArray struct. + type: DataType or int + Either a DataType object, or the raw pointer to a C ArrowSchema + struct. + + This is a low-level function intended for expert users. + """ + cdef: + ArrowDeviceArray* c_device_array = _as_c_pointer(in_ptr) + void* c_type_ptr + shared_ptr[CArray] c_array + + if c_device_array.device_type == ARROW_DEVICE_CUDA: + _ensure_cuda_loaded() + + c_type = pyarrow_unwrap_data_type(type) + if c_type == nullptr: + # Not a DataType object, perhaps a raw ArrowSchema pointer + c_type_ptr = _as_c_pointer(type) + with nogil: + c_array = GetResultValue( + ImportDeviceArray(c_device_array, c_type_ptr) + ) + else: + with nogil: + c_array = GetResultValue( + ImportDeviceArray(c_device_array, c_type) + ) + return pyarrow_wrap_array(c_array) + + def __arrow_c_device_array__(self, requested_schema=None, **kwargs): + """ + Get a pair of PyCapsules containing a C ArrowDeviceArray representation + of the object. + + Parameters + ---------- + requested_schema : PyCapsule | None + A PyCapsule containing a C ArrowSchema representation of a requested + schema. PyArrow will attempt to cast the array to this data type. + If None, the array will be returned as-is, with a type matching the + one returned by :meth:`__arrow_c_schema__()`. + kwargs + Currently no additional keyword arguments are supported, but + this method will accept any keyword with a value of ``None`` + for compatibility with future keywords. 
+ + Returns + ------- + Tuple[PyCapsule, PyCapsule] + A pair of PyCapsules containing a C ArrowSchema and ArrowDeviceArray, + respectively. + """ + cdef: + ArrowDeviceArray* c_array + ArrowSchema* c_schema + shared_ptr[CArray] inner_array + + non_default_kwargs = [ + name for name, value in kwargs.items() if value is not None + ] + if non_default_kwargs: + raise NotImplementedError( + f"Received unsupported keyword argument(s): {non_default_kwargs}" + ) + + if requested_schema is not None: + target_type = DataType._import_from_c_capsule(requested_schema) + + if target_type != self.type: + if not self.is_cpu: + raise NotImplementedError( + "Casting to a requested schema is only supported for CPU data" + ) + try: + casted_array = _pc().cast(self, target_type, safe=True) + inner_array = pyarrow_unwrap_array(casted_array) + except ArrowInvalid as e: + raise ValueError( + f"Could not cast {self.type} to requested type {target_type}: {e}" + ) + else: + inner_array = self.sp_array + else: + inner_array = self.sp_array + + schema_capsule = alloc_c_schema(&c_schema) + array_capsule = alloc_c_device_array(&c_array) + + with nogil: + check_status(ExportDeviceArray( + deref(inner_array), NULL, + c_array, c_schema)) + + return schema_capsule, array_capsule + + @staticmethod + def _import_from_c_device_capsule(schema_capsule, array_capsule): + cdef: + ArrowSchema* c_schema + ArrowDeviceArray* c_array + shared_ptr[CArray] array + + c_schema = PyCapsule_GetPointer(schema_capsule, 'arrow_schema') + c_array = PyCapsule_GetPointer( + array_capsule, 'arrow_device_array' + ) + + with nogil: + array = GetResultValue(ImportDeviceArray(c_array, c_schema)) + + return pyarrow_wrap_array(array) + + def __dlpack__(self, stream=None): + """Export a primitive array as a DLPack capsule. + + Parameters + ---------- + stream : int, optional + A Python integer representing a pointer to a stream. Currently not supported. 
+ Stream is provided by the consumer to the producer to instruct the producer + to ensure that operations can safely be performed on the array. + + Returns + ------- + capsule : PyCapsule + A DLPack capsule for the array, pointing to a DLManagedTensor. + """ + if stream is None: + dlm_tensor = GetResultValue(ExportToDLPack(self.sp_array)) + + return PyCapsule_New(dlm_tensor, 'dltensor', dlpack_pycapsule_deleter) + else: + raise NotImplementedError( + "Only stream=None is supported." + ) + + def __dlpack_device__(self): + """ + Return the DLPack device tuple this arrays resides on. + + Returns + ------- + tuple : Tuple[int, int] + Tuple with index specifying the type of the device (where + CPU = 1, see cpp/src/arrow/c/dpack_abi.h) and index of the + device which is 0 by default for CPU. + """ + device = GetResultValue(ExportDevice(self.sp_array)) + return device.device_type, device.device_id + + @property + def device_type(self): + """ + The device type where the array resides. + + Returns + ------- + DeviceAllocationType + """ + return _wrap_device_allocation_type(self.sp_array.get().device_type()) + + @property + def is_cpu(self): + """ + Whether the array is CPU-accessible. 
+ """ + return self.device_type == DeviceAllocationType.CPU + + cdef void _assert_cpu(self) except *: + if self.sp_array.get().device_type() != CDeviceAllocationType_kCPU: + raise NotImplementedError("Implemented only for data on CPU device") + + +cdef _array_like_to_pandas(obj, options, types_mapper): + cdef: + PyObject* out + PandasOptions c_options = _convert_pandas_options(options) + + original_type = obj.type + name = obj._name + dtype = None + + if types_mapper: + dtype = types_mapper(original_type) + elif original_type.id == _Type_EXTENSION: + try: + dtype = original_type.to_pandas_dtype() + except NotImplementedError: + pass + + # Only call __from_arrow__ for Arrow extension types or when explicitly + # overridden via types_mapper + if hasattr(dtype, '__from_arrow__'): + arr = dtype.__from_arrow__(obj) + return pandas_api.series(arr, name=name, copy=False) + + if pandas_api.is_v1(): + # ARROW-3789: Coerce date/timestamp types to datetime64[ns] + c_options.coerce_temporal_nanoseconds = True + + if isinstance(obj, Array): + with nogil: + check_status(ConvertArrayToPandas(c_options, + ( obj).sp_array, + obj, &out)) + elif isinstance(obj, ChunkedArray): + with nogil: + check_status(libarrow_python.ConvertChunkedArrayToPandas( + c_options, + ( obj).sp_chunked_array, + obj, &out)) + + arr = wrap_array_output(out) + + if (isinstance(original_type, TimestampType) and + options["timestamp_as_object"]): + # ARROW-5359 - need to specify object dtype to avoid pandas to + # coerce back to ns resolution + dtype = "object" + elif types_mapper: + dtype = types_mapper(original_type) + else: + dtype = None + + result = pandas_api.series(arr, dtype=dtype, name=name, copy=False) + + if (isinstance(original_type, TimestampType) and + original_type.tz is not None and + # can be object dtype for non-ns and timestamp_as_object=True + result.dtype.kind == "M"): + from pyarrow.pandas_compat import make_tz_aware + result = make_tz_aware(result, original_type.tz) + + return result + + 
+cdef wrap_array_output(PyObject* output): + cdef object obj = PyObject_to_object(output) + + if isinstance(obj, dict): + return _pandas_api.categorical_type.from_codes( + obj['indices'], categories=obj['dictionary'], ordered=obj['ordered'] + ) + else: + return obj + + +cdef class NullArray(Array): + """ + Concrete class for Arrow arrays of null data type. + """ + + +cdef class BooleanArray(Array): + """ + Concrete class for Arrow arrays of boolean data type. + """ + @property + def false_count(self): + return ( self.ap).false_count() + + @property + def true_count(self): + return ( self.ap).true_count() + + +cdef class NumericArray(Array): + """ + A base class for Arrow numeric arrays. + """ + + +cdef class IntegerArray(NumericArray): + """ + A base class for Arrow integer arrays. + """ + + +cdef class FloatingPointArray(NumericArray): + """ + A base class for Arrow floating-point arrays. + """ + + +cdef class Int8Array(IntegerArray): + """ + Concrete class for Arrow arrays of int8 data type. + """ + + +cdef class UInt8Array(IntegerArray): + """ + Concrete class for Arrow arrays of uint8 data type. + """ + + +cdef class Int16Array(IntegerArray): + """ + Concrete class for Arrow arrays of int16 data type. + """ + + +cdef class UInt16Array(IntegerArray): + """ + Concrete class for Arrow arrays of uint16 data type. + """ + + +cdef class Int32Array(IntegerArray): + """ + Concrete class for Arrow arrays of int32 data type. + """ + + +cdef class UInt32Array(IntegerArray): + """ + Concrete class for Arrow arrays of uint32 data type. + """ + + +cdef class Int64Array(IntegerArray): + """ + Concrete class for Arrow arrays of int64 data type. + """ + + +cdef class UInt64Array(IntegerArray): + """ + Concrete class for Arrow arrays of uint64 data type. + """ + + +cdef class Date32Array(NumericArray): + """ + Concrete class for Arrow arrays of date32 data type. + """ + + +cdef class Date64Array(NumericArray): + """ + Concrete class for Arrow arrays of date64 data type. 
+ """ + + +cdef class TimestampArray(NumericArray): + """ + Concrete class for Arrow arrays of timestamp data type. + """ + + +cdef class Time32Array(NumericArray): + """ + Concrete class for Arrow arrays of time32 data type. + """ + + +cdef class Time64Array(NumericArray): + """ + Concrete class for Arrow arrays of time64 data type. + """ + + +cdef class DurationArray(NumericArray): + """ + Concrete class for Arrow arrays of duration data type. + """ + + +cdef class MonthDayNanoIntervalArray(Array): + """ + Concrete class for Arrow arrays of interval[MonthDayNano] type. + """ + + def to_pylist(self): + """ + Convert to a list of native Python objects. + + pyarrow.MonthDayNano is used as the native representation. + + Returns + ------- + lst : list + """ + cdef: + CResult[PyObject*] maybe_py_list + PyObject* py_list + CMonthDayNanoIntervalArray* array + array = self.sp_array.get() + maybe_py_list = MonthDayNanoIntervalArrayToPyList(deref(array)) + py_list = GetResultValue(maybe_py_list) + return PyObject_to_object(py_list) + + +cdef class HalfFloatArray(FloatingPointArray): + """ + Concrete class for Arrow arrays of float16 data type. + """ + + +cdef class FloatArray(FloatingPointArray): + """ + Concrete class for Arrow arrays of float32 data type. + """ + + +cdef class DoubleArray(FloatingPointArray): + """ + Concrete class for Arrow arrays of float64 data type. + """ + + +cdef class FixedSizeBinaryArray(Array): + """ + Concrete class for Arrow arrays of a fixed-size binary data type. + """ + + +cdef class Decimal128Array(FixedSizeBinaryArray): + """ + Concrete class for Arrow arrays of decimal128 data type. + """ + + +cdef class Decimal256Array(FixedSizeBinaryArray): + """ + Concrete class for Arrow arrays of decimal256 data type. + """ + +cdef class BaseListArray(Array): + + def flatten(self, recursive=False): + """ + Unnest this [Large]ListArray/[Large]ListViewArray/FixedSizeListArray + according to 'recursive'. 
+ + Note that this method is different from ``self.values`` in that + it takes care of the slicing offset as well as null elements backed + by non-empty sub-lists. + + Parameters + ---------- + recursive : bool, default False, optional + When True, flatten this logical list-array recursively until an + array of non-list values is formed. + + When False, flatten only the top level. + + Returns + ------- + result : Array + + Examples + -------- + + Basic logical list-array's flatten + >>> import pyarrow as pa + >>> values = [1, 2, 3, 4] + >>> offsets = [2, 1, 0] + >>> sizes = [2, 2, 2] + >>> array = pa.ListViewArray.from_arrays(offsets, sizes, values) + >>> array + + [ + [ + 3, + 4 + ], + [ + 2, + 3 + ], + [ + 1, + 2 + ] + ] + >>> array.flatten() + + [ + 3, + 4, + 2, + 3, + 1, + 2 + ] + + When recursive=True, nested list arrays are flattened recursively + until an array of non-list values is formed. + + >>> array = pa.array([ + ... None, + ... [ + ... [1, None, 2], + ... None, + ... [3, 4] + ... ], + ... [], + ... [ + ... [], + ... [5, 6], + ... None + ... ], + ... [ + ... [7, 8] + ... ] + ... ], type=pa.list_(pa.list_(pa.int64()))) + >>> array.flatten(True) + + [ + 1, + null, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ] + """ + options = _pc().ListFlattenOptions(recursive) + return _pc().list_flatten(self, options=options) + + def value_parent_indices(self): + """ + Return array of same length as list child values array where each + output value is the index of the parent list array slot containing each + child value. + + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array([[1, 2, 3], [], None, [4]], + ... type=pa.list_(pa.int32())) + >>> arr.value_parent_indices() + + [ + 0, + 0, + 0, + 3 + ] + """ + return _pc().list_parent_indices(self) + + def value_lengths(self): + """ + Return integers array with values equal to the respective length of + each list element. Null list values are null in the output. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array([[1, 2, 3], [], None, [4]], + ... type=pa.list_(pa.int32())) + >>> arr.value_lengths() + + [ + 3, + 0, + null, + 1 + ] + """ + return _pc().list_value_length(self) + + +cdef class ListArray(BaseListArray): + """ + Concrete class for Arrow arrays of a list data type. + """ + + @staticmethod + def from_arrays(offsets, values, DataType type=None, MemoryPool pool=None, mask=None): + """ + Construct ListArray from arrays of int32 offsets and values. + + Parameters + ---------- + offsets : Array (int32 type) + values : Array (any type) + type : DataType, optional + If not specified, a default ListType with the values' type is + used. + pool : MemoryPool, optional + mask : Array (boolean type), optional + Indicate which values are null (True) or not null (False). + + Returns + ------- + list_array : ListArray + + Examples + -------- + >>> import pyarrow as pa + >>> values = pa.array([1, 2, 3, 4]) + >>> offsets = pa.array([0, 2, 4]) + >>> pa.ListArray.from_arrays(offsets, values) + + [ + [ + 1, + 2 + ], + [ + 3, + 4 + ] + ] + >>> # nulls in the offsets array become null lists + >>> offsets = pa.array([0, None, 2, 4]) + >>> pa.ListArray.from_arrays(offsets, values) + + [ + [ + 1, + 2 + ], + null, + [ + 3, + 4 + ] + ] + """ + cdef: + Array _offsets, _values + shared_ptr[CArray] out + shared_ptr[CBuffer] c_mask + cdef CMemoryPool* cpool = maybe_unbox_memory_pool(pool) + + _offsets = asarray(offsets, type='int32') + _values = asarray(values) + + c_mask = c_mask_inverted_from_obj(mask, pool) + + if type is not None: + with nogil: + out = GetResultValue( + CListArray.FromArraysAndType( + type.sp_type, _offsets.ap[0], _values.ap[0], cpool, c_mask)) + else: + with nogil: + out = GetResultValue( + CListArray.FromArrays( + _offsets.ap[0], _values.ap[0], cpool, c_mask)) + cdef Array result = pyarrow_wrap_array(out) + result.validate() + return result + + @property + def values(self): + """ + Return the underlying 
array of values which backs the ListArray + ignoring the array's offset. + + If any of the list elements are null, but are backed by a + non-empty sub-list, those elements will be included in the + output. + + Compare with :meth:`flatten`, which returns only the non-null + values taking into consideration the array's offset. + + Returns + ------- + values : Array + + See Also + -------- + ListArray.flatten : ... + + Examples + -------- + + The values include null elements from sub-lists: + + >>> import pyarrow as pa + >>> array = pa.array([[1, 2], None, [3, 4, None, 6]]) + >>> array.values + + [ + 1, + 2, + 3, + 4, + null, + 6 + ] + + If an array is sliced, the slice still uses the same + underlying data as the original array, just with an + offset. Since values ignores the offset, the values are the + same: + + >>> sliced = array.slice(1, 2) + >>> sliced + + [ + null, + [ + 3, + 4, + null, + 6 + ] + ] + >>> sliced.values + + [ + 1, + 2, + 3, + 4, + null, + 6 + ] + + """ + cdef CListArray* arr = self.ap + return pyarrow_wrap_array(arr.values()) + + @property + def offsets(self): + """ + Return the list offsets as an int32 array. + + The returned array will not have a validity bitmap, so you cannot + expect to pass it to `ListArray.from_arrays` and get back the same + list array if the original one has nulls. + + Returns + ------- + offsets : Int32Array + + Examples + -------- + >>> import pyarrow as pa + >>> array = pa.array([[1, 2], None, [3, 4, 5]]) + >>> array.offsets + + [ + 0, + 2, + 2, + 5 + ] + """ + return pyarrow_wrap_array(( self.ap).offsets()) + + +cdef class LargeListArray(BaseListArray): + """ + Concrete class for Arrow arrays of a large list data type. + + Identical to ListArray, but 64-bit offsets. + """ + + @staticmethod + def from_arrays(offsets, values, DataType type=None, MemoryPool pool=None, mask=None): + """ + Construct LargeListArray from arrays of int64 offsets and values. 
+ + Parameters + ---------- + offsets : Array (int64 type) + values : Array (any type) + type : DataType, optional + If not specified, a default ListType with the values' type is + used. + pool : MemoryPool, optional + mask : Array (boolean type), optional + Indicate which values are null (True) or not null (False). + + Returns + ------- + list_array : LargeListArray + """ + cdef: + Array _offsets, _values + shared_ptr[CArray] out + shared_ptr[CBuffer] c_mask + + cdef CMemoryPool* cpool = maybe_unbox_memory_pool(pool) + + _offsets = asarray(offsets, type='int64') + _values = asarray(values) + + c_mask = c_mask_inverted_from_obj(mask, pool) + + if type is not None: + with nogil: + out = GetResultValue( + CLargeListArray.FromArraysAndType( + type.sp_type, _offsets.ap[0], _values.ap[0], cpool, c_mask)) + else: + with nogil: + out = GetResultValue( + CLargeListArray.FromArrays( + _offsets.ap[0], _values.ap[0], cpool, c_mask)) + cdef Array result = pyarrow_wrap_array(out) + result.validate() + return result + + @property + def values(self): + """ + Return the underlying array of values which backs the LargeListArray + ignoring the array's offset. + + If any of the list elements are null, but are backed by a + non-empty sub-list, those elements will be included in the + output. + + Compare with :meth:`flatten`, which returns only the non-null + values taking into consideration the array's offset. + + Returns + ------- + values : Array + + See Also + -------- + LargeListArray.flatten : ... + + Examples + -------- + + The values include null elements from the sub-lists: + + >>> import pyarrow as pa + >>> array = pa.array( + ... [[1, 2], None, [3, 4, None, 6]], + ... type=pa.large_list(pa.int32()), + ... ) + >>> array.values + + [ + 1, + 2, + 3, + 4, + null, + 6 + ] + + If an array is sliced, the slice still uses the same + underlying data as the original array, just with an + offset. 
Since values ignores the offset, the values are the + same: + + >>> sliced = array.slice(1, 2) + >>> sliced + + [ + null, + [ + 3, + 4, + null, + 6 + ] + ] + >>> sliced.values + + [ + 1, + 2, + 3, + 4, + null, + 6 + ] + """ + cdef CLargeListArray* arr = self.ap + return pyarrow_wrap_array(arr.values()) + + @property + def offsets(self): + """ + Return the list offsets as an int64 array. + + The returned array will not have a validity bitmap, so you cannot + expect to pass it to `LargeListArray.from_arrays` and get back the + same list array if the original one has nulls. + + Returns + ------- + offsets : Int64Array + """ + return pyarrow_wrap_array(( self.ap).offsets()) + + +cdef class ListViewArray(BaseListArray): + """ + Concrete class for Arrow arrays of a list view data type. + """ + + @staticmethod + def from_arrays(offsets, sizes, values, DataType type=None, MemoryPool pool=None, mask=None): + """ + Construct ListViewArray from arrays of int32 offsets, sizes, and values. + + Parameters + ---------- + offsets : Array (int32 type) + sizes : Array (int32 type) + values : Array (any type) + type : DataType, optional + If not specified, a default ListType with the values' type is + used. + pool : MemoryPool, optional + mask : Array (boolean type), optional + Indicate which values are null (True) or not null (False). 
+ + Returns + ------- + list_view_array : ListViewArray + + Examples + -------- + >>> import pyarrow as pa + >>> values = pa.array([1, 2, 3, 4]) + >>> offsets = pa.array([0, 1, 2]) + >>> sizes = pa.array([2, 2, 2]) + >>> pa.ListViewArray.from_arrays(offsets, sizes, values) + + [ + [ + 1, + 2 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ] + ] + >>> # use a null mask to represent null values + >>> mask = pa.array([False, True, False]) + >>> pa.ListViewArray.from_arrays(offsets, sizes, values, mask=mask) + + [ + [ + 1, + 2 + ], + null, + [ + 3, + 4 + ] + ] + >>> # null values can be defined in either offsets or sizes arrays + >>> # WARNING: this will result in a copy of the offsets or sizes arrays + >>> offsets = pa.array([0, None, 2]) + >>> pa.ListViewArray.from_arrays(offsets, sizes, values) + + [ + [ + 1, + 2 + ], + null, + [ + 3, + 4 + ] + ] + """ + cdef: + Array _offsets, _sizes, _values + shared_ptr[CArray] out + shared_ptr[CBuffer] c_mask + CMemoryPool* cpool = maybe_unbox_memory_pool(pool) + + _offsets = asarray(offsets, type='int32') + _sizes = asarray(sizes, type='int32') + _values = asarray(values) + + c_mask = c_mask_inverted_from_obj(mask, pool) + + if type is not None: + with nogil: + out = GetResultValue( + CListViewArray.FromArraysAndType( + type.sp_type, _offsets.ap[0], _sizes.ap[0], _values.ap[0], cpool, c_mask)) + else: + with nogil: + out = GetResultValue( + CListViewArray.FromArrays( + _offsets.ap[0], _sizes.ap[0], _values.ap[0], cpool, c_mask)) + cdef Array result = pyarrow_wrap_array(out) + result.validate() + return result + + @property + def values(self): + """ + Return the underlying array of values which backs the ListViewArray + ignoring the array's offset and sizes. + + The values array may be out of order and/or contain additional values + that are not found in the logical representation of the array. The only + guarantee is that each non-null value in the ListView Array is contiguous. 
+ + Compare with :meth:`flatten`, which returns only the non-null + values taking into consideration the array's order and offset. + + Returns + ------- + values : Array + + Examples + -------- + The values include null elements from sub-lists: + + >>> import pyarrow as pa + >>> values = [1, 2, None, 3, 4] + >>> offsets = [0, 0, 1] + >>> sizes = [2, 0, 4] + >>> array = pa.ListViewArray.from_arrays(offsets, sizes, values) + >>> array + + [ + [ + 1, + 2 + ], + [], + [ + 2, + null, + 3, + 4 + ] + ] + >>> array.values + + [ + 1, + 2, + null, + 3, + 4 + ] + """ + cdef CListViewArray* arr = self.ap + return pyarrow_wrap_array(arr.values()) + + @property + def offsets(self): + """ + Return the list offsets as an int32 array. + + The returned array will not have a validity bitmap, so you cannot + expect to pass it to `ListViewArray.from_arrays` and get back the same + list array if the original one has nulls. + + Returns + ------- + offsets : Int32Array + + Examples + -------- + >>> import pyarrow as pa + >>> values = [1, 2, None, 3, 4] + >>> offsets = [0, 0, 1] + >>> sizes = [2, 0, 4] + >>> array = pa.ListViewArray.from_arrays(offsets, sizes, values) + >>> array.offsets + + [ + 0, + 0, + 1 + ] + """ + return pyarrow_wrap_array(( self.ap).offsets()) + + @property + def sizes(self): + """ + Return the list sizes as an int32 array. + + The returned array will not have a validity bitmap, so you cannot + expect to pass it to `ListViewArray.from_arrays` and get back the same + list array if the original one has nulls. + + Returns + ------- + sizes : Int32Array + + Examples + -------- + >>> import pyarrow as pa + >>> values = [1, 2, None, 3, 4] + >>> offsets = [0, 0, 1] + >>> sizes = [2, 0, 4] + >>> array = pa.ListViewArray.from_arrays(offsets, sizes, values) + >>> array.sizes + + [ + 2, + 0, + 4 + ] + """ + return pyarrow_wrap_array(( self.ap).sizes()) + + +cdef class LargeListViewArray(BaseListArray): + """ + Concrete class for Arrow arrays of a large list view data type. 
+ + Identical to ListViewArray, but with 64-bit offsets. + """ + @staticmethod + def from_arrays(offsets, sizes, values, DataType type=None, MemoryPool pool=None, mask=None): + """ + Construct LargeListViewArray from arrays of int64 offsets and values. + + Parameters + ---------- + offsets : Array (int64 type) + sizes : Array (int64 type) + values : Array (any type) + type : DataType, optional + If not specified, a default ListType with the values' type is + used. + pool : MemoryPool, optional + mask : Array (boolean type), optional + Indicate which values are null (True) or not null (False). + + Returns + ------- + list_view_array : LargeListViewArray + + Examples + -------- + >>> import pyarrow as pa + >>> values = pa.array([1, 2, 3, 4]) + >>> offsets = pa.array([0, 1, 2]) + >>> sizes = pa.array([2, 2, 2]) + >>> pa.LargeListViewArray.from_arrays(offsets, sizes, values) + + [ + [ + 1, + 2 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ] + ] + >>> # use a null mask to represent null values + >>> mask = pa.array([False, True, False]) + >>> pa.LargeListViewArray.from_arrays(offsets, sizes, values, mask=mask) + + [ + [ + 1, + 2 + ], + null, + [ + 3, + 4 + ] + ] + >>> # null values can be defined in either offsets or sizes arrays + >>> # WARNING: this will result in a copy of the offsets or sizes arrays + >>> offsets = pa.array([0, None, 2]) + >>> pa.LargeListViewArray.from_arrays(offsets, sizes, values) + + [ + [ + 1, + 2 + ], + null, + [ + 3, + 4 + ] + ] + """ + cdef: + Array _offsets, _sizes, _values + shared_ptr[CArray] out + shared_ptr[CBuffer] c_mask + CMemoryPool* cpool = maybe_unbox_memory_pool(pool) + + _offsets = asarray(offsets, type='int64') + _sizes = asarray(sizes, type='int64') + _values = asarray(values) + + c_mask = c_mask_inverted_from_obj(mask, pool) + + if type is not None: + with nogil: + out = GetResultValue( + CLargeListViewArray.FromArraysAndType( + type.sp_type, _offsets.ap[0], _sizes.ap[0], _values.ap[0], cpool, c_mask)) + else: + with nogil: + out = 
GetResultValue( + CLargeListViewArray.FromArrays( + _offsets.ap[0], _sizes.ap[0], _values.ap[0], cpool, c_mask)) + cdef Array result = pyarrow_wrap_array(out) + result.validate() + return result + + @property + def values(self): + """ + Return the underlying array of values which backs the LargeListArray + ignoring the array's offset. + + The values array may be out of order and/or contain additional values + that are not found in the logical representation of the array. The only + guarantee is that each non-null value in the ListView Array is contiguous. + + Compare with :meth:`flatten`, which returns only the non-null + values taking into consideration the array's order and offset. + + Returns + ------- + values : Array + + See Also + -------- + LargeListArray.flatten : ... + + Examples + -------- + + The values include null elements from sub-lists: + + >>> import pyarrow as pa + >>> values = [1, 2, None, 3, 4] + >>> offsets = [0, 0, 1] + >>> sizes = [2, 0, 4] + >>> array = pa.LargeListViewArray.from_arrays(offsets, sizes, values) + >>> array + + [ + [ + 1, + 2 + ], + [], + [ + 2, + null, + 3, + 4 + ] + ] + >>> array.values + + [ + 1, + 2, + null, + 3, + 4 + ] + """ + cdef CLargeListViewArray* arr = self.ap + return pyarrow_wrap_array(arr.values()) + + @property + def offsets(self): + """ + Return the list view offsets as an int64 array. + + The returned array will not have a validity bitmap, so you cannot + expect to pass it to `LargeListViewArray.from_arrays` and get back the + same list array if the original one has nulls. + + Returns + ------- + offsets : Int64Array + + Examples + -------- + + >>> import pyarrow as pa + >>> values = [1, 2, None, 3, 4] + >>> offsets = [0, 0, 1] + >>> sizes = [2, 0, 4] + >>> array = pa.LargeListViewArray.from_arrays(offsets, sizes, values) + >>> array.offsets + + [ + 0, + 0, + 1 + ] + """ + return pyarrow_wrap_array(( self.ap).offsets()) + + @property + def sizes(self): + """ + Return the list view sizes as an int64 array. 
+ + The returned array will not have a validity bitmap, so you cannot + expect to pass it to `LargeListViewArray.from_arrays` and get back the + same list array if the original one has nulls. + + Returns + ------- + sizes : Int64Array + + Examples + -------- + + >>> import pyarrow as pa + >>> values = [1, 2, None, 3, 4] + >>> offsets = [0, 0, 1] + >>> sizes = [2, 0, 4] + >>> array = pa.LargeListViewArray.from_arrays(offsets, sizes, values) + >>> array.sizes + + [ + 2, + 0, + 4 + ] + """ + return pyarrow_wrap_array(( self.ap).sizes()) + + +cdef class MapArray(ListArray): + """ + Concrete class for Arrow arrays of a map data type. + """ + + @staticmethod + def from_arrays(offsets, keys, items, DataType type=None, MemoryPool pool=None, mask=None): + """ + Construct MapArray from arrays of int32 offsets and key, item arrays. + + Parameters + ---------- + offsets : array-like or sequence (int32 type) + keys : array-like or sequence (any type) + items : array-like or sequence (any type) + type : DataType, optional + If not specified, a default MapArray with the keys' and items' type is used. + pool : MemoryPool + mask : Array (boolean type), optional + Indicate which values are null (True) or not null (False). + + Returns + ------- + map_array : MapArray + + Examples + -------- + First, let's understand the structure of our dataset when viewed in a rectangular data model. + The total of 5 respondents answered the question "How much did you like the movie x?". + The value -1 in the integer array means that the value is missing. The boolean array + represents the null bitmask corresponding to the missing values in the integer array. + + >>> import pyarrow as pa + >>> movies_rectangular = np.ma.masked_array([ + ... [10, -1, -1], + ... [8, 4, 5], + ... [-1, 10, 3], + ... [-1, -1, -1], + ... [-1, -1, -1] + ... ], + ... [ + ... [False, True, True], + ... [False, False, False], + ... [True, False, False], + ... [True, True, True], + ... [True, True, True], + ... 
]) + + To represent the same data with the MapArray and from_arrays, the data is + formed like this: + + >>> offsets = [ + ... 0, # -- row 1 start + ... 1, # -- row 2 start + ... 4, # -- row 3 start + ... 6, # -- row 4 start + ... 6, # -- row 5 start + ... 6, # -- row 5 end + ... ] + >>> movies = [ + ... "Dark Knight", # ---------------------------------- row 1 + ... "Dark Knight", "Meet the Parents", "Superman", # -- row 2 + ... "Meet the Parents", "Superman", # ----------------- row 3 + ... ] + >>> likings = [ + ... 10, # -------- row 1 + ... 8, 4, 5, # --- row 2 + ... 10, 3 # ------ row 3 + ... ] + >>> pa.MapArray.from_arrays(offsets, movies, likings).to_pandas() + 0 [(Dark Knight, 10)] + 1 [(Dark Knight, 8), (Meet the Parents, 4), (Sup... + 2 [(Meet the Parents, 10), (Superman, 3)] + 3 [] + 4 [] + dtype: object + + If the data in the empty rows needs to be marked as missing, it's possible + to do so by modifying the offsets argument, so that we specify `None` as + the starting positions of the rows we want marked as missing. The end row + offset still has to refer to the existing value from keys (and values): + + >>> offsets = [ + ... 0, # ----- row 1 start + ... 1, # ----- row 2 start + ... 4, # ----- row 3 start + ... None, # -- row 4 start + ... None, # -- row 5 start + ... 6, # ----- row 5 end + ... ] + >>> pa.MapArray.from_arrays(offsets, movies, likings).to_pandas() + 0 [(Dark Knight, 10)] + 1 [(Dark Knight, 8), (Meet the Parents, 4), (Sup... 
+ 2 [(Meet the Parents, 10), (Superman, 3)] + 3 None + 4 None + dtype: object + """ + cdef: + Array _offsets, _keys, _items + shared_ptr[CArray] out + shared_ptr[CBuffer] c_mask + cdef CMemoryPool* cpool = maybe_unbox_memory_pool(pool) + + _offsets = asarray(offsets, type='int32') + _keys = asarray(keys) + _items = asarray(items) + + c_mask = c_mask_inverted_from_obj(mask, pool) + + if type is not None: + with nogil: + out = GetResultValue( + CMapArray.FromArraysAndType( + type.sp_type, _offsets.sp_array, + _keys.sp_array, _items.sp_array, cpool, c_mask)) + else: + with nogil: + out = GetResultValue( + CMapArray.FromArrays(_offsets.sp_array, + _keys.sp_array, + _items.sp_array, cpool, c_mask)) + cdef Array result = pyarrow_wrap_array(out) + result.validate() + return result + + @property + def keys(self): + """Flattened array of keys across all maps in array""" + return pyarrow_wrap_array(( self.ap).keys()) + + @property + def items(self): + """Flattened array of items across all maps in array""" + return pyarrow_wrap_array(( self.ap).items()) + + +cdef class FixedSizeListArray(BaseListArray): + """ + Concrete class for Arrow arrays of a fixed size list data type. + """ + + @staticmethod + def from_arrays(values, list_size=None, DataType type=None, mask=None): + """ + Construct FixedSizeListArray from array of values and a list length. + + Parameters + ---------- + values : Array (any type) + list_size : int + The fixed length of the lists. + type : DataType, optional + If not specified, a default ListType with the values' type and + `list_size` length is used. + mask : Array (boolean type), optional + Indicate which values are null (True) or not null (False). 
+ + + Returns + ------- + FixedSizeListArray + + Examples + -------- + + Create from a values array and a list size: + + >>> import pyarrow as pa + >>> values = pa.array([1, 2, 3, 4]) + >>> arr = pa.FixedSizeListArray.from_arrays(values, 2) + >>> arr + + [ + [ + 1, + 2 + ], + [ + 3, + 4 + ] + ] + + Or create from a values array, list size and matching type: + + >>> typ = pa.list_(pa.field("values", pa.int64()), 2) + >>> arr = pa.FixedSizeListArray.from_arrays(values,type=typ) + >>> arr + + [ + [ + 1, + 2 + ], + [ + 3, + 4 + ] + ] + """ + cdef: + Array _values + int32_t _list_size + CResult[shared_ptr[CArray]] c_result + + _values = asarray(values) + + c_mask = c_mask_inverted_from_obj(mask, None) + + if type is not None: + if list_size is not None: + raise ValueError("Cannot specify both list_size and type") + with nogil: + c_result = CFixedSizeListArray.FromArraysAndType( + _values.sp_array, type.sp_type, c_mask) + else: + if list_size is None: + raise ValueError("Should specify one of list_size and type") + _list_size = list_size + with nogil: + c_result = CFixedSizeListArray.FromArrays( + _values.sp_array, _list_size, c_mask) + cdef Array result = pyarrow_wrap_array(GetResultValue(c_result)) + result.validate() + return result + + @property + def values(self): + """ + Return the underlying array of values which backs the + FixedSizeListArray. + + Note even null elements are included. + + Compare with :meth:`flatten`, which returns only the non-null + sub-list values. + + Returns + ------- + values : Array + + See Also + -------- + FixedSizeListArray.flatten : ... + + Examples + -------- + >>> import pyarrow as pa + >>> array = pa.array( + ... [[1, 2], None, [3, None]], + ... type=pa.list_(pa.int32(), 2) + ... ) + >>> array.values + + [ + 1, + 2, + null, + null, + 3, + null + ] + + """ + cdef CFixedSizeListArray* arr = self.ap + return pyarrow_wrap_array(arr.values()) + + +cdef class UnionArray(Array): + """ + Concrete class for Arrow arrays of a Union data type. 
+ """ + + def child(self, int pos): + """ + DEPRECATED, use field() instead. + + Parameters + ---------- + pos : int + The physical index of the union child field (not its type code). + + Returns + ------- + field : pyarrow.Field + The given child field. + """ + import warnings + warnings.warn("child is deprecated, use field", FutureWarning) + return self.field(pos) + + def field(self, int pos): + """ + Return the given child field as an individual array. + + For sparse unions, the returned array has its offset, length, + and null count adjusted. + + For dense unions, the returned array is unchanged. + + Parameters + ---------- + pos : int + The physical index of the union child field (not its type code). + + Returns + ------- + field : Array + The given child field. + """ + cdef shared_ptr[CArray] result + result = ( self.ap).field(pos) + if result != NULL: + return pyarrow_wrap_array(result) + raise KeyError("UnionArray does not have child {}".format(pos)) + + @property + def type_codes(self): + """Get the type codes array.""" + buf = pyarrow_wrap_buffer(( self.ap).type_codes()) + return Array.from_buffers(int8(), len(self), [None, buf]) + + @property + def offsets(self): + """ + Get the value offsets array (dense arrays only). + + Does not account for any slice offset. 
+ """ + if self.type.mode != "dense": + raise ArrowTypeError("Can only get value offsets for dense arrays") + cdef CDenseUnionArray* dense = self.ap + buf = pyarrow_wrap_buffer(dense.value_offsets()) + return Array.from_buffers(int32(), len(self), [None, buf]) + + @staticmethod + def from_dense(Array types, Array value_offsets, list children, + list field_names=None, list type_codes=None): + """ + Construct dense UnionArray from arrays of int8 types, int32 offsets and + children arrays + + Parameters + ---------- + types : Array (int8 type) + value_offsets : Array (int32 type) + children : list + field_names : list + type_codes : list + + Returns + ------- + union_array : UnionArray + """ + cdef: + shared_ptr[CArray] out + vector[shared_ptr[CArray]] c + Array child + vector[c_string] c_field_names + vector[int8_t] c_type_codes + + for child in children: + c.push_back(child.sp_array) + if field_names is not None: + for x in field_names: + c_field_names.push_back(tobytes(x)) + if type_codes is not None: + for x in type_codes: + c_type_codes.push_back(x) + + with nogil: + out = GetResultValue(CDenseUnionArray.Make( + deref(types.ap), deref(value_offsets.ap), c, c_field_names, + c_type_codes)) + + cdef Array result = pyarrow_wrap_array(out) + result.validate() + return result + + @staticmethod + def from_sparse(Array types, list children, list field_names=None, + list type_codes=None): + """ + Construct sparse UnionArray from arrays of int8 types and children + arrays + + Parameters + ---------- + types : Array (int8 type) + children : list + field_names : list + type_codes : list + + Returns + ------- + union_array : UnionArray + """ + cdef: + shared_ptr[CArray] out + vector[shared_ptr[CArray]] c + Array child + vector[c_string] c_field_names + vector[int8_t] c_type_codes + + for child in children: + c.push_back(child.sp_array) + if field_names is not None: + for x in field_names: + c_field_names.push_back(tobytes(x)) + if type_codes is not None: + for x in 
type_codes: + c_type_codes.push_back(x) + + with nogil: + out = GetResultValue(CSparseUnionArray.Make( + deref(types.ap), c, c_field_names, c_type_codes)) + + cdef Array result = pyarrow_wrap_array(out) + result.validate() + return result + + +cdef class StringArray(Array): + """ + Concrete class for Arrow arrays of string (or utf8) data type. + """ + + @staticmethod + def from_buffers(int length, Buffer value_offsets, Buffer data, + Buffer null_bitmap=None, int null_count=-1, + int offset=0): + """ + Construct a StringArray from value_offsets and data buffers. + If there are nulls in the data, also a null_bitmap and the matching + null_count must be passed. + + Parameters + ---------- + length : int + value_offsets : Buffer + data : Buffer + null_bitmap : Buffer, optional + null_count : int, default 0 + offset : int, default 0 + + Returns + ------- + string_array : StringArray + """ + return Array.from_buffers(utf8(), length, + [null_bitmap, value_offsets, data], + null_count, offset) + + +cdef class LargeStringArray(Array): + """ + Concrete class for Arrow arrays of large string (or utf8) data type. + """ + + @staticmethod + def from_buffers(int length, Buffer value_offsets, Buffer data, + Buffer null_bitmap=None, int null_count=-1, + int offset=0): + """ + Construct a LargeStringArray from value_offsets and data buffers. + If there are nulls in the data, also a null_bitmap and the matching + null_count must be passed. + + Parameters + ---------- + length : int + value_offsets : Buffer + data : Buffer + null_bitmap : Buffer, optional + null_count : int, default 0 + offset : int, default 0 + + Returns + ------- + string_array : StringArray + """ + return Array.from_buffers(large_utf8(), length, + [null_bitmap, value_offsets, data], + null_count, offset) + + +cdef class StringViewArray(Array): + """ + Concrete class for Arrow arrays of string (or utf8) view data type. 
+ """ + + +cdef class BinaryArray(Array): + """ + Concrete class for Arrow arrays of variable-sized binary data type. + """ + @property + def total_values_length(self): + """ + The number of bytes from beginning to end of the data buffer addressed + by the offsets of this BinaryArray. + """ + return ( self.ap).total_values_length() + + +cdef class LargeBinaryArray(Array): + """ + Concrete class for Arrow arrays of large variable-sized binary data type. + """ + @property + def total_values_length(self): + """ + The number of bytes from beginning to end of the data buffer addressed + by the offsets of this LargeBinaryArray. + """ + return ( self.ap).total_values_length() + + +cdef class BinaryViewArray(Array): + """ + Concrete class for Arrow arrays of variable-sized binary view data type. + """ + + +cdef class DictionaryArray(Array): + """ + Concrete class for dictionary-encoded Arrow arrays. + """ + + def dictionary_encode(self): + return self + + def dictionary_decode(self): + """ + Decodes the DictionaryArray to an Array. + """ + return self.dictionary.take(self.indices) + + @property + def dictionary(self): + cdef CDictionaryArray* darr = (self.ap) + + if self._dictionary is None: + self._dictionary = pyarrow_wrap_array(darr.dictionary()) + + return self._dictionary + + @property + def indices(self): + cdef CDictionaryArray* darr = (self.ap) + + if self._indices is None: + self._indices = pyarrow_wrap_array(darr.indices()) + + return self._indices + + @staticmethod + def from_buffers(DataType type, int64_t length, buffers, Array dictionary, + int64_t null_count=-1, int64_t offset=0): + """ + Construct a DictionaryArray from buffers. + + Parameters + ---------- + type : pyarrow.DataType + length : int + The number of values in the array. + buffers : List[Buffer] + The buffers backing the indices array. + dictionary : pyarrow.Array, ndarray or pandas.Series + The array of values referenced by the indices. 
+ null_count : int, default -1 + The number of null entries in the indices array. Negative value means that + the null count is not known. + offset : int, default 0 + The array's logical offset (in values, not in bytes) from the + start of each buffer. + + Returns + ------- + dict_array : DictionaryArray + """ + cdef: + vector[shared_ptr[CBuffer]] c_buffers + shared_ptr[CDataType] c_type + shared_ptr[CArrayData] c_data + shared_ptr[CArray] c_result + + for buf in buffers: + c_buffers.push_back(pyarrow_unwrap_buffer(buf)) + + c_type = pyarrow_unwrap_data_type(type) + + with nogil: + c_data = CArrayData.Make( + c_type, length, c_buffers, null_count, offset) + c_data.get().dictionary = dictionary.sp_array.get().data() + c_result.reset(new CDictionaryArray(c_data)) + + cdef Array result = pyarrow_wrap_array(c_result) + result.validate() + return result + + @staticmethod + def from_arrays(indices, dictionary, mask=None, bint ordered=False, + bint from_pandas=False, bint safe=True, + MemoryPool memory_pool=None): + """ + Construct a DictionaryArray from indices and values. + + Parameters + ---------- + indices : pyarrow.Array, numpy.ndarray or pandas.Series, int type + Non-negative integers referencing the dictionary values by zero + based index. + dictionary : pyarrow.Array, ndarray or pandas.Series + The array of values referenced by the indices. + mask : ndarray or pandas.Series, bool type + True values indicate that indices are actually null. + ordered : bool, default False + Set to True if the category values are ordered. + from_pandas : bool, default False + If True, the indices should be treated as though they originated in + a pandas.Categorical (null encoded as -1). + safe : bool, default True + If True, check that the dictionary indices are in range. + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise uses default pool. 
+ + Returns + ------- + dict_array : DictionaryArray + """ + cdef: + Array _indices, _dictionary + shared_ptr[CDataType] c_type + shared_ptr[CArray] c_result + + if isinstance(indices, Array): + if mask is not None: + raise NotImplementedError( + "mask not implemented with Arrow array inputs yet") + _indices = indices + else: + if from_pandas: + _indices = _codes_to_indices(indices, mask, None, memory_pool) + else: + _indices = array(indices, mask=mask, memory_pool=memory_pool) + + if isinstance(dictionary, Array): + _dictionary = dictionary + else: + _dictionary = array(dictionary, memory_pool=memory_pool) + + if not isinstance(_indices, IntegerArray): + raise ValueError('Indices must be integer type') + + cdef c_bool c_ordered = ordered + + c_type.reset(new CDictionaryType(_indices.type.sp_type, + _dictionary.sp_array.get().type(), + c_ordered)) + + if safe: + with nogil: + c_result = GetResultValue( + CDictionaryArray.FromArrays(c_type, _indices.sp_array, + _dictionary.sp_array)) + else: + c_result.reset(new CDictionaryArray(c_type, _indices.sp_array, + _dictionary.sp_array)) + + cdef Array result = pyarrow_wrap_array(c_result) + result.validate() + return result + + +cdef class StructArray(Array): + """ + Concrete class for Arrow arrays of a struct data type. + """ + + def field(self, index): + """ + Retrieves the child array belonging to field. + + Parameters + ---------- + index : Union[int, str] + Index / position or name of the field. 
+ + Returns + ------- + result : Array + """ + cdef: + CStructArray* arr = self.ap + shared_ptr[CArray] child + + if isinstance(index, (bytes, str)): + child = arr.GetFieldByName(tobytes(index)) + if child == nullptr: + raise KeyError(index) + elif isinstance(index, int): + child = arr.field( + _normalize_index(index, self.ap.num_fields())) + else: + raise TypeError('Expected integer or string index') + + return pyarrow_wrap_array(child) + + def _flattened_field(self, index, MemoryPool memory_pool=None): + """ + Retrieves the child array belonging to field, + accounting for the parent array null bitmap. + + Parameters + ---------- + index : Union[int, str] + Index / position or name of the field. + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool. + + Returns + ------- + result : Array + """ + cdef: + CStructArray* arr = self.ap + shared_ptr[CArray] child + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + if isinstance(index, (bytes, str)): + int_index = self.type.get_field_index(index) + if int_index < 0: + raise KeyError(index) + elif isinstance(index, int): + int_index = _normalize_index(index, self.ap.num_fields()) + else: + raise TypeError('Expected integer or string index') + + child = GetResultValue(arr.GetFlattenedField(int_index, pool)) + return pyarrow_wrap_array(child) + + def flatten(self, MemoryPool memory_pool=None): + """ + Return one individual array for each field in the struct. + + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool. 
+ + Returns + ------- + result : List[Array] + """ + cdef: + vector[shared_ptr[CArray]] arrays + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + CStructArray* sarr = self.ap + + with nogil: + arrays = GetResultValue(sarr.Flatten(pool)) + + return [pyarrow_wrap_array(arr) for arr in arrays] + + @staticmethod + def from_arrays(arrays, names=None, fields=None, mask=None, + memory_pool=None): + """ + Construct StructArray from collection of arrays representing + each field in the struct. + + Either field names or field instances must be passed. + + Parameters + ---------- + arrays : sequence of Array + names : List[str] (optional) + Field names for each struct child. + fields : List[Field] (optional) + Field instances for each struct child. + mask : pyarrow.Array[bool] (optional) + Indicate which values are null (True) or not null (False). + memory_pool : MemoryPool (optional) + For memory allocations, if required, otherwise uses default pool. + + Returns + ------- + result : StructArray + """ + cdef: + shared_ptr[CArray] c_array + shared_ptr[CBuffer] c_mask + vector[shared_ptr[CArray]] c_arrays + vector[c_string] c_names + vector[shared_ptr[CField]] c_fields + CResult[shared_ptr[CArray]] c_result + ssize_t num_arrays + ssize_t length + ssize_t i + Field py_field + DataType struct_type + + if names is None and fields is None: + raise ValueError('Must pass either names or fields') + if names is not None and fields is not None: + raise ValueError('Must pass either names or fields, not both') + + c_mask = c_mask_inverted_from_obj(mask, memory_pool) + + arrays = [asarray(x) for x in arrays] + for arr in arrays: + c_array = pyarrow_unwrap_array(arr) + if c_array == nullptr: + raise TypeError(f"Expected Array, got {arr.__class__}") + c_arrays.push_back(c_array) + if names is not None: + for name in names: + c_names.push_back(tobytes(name)) + else: + for item in fields: + if isinstance(item, tuple): + py_field = field(*item) + else: + py_field = item + 
c_fields.push_back(py_field.sp_field) + + if (c_arrays.size() == 0 and c_names.size() == 0 and + c_fields.size() == 0): + # The C++ side doesn't allow this + if mask is None: + return array([], struct([])) + else: + return array([{}] * len(mask), struct([]), mask=mask) + + if names is not None: + # XXX Cannot pass "nullptr" for a shared_ptr argument: + # https://github.com/cython/cython/issues/3020 + c_result = CStructArray.MakeFromFieldNames( + c_arrays, c_names, c_mask, -1, 0) + else: + c_result = CStructArray.MakeFromFields( + c_arrays, c_fields, c_mask, -1, 0) + cdef Array result = pyarrow_wrap_array(GetResultValue(c_result)) + result.validate() + return result + + def sort(self, order="ascending", by=None, **kwargs): + """ + Sort the StructArray + + Parameters + ---------- + order : str, default "ascending" + Which order to sort values in. + Accepted values are "ascending", "descending". + by : str or None, default None + If to sort the array by one of its fields + or by the whole array. + **kwargs : dict, optional + Additional sorting options. + As allowed by :class:`SortOptions` + + Returns + ------- + result : StructArray + """ + if by is not None: + tosort, sort_keys = self._flattened_field(by), [("", order)] + else: + tosort, sort_keys = self, [(field.name, order) for field in self.type] + indices = _pc().sort_indices( + tosort, options=_pc().SortOptions(sort_keys=sort_keys, **kwargs) + ) + return self.take(indices) + + +cdef class RunEndEncodedArray(Array): + """ + Concrete class for Arrow run-end encoded arrays. 
+ """ + + @staticmethod + def _from_arrays(type, allow_none_for_type, logical_length, run_ends, values, logical_offset): + cdef: + int64_t _logical_length + Array _run_ends + Array _values + int64_t _logical_offset + shared_ptr[CDataType] c_type + shared_ptr[CRunEndEncodedArray] ree_array + + _logical_length = logical_length + _logical_offset = logical_offset + + type = ensure_type(type, allow_none=allow_none_for_type) + if type is not None: + _run_ends = asarray(run_ends, type=type.run_end_type) + _values = asarray(values, type=type.value_type) + c_type = pyarrow_unwrap_data_type(type) + with nogil: + ree_array = GetResultValue(CRunEndEncodedArray.Make( + c_type, _logical_length, _run_ends.sp_array, _values.sp_array, _logical_offset)) + else: + _run_ends = asarray(run_ends) + _values = asarray(values) + with nogil: + ree_array = GetResultValue(CRunEndEncodedArray.MakeFromArrays( + _logical_length, _run_ends.sp_array, _values.sp_array, _logical_offset)) + cdef Array result = pyarrow_wrap_array(ree_array) + result.validate(full=True) + return result + + @staticmethod + def from_arrays(run_ends, values, type=None): + """ + Construct RunEndEncodedArray from run_ends and values arrays. + + Parameters + ---------- + run_ends : Array (int16, int32, or int64 type) + The run_ends array. + values : Array (any type) + The values array. + type : pyarrow.DataType, optional + The run_end_encoded(run_end_type, value_type) array type. + + Returns + ------- + RunEndEncodedArray + """ + logical_length = scalar(run_ends[-1]).as_py() if len(run_ends) > 0 else 0 + return RunEndEncodedArray._from_arrays(type, True, logical_length, + run_ends, values, 0) + + @staticmethod + def from_buffers(DataType type, length, buffers, null_count=-1, offset=0, + children=None): + """ + Construct a RunEndEncodedArray from all the parameters that make up an + Array. + + RunEndEncodedArrays do not have buffers, only children arrays, but this + implementation is needed to satisfy the Array interface. 
+ + Parameters + ---------- + type : DataType + The run_end_encoded(run_end_type, value_type) type. + length : int + The logical length of the run-end encoded array. Expected to match + the last value of the run_ends array (children[0]) minus the offset. + buffers : List[Buffer] + Empty List or [None]. + null_count : int, default -1 + The number of null entries in the array. Run-end encoded arrays + are specified to not have valid bits and null_count always equals 0. + offset : int, default 0 + The array's logical offset (in values, not in bytes) from the + start of each buffer. + children : List[Array] + Nested type children containing the run_ends and values arrays. + + Returns + ------- + RunEndEncodedArray + """ + children = children or [] + + if type.num_fields != len(children): + raise ValueError("RunEndEncodedType's expected number of children " + "({0}) did not match the passed number " + "({1}).".format(type.num_fields, len(children))) + + # buffers are validated as if we needed to pass them to C++, but + # _make_from_arrays will take care of filling in the expected + # buffers array containing a single NULL buffer on the C++ side + if len(buffers) == 0: + buffers = [None] + if buffers[0] is not None: + raise ValueError("RunEndEncodedType expects None as validity " + "bitmap, buffers[0] is not None") + if type.num_buffers != len(buffers): + raise ValueError("RunEndEncodedType's expected number of buffers " + "({0}) did not match the passed number " + "({1}).".format(type.num_buffers, len(buffers))) + + # null_count is also validated as if we needed it + if null_count != -1 and null_count != 0: + raise ValueError("RunEndEncodedType's expected null_count (0) " + "did not match passed number ({0})".format(null_count)) + + return RunEndEncodedArray._from_arrays(type, False, length, children[0], + children[1], offset) + + @property + def run_ends(self): + """ + An array holding the logical indexes of each run-end. 
+ + The physical offset to the array is applied. + """ + cdef CRunEndEncodedArray* ree_array = (self.ap) + return pyarrow_wrap_array(ree_array.run_ends()) + + @property + def values(self): + """ + An array holding the values of each run. + + The physical offset to the array is applied. + """ + cdef CRunEndEncodedArray* ree_array = (self.ap) + return pyarrow_wrap_array(ree_array.values()) + + def find_physical_offset(self): + """ + Find the physical offset of this REE array. + + This is the offset of the run that contains the value of the first + logical element of this array considering its offset. + + This function uses binary-search, so it has a O(log N) cost. + """ + cdef CRunEndEncodedArray* ree_array = (self.ap) + return ree_array.FindPhysicalOffset() + + def find_physical_length(self): + """ + Find the physical length of this REE array. + + The physical length of an REE is the number of physical values (and + run-ends) necessary to represent the logical range of values from offset + to length. + + This function uses binary-search, so it has a O(log N) cost. + """ + cdef CRunEndEncodedArray* ree_array = (self.ap) + return ree_array.FindPhysicalLength() + + +cdef class ExtensionArray(Array): + """ + Concrete class for Arrow extension arrays. + """ + + @property + def storage(self): + cdef: + CExtensionArray* ext_array = (self.ap) + + return pyarrow_wrap_array(ext_array.storage()) + + @staticmethod + def from_storage(BaseExtensionType typ, Array storage): + """ + Construct ExtensionArray from type and storage array. + + Parameters + ---------- + typ : DataType + The extension type for the result array. + storage : Array + The underlying storage for the result array. 
+ + Returns + ------- + ext_array : ExtensionArray + """ + cdef: + shared_ptr[CExtensionArray] ext_array + + if storage.type != typ.storage_type: + raise TypeError("Incompatible storage type {0} " + "for extension type {1}".format(storage.type, typ)) + + ext_array = make_shared[CExtensionArray](typ.sp_type, storage.sp_array) + cdef Array result = pyarrow_wrap_array( ext_array) + result.validate() + return result + + +cdef class FixedShapeTensorArray(ExtensionArray): + """ + Concrete class for fixed shape tensor extension arrays. + + Examples + -------- + Define the extension type for tensor array + + >>> import pyarrow as pa + >>> tensor_type = pa.fixed_shape_tensor(pa.int32(), [2, 2]) + + Create an extension array + + >>> arr = [[1, 2, 3, 4], [10, 20, 30, 40], [100, 200, 300, 400]] + >>> storage = pa.array(arr, pa.list_(pa.int32(), 4)) + >>> pa.ExtensionArray.from_storage(tensor_type, storage) + + [ + [ + 1, + 2, + 3, + 4 + ], + [ + 10, + 20, + 30, + 40 + ], + [ + 100, + 200, + 300, + 400 + ] + ] + """ + + def to_numpy_ndarray(self): + """ + Convert fixed shape tensor extension array to a multi-dimensional numpy.ndarray. + + The resulting ndarray will have (ndim + 1) dimensions. + The size of the first dimension will be the length of the fixed shape tensor array + and the rest of the dimensions will match the permuted shape of the fixed + shape tensor. + + The conversion is zero-copy. + + Returns + ------- + numpy.ndarray + Ndarray representing tensors in the fixed shape tensor array concatenated + along the first dimension. + """ + + return self.to_tensor().to_numpy() + + def to_tensor(self): + """ + Convert fixed shape tensor extension array to a pyarrow.Tensor. + + The resulting Tensor will have (ndim + 1) dimensions. + The size of the first dimension will be the length of the fixed shape tensor array + and the rest of the dimensions will match the permuted shape of the fixed + shape tensor. + + The conversion is zero-copy. 
+ + Returns + ------- + pyarrow.Tensor + Tensor representing tensors in the fixed shape tensor array concatenated + along the first dimension. + """ + + cdef: + CFixedShapeTensorArray* ext_array = (self.ap) + CResult[shared_ptr[CTensor]] ctensor + with nogil: + ctensor = ext_array.ToTensor() + return pyarrow_wrap_tensor(GetResultValue(ctensor)) + + @staticmethod + def from_numpy_ndarray(obj): + """ + Convert numpy tensors (ndarrays) to a fixed shape tensor extension array. + The first dimension of ndarray will become the length of the fixed + shape tensor array. + If input array data is not contiguous a copy will be made. + + Parameters + ---------- + obj : numpy.ndarray + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> arr = np.array( + ... [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]], + ... dtype=np.float32) + >>> pa.FixedShapeTensorArray.from_numpy_ndarray(arr) + + [ + [ + 1, + 2, + 3, + 4, + 5, + 6 + ], + [ + 1, + 2, + 3, + 4, + 5, + 6 + ] + ] + """ + + if len(obj.shape) < 2: + raise ValueError( + "Cannot convert 1D array or scalar to fixed shape tensor array") + if np.prod(obj.shape) == 0: + raise ValueError("Expected a non-empty ndarray") + + permutation = (-np.array(obj.strides)).argsort(kind='stable') + if permutation[0] != 0: + raise ValueError('First stride needs to be largest to ensure that ' + 'individual tensor data is contiguous in memory.') + + arrow_type = from_numpy_dtype(obj.dtype) + shape = np.take(obj.shape, permutation) + values = np.ravel(obj, order="K") + + return ExtensionArray.from_storage( + fixed_shape_tensor(arrow_type, shape[1:], permutation=permutation[1:] - 1), + FixedSizeListArray.from_arrays(values, shape[1:].prod()) + ) + + +cdef dict _array_classes = { + _Type_NA: NullArray, + _Type_BOOL: BooleanArray, + _Type_UINT8: UInt8Array, + _Type_UINT16: UInt16Array, + _Type_UINT32: UInt32Array, + _Type_UINT64: UInt64Array, + _Type_INT8: Int8Array, + _Type_INT16: Int16Array, + _Type_INT32: Int32Array, + 
_Type_INT64: Int64Array, + _Type_DATE32: Date32Array, + _Type_DATE64: Date64Array, + _Type_TIMESTAMP: TimestampArray, + _Type_TIME32: Time32Array, + _Type_TIME64: Time64Array, + _Type_DURATION: DurationArray, + _Type_INTERVAL_MONTH_DAY_NANO: MonthDayNanoIntervalArray, + _Type_HALF_FLOAT: HalfFloatArray, + _Type_FLOAT: FloatArray, + _Type_DOUBLE: DoubleArray, + _Type_LIST: ListArray, + _Type_LARGE_LIST: LargeListArray, + _Type_LIST_VIEW: ListViewArray, + _Type_LARGE_LIST_VIEW: LargeListViewArray, + _Type_MAP: MapArray, + _Type_FIXED_SIZE_LIST: FixedSizeListArray, + _Type_SPARSE_UNION: UnionArray, + _Type_DENSE_UNION: UnionArray, + _Type_BINARY: BinaryArray, + _Type_STRING: StringArray, + _Type_LARGE_BINARY: LargeBinaryArray, + _Type_LARGE_STRING: LargeStringArray, + _Type_BINARY_VIEW: BinaryViewArray, + _Type_STRING_VIEW: StringViewArray, + _Type_DICTIONARY: DictionaryArray, + _Type_FIXED_SIZE_BINARY: FixedSizeBinaryArray, + _Type_DECIMAL128: Decimal128Array, + _Type_DECIMAL256: Decimal256Array, + _Type_STRUCT: StructArray, + _Type_RUN_END_ENCODED: RunEndEncodedArray, + _Type_EXTENSION: ExtensionArray, +} + + +cdef inline shared_ptr[CBuffer] c_mask_inverted_from_obj(object mask, MemoryPool pool) except *: + """ + Convert mask array obj to c_mask while also inverting to signify 1 for valid and 0 for null + """ + cdef shared_ptr[CBuffer] c_mask + if mask is None: + c_mask = shared_ptr[CBuffer]() + elif isinstance(mask, Array): + if mask.type.id != Type_BOOL: + raise TypeError('Mask must be a pyarrow.Array of type boolean') + if mask.null_count != 0: + raise ValueError('Mask must not contain nulls') + inverted_mask = _pc().invert(mask, memory_pool=pool) + c_mask = pyarrow_unwrap_buffer(inverted_mask.buffers()[1]) + else: + raise TypeError('Mask must be a pyarrow.Array of type boolean') + return c_mask + + +cdef object get_array_class_from_type( + const shared_ptr[CDataType]& sp_data_type): + cdef CDataType* data_type = sp_data_type.get() + if data_type == NULL: + raise 
ValueError('Array data type was NULL') + + if data_type.id() == _Type_EXTENSION: + py_ext_data_type = pyarrow_wrap_data_type(sp_data_type) + return py_ext_data_type.__arrow_ext_class__() + else: + return _array_classes[data_type.id()] + + +cdef object get_values(object obj, bint* is_series): + if pandas_api.is_series(obj) or pandas_api.is_index(obj): + result = pandas_api.get_values(obj) + is_series[0] = True + elif isinstance(obj, np.ndarray): + result = obj + is_series[0] = False + else: + result = pandas_api.series(obj, copy=False).values + is_series[0] = False + + return result + + +def concat_arrays(arrays, MemoryPool memory_pool=None): + """ + Concatenate the given arrays. + + The contents of the input arrays are copied into the returned array. + + Raises + ------ + ArrowInvalid + If not all of the arrays have the same type. + + Parameters + ---------- + arrays : iterable of pyarrow.Array + Arrays to concatenate, must be identically typed. + memory_pool : MemoryPool, default None + For memory allocations. If None, the default pool is used. + + Examples + -------- + >>> import pyarrow as pa + >>> arr1 = pa.array([2, 4, 5, 100]) + >>> arr2 = pa.array([2, 4]) + >>> pa.concat_arrays([arr1, arr2]) + + [ + 2, + 4, + 5, + 100, + 2, + 4 + ] + + """ + cdef: + vector[shared_ptr[CArray]] c_arrays + shared_ptr[CArray] c_concatenated + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + for array in arrays: + if not isinstance(array, Array): + raise TypeError("Iterable should contain Array objects, " + "got {0} instead".format(type(array))) + c_arrays.push_back(pyarrow_unwrap_array(array)) + + with nogil: + c_concatenated = GetResultValue(Concatenate(c_arrays, pool)) + + return pyarrow_wrap_array(c_concatenated) + + +def _empty_array(DataType type): + """ + Create empty array of the given type. 
+ """ + if type.id == Type_DICTIONARY: + arr = DictionaryArray.from_arrays( + _empty_array(type.index_type), _empty_array(type.value_type), + ordered=type.ordered) + else: + arr = array([], type=type) + return arr diff --git a/parrot/lib/python3.10/site-packages/pyarrow/benchmark.pxi b/parrot/lib/python3.10/site-packages/pyarrow/benchmark.pxi new file mode 100644 index 0000000000000000000000000000000000000000..ab251017db78706c97c7dee8044636c55c80167e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/benchmark.pxi @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +def benchmark_PandasObjectIsNull(list obj): + Benchmark_PandasObjectIsNull(obj) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/compat.pxi b/parrot/lib/python3.10/site-packages/pyarrow/compat.pxi new file mode 100644 index 0000000000000000000000000000000000000000..8cf106d5609b50dd84c082dcfd36aee5b16fbee4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/compat.pxi @@ -0,0 +1,71 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +def encode_file_path(path): + if isinstance(path, str): + # POSIX systems can handle utf-8. UTF8 is converted to utf16-le in + # libarrow + encoded_path = path.encode('utf-8') + else: + encoded_path = path + + # Windows file system requires utf-16le for file names; Arrow C++ libraries + # will convert utf8 to utf16 + return encoded_path + + +# Starting with Python 3.7, dicts are guaranteed to be insertion-ordered. +ordered_dict = dict + + +try: + import cloudpickle as pickle +except ImportError: + import pickle + + +def tobytes(o): + """ + Encode a unicode or bytes string to bytes. + + Parameters + ---------- + o : str or bytes + Input string. + """ + if isinstance(o, str): + return o.encode('utf8') + else: + return o + + +def frombytes(o, *, safe=False): + """ + Decode the given bytestring to unicode. + + Parameters + ---------- + o : bytes-like + Input object. + safe : bool, default False + If true, raise on encoding errors. 
+ """ + if safe: + return o.decode('utf8', errors='replace') + else: + return o.decode('utf8') diff --git a/parrot/lib/python3.10/site-packages/pyarrow/compute.py b/parrot/lib/python3.10/site-packages/pyarrow/compute.py new file mode 100644 index 0000000000000000000000000000000000000000..83612f66d21e2f54bb1ac161de4d5db4463675ac --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/compute.py @@ -0,0 +1,732 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from pyarrow._compute import ( # noqa + Function, + FunctionOptions, + FunctionRegistry, + HashAggregateFunction, + HashAggregateKernel, + Kernel, + ScalarAggregateFunction, + ScalarAggregateKernel, + ScalarFunction, + ScalarKernel, + VectorFunction, + VectorKernel, + # Option classes + ArraySortOptions, + AssumeTimezoneOptions, + CastOptions, + CountOptions, + CumulativeOptions, + CumulativeSumOptions, + DayOfWeekOptions, + DictionaryEncodeOptions, + RunEndEncodeOptions, + ElementWiseAggregateOptions, + ExtractRegexOptions, + FilterOptions, + IndexOptions, + JoinOptions, + ListSliceOptions, + ListFlattenOptions, + MakeStructOptions, + MapLookupOptions, + MatchSubstringOptions, + ModeOptions, + NullOptions, + PadOptions, + PairwiseOptions, + PartitionNthOptions, + QuantileOptions, + RandomOptions, + RankOptions, + ReplaceSliceOptions, + ReplaceSubstringOptions, + RoundBinaryOptions, + RoundOptions, + RoundTemporalOptions, + RoundToMultipleOptions, + ScalarAggregateOptions, + SelectKOptions, + SetLookupOptions, + SliceOptions, + SortOptions, + SplitOptions, + SplitPatternOptions, + StrftimeOptions, + StrptimeOptions, + StructFieldOptions, + TakeOptions, + TDigestOptions, + TrimOptions, + Utf8NormalizeOptions, + VarianceOptions, + WeekOptions, + # Functions + call_function, + function_registry, + get_function, + list_functions, + # Udf + call_tabular_function, + register_scalar_function, + register_tabular_function, + register_aggregate_function, + register_vector_function, + UdfContext, + # Expressions + Expression, +) + +from collections import namedtuple +import inspect +from textwrap import dedent +import warnings + +import pyarrow as pa +from pyarrow import _compute_docstrings +from pyarrow.vendored import docscrape + + +def _get_arg_names(func): + return func._doc.arg_names + + +_OptionsClassDoc = namedtuple('_OptionsClassDoc', ('params',)) + + +def _scrape_options_class_doc(options_class): + if not options_class.__doc__: + return None + doc = 
docscrape.NumpyDocString(options_class.__doc__) + return _OptionsClassDoc(doc['Parameters']) + + +def _decorate_compute_function(wrapper, exposed_name, func, options_class): + # Decorate the given compute function wrapper with useful metadata + # and documentation. + cpp_doc = func._doc + + wrapper.__arrow_compute_function__ = dict( + name=func.name, + arity=func.arity, + options_class=cpp_doc.options_class, + options_required=cpp_doc.options_required) + wrapper.__name__ = exposed_name + wrapper.__qualname__ = exposed_name + + doc_pieces = [] + + # 1. One-line summary + summary = cpp_doc.summary + if not summary: + arg_str = "arguments" if func.arity > 1 else "argument" + summary = ("Call compute function {!r} with the given {}" + .format(func.name, arg_str)) + + doc_pieces.append(f"{summary}.\n\n") + + # 2. Multi-line description + description = cpp_doc.description + if description: + doc_pieces.append(f"{description}\n\n") + + doc_addition = _compute_docstrings.function_doc_additions.get(func.name) + + # 3. Parameter description + doc_pieces.append(dedent("""\ + Parameters + ---------- + """)) + + # 3a. Compute function parameters + arg_names = _get_arg_names(func) + for arg_name in arg_names: + if func.kind in ('vector', 'scalar_aggregate'): + arg_type = 'Array-like' + else: + arg_type = 'Array-like or scalar-like' + doc_pieces.append(f"{arg_name} : {arg_type}\n") + doc_pieces.append(" Argument to compute function.\n") + + # 3b. 
Compute function option values + if options_class is not None: + options_class_doc = _scrape_options_class_doc(options_class) + if options_class_doc: + for p in options_class_doc.params: + doc_pieces.append(f"{p.name} : {p.type}\n") + for s in p.desc: + doc_pieces.append(f" {s}\n") + else: + warnings.warn(f"Options class {options_class.__name__} " + f"does not have a docstring", RuntimeWarning) + options_sig = inspect.signature(options_class) + for p in options_sig.parameters.values(): + doc_pieces.append(dedent("""\ + {0} : optional + Parameter for {1} constructor. Either `options` + or `{0}` can be passed, but not both at the same time. + """.format(p.name, options_class.__name__))) + doc_pieces.append(dedent(f"""\ + options : pyarrow.compute.{options_class.__name__}, optional + Alternative way of passing options. + """)) + + doc_pieces.append(dedent("""\ + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + """)) + + # 4. Custom addition (e.g. 
examples) + if doc_addition is not None: + doc_pieces.append("\n{}\n".format(dedent(doc_addition).strip("\n"))) + + wrapper.__doc__ = "".join(doc_pieces) + return wrapper + + +def _get_options_class(func): + class_name = func._doc.options_class + if not class_name: + return None + try: + return globals()[class_name] + except KeyError: + warnings.warn("Python binding for {} not exposed" + .format(class_name), RuntimeWarning) + return None + + +def _handle_options(name, options_class, options, args, kwargs): + if args or kwargs: + if options is not None: + raise TypeError( + "Function {!r} called with both an 'options' argument " + "and additional arguments" + .format(name)) + return options_class(*args, **kwargs) + + if options is not None: + if isinstance(options, dict): + return options_class(**options) + elif isinstance(options, options_class): + return options + raise TypeError( + "Function {!r} expected a {} parameter, got {}" + .format(name, options_class, type(options))) + + return None + + +def _make_generic_wrapper(func_name, func, options_class, arity): + if options_class is None: + def wrapper(*args, memory_pool=None): + if arity is not Ellipsis and len(args) != arity: + raise TypeError( + f"{func_name} takes {arity} positional argument(s), " + f"but {len(args)} were given" + ) + if args and isinstance(args[0], Expression): + return Expression._call(func_name, list(args)) + return func.call(args, None, memory_pool) + else: + def wrapper(*args, memory_pool=None, options=None, **kwargs): + if arity is not Ellipsis: + if len(args) < arity: + raise TypeError( + f"{func_name} takes {arity} positional argument(s), " + f"but {len(args)} were given" + ) + option_args = args[arity:] + args = args[:arity] + else: + option_args = () + options = _handle_options(func_name, options_class, options, + option_args, kwargs) + if args and isinstance(args[0], Expression): + return Expression._call(func_name, list(args), options) + return func.call(args, options, memory_pool) 
+ return wrapper + + +def _make_signature(arg_names, var_arg_names, options_class): + from inspect import Parameter + params = [] + for name in arg_names: + params.append(Parameter(name, Parameter.POSITIONAL_ONLY)) + for name in var_arg_names: + params.append(Parameter(name, Parameter.VAR_POSITIONAL)) + if options_class is not None: + options_sig = inspect.signature(options_class) + for p in options_sig.parameters.values(): + assert p.kind in (Parameter.POSITIONAL_OR_KEYWORD, + Parameter.KEYWORD_ONLY) + if var_arg_names: + # Cannot have a positional argument after a *args + p = p.replace(kind=Parameter.KEYWORD_ONLY) + params.append(p) + params.append(Parameter("options", Parameter.KEYWORD_ONLY, + default=None)) + params.append(Parameter("memory_pool", Parameter.KEYWORD_ONLY, + default=None)) + return inspect.Signature(params) + + +def _wrap_function(name, func): + options_class = _get_options_class(func) + arg_names = _get_arg_names(func) + has_vararg = arg_names and arg_names[-1].startswith('*') + if has_vararg: + var_arg_names = [arg_names.pop().lstrip('*')] + else: + var_arg_names = [] + + wrapper = _make_generic_wrapper( + name, func, options_class, arity=func.arity) + wrapper.__signature__ = _make_signature(arg_names, var_arg_names, + options_class) + return _decorate_compute_function(wrapper, name, func, options_class) + + +def _make_global_functions(): + """ + Make global functions wrapping each compute function. + + Note that some of the automatically-generated wrappers may be overridden + by custom versions below. + """ + g = globals() + reg = function_registry() + + # Avoid clashes with Python keywords + rewrites = {'and': 'and_', + 'or': 'or_'} + + for cpp_name in reg.list_functions(): + name = rewrites.get(cpp_name, cpp_name) + func = reg.get_function(cpp_name) + if func.kind == "hash_aggregate": + # Hash aggregate functions are not callable, + # so let's not expose them at module level. 
+ continue + if func.kind == "scalar_aggregate" and func.arity == 0: + # Nullary scalar aggregate functions are not callable + # directly so let's not expose them at module level. + continue + assert name not in g, name + g[cpp_name] = g[name] = _wrap_function(name, func) + + +_make_global_functions() + + +def cast(arr, target_type=None, safe=None, options=None, memory_pool=None): + """ + Cast array values to another data type. Can also be invoked as an array + instance method. + + Parameters + ---------- + arr : Array-like + target_type : DataType or str + Type to cast to + safe : bool, default True + Check for overflows or other unsafe conversions + options : CastOptions, default None + Additional checks pass by CastOptions + memory_pool : MemoryPool, optional + memory pool to use for allocations during function execution. + + Examples + -------- + >>> from datetime import datetime + >>> import pyarrow as pa + >>> arr = pa.array([datetime(2010, 1, 1), datetime(2015, 1, 1)]) + >>> arr.type + TimestampType(timestamp[us]) + + You can use ``pyarrow.DataType`` objects to specify the target type: + + >>> cast(arr, pa.timestamp('ms')) + + [ + 2010-01-01 00:00:00.000, + 2015-01-01 00:00:00.000 + ] + + >>> cast(arr, pa.timestamp('ms')).type + TimestampType(timestamp[ms]) + + Alternatively, it is also supported to use the string aliases for these + types: + + >>> arr.cast('timestamp[ms]') + + [ + 2010-01-01 00:00:00.000, + 2015-01-01 00:00:00.000 + ] + >>> arr.cast('timestamp[ms]').type + TimestampType(timestamp[ms]) + + Returns + ------- + casted : Array + The cast result as a new Array + """ + safe_vars_passed = (safe is not None) or (target_type is not None) + + if safe_vars_passed and (options is not None): + raise ValueError("Must either pass values for 'target_type' and 'safe'" + " or pass a value for 'options'") + + if options is None: + target_type = pa.types.lib.ensure_type(target_type) + if safe is False: + options = CastOptions.unsafe(target_type) + else: + 
options = CastOptions.safe(target_type) + return call_function("cast", [arr], options, memory_pool) + + +def index(data, value, start=None, end=None, *, memory_pool=None): + """ + Find the index of the first occurrence of a given value. + + Parameters + ---------- + data : Array-like + value : Scalar-like object + The value to search for. + start : int, optional + end : int, optional + memory_pool : MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + + Returns + ------- + index : int + the index, or -1 if not found + """ + if start is not None: + if end is not None: + data = data.slice(start, end - start) + else: + data = data.slice(start) + elif end is not None: + data = data.slice(0, end) + + if not isinstance(value, pa.Scalar): + value = pa.scalar(value, type=data.type) + elif data.type != value.type: + value = pa.scalar(value.as_py(), type=data.type) + options = IndexOptions(value=value) + result = call_function('index', [data], options, memory_pool) + if start is not None and result.as_py() >= 0: + result = pa.scalar(result.as_py() + start, type=pa.int64()) + return result + + +def take(data, indices, *, boundscheck=True, memory_pool=None): + """ + Select values (or records) from array- or table-like data given integer + selection indices. + + The result will be of the same type(s) as the input, with elements taken + from the input array (or record batch / table fields) at the given + indices. If an index is null then the corresponding value in the output + will be null. + + Parameters + ---------- + data : Array, ChunkedArray, RecordBatch, or Table + indices : Array, ChunkedArray + Must be of integer type + boundscheck : boolean, default True + Whether to boundscheck the indices. If False and there is an out of + bounds index, will likely cause the process to crash. + memory_pool : MemoryPool, optional + If not passed, will allocate memory from the default memory pool. 
+ + Returns + ------- + result : depends on inputs + Selected values for the given indices + + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array(["a", "b", "c", None, "e", "f"]) + >>> indices = pa.array([0, None, 4, 3]) + >>> arr.take(indices) + + [ + "a", + null, + "e", + null + ] + """ + options = TakeOptions(boundscheck=boundscheck) + return call_function('take', [data, indices], options, memory_pool) + + +def fill_null(values, fill_value): + """Replace each null element in values with a corresponding + element from fill_value. + + If fill_value is scalar-like, then every null element in values + will be replaced with fill_value. If fill_value is array-like, + then the i-th element in values will be replaced with the i-th + element in fill_value. + + The fill_value's type must be the same as that of values, or it + must be able to be implicitly casted to the array's type. + + This is an alias for :func:`coalesce`. + + Parameters + ---------- + values : Array, ChunkedArray, or Scalar-like object + Each null element is replaced with the corresponding value + from fill_value. + fill_value : Array, ChunkedArray, or Scalar-like object + If not same type as values, will attempt to cast. 
+ + Returns + ------- + result : depends on inputs + Values with all null elements replaced + + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array([1, 2, None, 3], type=pa.int8()) + >>> fill_value = pa.scalar(5, type=pa.int8()) + >>> arr.fill_null(fill_value) + + [ + 1, + 2, + 5, + 3 + ] + >>> arr = pa.array([1, 2, None, 4, None]) + >>> arr.fill_null(pa.array([10, 20, 30, 40, 50])) + + [ + 1, + 2, + 30, + 4, + 50 + ] + """ + if not isinstance(fill_value, (pa.Array, pa.ChunkedArray, pa.Scalar)): + fill_value = pa.scalar(fill_value, type=values.type) + elif values.type != fill_value.type: + fill_value = pa.scalar(fill_value.as_py(), type=values.type) + + return call_function("coalesce", [values, fill_value]) + + +def top_k_unstable(values, k, sort_keys=None, *, memory_pool=None): + """ + Select the indices of the top-k ordered elements from array- or table-like + data. + + This is a specialization for :func:`select_k_unstable`. Output is not + guaranteed to be stable. + + Parameters + ---------- + values : Array, ChunkedArray, RecordBatch, or Table + Data to sort and get top indices from. + k : int + The number of `k` elements to keep. + sort_keys : List-like + Column key names to order by when input is table-like data. + memory_pool : MemoryPool, optional + If not passed, will allocate memory from the default memory pool. 
+ + Returns + ------- + result : Array + Indices of the top-k ordered elements + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> arr = pa.array(["a", "b", "c", None, "e", "f"]) + >>> pc.top_k_unstable(arr, k=3) + + [ + 5, + 4, + 2 + ] + """ + if sort_keys is None: + sort_keys = [] + if isinstance(values, (pa.Array, pa.ChunkedArray)): + sort_keys.append(("dummy", "descending")) + else: + sort_keys = map(lambda key_name: (key_name, "descending"), sort_keys) + options = SelectKOptions(k, sort_keys) + return call_function("select_k_unstable", [values], options, memory_pool) + + +def bottom_k_unstable(values, k, sort_keys=None, *, memory_pool=None): + """ + Select the indices of the bottom-k ordered elements from + array- or table-like data. + + This is a specialization for :func:`select_k_unstable`. Output is not + guaranteed to be stable. + + Parameters + ---------- + values : Array, ChunkedArray, RecordBatch, or Table + Data to sort and get bottom indices from. + k : int + The number of `k` elements to keep. + sort_keys : List-like + Column key names to order by when input is table-like data. + memory_pool : MemoryPool, optional + If not passed, will allocate memory from the default memory pool. 
+ + Returns + ------- + result : Array of indices + Indices of the bottom-k ordered elements + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> arr = pa.array(["a", "b", "c", None, "e", "f"]) + >>> pc.bottom_k_unstable(arr, k=3) + + [ + 0, + 1, + 2 + ] + """ + if sort_keys is None: + sort_keys = [] + if isinstance(values, (pa.Array, pa.ChunkedArray)): + sort_keys.append(("dummy", "ascending")) + else: + sort_keys = map(lambda key_name: (key_name, "ascending"), sort_keys) + options = SelectKOptions(k, sort_keys) + return call_function("select_k_unstable", [values], options, memory_pool) + + +def random(n, *, initializer='system', options=None, memory_pool=None): + """ + Generate numbers in the range [0, 1). + + Generated values are uniformly-distributed, double-precision + in range [0, 1). Algorithm and seed can be changed via RandomOptions. + + Parameters + ---------- + n : int + Number of values to generate, must be greater than or equal to 0 + initializer : int or str + How to initialize the underlying random generator. + If an integer is given, it is used as a seed. + If "system" is given, the random generator is initialized with + a system-specific source of (hopefully true) randomness. + Other values are invalid. + options : pyarrow.compute.RandomOptions, optional + Alternative way of passing options. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + """ + options = RandomOptions(initializer=initializer) + return call_function("random", [], options, memory_pool, length=n) + + +def field(*name_or_index): + """Reference a column of the dataset. + + Stores only the field's name. Type and other information is known only when + the expression is bound to a dataset having an explicit scheme. + + Nested references are allowed by passing multiple names or a tuple of + names. 
For example ``('foo', 'bar')`` references the field named "bar" + inside the field named "foo". + + Parameters + ---------- + *name_or_index : string, multiple strings, tuple or int + The name or index of the (possibly nested) field the expression + references to. + + Returns + ------- + field_expr : Expression + Reference to the given field + + Examples + -------- + >>> import pyarrow.compute as pc + >>> pc.field("a") + + >>> pc.field(1) + + >>> pc.field(("a", "b")) + >> pc.field("a", "b") + the path + "/2009/11" would be parsed to ("year"_ == 2009 and "month"_ == 11). + - "HivePartitioning": a scheme for "/$key=$value/" nested directories as + found in Apache Hive. This is a multi-level, directory based partitioning + scheme. Data is partitioned by static values of a particular column in + the schema. Partition keys are represented in the form $key=$value in + directory names. Field order is ignored, as are missing or unrecognized + field names. + For example, given schema, a possible + path would be "/year=2009/month=11/day=15" (but the field order does not + need to match). + - "FilenamePartitioning": this scheme expects the partitions will have + filenames containing the field values separated by "_". + For example, given schema, a possible + partition filename "2009_11_part-0.parquet" would be parsed + to ("year"_ == 2009 and "month"_ == 11). + + Parameters + ---------- + schema : pyarrow.Schema, default None + The schema that describes the partitions present in the file path. + If not specified, and `field_names` and/or `flavor` are specified, + the schema will be inferred from the file path (and a + PartitioningFactory is returned). + field_names : list of str, default None + A list of strings (field names). If specified, the schema's types are + inferred from the file paths (only valid for DirectoryPartitioning). + flavor : str, default None + The default is DirectoryPartitioning. 
Specify ``flavor="hive"`` for + a HivePartitioning, and ``flavor="filename"`` for a + FilenamePartitioning. + dictionaries : dict[str, Array] + If the type of any field of `schema` is a dictionary type, the + corresponding entry of `dictionaries` must be an array containing + every value which may be taken by the corresponding column or an + error will be raised in parsing. Alternatively, pass `infer` to have + Arrow discover the dictionary values, in which case a + PartitioningFactory is returned. + + Returns + ------- + Partitioning or PartitioningFactory + The partitioning scheme + + Examples + -------- + + Specify the Schema for paths like "/2009/June": + + >>> import pyarrow as pa + >>> import pyarrow.dataset as ds + >>> part = ds.partitioning(pa.schema([("year", pa.int16()), + ... ("month", pa.string())])) + + or let the types be inferred by only specifying the field names: + + >>> part = ds.partitioning(field_names=["year", "month"]) + + For paths like "/2009/June", the year will be inferred as int32 while month + will be inferred as string. + + Specify a Schema with dictionary encoding, providing dictionary values: + + >>> part = ds.partitioning( + ... pa.schema([ + ... ("year", pa.int16()), + ... ("month", pa.dictionary(pa.int8(), pa.string())) + ... ]), + ... dictionaries={ + ... "month": pa.array(["January", "February", "March"]), + ... }) + + Alternatively, specify a Schema with dictionary encoding, but have Arrow + infer the dictionary values: + + >>> part = ds.partitioning( + ... pa.schema([ + ... ("year", pa.int16()), + ... ("month", pa.dictionary(pa.int8(), pa.string())) + ... ]), + ... dictionaries="infer") + + Create a Hive scheme for a path like "/year=2009/month=11": + + >>> part = ds.partitioning( + ... pa.schema([("year", pa.int16()), ("month", pa.int8())]), + ... 
flavor="hive") + + A Hive scheme can also be discovered from the directory structure (and + types will be inferred): + + >>> part = ds.partitioning(flavor="hive") + """ + if flavor is None: + # default flavor + if schema is not None: + if field_names is not None: + raise ValueError( + "Cannot specify both 'schema' and 'field_names'") + if dictionaries == 'infer': + return DirectoryPartitioning.discover(schema=schema) + return DirectoryPartitioning(schema, dictionaries) + elif field_names is not None: + if isinstance(field_names, list): + return DirectoryPartitioning.discover(field_names) + else: + raise ValueError( + "Expected list of field names, got {}".format( + type(field_names))) + else: + raise ValueError( + "For the default directory flavor, need to specify " + "a Schema or a list of field names") + if flavor == "filename": + if schema is not None: + if field_names is not None: + raise ValueError( + "Cannot specify both 'schema' and 'field_names'") + if dictionaries == 'infer': + return FilenamePartitioning.discover(schema=schema) + return FilenamePartitioning(schema, dictionaries) + elif field_names is not None: + if isinstance(field_names, list): + return FilenamePartitioning.discover(field_names) + else: + raise ValueError( + "Expected list of field names, got {}".format( + type(field_names))) + else: + raise ValueError( + "For the filename flavor, need to specify " + "a Schema or a list of field names") + elif flavor == 'hive': + if field_names is not None: + raise ValueError("Cannot specify 'field_names' for flavor 'hive'") + elif schema is not None: + if isinstance(schema, pa.Schema): + if dictionaries == 'infer': + return HivePartitioning.discover(schema=schema) + return HivePartitioning(schema, dictionaries) + else: + raise ValueError( + "Expected Schema for 'schema', got {}".format( + type(schema))) + else: + return HivePartitioning.discover() + else: + raise ValueError("Unsupported flavor") + + +def _ensure_partitioning(scheme): + """ + Validate 
input and return a Partitioning(Factory). + + It passes None through if no partitioning scheme is defined. + """ + if scheme is None: + pass + elif isinstance(scheme, str): + scheme = partitioning(flavor=scheme) + elif isinstance(scheme, list): + scheme = partitioning(field_names=scheme) + elif isinstance(scheme, (Partitioning, PartitioningFactory)): + pass + else: + raise ValueError("Expected Partitioning or PartitioningFactory, got {}" + .format(type(scheme))) + return scheme + + +def _ensure_format(obj): + if isinstance(obj, FileFormat): + return obj + elif obj == "parquet": + if not _parquet_available: + raise ValueError(_parquet_msg) + return ParquetFileFormat() + elif obj in {"ipc", "arrow"}: + return IpcFileFormat() + elif obj == "feather": + return FeatherFileFormat() + elif obj == "csv": + return CsvFileFormat() + elif obj == "orc": + if not _orc_available: + raise ValueError(_orc_msg) + return OrcFileFormat() + elif obj == "json": + return JsonFileFormat() + else: + raise ValueError("format '{}' is not supported".format(obj)) + + +def _ensure_multiple_sources(paths, filesystem=None): + """ + Treat a list of paths as files belonging to a single file system + + If the file system is local then also validates that all paths + are referencing existing *files* otherwise any non-file paths will be + silently skipped (for example on a remote filesystem). + + Parameters + ---------- + paths : list of path-like + Note that URIs are not allowed. + filesystem : FileSystem or str, optional + If an URI is passed, then its path component will act as a prefix for + the file paths. + + Returns + ------- + (FileSystem, list of str) + File system object and a list of normalized paths. + + Raises + ------ + TypeError + If the passed filesystem has wrong type. + IOError + If the file system is local and a referenced path is not available or + not a file. 
+ """ + from pyarrow.fs import ( + LocalFileSystem, SubTreeFileSystem, _MockFileSystem, FileType, + _ensure_filesystem + ) + + if filesystem is None: + # fall back to local file system as the default + filesystem = LocalFileSystem() + else: + # construct a filesystem if it is a valid URI + filesystem = _ensure_filesystem(filesystem) + + is_local = ( + isinstance(filesystem, (LocalFileSystem, _MockFileSystem)) or + (isinstance(filesystem, SubTreeFileSystem) and + isinstance(filesystem.base_fs, LocalFileSystem)) + ) + + # allow normalizing irregular paths such as Windows local paths + paths = [filesystem.normalize_path(_stringify_path(p)) for p in paths] + + # validate that all of the paths are pointing to existing *files* + # possible improvement is to group the file_infos by type and raise for + # multiple paths per error category + if is_local: + for info in filesystem.get_file_info(paths): + file_type = info.type + if file_type == FileType.File: + continue + elif file_type == FileType.NotFound: + raise FileNotFoundError(info.path) + elif file_type == FileType.Directory: + raise IsADirectoryError( + 'Path {} points to a directory, but only file paths are ' + 'supported. To construct a nested or union dataset pass ' + 'a list of dataset objects instead.'.format(info.path) + ) + else: + raise IOError( + 'Path {} exists but its type is unknown (could be a ' + 'special file such as a Unix socket or character device, ' + 'or Windows NUL / CON / ...)'.format(info.path) + ) + + return filesystem, paths + + +def _ensure_single_source(path, filesystem=None): + """ + Treat path as either a recursively traversable directory or a single file. + + Parameters + ---------- + path : path-like + filesystem : FileSystem or str, optional + If an URI is passed, then its path component will act as a prefix for + the file paths. 
+ + Returns + ------- + (FileSystem, list of str or fs.Selector) + File system object and either a single item list pointing to a file or + an fs.Selector object pointing to a directory. + + Raises + ------ + TypeError + If the passed filesystem has wrong type. + FileNotFoundError + If the referenced file or directory doesn't exist. + """ + from pyarrow.fs import FileType, FileSelector, _resolve_filesystem_and_path + + # at this point we already checked that `path` is a path-like + filesystem, path = _resolve_filesystem_and_path(path, filesystem) + + # ensure that the path is normalized before passing to dataset discovery + path = filesystem.normalize_path(path) + + # retrieve the file descriptor + file_info = filesystem.get_file_info(path) + + # depending on the path type either return with a recursive + # directory selector or as a list containing a single file + if file_info.type == FileType.Directory: + paths_or_selector = FileSelector(path, recursive=True) + elif file_info.type == FileType.File: + paths_or_selector = [path] + else: + raise FileNotFoundError(path) + + return filesystem, paths_or_selector + + +def _filesystem_dataset(source, schema=None, filesystem=None, + partitioning=None, format=None, + partition_base_dir=None, exclude_invalid_files=None, + selector_ignore_prefixes=None): + """ + Create a FileSystemDataset which can be used to build a Dataset. + + Parameters are documented in the dataset function. 
+ + Returns + ------- + FileSystemDataset + """ + from pyarrow.fs import LocalFileSystem, _ensure_filesystem, FileInfo + + format = _ensure_format(format or 'parquet') + partitioning = _ensure_partitioning(partitioning) + + if isinstance(source, (list, tuple)): + if source and isinstance(source[0], FileInfo): + if filesystem is None: + # fall back to local file system as the default + fs = LocalFileSystem() + else: + # construct a filesystem if it is a valid URI + fs = _ensure_filesystem(filesystem) + paths_or_selector = source + else: + fs, paths_or_selector = _ensure_multiple_sources(source, filesystem) + else: + fs, paths_or_selector = _ensure_single_source(source, filesystem) + + options = FileSystemFactoryOptions( + partitioning=partitioning, + partition_base_dir=partition_base_dir, + exclude_invalid_files=exclude_invalid_files, + selector_ignore_prefixes=selector_ignore_prefixes + ) + factory = FileSystemDatasetFactory(fs, paths_or_selector, format, options) + + return factory.finish(schema) + + +def _in_memory_dataset(source, schema=None, **kwargs): + if any(v is not None for v in kwargs.values()): + raise ValueError( + "For in-memory datasets, you cannot pass any additional arguments") + return InMemoryDataset(source, schema) + + +def _union_dataset(children, schema=None, **kwargs): + if any(v is not None for v in kwargs.values()): + raise ValueError( + "When passing a list of Datasets, you cannot pass any additional " + "arguments" + ) + + if schema is None: + # unify the children datasets' schemas + schema = pa.unify_schemas([child.schema for child in children]) + + for child in children: + if getattr(child, "_scan_options", None): + raise ValueError( + "Creating an UnionDataset from filtered or projected Datasets " + "is currently not supported. Union the unfiltered datasets " + "and apply the filter to the resulting union." 
+ ) + + # create datasets with the requested schema + children = [child.replace_schema(schema) for child in children] + + return UnionDataset(schema, children) + + +def parquet_dataset(metadata_path, schema=None, filesystem=None, format=None, + partitioning=None, partition_base_dir=None): + """ + Create a FileSystemDataset from a `_metadata` file created via + `pyarrow.parquet.write_metadata`. + + Parameters + ---------- + metadata_path : path, + Path pointing to a single file parquet metadata file + schema : Schema, optional + Optionally provide the Schema for the Dataset, in which case it will + not be inferred from the source. + filesystem : FileSystem or URI string, default None + If a single path is given as source and filesystem is None, then the + filesystem will be inferred from the path. + If an URI string is passed, then a filesystem object is constructed + using the URI's optional path component as a directory prefix. See the + examples below. + Note that the URIs on Windows must follow 'file:///C:...' or + 'file:/C:...' patterns. + format : ParquetFileFormat + An instance of a ParquetFileFormat if special options needs to be + passed. + partitioning : Partitioning, PartitioningFactory, str, list of str + The partitioning scheme specified with the ``partitioning()`` + function. A flavor string can be used as shortcut, and with a list of + field names a DirectoryPartitioning will be inferred. + partition_base_dir : str, optional + For the purposes of applying the partitioning, paths will be + stripped of the partition_base_dir. Files not matching the + partition_base_dir prefix will be skipped for partitioning discovery. + The ignored files will still be part of the Dataset, but will not + have partition information. 
+ + Returns + ------- + FileSystemDataset + The dataset corresponding to the given metadata + """ + from pyarrow.fs import LocalFileSystem, _ensure_filesystem + + if format is None: + format = ParquetFileFormat() + elif not isinstance(format, ParquetFileFormat): + raise ValueError("format argument must be a ParquetFileFormat") + + if filesystem is None: + filesystem = LocalFileSystem() + else: + filesystem = _ensure_filesystem(filesystem) + + metadata_path = filesystem.normalize_path(_stringify_path(metadata_path)) + options = ParquetFactoryOptions( + partition_base_dir=partition_base_dir, + partitioning=_ensure_partitioning(partitioning) + ) + + factory = ParquetDatasetFactory( + metadata_path, filesystem, format, options=options) + return factory.finish(schema) + + +def dataset(source, schema=None, format=None, filesystem=None, + partitioning=None, partition_base_dir=None, + exclude_invalid_files=None, ignore_prefixes=None): + """ + Open a dataset. + + Datasets provides functionality to efficiently work with tabular, + potentially larger than memory and multi-file dataset. + + - A unified interface for different sources, like Parquet and Feather + - Discovery of sources (crawling directories, handle directory-based + partitioned datasets, basic schema normalization) + - Optimized reading with predicate pushdown (filtering rows), projection + (selecting columns), parallel reading or fine-grained managing of tasks. + + Note that this is the high-level API, to have more control over the dataset + construction use the low-level API classes (FileSystemDataset, + FilesystemDatasetFactory, etc.) + + Parameters + ---------- + source : path, list of paths, dataset, list of datasets, (list of) \ +RecordBatch or Table, iterable of RecordBatch, RecordBatchReader, or URI + Path pointing to a single file: + Open a FileSystemDataset from a single file. + Path pointing to a directory: + The directory gets discovered recursively according to a + partitioning scheme if given. 
+ List of file paths: + Create a FileSystemDataset from explicitly given files. The files + must be located on the same filesystem given by the filesystem + parameter. + Note that in contrary of construction from a single file, passing + URIs as paths is not allowed. + List of datasets: + A nested UnionDataset gets constructed, it allows arbitrary + composition of other datasets. + Note that additional keyword arguments are not allowed. + (List of) batches or tables, iterable of batches, or RecordBatchReader: + Create an InMemoryDataset. If an iterable or empty list is given, + a schema must also be given. If an iterable or RecordBatchReader + is given, the resulting dataset can only be scanned once; further + attempts will raise an error. + schema : Schema, optional + Optionally provide the Schema for the Dataset, in which case it will + not be inferred from the source. + format : FileFormat or str + Currently "parquet", "ipc"/"arrow"/"feather", "csv", "json", and "orc" are + supported. For Feather, only version 2 files are supported. + filesystem : FileSystem or URI string, default None + If a single path is given as source and filesystem is None, then the + filesystem will be inferred from the path. + If an URI string is passed, then a filesystem object is constructed + using the URI's optional path component as a directory prefix. See the + examples below. + Note that the URIs on Windows must follow 'file:///C:...' or + 'file:/C:...' patterns. + partitioning : Partitioning, PartitioningFactory, str, list of str + The partitioning scheme specified with the ``partitioning()`` + function. A flavor string can be used as shortcut, and with a list of + field names a DirectoryPartitioning will be inferred. + partition_base_dir : str, optional + For the purposes of applying the partitioning, paths will be + stripped of the partition_base_dir. Files not matching the + partition_base_dir prefix will be skipped for partitioning discovery. 
+ The ignored files will still be part of the Dataset, but will not + have partition information. + exclude_invalid_files : bool, optional (default True) + If True, invalid files will be excluded (file format specific check). + This will incur IO for each files in a serial and single threaded + fashion. Disabling this feature will skip the IO, but unsupported + files may be present in the Dataset (resulting in an error at scan + time). + ignore_prefixes : list, optional + Files matching any of these prefixes will be ignored by the + discovery process. This is matched to the basename of a path. + By default this is ['.', '_']. + Note that discovery happens only if a directory is passed as source. + + Returns + ------- + dataset : Dataset + Either a FileSystemDataset or a UnionDataset depending on the source + parameter. + + Examples + -------- + Creating an example Table: + + >>> import pyarrow as pa + >>> import pyarrow.parquet as pq + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> pq.write_table(table, "file.parquet") + + Opening a single file: + + >>> import pyarrow.dataset as ds + >>> dataset = ds.dataset("file.parquet", format="parquet") + >>> dataset.to_table() + pyarrow.Table + year: int64 + n_legs: int64 + animal: string + ---- + year: [[2020,2022,2021,2022,2019,2021]] + n_legs: [[2,2,4,4,5,100]] + animal: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]] + + Opening a single file with an explicit schema: + + >>> myschema = pa.schema([ + ... ('n_legs', pa.int64()), + ... 
('animal', pa.string())]) + >>> dataset = ds.dataset("file.parquet", schema=myschema, format="parquet") + >>> dataset.to_table() + pyarrow.Table + n_legs: int64 + animal: string + ---- + n_legs: [[2,2,4,4,5,100]] + animal: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]] + + Opening a dataset for a single directory: + + >>> ds.write_dataset(table, "partitioned_dataset", format="parquet", + ... partitioning=['year']) + >>> dataset = ds.dataset("partitioned_dataset", format="parquet") + >>> dataset.to_table() + pyarrow.Table + n_legs: int64 + animal: string + ---- + n_legs: [[5],[2],[4,100],[2,4]] + animal: [["Brittle stars"],["Flamingo"],...["Parrot","Horse"]] + + For a single directory from a S3 bucket: + + >>> ds.dataset("s3://mybucket/nyc-taxi/", + ... format="parquet") # doctest: +SKIP + + Opening a dataset from a list of relatives local paths: + + >>> dataset = ds.dataset([ + ... "partitioned_dataset/2019/part-0.parquet", + ... "partitioned_dataset/2020/part-0.parquet", + ... "partitioned_dataset/2021/part-0.parquet", + ... ], format='parquet') + >>> dataset.to_table() + pyarrow.Table + n_legs: int64 + animal: string + ---- + n_legs: [[5],[2],[4,100]] + animal: [["Brittle stars"],["Flamingo"],["Dog","Centipede"]] + + With filesystem provided: + + >>> paths = [ + ... 'part0/data.parquet', + ... 'part1/data.parquet', + ... 'part3/data.parquet', + ... ] + >>> ds.dataset(paths, filesystem='file:///directory/prefix, + ... format='parquet') # doctest: +SKIP + + Which is equivalent with: + + >>> fs = SubTreeFileSystem("/directory/prefix", + ... LocalFileSystem()) # doctest: +SKIP + >>> ds.dataset(paths, filesystem=fs, format='parquet') # doctest: +SKIP + + With a remote filesystem URI: + + >>> paths = [ + ... 'nested/directory/part0/data.parquet', + ... 'nested/directory/part1/data.parquet', + ... 'nested/directory/part3/data.parquet', + ... ] + >>> ds.dataset(paths, filesystem='s3://bucket/', + ... 
def _ensure_write_partitioning(part, schema, flavor):
    """
    Coerce the user-supplied ``partitioning`` argument of ``write_dataset``
    into a concrete ``Partitioning`` object.

    Parameters
    ----------
    part : Partitioning, PartitioningFactory, list/tuple of str, or None
        The partitioning specification given by the caller.
    schema : Schema
        Schema of the data being written; used to resolve field names
        when *part* is a list of column names.
    flavor : str or None
        Partitioning flavor forwarded to ``partitioning()``; only valid
        when *part* is not already a ``Partitioning`` instance.

    Returns
    -------
    Partitioning

    Raises
    ------
    ValueError
        If *part* is a schema-less factory, if a flavor is combined with a
        ready-made ``Partitioning``, or if *part* has an unsupported type.
    """
    # A factory has no schema bound to it, so it cannot drive a write.
    if isinstance(part, PartitioningFactory):
        raise ValueError("A PartitioningFactory cannot be used. "
                         "Did you call the partitioning function "
                         "without supplying a schema?")

    if isinstance(part, Partitioning):
        # Already concrete; a flavor would have nothing to apply to.
        if flavor:
            raise ValueError(
                "Providing a partitioning_flavor with "
                "a Partitioning object is not supported"
            )
        return part

    if part is None:
        # No partitioning requested: use an empty schema.
        part = partitioning(pa.schema([]), flavor=flavor)
    elif isinstance(part, (tuple, list)):
        # Field names were given instead of a partitioning object;
        # build one from the matching fields of the data schema.
        selected = pa.schema([schema.field(name) for name in part])
        part = partitioning(schema=selected, flavor=flavor)

    if not isinstance(part, Partitioning):
        raise ValueError(
            "partitioning must be a Partitioning object or "
            "a list of column names"
        )
    return part
" + "Did you call the partitioning function " + "without supplying a schema?") + + if isinstance(part, Partitioning) and flavor: + raise ValueError( + "Providing a partitioning_flavor with " + "a Partitioning object is not supported" + ) + elif isinstance(part, (tuple, list)): + # Name of fields were provided instead of a partitioning object. + # Create a partitioning factory with those field names. + part = partitioning( + schema=pa.schema([schema.field(f) for f in part]), + flavor=flavor + ) + elif part is None: + part = partitioning(pa.schema([]), flavor=flavor) + + if not isinstance(part, Partitioning): + raise ValueError( + "partitioning must be a Partitioning object or " + "a list of column names" + ) + + return part + + +def write_dataset(data, base_dir, *, basename_template=None, format=None, + partitioning=None, partitioning_flavor=None, schema=None, + filesystem=None, file_options=None, use_threads=True, + max_partitions=None, max_open_files=None, + max_rows_per_file=None, min_rows_per_group=None, + max_rows_per_group=None, file_visitor=None, + existing_data_behavior='error', create_dir=True): + """ + Write a dataset to a given format and partitioning. + + Parameters + ---------- + data : Dataset, Table/RecordBatch, RecordBatchReader, list of \ +Table/RecordBatch, or iterable of RecordBatch + The data to write. This can be a Dataset instance or + in-memory Arrow data. If an iterable is given, the schema must + also be given. + base_dir : str + The root directory where to write the dataset. + basename_template : str, optional + A template string used to generate basenames of written data files. + The token '{i}' will be replaced with an automatically incremented + integer. If not specified, it defaults to + "part-{i}." + format.default_extname + format : FileFormat or str + The format in which to write the dataset. Currently supported: + "parquet", "ipc"/"arrow"/"feather", and "csv". 
If a FileSystemDataset + is being written and `format` is not specified, it defaults to the + same format as the specified FileSystemDataset. When writing a + Table or RecordBatch, this keyword is required. + partitioning : Partitioning or list[str], optional + The partitioning scheme specified with the ``partitioning()`` + function or a list of field names. When providing a list of + field names, you can use ``partitioning_flavor`` to drive which + partitioning type should be used. + partitioning_flavor : str, optional + One of the partitioning flavors supported by + ``pyarrow.dataset.partitioning``. If omitted will use the + default of ``partitioning()`` which is directory partitioning. + schema : Schema, optional + filesystem : FileSystem, optional + file_options : pyarrow.dataset.FileWriteOptions, optional + FileFormat specific write options, created using the + ``FileFormat.make_write_options()`` function. + use_threads : bool, default True + Write files in parallel. If enabled, then maximum parallelism will be + used determined by the number of available CPU cores. + max_partitions : int, default 1024 + Maximum number of partitions any batch may be written into. + max_open_files : int, default 1024 + If greater than 0 then this will limit the maximum number of + files that can be left open. If an attempt is made to open + too many files then the least recently used file will be closed. + If this setting is set too low you may end up fragmenting your + data into many small files. + max_rows_per_file : int, default 0 + Maximum number of rows per file. If greater than 0 then this will + limit how many rows are placed in any single file. Otherwise there + will be no limit and one file will be created in each output + directory unless files need to be closed to respect max_open_files + min_rows_per_group : int, default 0 + Minimum number of rows per group. 
When the value is greater than 0, + the dataset writer will batch incoming data and only write the row + groups to the disk when sufficient rows have accumulated. + max_rows_per_group : int, default 1024 * 1024 + Maximum number of rows per group. If the value is greater than 0, + then the dataset writer may split up large incoming batches into + multiple row groups. If this value is set, then min_rows_per_group + should also be set. Otherwise it could end up with very small row + groups. + file_visitor : function + If set, this function will be called with a WrittenFile instance + for each file created during the call. This object will have both + a path attribute and a metadata attribute. + + The path attribute will be a string containing the path to + the created file. + + The metadata attribute will be the parquet metadata of the file. + This metadata will have the file path attribute set and can be used + to build a _metadata file. The metadata attribute will be None if + the format is not parquet. + + Example visitor which simple collects the filenames created:: + + visited_paths = [] + + def file_visitor(written_file): + visited_paths.append(written_file.path) + existing_data_behavior : 'error' | 'overwrite_or_ignore' | \ +'delete_matching' + Controls how the dataset will handle data that already exists in + the destination. The default behavior ('error') is to raise an error + if any data exists in the destination. + + 'overwrite_or_ignore' will ignore any existing data and will + overwrite files with the same name as an output file. Other + existing files will be ignored. This behavior, in combination + with a unique basename_template for each write, will allow for + an append workflow. + + 'delete_matching' is useful when you are writing a partitioned + dataset. The first time each partition directory is encountered + the entire directory will be deleted. This allows you to overwrite + old partitions completely. 
+ create_dir : bool, default True + If False, directories will not be created. This can be useful for + filesystems that do not require directories. + """ + from pyarrow.fs import _resolve_filesystem_and_path + + if isinstance(data, (list, tuple)): + schema = schema or data[0].schema + data = InMemoryDataset(data, schema=schema) + elif isinstance(data, (pa.RecordBatch, pa.Table)): + schema = schema or data.schema + data = InMemoryDataset(data, schema=schema) + elif isinstance(data, pa.ipc.RecordBatchReader) or _is_iterable(data): + data = Scanner.from_batches(data, schema=schema) + schema = None + elif not isinstance(data, (Dataset, Scanner)): + raise ValueError( + "Only Dataset, Scanner, Table/RecordBatch, RecordBatchReader, " + "a list of Tables/RecordBatches, or iterable of batches are " + "supported." + ) + + if format is None and isinstance(data, FileSystemDataset): + format = data.format + else: + format = _ensure_format(format) + + if file_options is None: + file_options = format.make_write_options() + + if format != file_options.format: + raise TypeError("Supplied FileWriteOptions have format {}, " + "which doesn't match supplied FileFormat {}".format( + format, file_options)) + + if basename_template is None: + basename_template = "part-{i}." + format.default_extname + + if max_partitions is None: + max_partitions = 1024 + + if max_open_files is None: + max_open_files = 1024 + + if max_rows_per_file is None: + max_rows_per_file = 0 + + if max_rows_per_group is None: + max_rows_per_group = 1 << 20 + + if min_rows_per_group is None: + min_rows_per_group = 0 + + # at this point data is a Scanner or a Dataset, anything else + # was converted to one of those two. So we can grab the schema + # to build the partitioning object from Dataset. 
+ if isinstance(data, Scanner): + partitioning_schema = data.projected_schema + else: + partitioning_schema = data.schema + partitioning = _ensure_write_partitioning(partitioning, + schema=partitioning_schema, + flavor=partitioning_flavor) + + filesystem, base_dir = _resolve_filesystem_and_path(base_dir, filesystem) + + if isinstance(data, Dataset): + scanner = data.scanner(use_threads=use_threads) + else: + # scanner was passed directly by the user, in which case a schema + # cannot be passed + if schema is not None: + raise ValueError("Cannot specify a schema when writing a Scanner") + scanner = data + + _filesystemdataset_write( + scanner, base_dir, basename_template, filesystem, partitioning, + file_options, max_partitions, file_visitor, existing_data_behavior, + max_open_files, max_rows_per_file, + min_rows_per_group, max_rows_per_group, create_dir + ) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/error.pxi b/parrot/lib/python3.10/site-packages/pyarrow/error.pxi new file mode 100644 index 0000000000000000000000000000000000000000..cbe25522e8d7ecbb8e0b7e5e024b9c22c56e6e9b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/error.pxi @@ -0,0 +1,274 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
class ArrowException(Exception):
    """Root of the pyarrow exception hierarchy."""
    pass


class ArrowInvalid(ValueError, ArrowException):
    """Raised for an invalid Arrow C++ Status (``Status::Invalid``)."""
    pass


class ArrowMemoryError(MemoryError, ArrowException):
    """Raised for an out-of-memory Arrow C++ Status."""
    pass


class ArrowKeyError(KeyError, ArrowException):
    """Raised for a key-error Arrow C++ Status."""

    def __str__(self):
        # Override KeyError.__str__, as it uses the repr() of the key
        return ArrowException.__str__(self)


class ArrowTypeError(TypeError, ArrowException):
    """Raised for a type-error Arrow C++ Status."""
    pass


class ArrowNotImplementedError(NotImplementedError, ArrowException):
    """Raised for a not-implemented Arrow C++ Status."""
    pass


class ArrowCapacityError(ArrowException):
    """Raised for a capacity-error Arrow C++ Status."""
    pass


class ArrowIndexError(IndexError, ArrowException):
    """Raised for an index-error Arrow C++ Status."""
    pass


class ArrowSerializationError(ArrowException):
    """Raised for a serialization-error Arrow C++ Status."""
    pass


class ArrowCancelled(ArrowException):
    """
    Raised when an operation is cancelled (e.g. by a signal).

    ``signum`` holds the interrupting signal number when one is known,
    otherwise ``None`` (see ``convert_status`` below).
    """

    def __init__(self, message, signum=None):
        super().__init__(message)
        # Signal number that triggered the cancellation, or None.
        self.signum = signum


# Compatibility alias
ArrowIOError = IOError
Detail: " + frombytes(detail.get().ToString(), + safe=True) + + if status.IsInvalid(): + return ArrowInvalid(message) + elif status.IsIOError(): + # Note: OSError constructor is + # OSError(message) + # or + # OSError(errno, message, filename=None) + # or (on Windows) + # OSError(errno, message, filename, winerror) + errno = ErrnoFromStatus(status) + winerror = WinErrorFromStatus(status) + if winerror != 0: + return IOError(errno, message, None, winerror) + elif errno != 0: + return IOError(errno, message) + else: + return IOError(message) + elif status.IsOutOfMemory(): + return ArrowMemoryError(message) + elif status.IsKeyError(): + return ArrowKeyError(message) + elif status.IsNotImplemented(): + return ArrowNotImplementedError(message) + elif status.IsTypeError(): + return ArrowTypeError(message) + elif status.IsCapacityError(): + return ArrowCapacityError(message) + elif status.IsIndexError(): + return ArrowIndexError(message) + elif status.IsSerializationError(): + return ArrowSerializationError(message) + elif status.IsCancelled(): + signum = SignalFromStatus(status) + if signum > 0: + return ArrowCancelled(message, signum) + else: + return ArrowCancelled(message) + else: + message = frombytes(status.ToString(), safe=True) + return ArrowException(message) + + +# These are API functions for C++ PyArrow +cdef api int pyarrow_internal_check_status(const CStatus& status) \ + except -1 nogil: + return check_status(status) + +cdef api object pyarrow_internal_convert_status(const CStatus& status): + return convert_status(status) + + +cdef class StopToken: + cdef void init(self, CStopToken stop_token): + self.stop_token = move(stop_token) + + +cdef c_bool signal_handlers_enabled = True + + +def enable_signal_handlers(c_bool enable): + """ + Enable or disable interruption of long-running operations. + + By default, certain long running operations will detect user + interruptions, such as by pressing Ctrl-C. 
This detection relies + on setting a signal handler for the duration of the long-running + operation, and may therefore interfere with other frameworks or + libraries (such as an event loop). + + Parameters + ---------- + enable : bool + Whether to enable user interruption by setting a temporary + signal handler. + """ + global signal_handlers_enabled + signal_handlers_enabled = enable + + +# For internal use + +# Whether we need a workaround for https://bugs.python.org/issue42248 +have_signal_refcycle = (sys.version_info < (3, 8, 10) or + (3, 9) <= sys.version_info < (3, 9, 5) or + sys.version_info[:2] == (3, 10)) + +cdef class SignalStopHandler: + cdef: + StopToken _stop_token + vector[int] _signals + c_bool _enabled + + def __cinit__(self): + self._enabled = False + + self._init_signals() + if have_signal_refcycle: + _break_traceback_cycle_from_frame(sys._getframe(0)) + + self._stop_token = StopToken() + + if not self._signals.empty(): + maybe_source = SetSignalStopSource() + if not maybe_source.ok(): + # See ARROW-11841 / ARROW-17173: in complex interaction + # scenarios (such as R calling into Python), SetSignalStopSource() + # may have already activated a signal-receiving StopSource. + # Just warn instead of erroring out. + maybe_source.status().Warn() + else: + self._stop_token.init(deref(maybe_source).token()) + # signals don't work on Emscripten without threads. + # and possibly other single-thread environments. 
+ self._enabled = is_threading_enabled() + + def _init_signals(self): + if (signal_handlers_enabled and + threading.current_thread() is threading.main_thread()): + self._signals = [ + sig for sig in (signal.SIGINT, signal.SIGTERM) + if signal.getsignal(sig) not in (signal.SIG_DFL, + signal.SIG_IGN, None)] + + def __enter__(self): + if self._enabled: + check_status(RegisterCancellingSignalHandler(self._signals)) + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + if self._enabled: + UnregisterCancellingSignalHandler() + if exc_value is None: + # Make sure we didn't lose a signal + try: + check_status(self._stop_token.stop_token.Poll()) + except ArrowCancelled as e: + exc_value = e + if isinstance(exc_value, ArrowCancelled): + if exc_value.signum: + # Re-emit the exact same signal. We restored the Python signal + # handler above, so it should receive it. + if os.name == 'nt': + SendSignal(exc_value.signum) + else: + SendSignalToThread(exc_value.signum, + threading.main_thread().ident) + else: + # Simulate Python receiving a SIGINT + # (see https://bugs.python.org/issue43356 for why we can't + # simulate the exact signal number) + PyErr_SetInterrupt() + # Maximize chances of the Python signal handler being executed now. + # Otherwise a potential KeyboardInterrupt might be missed by an + # immediately enclosing try/except block. + PyErr_CheckSignals() + # ArrowCancelled will be re-raised if PyErr_CheckSignals() + # returned successfully. 
class FeatherDataset:
    """
    Encapsulates details of reading a list of Feather files.

    Parameters
    ----------
    path_or_paths : List[str]
        A list of file names
    validate_schema : bool, default True
        Check that individual file schemas are all the same / compatible
    """

    def __init__(self, path_or_paths, validate_schema=True):
        self.paths = path_or_paths
        self.validate_schema = validate_schema

    def read_table(self, columns=None):
        """
        Read multiple feather files as a single pyarrow.Table

        Parameters
        ----------
        columns : List[str]
            Names of columns to read from the file

        Returns
        -------
        pyarrow.Table
            Content of the file as a table (of columns)
        """
        # NOTE: as a side effect this caches the per-file tables on
        # self._tables and the first file's schema on self.schema, which
        # validate_schemas() relies on.
        _fil = read_table(self.paths[0], columns=columns)
        self._tables = [_fil]
        self.schema = _fil.schema

        for path in self.paths[1:]:
            table = read_table(path, columns=columns)
            if self.validate_schema:
                self.validate_schemas(path, table)
            self._tables.append(table)
        return concat_tables(self._tables)

    def validate_schemas(self, piece, table):
        # Compare against the schema of the first file read by read_table().
        if not self.schema.equals(table.schema):
            raise ValueError('Schema in {!s} was different. \n'
                             '{!s}\n\nvs\n\n{!s}'
                             .format(piece, self.schema,
                                     table.schema))

    def read_pandas(self, columns=None, use_threads=True):
        """
        Read multiple Feather files as a single pandas DataFrame

        Parameters
        ----------
        columns : List[str]
            Names of columns to read from the file
        use_threads : bool, default True
            Use multiple threads when converting to pandas

        Returns
        -------
        pandas.DataFrame
            Content of the file as a pandas DataFrame (of columns)
        """
        return self.read_table(columns=columns).to_pandas(
            use_threads=use_threads)
def check_chunked_overflow(name, col):
    """
    Raise ``ValueError`` if *col* was chunked during conversion to Arrow.

    Feather V1 cannot store multi-chunk columns; a binary/string column
    with more than one chunk indicates it exceeded the 2GB capacity.

    Parameters
    ----------
    name : str
        Column name, used in the error message.
    col : ChunkedArray
        The column to check.
    """
    if col.num_chunks == 1:
        # Single chunk: always representable, nothing to do.
        return

    if col.type in (ext.binary(), ext.string()):
        message = ("Column '{}' exceeds 2GB maximum capacity of "
                   "a Feather binary column. This restriction may be "
                   "lifted in the future".format(name))
    else:
        # TODO(wesm): Not sure when else this might be reached
        message = ("Column '{}' of type {} was chunked on conversion "
                   "to Arrow and cannot be currently written to "
                   "Feather format".format(name, str(col.type)))
    raise ValueError(message)
def write_feather(df, dest, compression=None, compression_level=None,
                  chunksize=None, version=2):
    """
    Write a pandas.DataFrame to Feather format.

    Parameters
    ----------
    df : pandas.DataFrame or pyarrow.Table
        Data to write out as Feather format.
    dest : str
        Local destination path.
    compression : string, default None
        Can be one of {"zstd", "lz4", "uncompressed"}. The default of None uses
        LZ4 for V2 files if it is available, otherwise uncompressed.
    compression_level : int, default None
        Use a compression level particular to the chosen compressor. If None
        use the default compression level
    chunksize : int, default None
        For V2 files, the internal maximum size of Arrow RecordBatch chunks
        when writing the Arrow IPC file format. None means use the default,
        which is currently 64K
    version : int, default 2
        Feather file version. Version 2 is the current. Version 1 is the more
        limited legacy format

    Raises
    ------
    ValueError
        For an invalid version, or for options unsupported by the
        chosen version (compression/chunksize with V1, duplicate or
        chunked columns with V1, unknown compression codec with V2).
    """
    if _pandas_api.have_pandas:
        # Densify legacy SparseDataFrame inputs before conversion
        # (only present in old pandas versions).
        if (_pandas_api.has_sparse and
                isinstance(df, _pandas_api.pd.SparseDataFrame)):
            df = df.to_dense()

    if _pandas_api.is_data_frame(df):
        # Feather v1 creates a new column in the resultant Table to
        # store index information if index type is not RangeIndex

        if version == 1:
            preserve_index = False
        elif version == 2:
            preserve_index = None
        else:
            raise ValueError("Version value should either be 1 or 2")

        table = Table.from_pandas(df, preserve_index=preserve_index)

        if version == 1:
            # Version 1 does not support chunked columns; fail early with
            # a descriptive error instead of writing a corrupt file.
            for i, name in enumerate(table.schema.names):
                col = table[i]
                check_chunked_overflow(name, col)
    else:
        # Already an Arrow Table (or Table-like); use as-is.
        table = df

    if version == 1:
        # V1 restrictions: unique column names, no compression, no chunksize.
        if len(table.column_names) > len(set(table.column_names)):
            raise ValueError("cannot serialize duplicate column names")

        if compression is not None:
            raise ValueError("Feather V1 files do not support compression "
                             "option")

        if chunksize is not None:
            raise ValueError("Feather V1 files do not support chunksize "
                             "option")
    else:
        # V2: default to LZ4 when the codec is built in, otherwise validate
        # the requested codec.
        if compression is None and Codec.is_available('lz4_frame'):
            compression = 'lz4'
        elif (compression is not None and
              compression not in _FEATHER_SUPPORTED_CODECS):
            raise ValueError('compression="{}" not supported, must be '
                             'one of {}'.format(compression,
                                                _FEATHER_SUPPORTED_CODECS))

    try:
        _feather.write_feather(table, dest, compression=compression,
                               compression_level=compression_level,
                               chunksize=chunksize, version=version)
    except Exception:
        # Best-effort cleanup of a partially written file when dest is a
        # path (os.error is an alias of OSError); then re-raise.
        if isinstance(dest, str):
            try:
                os.remove(dest)
            except os.error:
                pass
        raise
Version 1 is the more + limited legacy format + """ + if _pandas_api.have_pandas: + if (_pandas_api.has_sparse and + isinstance(df, _pandas_api.pd.SparseDataFrame)): + df = df.to_dense() + + if _pandas_api.is_data_frame(df): + # Feather v1 creates a new column in the resultant Table to + # store index information if index type is not RangeIndex + + if version == 1: + preserve_index = False + elif version == 2: + preserve_index = None + else: + raise ValueError("Version value should either be 1 or 2") + + table = Table.from_pandas(df, preserve_index=preserve_index) + + if version == 1: + # Version 1 does not chunking + for i, name in enumerate(table.schema.names): + col = table[i] + check_chunked_overflow(name, col) + else: + table = df + + if version == 1: + if len(table.column_names) > len(set(table.column_names)): + raise ValueError("cannot serialize duplicate column names") + + if compression is not None: + raise ValueError("Feather V1 files do not support compression " + "option") + + if chunksize is not None: + raise ValueError("Feather V1 files do not support chunksize " + "option") + else: + if compression is None and Codec.is_available('lz4_frame'): + compression = 'lz4' + elif (compression is not None and + compression not in _FEATHER_SUPPORTED_CODECS): + raise ValueError('compression="{}" not supported, must be ' + 'one of {}'.format(compression, + _FEATHER_SUPPORTED_CODECS)) + + try: + _feather.write_feather(table, dest, compression=compression, + compression_level=compression_level, + chunksize=chunksize, version=version) + except Exception: + if isinstance(dest, str): + try: + os.remove(dest) + except os.error: + pass + raise + + +def read_feather(source, columns=None, use_threads=True, + memory_map=False, **kwargs): + """ + Read a pandas.DataFrame from Feather format. To read as pyarrow.Table use + feather.read_table. 
def read_table(source, columns=None, memory_map=False, use_threads=True):
    """
    Read a pyarrow.Table from Feather format

    Parameters
    ----------
    source : str file path, or file-like object
        You can use MemoryMappedFile as source, for explicitly use memory map.
    columns : sequence, optional
        Only read a specific set of columns. If not provided, all columns are
        read.
    memory_map : boolean, default False
        Use memory mapping when opening file on disk, when source is a str
    use_threads : bool, default True
        Whether to parallelize reading using multiple threads.

    Returns
    -------
    table : pyarrow.Table
        The contents of the Feather file as a pyarrow.Table
    """
    reader = _feather.FeatherReader(
        source, use_memory_map=memory_map, use_threads=use_threads)

    if columns is None:
        return reader.read()

    # Dispatch on the exact element type: all ints -> positional read,
    # all strs -> read by name.  (Exact type() comparison, on purpose.)
    selector_types = [type(entry) for entry in columns]
    if all(t == int for t in selector_types):
        table = reader.read_indices(columns)
    elif all(t == str for t in selector_types):
        table = reader.read_names(columns)
    else:
        column_type_names = [t.__name__ for t in selector_types]
        raise TypeError("Columns must be indices or names. "
                        "Got columns {} of types {}"
                        .format(columns, column_type_names))

    # Feather v1 already respects the column selection
    if reader.version < 3:
        return table
    # Feather v2 reads with sorted / deduplicated selection; if the
    # request was already sorted and unique, the table is as asked.
    if sorted(set(columns)) == columns:
        return table
    # Otherwise re-select to follow the exact order / selection of names.
    return table.select(columns)
+ + Returns + ------- + table : pyarrow.Table + The contents of the Feather file as a pyarrow.Table + """ + reader = _feather.FeatherReader( + source, use_memory_map=memory_map, use_threads=use_threads) + + if columns is None: + return reader.read() + + column_types = [type(column) for column in columns] + if all(map(lambda t: t == int, column_types)): + table = reader.read_indices(columns) + elif all(map(lambda t: t == str, column_types)): + table = reader.read_names(columns) + else: + column_type_names = [t.__name__ for t in column_types] + raise TypeError("Columns must be indices or names. " + "Got columns {} of types {}" + .format(columns, column_type_names)) + + # Feather v1 already respects the column selection + if reader.version < 3: + return table + # Feather v2 reads with sorted / deduplicated selection + elif sorted(set(columns)) == columns: + return table + else: + # follow exact order / selection of names + return table.select(columns) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/fs.py b/parrot/lib/python3.10/site-packages/pyarrow/fs.py new file mode 100644 index 0000000000000000000000000000000000000000..abdd1a995751aa32aeba2a84176747e22bc64744 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/fs.py @@ -0,0 +1,431 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +FileSystem abstraction to interact with various local and remote filesystems. +""" + +from pyarrow.util import _is_path_like, _stringify_path + +from pyarrow._fs import ( # noqa + FileSelector, + FileType, + FileInfo, + FileSystem, + LocalFileSystem, + SubTreeFileSystem, + _MockFileSystem, + FileSystemHandler, + PyFileSystem, + _copy_files, + _copy_files_selector, +) + +# For backward compatibility. +FileStats = FileInfo + +_not_imported = [] +try: + from pyarrow._azurefs import AzureFileSystem # noqa +except ImportError: + _not_imported.append("AzureFileSystem") + +try: + from pyarrow._hdfs import HadoopFileSystem # noqa +except ImportError: + _not_imported.append("HadoopFileSystem") + +try: + from pyarrow._gcsfs import GcsFileSystem # noqa +except ImportError: + _not_imported.append("GcsFileSystem") + +try: + from pyarrow._s3fs import ( # noqa + AwsDefaultS3RetryStrategy, AwsStandardS3RetryStrategy, + S3FileSystem, S3LogLevel, S3RetryStrategy, ensure_s3_initialized, + finalize_s3, ensure_s3_finalized, initialize_s3, resolve_s3_region) +except ImportError: + _not_imported.append("S3FileSystem") +else: + # GH-38364: we don't initialize S3 eagerly as that could lead + # to crashes at shutdown even when S3 isn't used. + # Instead, S3 is initialized lazily using `ensure_s3_initialized` + # in assorted places. 
def _ensure_filesystem(filesystem, *, use_mmap=False):
    """
    Coerce *filesystem* into a pyarrow ``FileSystem`` instance.

    Accepts a ``FileSystem`` (returned unchanged), a filesystem URI
    string, or an fsspec-compatible filesystem object (wrapped in a
    ``PyFileSystem``; a plain fsspec LocalFileSystem is mapped to the
    native arrow ``LocalFileSystem``).

    Parameters
    ----------
    filesystem : FileSystem, str, or fsspec.AbstractFileSystem
    use_mmap : bool, default False
        Use memory mapping for local filesystems; not supported when
        the filesystem is given as a URI string.

    Raises
    ------
    ValueError
        If ``use_mmap`` is combined with a URI string.
    TypeError
        If *filesystem* is of an unrecognized type.
    """
    if isinstance(filesystem, FileSystem):
        return filesystem
    elif isinstance(filesystem, str):
        if use_mmap:
            raise ValueError(
                "Specifying to use memory mapping not supported for "
                "filesystem specified as an URI string"
            )
        return _filesystem_from_str(filesystem)

    # handle fsspec-compatible filesystems
    try:
        import fsspec
    except ImportError:
        pass
    else:
        if isinstance(filesystem, fsspec.AbstractFileSystem):
            if type(filesystem).__name__ == 'LocalFileSystem':
                # In case it's a simple LocalFileSystem, use native arrow one
                return LocalFileSystem(use_mmap=use_mmap)
            return PyFileSystem(FSSpecHandler(filesystem))

    # Fixed message: removed the stray unbalanced quote after "URI".
    raise TypeError(
        "Unrecognized filesystem: {}. `filesystem` argument must be a "
        "FileSystem instance or a valid file system URI".format(
            type(filesystem))
    )
def _resolve_filesystem_and_path(path, filesystem=None, *, memory_map=False):
    """
    Return filesystem/path from path which could be an URI or a plain
    filesystem path.

    Parameters
    ----------
    path : str, path-like or file-like
        A plain path, a filesystem URI, or an already-open file-like
        object (returned untouched in that case).
    filesystem : FileSystem, str or fsspec filesystem, optional
        Explicit filesystem; coerced via ``_ensure_filesystem``. When not
        given, the filesystem is inferred: local path first, then URI.
    memory_map : bool, default False
        Passed through to the local filesystem when one is created.

    Returns
    -------
    (FileSystem or None, path)
        The resolved filesystem (None for file-like input without an
        explicit filesystem) and the normalized path.
    """
    if not _is_path_like(path):
        # File-like object: there is nothing for a filesystem to open.
        if filesystem is not None:
            raise ValueError(
                "'filesystem' passed but the specified path is file-like, so"
                " there is nothing to open with 'filesystem'."
            )
        return filesystem, path

    if filesystem is not None:
        filesystem = _ensure_filesystem(filesystem, use_mmap=memory_map)
        # Path-like objects (e.g. pathlib.Path) only make sense locally.
        if isinstance(filesystem, LocalFileSystem):
            path = _stringify_path(path)
        elif not isinstance(path, str):
            raise TypeError(
                "Expected string path; path-like objects are only allowed "
                "with a local filesystem"
            )
        path = filesystem.normalize_path(path)
        return filesystem, path

    path = _stringify_path(path)

    # if filesystem is not given, try to automatically determine one
    # first check if the file exists as a local (relative) file path
    # if not then try to parse the path as an URI
    filesystem = LocalFileSystem(use_mmap=memory_map)

    try:
        file_info = filesystem.get_file_info(path)
    except ValueError:  # ValueError means path is likely an URI
        file_info = None
        exists_locally = False
    else:
        exists_locally = (file_info.type != FileType.NotFound)

    # if the file or directory doesn't exists locally, then assume that
    # the path is an URI describing the file system as well
    if not exists_locally:
        try:
            filesystem, path = FileSystem.from_uri(path)
        except ValueError as e:
            # neither an URI nor a locally existing path, so assume that
            # local path was given and propagate a nicer file not found error
            # instead of a more confusing scheme parsing error
            if "empty scheme" not in str(e) \
                    and "Cannot parse URI" not in str(e):
                raise
        else:
            path = filesystem.normalize_path(path)

    return filesystem, path
def copy_files(source, destination,
               source_filesystem=None, destination_filesystem=None,
               *, chunk_size=1024*1024, use_threads=True):
    """
    Copy files between FileSystems.

    Recursively copies a file or a directory of files from one file system
    to another, for example from S3 to the local machine.

    Parameters
    ----------
    source : string
        Source file path or URI to a single file or directory.
        If a directory, files will be copied recursively from this path.
    destination : string
        Destination file path or URI. If `source` is a file, `destination`
        is also interpreted as the destination file (not directory).
        Directories will be created as necessary.
    source_filesystem : FileSystem, optional
        Source filesystem, needs to be specified if `source` is not a URI,
        otherwise inferred.
    destination_filesystem : FileSystem, optional
        Destination filesystem, needs to be specified if `destination` is not
        a URI, otherwise inferred.
    chunk_size : int, default 1MB
        The maximum size of block to read before flushing to the
        destination file. A larger chunk_size will use more memory while
        copying but may help accommodate high latency FileSystems.
    use_threads : bool, default True
        Whether to use multiple threads to accelerate copying.
    """
    # Resolve both endpoints to concrete (filesystem, path) pairs; plain
    # paths require the corresponding *_filesystem argument, URIs do not.
    src_fs, src_path = _resolve_filesystem_and_path(source, source_filesystem)
    dest_fs, dest_path = _resolve_filesystem_and_path(
        destination, destination_filesystem)

    # A single file is copied directly; a directory is walked recursively.
    info = src_fs.get_file_info(src_path)
    if info.type != FileType.Directory:
        _copy_files(src_fs, src_path,
                    dest_fs, dest_path,
                    chunk_size, use_threads)
        return

    selector = FileSelector(src_path, recursive=True)
    _copy_files_selector(src_fs, selector,
                         dest_fs, dest_path,
                         chunk_size, use_threads)
class FSSpecHandler(FileSystemHandler):
    """
    Handler for fsspec-based Python filesystems.

    https://filesystem-spec.readthedocs.io/en/latest/index.html

    Parameters
    ----------
    fs : FSSpec-compliant filesystem instance

    Examples
    --------
    >>> PyFileSystem(FSSpecHandler(fsspec_fs))  # doctest: +SKIP
    """

    def __init__(self, fs):
        # The wrapped fsspec AbstractFileSystem instance.
        self.fs = fs

    def __eq__(self, other):
        if isinstance(other, FSSpecHandler):
            return self.fs == other.fs
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, FSSpecHandler):
            return self.fs != other.fs
        return NotImplemented

    def get_type_name(self):
        """Return a type name of the form ``fsspec+<protocol>``."""
        protocol = self.fs.protocol
        # fsspec filesystems may advertise several protocol aliases as a
        # list; use the first one as the canonical name.
        if isinstance(protocol, list):
            protocol = protocol[0]
        return "fsspec+{0}".format(protocol)

    def normalize_path(self, path):
        # fsspec paths are used as-is; no normalization is performed.
        return path

    @staticmethod
    def _create_file_info(path, info):
        """Translate an fsspec ``info`` dict into a pyarrow FileInfo."""
        size = info["size"]
        if info["type"] == "file":
            ftype = FileType.File
        elif info["type"] == "directory":
            ftype = FileType.Directory
            # some fsspec filesystems include a file size for directories
            size = None
        else:
            ftype = FileType.Unknown
        return FileInfo(path, ftype, size=size, mtime=info.get("mtime", None))

    def get_file_info(self, paths):
        """Return a FileInfo for each path; missing paths yield NotFound."""
        infos = []
        for path in paths:
            try:
                info = self.fs.info(path)
            except FileNotFoundError:
                infos.append(FileInfo(path, FileType.NotFound))
            else:
                infos.append(self._create_file_info(path, info))
        return infos

    def get_file_info_selector(self, selector):
        """List entries under ``selector.base_dir`` per the selector flags."""
        if not self.fs.isdir(selector.base_dir):
            if self.fs.exists(selector.base_dir):
                raise NotADirectoryError(selector.base_dir)
            else:
                if selector.allow_not_found:
                    return []
                else:
                    raise FileNotFoundError(selector.base_dir)

        # maxdepth=None means unlimited recursion in fsspec's find().
        if selector.recursive:
            maxdepth = None
        else:
            maxdepth = 1

        infos = []
        selected_files = self.fs.find(
            selector.base_dir, maxdepth=maxdepth, withdirs=True, detail=True
        )
        for path, info in selected_files.items():
            _path = path.strip("/")
            base_dir = selector.base_dir.strip("/")
            # Need to exclude base directory from selected files if present
            # (fsspec filesystems, see GH-37555)
            if _path != base_dir:
                infos.append(self._create_file_info(path, info))

        return infos

    def create_dir(self, path, recursive):
        # mkdir also raises FileNotFoundError when base directory is not found
        try:
            self.fs.mkdir(path, create_parents=recursive)
        except FileExistsError:
            pass

    def delete_dir(self, path):
        self.fs.rm(path, recursive=True)

    def _delete_dir_contents(self, path, missing_dir_ok=False):
        # Default added so delete_root_dir_contents() below can call this
        # without specifying the flag (previously a required positional,
        # which made that call raise TypeError).
        try:
            subpaths = self.fs.listdir(path, detail=False)
        except FileNotFoundError:
            if missing_dir_ok:
                return
            raise
        for subpath in subpaths:
            if self.fs.isdir(subpath):
                self.fs.rm(subpath, recursive=True)
            elif self.fs.isfile(subpath):
                self.fs.rm(subpath)

    def delete_dir_contents(self, path, missing_dir_ok):
        # Refuse to wipe the filesystem root through this entry point;
        # delete_root_dir_contents() exists for that purpose.
        if path.strip("/") == "":
            # Fixed: previously passed three arguments to ValueError,
            # producing a tuple message instead of a formatted string.
            raise ValueError(
                "delete_dir_contents called on path '{}'".format(path))
        self._delete_dir_contents(path, missing_dir_ok)

    def delete_root_dir_contents(self):
        self._delete_dir_contents("/", missing_dir_ok=False)

    def delete_file(self, path):
        # fs.rm correctly raises IsADirectoryError when `path` is a directory
        # instead of a file and `recursive` is not set to True
        if not self.fs.exists(path):
            raise FileNotFoundError(path)
        self.fs.rm(path)

    def move(self, src, dest):
        self.fs.mv(src, dest, recursive=True)

    def copy_file(self, src, dest):
        # fs.copy correctly raises IsADirectoryError when `src` is a directory
        # instead of a file
        self.fs.copy(src, dest)

    # TODO can we read/pass metadata (e.g. Content-Type) in the methods below?

    def open_input_stream(self, path):
        from pyarrow import PythonFile

        if not self.fs.isfile(path):
            raise FileNotFoundError(path)

        return PythonFile(self.fs.open(path, mode="rb"), mode="r")

    def open_input_file(self, path):
        from pyarrow import PythonFile

        if not self.fs.isfile(path):
            raise FileNotFoundError(path)

        return PythonFile(self.fs.open(path, mode="rb"), mode="r")

    def open_output_stream(self, path, metadata):
        from pyarrow import PythonFile

        return PythonFile(self.fs.open(path, mode="wb"), mode="w")

    def open_append_stream(self, path, metadata):
        from pyarrow import PythonFile

        return PythonFile(self.fs.open(path, mode="ab"), mode="w")
See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from libcpp.memory cimport shared_ptr +from libcpp.string cimport string as c_string +from libcpp.vector cimport vector as c_vector +from libcpp.unordered_set cimport unordered_set as c_unordered_set +from libc.stdint cimport int64_t, int32_t + +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport (DataType, Field, MemoryPool, RecordBatch, + Schema, check_status, pyarrow_wrap_array, + pyarrow_wrap_data_type, ensure_type, _Weakrefable, + pyarrow_wrap_field) + +from pyarrow.includes.libgandiva cimport ( + CCondition, CGandivaExpression, + CNode, CProjector, CFilter, + CSelectionVector, + _ensure_selection_mode, + CConfiguration, + CConfigurationBuilder, + TreeExprBuilder_MakeExpression, + TreeExprBuilder_MakeFunction, + TreeExprBuilder_MakeBoolLiteral, + TreeExprBuilder_MakeUInt8Literal, + TreeExprBuilder_MakeUInt16Literal, + TreeExprBuilder_MakeUInt32Literal, + TreeExprBuilder_MakeUInt64Literal, + TreeExprBuilder_MakeInt8Literal, + TreeExprBuilder_MakeInt16Literal, + TreeExprBuilder_MakeInt32Literal, + TreeExprBuilder_MakeInt64Literal, + TreeExprBuilder_MakeFloatLiteral, + TreeExprBuilder_MakeDoubleLiteral, + TreeExprBuilder_MakeStringLiteral, + TreeExprBuilder_MakeBinaryLiteral, + TreeExprBuilder_MakeField, + TreeExprBuilder_MakeIf, + TreeExprBuilder_MakeAnd, + TreeExprBuilder_MakeOr, + TreeExprBuilder_MakeCondition, + TreeExprBuilder_MakeInExpressionInt32, + TreeExprBuilder_MakeInExpressionInt64, + TreeExprBuilder_MakeInExpressionTime32, + TreeExprBuilder_MakeInExpressionTime64, + TreeExprBuilder_MakeInExpressionDate32, + TreeExprBuilder_MakeInExpressionDate64, + TreeExprBuilder_MakeInExpressionTimeStamp, + TreeExprBuilder_MakeInExpressionString, + SelectionVector_MakeInt16, + SelectionVector_MakeInt32, + SelectionVector_MakeInt64, + Projector_Make, + 
cdef class Node(_Weakrefable):
    """
    A node in a Gandiva expression tree.

    Instances are created through the TreeExprBuilder API, never directly.
    """
    cdef:
        shared_ptr[CNode] node  # owned handle to the underlying gandiva::Node

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, use the "
                        "TreeExprBuilder API directly"
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CNode] node):
        # Internal factory: bypasses __init__ (which always raises) via
        # __new__ and attaches the C++ node handle.
        cdef Node self = Node.__new__(Node)
        self.node = node
        return self

    def __str__(self):
        return self.node.get().ToString().decode()

    def __repr__(self):
        type_format = object.__repr__(self)
        return '{0}\n{1}'.format(type_format, str(self))

    def return_type(self):
        # Data type this node evaluates to, wrapped as a pyarrow DataType.
        return pyarrow_wrap_data_type(self.node.get().return_type())
cdef class Projector(_Weakrefable):
    """
    Compiled Gandiva projector; evaluates expressions over record batches.

    Instances are created with make_projector(), never directly.
    """
    cdef:
        shared_ptr[CProjector] projector
        MemoryPool pool  # pool used to allocate output arrays in evaluate()

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, use "
                        "make_projector instead"
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CProjector] projector, MemoryPool pool):
        # Internal factory: bypasses the always-raising __init__.
        cdef Projector self = Projector.__new__(Projector)
        self.projector = projector
        self.pool = pool
        return self

    @property
    def llvm_ir(self):
        # LLVM IR generated for this projector, as a str.
        return self.projector.get().DumpIR().decode()

    def evaluate(self, RecordBatch batch, SelectionVector selection=None):
        """
        Evaluate the specified record batch and return the arrays at the
        filtered positions.

        Parameters
        ----------
        batch : pyarrow.RecordBatch
        selection : pyarrow.gandiva.SelectionVector

        Returns
        -------
        list[pyarrow.Array]
        """
        cdef vector[shared_ptr[CArray]] results
        # Two C++ overloads: with and without a selection vector.
        if selection is None:
            check_status(self.projector.get().Evaluate(
                batch.sp_batch.get()[0], self.pool.pool, &results))
        else:
            check_status(
                self.projector.get().Evaluate(
                    batch.sp_batch.get()[0], selection.selection_vector.get(),
                    self.pool.pool, &results))
        cdef shared_ptr[CArray] result
        arrays = []
        for result in results:
            arrays.append(pyarrow_wrap_array(result))
        return arrays
    def make_literal(self, value, dtype):
        """
        Create a node on a literal.

        Parameters
        ----------
        value : a literal value
        dtype : DataType
            Arrow type of the literal; determines which literal-builder
            overload is used.

        Returns
        -------
        pyarrow.gandiva.Node

        Raises
        ------
        TypeError
            If `dtype` is not one of the supported literal types.
        """
        cdef:
            DataType type = ensure_type(dtype)
            shared_ptr[CNode] r

        # Dispatch on the Arrow type id to the matching typed builder.
        if type.id == _Type_BOOL:
            r = TreeExprBuilder_MakeBoolLiteral(value)
        elif type.id == _Type_UINT8:
            r = TreeExprBuilder_MakeUInt8Literal(value)
        elif type.id == _Type_UINT16:
            r = TreeExprBuilder_MakeUInt16Literal(value)
        elif type.id == _Type_UINT32:
            r = TreeExprBuilder_MakeUInt32Literal(value)
        elif type.id == _Type_UINT64:
            r = TreeExprBuilder_MakeUInt64Literal(value)
        elif type.id == _Type_INT8:
            r = TreeExprBuilder_MakeInt8Literal(value)
        elif type.id == _Type_INT16:
            r = TreeExprBuilder_MakeInt16Literal(value)
        elif type.id == _Type_INT32:
            r = TreeExprBuilder_MakeInt32Literal(value)
        elif type.id == _Type_INT64:
            r = TreeExprBuilder_MakeInt64Literal(value)
        elif type.id == _Type_FLOAT:
            r = TreeExprBuilder_MakeFloatLiteral(value)
        elif type.id == _Type_DOUBLE:
            r = TreeExprBuilder_MakeDoubleLiteral(value)
        elif type.id == _Type_STRING:
            # String literals cross the C++ boundary as UTF-8 bytes.
            r = TreeExprBuilder_MakeStringLiteral(value.encode('UTF-8'))
        elif type.id == _Type_BINARY:
            r = TreeExprBuilder_MakeBinaryLiteral(value)
        else:
            raise TypeError("Didn't recognize dtype " + str(dtype))

        return Node.create(r)
    def make_and(self, children):
        """
        Create a Node with a boolean AND expression.

        Parameters
        ----------
        children : list[pyarrow.gandiva.Node]
            Boolean child nodes to be AND-ed together.

        Returns
        -------
        pyarrow.gandiva.Node

        Raises
        ------
        TypeError
            If any child node is None.
        """
        cdef c_vector[shared_ptr[CNode]] c_children
        cdef Node child
        # Collect the C++ node handles; reject None early so the C++ side
        # never sees a null pointer.
        for child in children:
            if child is None:
                raise TypeError("Child nodes must not be None")
            c_children.push_back(child.node)
        cdef shared_ptr[CNode] r = TreeExprBuilder_MakeAnd(c_children)
        return Node.create(r)
    def _make_in_expression_timestamp(self, Node node not None, values):
        # Internal helper for make_in_expression with a timestamp dtype:
        # copies `values` (epoch-based int64 timestamps) into a C++
        # unordered_set and builds the IN-expression node.
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int64_t] c_values
        cdef int64_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionTimeStamp(node.node, c_values)
        return Node.create(r)
    def make_condition(self, Node condition not None):
        """
        Create a condition with the specified node.

        Parameters
        ----------
        condition : pyarrow.gandiva.Node
            Boolean root node of the condition tree.

        Returns
        -------
        pyarrow.gandiva.Condition
            Condition object usable with make_filter().
        """
        cdef shared_ptr[CCondition] r = TreeExprBuilder_MakeCondition(
            condition.node)
        return Condition.create(r)
+ """ + self.configuration = CConfigurationBuilder().build() + self.configuration.get().set_optimize(optimize) + self.configuration.get().set_dump_ir(dump_ir) + + @staticmethod + cdef create(shared_ptr[CConfiguration] configuration): + """ + Create a Configuration instance from an existing CConfiguration pointer. + + Parameters + ---------- + configuration : shared_ptr[CConfiguration] + Existing CConfiguration pointer. + + Returns + ------- + Configuration instance + """ + cdef Configuration self = Configuration.__new__(Configuration) + self.configuration = configuration + return self + + +cpdef make_projector(Schema schema, children, MemoryPool pool, + str selection_mode="NONE", + Configuration configuration=None): + """ + Construct a projection using expressions. + + A projector is built for a specific schema and vector of expressions. + Once the projector is built, it can be used to evaluate many row batches. + + Parameters + ---------- + schema : pyarrow.Schema + Schema for the record batches, and the expressions. + children : list[pyarrow.gandiva.Expression] + List of projectable expression objects. + pool : pyarrow.MemoryPool + Memory pool used to allocate output arrays. + selection_mode : str, default "NONE" + Possible values are NONE, UINT16, UINT32, UINT64. + configuration : pyarrow.gandiva.Configuration, default None + Configuration for the projector. 
+ + Returns + ------- + Projector instance + """ + cdef: + Expression child + c_vector[shared_ptr[CGandivaExpression]] c_children + shared_ptr[CProjector] result + + if configuration is None: + configuration = Configuration() + + for child in children: + if child is None: + raise TypeError("Expressions must not be None") + c_children.push_back(child.expression) + + check_status( + Projector_Make(schema.sp_schema, c_children, + _ensure_selection_mode(selection_mode), + configuration.configuration, + &result)) + return Projector.create(result, pool) + + +cpdef make_filter(Schema schema, Condition condition, + Configuration configuration=None): + """ + Construct a filter based on a condition. + + A filter is built for a specific schema and condition. Once the filter is + built, it can be used to evaluate many row batches. + + Parameters + ---------- + schema : pyarrow.Schema + Schema for the record batches, and the condition. + condition : pyarrow.gandiva.Condition + Filter condition. + configuration : pyarrow.gandiva.Configuration, default None + Configuration for the filter. + + Returns + ------- + Filter instance + """ + cdef shared_ptr[CFilter] result + if condition is None: + raise TypeError("Condition must not be None") + + if configuration is None: + configuration = Configuration() + + check_status( + Filter_Make(schema.sp_schema, condition.condition, configuration.configuration, &result)) + return Filter.create(result) + + +cdef class FunctionSignature(_Weakrefable): + """ + Signature of a Gandiva function including name, parameter types + and return type. + """ + + cdef: + shared_ptr[CFunctionSignature] signature + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly." 
def get_registered_function_signatures():
    """
    Return the functions in Gandiva's ExpressionRegistry.

    Returns
    -------
    registry: a list of registered function signatures
    """
    results = []

    cdef vector[shared_ptr[CFunctionSignature]] signatures = \
        GetRegisteredFunctionSignatures()

    # Wrap each C++ signature handle in a Python FunctionSignature.
    for signature in signatures:
        results.append(FunctionSignature.create(signature))

    return results
def have_libhdfs():
    """
    Return True if the HDFS (HadoopFileSystem) library is set up correctly,
    False otherwise.
    """
    # Probe the HDFS driver; any failure simply means it is unavailable.
    try:
        with nogil:
            check_status(HaveLibHdfs())
    except Exception:
        return False
    else:
        return True
+ """ + return GetIOThreadPoolCapacity() + + +def set_io_thread_count(int count): + """ + Set the number of threads to use for I/O operations. + + Many operations, such as scanning a dataset, will implicitly make + use of this pool. + + Parameters + ---------- + count : int + The max number of threads that may be used for I/O. + Must be positive. + + See Also + -------- + io_thread_count : Get the size of this pool. + set_cpu_count : The analogous function for the CPU thread pool. + """ + if count < 1: + raise ValueError("IO thread count must be strictly positive") + check_status(SetIOThreadPoolCapacity(count)) + + +cdef class NativeFile(_Weakrefable): + """ + The base class for all Arrow streams. + + Streams are either readable, writable, or both. + They optionally support seeking. + + While this class exposes methods to read or write data from Python, the + primary intent of using a Arrow stream is to pass it to other Arrow + facilities that will make use of it, such as Arrow IPC routines. + + Be aware that there are subtle differences with regular Python files, + e.g. destroying a writable Arrow stream without closing it explicitly + will not flush any pending data. + """ + + # Default chunk size for chunked reads. + # Use a large enough value for networked filesystems. + _default_chunk_size = 256 * 1024 + + def __cinit__(self): + self.own_file = False + self.is_readable = False + self.is_writable = False + self.is_seekable = False + self._is_appending = False + + def __dealloc__(self): + if self.own_file: + self.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + self.close() + + def __repr__(self): + name = f"pyarrow.{self.__class__.__name__}" + return (f"<{name} " + f"closed={self.closed} " + f"own_file={self.own_file} " + f"is_seekable={self.is_seekable} " + f"is_writable={self.is_writable} " + f"is_readable={self.is_readable}>") + + @property + def mode(self): + """ + The file mode. 
Currently instances of NativeFile may support: + + * rb: binary read + * wb: binary write + * rb+: binary read and write + * ab: binary append + """ + # Emulate built-in file modes + if self.is_readable and self.is_writable: + return 'rb+' + elif self.is_readable: + return 'rb' + elif self.is_writable and self._is_appending: + return 'ab' + elif self.is_writable: + return 'wb' + else: + raise ValueError('File object is malformed, has no mode') + + def readable(self): + self._assert_open() + return self.is_readable + + def writable(self): + self._assert_open() + return self.is_writable + + def seekable(self): + self._assert_open() + return self.is_seekable + + def isatty(self): + self._assert_open() + return False + + def fileno(self): + """ + NOT IMPLEMENTED + """ + raise UnsupportedOperation() + + @property + def closed(self): + if self.is_readable: + return self.input_stream.get().closed() + elif self.is_writable: + return self.output_stream.get().closed() + else: + return True + + def close(self): + if not self.closed: + with nogil: + if self.is_readable: + check_status(self.input_stream.get().Close()) + else: + check_status(self.output_stream.get().Close()) + + cdef set_random_access_file(self, shared_ptr[CRandomAccessFile] handle): + self.input_stream = handle + self.random_access = handle + self.is_seekable = True + + cdef set_input_stream(self, shared_ptr[CInputStream] handle): + self.input_stream = handle + self.random_access.reset() + self.is_seekable = False + + cdef set_output_stream(self, shared_ptr[COutputStream] handle): + self.output_stream = handle + + cdef shared_ptr[CRandomAccessFile] get_random_access_file(self) except *: + self._assert_readable() + self._assert_seekable() + return self.random_access + + cdef shared_ptr[CInputStream] get_input_stream(self) except *: + self._assert_readable() + return self.input_stream + + cdef shared_ptr[COutputStream] get_output_stream(self) except *: + self._assert_writable() + return self.output_stream + + def 
_assert_open(self): + if self.closed: + raise ValueError("I/O operation on closed file") + + def _assert_readable(self): + self._assert_open() + if not self.is_readable: + # XXX UnsupportedOperation + raise IOError("only valid on readable files") + + def _assert_writable(self): + self._assert_open() + if not self.is_writable: + raise IOError("only valid on writable files") + + def _assert_seekable(self): + self._assert_open() + if not self.is_seekable: + raise IOError("only valid on seekable files") + + def size(self): + """ + Return file size + """ + cdef int64_t size + + handle = self.get_random_access_file() + with nogil: + size = GetResultValue(handle.get().GetSize()) + + return size + + def metadata(self): + """ + Return file metadata + """ + cdef: + shared_ptr[const CKeyValueMetadata] c_metadata + + handle = self.get_input_stream() + with nogil: + c_metadata = GetResultValue(handle.get().ReadMetadata()) + + metadata = {} + if c_metadata.get() != nullptr: + for i in range(c_metadata.get().size()): + metadata[frombytes(c_metadata.get().key(i))] = \ + c_metadata.get().value(i) + return metadata + + def tell(self): + """ + Return current stream position + """ + cdef int64_t position + + if self.is_readable: + rd_handle = self.get_random_access_file() + with nogil: + position = GetResultValue(rd_handle.get().Tell()) + else: + wr_handle = self.get_output_stream() + with nogil: + position = GetResultValue(wr_handle.get().Tell()) + + return position + + def seek(self, int64_t position, int whence=0): + """ + Change current file stream position + + Parameters + ---------- + position : int + Byte offset, interpreted relative to value of whence argument + whence : int, default 0 + Point of reference for seek offset + + Notes + ----- + Values of whence: + * 0 -- start of stream (the default); offset should be zero or positive + * 1 -- current stream position; offset may be negative + * 2 -- end of stream; offset is usually negative + + Returns + ------- + int + The new 
absolute stream position. + """ + cdef int64_t offset + handle = self.get_random_access_file() + + with nogil: + if whence == 0: + offset = position + elif whence == 1: + offset = GetResultValue(handle.get().Tell()) + offset = offset + position + elif whence == 2: + offset = GetResultValue(handle.get().GetSize()) + offset = offset + position + else: + with gil: + raise ValueError("Invalid value of whence: {0}" + .format(whence)) + check_status(handle.get().Seek(offset)) + + return self.tell() + + def flush(self): + """ + Flush the stream, if applicable. + + An error is raised if stream is not writable. + """ + self._assert_open() + # For IOBase compatibility, flush() on an input stream is a no-op + if self.is_writable: + handle = self.get_output_stream() + with nogil: + check_status(handle.get().Flush()) + + def write(self, data): + """ + Write data to the file. + + Parameters + ---------- + data : bytes-like object or exporter of buffer protocol + + Returns + ------- + int + nbytes: number of bytes written + """ + self._assert_writable() + handle = self.get_output_stream() + + cdef shared_ptr[CBuffer] buf = as_c_buffer(data) + + with nogil: + check_status(handle.get().WriteBuffer(buf)) + return buf.get().size() + + def read(self, nbytes=None): + """ + Read and return up to n bytes. + + If *nbytes* is None, then the entire remaining file contents are read. 
+ + Parameters + ---------- + nbytes : int, default None + + Returns + ------- + data : bytes + """ + cdef: + int64_t c_nbytes + int64_t bytes_read = 0 + PyObject* obj + + if nbytes is None: + if not self.is_seekable: + # Cannot get file size => read chunkwise + bs = self._default_chunk_size + chunks = [] + while True: + chunk = self.read(bs) + if not chunk: + break + chunks.append(chunk) + return b"".join(chunks) + + c_nbytes = self.size() - self.tell() + else: + c_nbytes = nbytes + + handle = self.get_input_stream() + + # Allocate empty write space + obj = PyBytes_FromStringAndSizeNative(NULL, c_nbytes) + + cdef uint8_t* buf = cp.PyBytes_AS_STRING( obj) + with nogil: + bytes_read = GetResultValue(handle.get().Read(c_nbytes, buf)) + + if bytes_read < c_nbytes: + cp._PyBytes_Resize(&obj, bytes_read) + + return PyObject_to_object(obj) + + def get_stream(self, file_offset, nbytes): + """ + Return an input stream that reads a file segment independent of the + state of the file. + + Allows reading portions of a random access file as an input stream + without interfering with each other. 
+ + Parameters + ---------- + file_offset : int + nbytes : int + + Returns + ------- + stream : NativeFile + """ + cdef: + shared_ptr[CInputStream] data + int64_t c_file_offset + int64_t c_nbytes + + c_file_offset = file_offset + c_nbytes = nbytes + + handle = self.get_random_access_file() + + data = GetResultValue( + CRandomAccessFile.GetStream(handle, c_file_offset, c_nbytes)) + + stream = NativeFile() + stream.set_input_stream(data) + stream.is_readable = True + + return stream + + def read_at(self, nbytes, offset): + """ + Read indicated number of bytes at offset from the file + + Parameters + ---------- + nbytes : int + offset : int + + Returns + ------- + data : bytes + """ + cdef: + int64_t c_nbytes + int64_t c_offset + int64_t bytes_read = 0 + PyObject* obj + + c_nbytes = nbytes + + c_offset = offset + + handle = self.get_random_access_file() + + # Allocate empty write space + obj = PyBytes_FromStringAndSizeNative(NULL, c_nbytes) + + cdef uint8_t* buf = cp.PyBytes_AS_STRING( obj) + with nogil: + bytes_read = GetResultValue(handle.get(). + ReadAt(c_offset, c_nbytes, buf)) + + if bytes_read < c_nbytes: + cp._PyBytes_Resize(&obj, bytes_read) + + return PyObject_to_object(obj) + + def read1(self, nbytes=None): + """Read and return up to n bytes. + + Unlike read(), if *nbytes* is None then a chunk is read, not the + entire file. + + Parameters + ---------- + nbytes : int, default None + The maximum number of bytes to read. + + Returns + ------- + data : bytes + """ + if nbytes is None: + # The expectation when passing `nbytes=None` is not to read the + # entire file but to issue a single underlying read call up to + # a reasonable size (the use case being to read a bufferable + # amount of bytes, such as with io.TextIOWrapper). 
+ nbytes = self._default_chunk_size + return self.read(nbytes) + + def readall(self): + return self.read() + + def readinto(self, b): + """ + Read into the supplied buffer + + Parameters + ---------- + b : buffer-like object + A writable buffer object (such as a bytearray). + + Returns + ------- + written : int + number of bytes written + """ + + cdef: + int64_t bytes_read + uint8_t* buf + Buffer py_buf + int64_t buf_len + + handle = self.get_input_stream() + + py_buf = py_buffer(b) + buf_len = py_buf.size + buf = py_buf.buffer.get().mutable_data() + + with nogil: + bytes_read = GetResultValue(handle.get().Read(buf_len, buf)) + + return bytes_read + + def readline(self, size=None): + """NOT IMPLEMENTED. Read and return a line of bytes from the file. + + If size is specified, read at most size bytes. + + Line terminator is always b"\\n". + + Parameters + ---------- + size : int + maximum number of bytes read + """ + raise UnsupportedOperation() + + def readlines(self, hint=None): + """NOT IMPLEMENTED. Read lines of the file + + Parameters + ---------- + hint : int + maximum number of bytes read until we stop + """ + raise UnsupportedOperation() + + def __iter__(self): + self._assert_readable() + return self + + def __next__(self): + line = self.readline() + if not line: + raise StopIteration + return line + + def read_buffer(self, nbytes=None): + """ + Read from buffer. 
+ + Parameters + ---------- + nbytes : int, optional + maximum number of bytes read + """ + cdef: + int64_t c_nbytes + int64_t bytes_read = 0 + shared_ptr[CBuffer] output + + handle = self.get_input_stream() + + if nbytes is None: + if not self.is_seekable: + # Cannot get file size => read chunkwise + return py_buffer(self.read()) + c_nbytes = self.size() - self.tell() + else: + c_nbytes = nbytes + + with nogil: + output = GetResultValue(handle.get().ReadBuffer(c_nbytes)) + + return pyarrow_wrap_buffer(output) + + def truncate(self): + """ + NOT IMPLEMENTED + """ + raise UnsupportedOperation() + + def writelines(self, lines): + """ + Write lines to the file. + + Parameters + ---------- + lines : iterable + Iterable of bytes-like objects or exporters of buffer protocol + """ + self._assert_writable() + + for line in lines: + self.write(line) + + def download(self, stream_or_path, buffer_size=None): + """ + Read this file completely to a local path or destination stream. + + This method first seeks to the beginning of the file. + + Parameters + ---------- + stream_or_path : str or file-like object + If a string, a local file path to write to; otherwise, + should be a writable stream. + buffer_size : int, optional + The buffer size to use for data transfers. 
+ """ + cdef: + int64_t bytes_read = 0 + uint8_t* buf + + if not is_threading_enabled(): + return self._download_nothreads(stream_or_path, buffer_size) + + handle = self.get_input_stream() + + buffer_size = buffer_size or DEFAULT_BUFFER_SIZE + + write_queue = Queue(50) + + if not hasattr(stream_or_path, 'read'): + stream = open(stream_or_path, 'wb') + + def cleanup(): + stream.close() + else: + stream = stream_or_path + + def cleanup(): + pass + + done = False + exc_info = None + + def bg_write(): + try: + while not done or write_queue.qsize() > 0: + try: + buf = write_queue.get(timeout=0.01) + except QueueEmpty: + continue + stream.write(buf) + except Exception as e: + exc_info = sys.exc_info() + finally: + cleanup() + + self.seek(0) + + writer_thread = threading.Thread(target=bg_write) + + # This isn't ideal -- PyBytes_FromStringAndSize copies the data from + # the passed buffer, so it's hard for us to avoid doubling the memory + buf = malloc(buffer_size) + if buf == NULL: + raise MemoryError("Failed to allocate {0} bytes" + .format(buffer_size)) + + writer_thread.start() + + cdef int64_t total_bytes = 0 + cdef int32_t c_buffer_size = buffer_size + + try: + while True: + with nogil: + bytes_read = GetResultValue( + handle.get().Read(c_buffer_size, buf)) + + total_bytes += bytes_read + + # EOF + if bytes_read == 0: + break + + pybuf = cp.PyBytes_FromStringAndSize(buf, + bytes_read) + + if writer_thread.is_alive(): + while write_queue.full(): + time.sleep(0.01) + else: + break + + write_queue.put_nowait(pybuf) + finally: + free(buf) + done = True + + writer_thread.join() + if exc_info is not None: + raise exc_info[0], exc_info[1], exc_info[2] + + def _download_nothreads(self, stream_or_path, buffer_size=None): + """ + Internal method to do a download without separate threads, queues etc. 
+ Called by download above if is_threading_enabled() == False + """ + cdef: + int64_t bytes_read = 0 + uint8_t* buf + + handle = self.get_input_stream() + + buffer_size = buffer_size or DEFAULT_BUFFER_SIZE + + if not hasattr(stream_or_path, 'read'): + stream = open(stream_or_path, 'wb') + + def cleanup(): + stream.close() + else: + stream = stream_or_path + + def cleanup(): + pass + + self.seek(0) + + # This isn't ideal -- PyBytes_FromStringAndSize copies the data from + # the passed buffer, so it's hard for us to avoid doubling the memory + buf = malloc(buffer_size) + if buf == NULL: + raise MemoryError("Failed to allocate {0} bytes" + .format(buffer_size)) + + cdef int64_t total_bytes = 0 + cdef int32_t c_buffer_size = buffer_size + + try: + while True: + with nogil: + bytes_read = GetResultValue( + handle.get().Read(c_buffer_size, buf)) + + total_bytes += bytes_read + + # EOF + if bytes_read == 0: + break + + pybuf = cp.PyBytes_FromStringAndSize(buf, + bytes_read) + + # no background thread - write on main thread + stream.write(pybuf) + finally: + free(buf) + cleanup() + + def upload(self, stream, buffer_size=None): + """ + Write from a source stream to this file. + + Parameters + ---------- + stream : file-like object + Source stream to pipe to this file. + buffer_size : int, optional + The buffer size to use for data transfers. 
+ """ + if not is_threading_enabled(): + return self._upload_nothreads(stream, buffer_size) + + write_queue = Queue(50) + self._assert_writable() + + buffer_size = buffer_size or DEFAULT_BUFFER_SIZE + + done = False + exc_info = None + + def bg_write(): + try: + while not done or write_queue.qsize() > 0: + try: + buf = write_queue.get(timeout=0.01) + except QueueEmpty: + continue + + self.write(buf) + + except Exception as e: + exc_info = sys.exc_info() + + writer_thread = threading.Thread(target=bg_write) + writer_thread.start() + + try: + while True: + buf = stream.read(buffer_size) + if not buf: + break + + if writer_thread.is_alive(): + while write_queue.full(): + time.sleep(0.01) + else: + break + + write_queue.put_nowait(buf) + finally: + done = True + + writer_thread.join() + if exc_info is not None: + raise exc_info[0], exc_info[1], exc_info[2] + + def _upload_nothreads(self, stream, buffer_size=None): + """ + Internal method to do an upload without separate threads, queues etc. + Called by upload above if is_threading_enabled() == False + """ + self._assert_writable() + + buffer_size = buffer_size or DEFAULT_BUFFER_SIZE + + while True: + buf = stream.read(buffer_size) + if not buf: + break + + # no threading - just write + self.write(buf) + + +BufferedIOBase.register(NativeFile) + +# ---------------------------------------------------------------------- +# Python file-like objects + + +cdef class PythonFile(NativeFile): + """ + A stream backed by a Python file object. + + This class allows using Python file objects with arbitrary Arrow + functions, including functions written in another language than Python. + + As a downside, there is a non-zero redirection cost in translating + Arrow stream calls to Python method calls. Furthermore, Python's + Global Interpreter Lock may limit parallelism in some situations. 
+ + Examples + -------- + >>> import io + >>> import pyarrow as pa + >>> pa.PythonFile(io.BytesIO()) + + + Create a stream for writing: + + >>> buf = io.BytesIO() + >>> f = pa.PythonFile(buf, mode = 'w') + >>> f.writable() + True + >>> f.write(b'PythonFile') + 10 + >>> buf.getvalue() + b'PythonFile' + >>> f.close() + >>> f + + + Create a stream for reading: + + >>> buf = io.BytesIO(b'PythonFile') + >>> f = pa.PythonFile(buf, mode = 'r') + >>> f.mode + 'rb' + >>> f.read() + b'PythonFile' + >>> f + + >>> f.close() + >>> f + + """ + cdef: + object handle + + def __cinit__(self, handle, mode=None): + self.handle = handle + + if mode is None: + try: + inferred_mode = handle.mode + except AttributeError: + # Not all file-like objects have a mode attribute + # (e.g. BytesIO) + try: + inferred_mode = 'w' if handle.writable() else 'r' + except AttributeError: + raise ValueError("could not infer open mode for file-like " + "object %r, please pass it explicitly" + % (handle,)) + else: + inferred_mode = mode + + if inferred_mode.startswith('w'): + kind = 'w' + elif inferred_mode.startswith('r'): + kind = 'r' + else: + raise ValueError('Invalid file mode: {0}'.format(mode)) + + # If mode was given, check it matches the given file + if mode is not None: + if isinstance(handle, IOBase): + # Python 3 IO object + if kind == 'r': + if not handle.readable(): + raise TypeError("readable file expected") + else: + if not handle.writable(): + raise TypeError("writable file expected") + # (other duck-typed file-like objects are possible) + + # If possible, check the file is a binary file + if isinstance(handle, TextIOBase): + raise TypeError("binary file expected, got text file") + + if kind == 'r': + self.set_random_access_file( + shared_ptr[CRandomAccessFile](new PyReadableFile(handle))) + self.is_readable = True + else: + self.set_output_stream( + shared_ptr[COutputStream](new PyOutputStream(handle))) + self.is_writable = True + + def truncate(self, pos=None): + """ + Parameters + 
---------- + pos : int, optional + """ + self.handle.truncate(pos) + + def readline(self, size=None): + """ + Read and return a line of bytes from the file. + + If size is specified, read at most size bytes. + + Parameters + ---------- + size : int + Maximum number of bytes read + """ + return self.handle.readline(size) + + def readlines(self, hint=None): + """ + Read lines of the file. + + Parameters + ---------- + hint : int + Maximum number of bytes read until we stop + """ + return self.handle.readlines(hint) + + +cdef class MemoryMappedFile(NativeFile): + """ + A stream that represents a memory-mapped file. + + Supports 'r', 'r+', 'w' modes. + + Examples + -------- + Create a new file with memory map: + + >>> import pyarrow as pa + >>> mmap = pa.create_memory_map('example_mmap.dat', 10) + >>> mmap + + >>> mmap.close() + + Open an existing file with memory map: + + >>> with pa.memory_map('example_mmap.dat') as mmap: + ... mmap + ... + + """ + cdef: + shared_ptr[CMemoryMappedFile] handle + object path + + @staticmethod + def create(path, size): + """ + Create a MemoryMappedFile + + Parameters + ---------- + path : str + Where to create the file. + size : int + Size of the memory mapped file. 
+ """ + cdef: + shared_ptr[CMemoryMappedFile] handle + c_string c_path = encode_file_path(path) + int64_t c_size = size + + with nogil: + handle = GetResultValue(CMemoryMappedFile.Create(c_path, c_size)) + + cdef MemoryMappedFile result = MemoryMappedFile() + result.path = path + result.is_readable = True + result.is_writable = True + result.set_output_stream( handle) + result.set_random_access_file( handle) + result.handle = handle + + return result + + def _open(self, path, mode='r'): + self.path = path + + cdef: + FileMode c_mode + shared_ptr[CMemoryMappedFile] handle + c_string c_path = encode_file_path(path) + + if mode in ('r', 'rb'): + c_mode = FileMode_READ + self.is_readable = True + elif mode in ('w', 'wb'): + c_mode = FileMode_WRITE + self.is_writable = True + elif mode in ('r+', 'r+b', 'rb+'): + c_mode = FileMode_READWRITE + self.is_readable = True + self.is_writable = True + else: + raise ValueError('Invalid file mode: {0}'.format(mode)) + + with nogil: + handle = GetResultValue(CMemoryMappedFile.Open(c_path, c_mode)) + + self.set_output_stream( handle) + self.set_random_access_file( handle) + self.handle = handle + + def resize(self, new_size): + """ + Resize the map and underlying file. + + Parameters + ---------- + new_size : new size in bytes + """ + check_status(self.handle.get().Resize(new_size)) + + def fileno(self): + self._assert_open() + return self.handle.get().file_descriptor() + + +def memory_map(path, mode='r'): + """ + Open memory map at file path. Size of the memory map cannot change. + + Parameters + ---------- + path : str + mode : {'r', 'r+', 'w'}, default 'r' + Whether the file is opened for reading ('r'), writing ('w') + or both ('r+'). + + Returns + ------- + mmap : MemoryMappedFile + + Examples + -------- + Reading from a memory map without any memory allocation or copying: + + >>> import pyarrow as pa + >>> with pa.output_stream('example_mmap.txt') as stream: + ... 
stream.write(b'Constructing a buffer referencing the mapped memory') + ... + 51 + >>> with pa.memory_map('example_mmap.txt') as mmap: + ... mmap.read_at(6,45) + ... + b'memory' + """ + _check_is_file(path) + + cdef MemoryMappedFile mmap = MemoryMappedFile() + mmap._open(path, mode) + return mmap + + +cdef _check_is_file(path): + if os.path.isdir(path): + raise IOError("Expected file path, but {0} is a directory" + .format(path)) + + +def create_memory_map(path, size): + """ + Create a file of the given size and memory-map it. + + Parameters + ---------- + path : str + The file path to create, on the local filesystem. + size : int + The file size to create. + + Returns + ------- + mmap : MemoryMappedFile + + Examples + -------- + Create a file with a memory map: + + >>> import pyarrow as pa + >>> with pa.create_memory_map('example_mmap_create.dat', 27) as mmap: + ... mmap.write(b'Create a memory-mapped file') + ... mmap.read_at(10, 9) + ... + 27 + b'memory-map' + """ + return MemoryMappedFile.create(path, size) + + +cdef class OSFile(NativeFile): + """ + A stream backed by a regular file descriptor. + + Examples + -------- + Create a new file to write to: + + >>> import pyarrow as pa + >>> with pa.OSFile('example_osfile.arrow', mode='w') as f: + ... f.writable() + ... f.write(b'OSFile') + ... f.seekable() + ... + True + 6 + False + + Open the file to read: + + >>> with pa.OSFile('example_osfile.arrow', mode='r') as f: + ... f.mode + ... f.read() + ... + 'rb' + b'OSFile' + + Open the file to append: + + >>> with pa.OSFile('example_osfile.arrow', mode='ab') as f: + ... f.mode + ... f.write(b' is super!') + ... + 'ab' + 10 + >>> with pa.OSFile('example_osfile.arrow') as f: + ... f.read() + ... + b'OSFile is super!' 

    Inspect created OSFile:

    >>> pa.OSFile('example_osfile.arrow')

    """
    cdef:
        object path

    def __cinit__(self, path, mode='r', MemoryPool memory_pool=None):
        # Validate the path refers to a file (not a directory) and
        # dispatch on the requested open mode.
        _check_is_file(path)
        self.path = path

        cdef:
            FileMode c_mode
            shared_ptr[Readable] handle
            c_string c_path = encode_file_path(path)

        if mode in ('r', 'rb'):
            self._open_readable(c_path, maybe_unbox_memory_pool(memory_pool))
        elif mode in ('w', 'wb'):
            self._open_writable(c_path)
        elif mode in ('a', 'ab'):
            self._open_writable(c_path, append=True)
        else:
            raise ValueError('Invalid file mode: {0}'.format(mode))

    cdef _open_readable(self, c_string path, CMemoryPool* pool):
        # Open for reading via arrow::io::ReadableFile and attach it as a
        # random-access handle (seekable).
        cdef shared_ptr[ReadableFile] handle

        with nogil:
            handle = GetResultValue(ReadableFile.Open(path, pool))

        self.is_readable = True
        # NOTE(review): the explicit upcast below was reconstructed; the
        # source this was extracted from had the cast text stripped --
        # confirm against upstream.
        self.set_random_access_file(<shared_ptr[CRandomAccessFile]> handle)

    cdef _open_writable(self, c_string path, c_bool append=False):
        # Open for writing (optionally appending) via
        # arrow::io::FileOutputStream.
        with nogil:
            self.output_stream = GetResultValue(
                FileOutputStream.OpenWithAppend(path, append)
            )
        self.is_writable = True
        self._is_appending = append

    def fileno(self):
        self._assert_open()
        # NOTE(review): OSFile declares no `handle` attribute (its cdef
        # block only declares `path`), so this line looks like it would
        # raise AttributeError at runtime -- confirm intended behavior.
        return self.handle.file_descriptor()


cdef class FixedSizeBufferWriter(NativeFile):
    """
    A stream writing to a Arrow buffer.

    Examples
    --------
    Create a stream to write to ``pyarrow.Buffer``:

    >>> import pyarrow as pa
    >>> buf = pa.allocate_buffer(5)
    >>> with pa.output_stream(buf) as stream:
    ...     stream.write(b'abcde')
    ...     stream
    ...
    5

    Inspect the buffer:

    >>> buf.to_pybytes()
    b'abcde'
    >>> buf

    """

    def __cinit__(self, Buffer buffer):
        # Wrap the given Arrow buffer in a fixed-size output stream; writes
        # beyond the buffer's capacity are handled by the C++ layer.
        self.output_stream.reset(new CFixedSizeBufferWriter(buffer.buffer))
        self.is_writable = True

    def set_memcopy_threads(self, int num_threads):
        """
        Parameters
        ----------
        num_threads : int
        """
        # NOTE(review): downcast reconstructed (cast text was stripped in
        # the extraction) -- the stream was created as CFixedSizeBufferWriter
        # in __cinit__, so the cast is safe.
        cdef CFixedSizeBufferWriter* writer = \
            <CFixedSizeBufferWriter*> self.output_stream.get()
        writer.set_memcopy_threads(num_threads)

    def set_memcopy_blocksize(self, int64_t blocksize):
        """
        Parameters
        ----------
        blocksize : int64
        """
        cdef CFixedSizeBufferWriter* writer = \
            <CFixedSizeBufferWriter*> self.output_stream.get()
        writer.set_memcopy_blocksize(blocksize)

    def set_memcopy_threshold(self, int64_t threshold):
        """
        Parameters
        ----------
        threshold : int64
        """
        cdef CFixedSizeBufferWriter* writer = \
            <CFixedSizeBufferWriter*> self.output_stream.get()
        writer.set_memcopy_threshold(threshold)


# ----------------------------------------------------------------------
# Arrow buffers


cdef class Buffer(_Weakrefable):
    """
    The base class for all Arrow buffers.

    A buffer represents a contiguous memory area. Many buffers will own
    their memory, though not all of them do.
    """

    def __cinit__(self):
        pass

    def __init__(self):
        # Buffers are only created through wrapping helpers, never directly.
        raise TypeError("Do not call Buffer's constructor directly, use "
                        "`pyarrow.py_buffer` function instead.")

    cdef void init(self, const shared_ptr[CBuffer]& buffer):
        # Cache shape/strides for the Python buffer protocol
        # (__getbuffer__): a 1-D byte view.
        self.buffer = buffer
        self.shape[0] = self.size
        self.strides[0] = <Py_ssize_t>(1)

    def __len__(self):
        return self.size

    def __repr__(self):
        name = f"pyarrow.{self.__class__.__name__}"
        return (f"<{name} "
                f"address={hex(self.address)} "
                f"size={self.size} "
                f"is_cpu={self.is_cpu} "
                f"is_mutable={self.is_mutable}>")

    def _assert_cpu(self):
        # Guard for operations that require the data to live in host memory.
        if not self.is_cpu:
            raise NotImplementedError("Implemented only for data on CPU device")

    @property
    def size(self):
        """
        The buffer size in bytes.
+ """ + return self.buffer.get().size() + + @property + def address(self): + """ + The buffer's address, as an integer. + + The returned address may point to CPU or device memory. + Use `is_cpu()` to disambiguate. + """ + return self.buffer.get().address() + + def hex(self): + """ + Compute hexadecimal representation of the buffer. + + Returns + ------- + : bytes + """ + self._assert_cpu() + return self.buffer.get().ToHexString() + + @property + def is_mutable(self): + """ + Whether the buffer is mutable. + """ + return self.buffer.get().is_mutable() + + @property + def is_cpu(self): + """ + Whether the buffer is CPU-accessible. + """ + return self.buffer.get().is_cpu() + + @property + def device(self): + """ + The device where the buffer resides. + + Returns + ------- + Device + """ + return Device.wrap(self.buffer.get().device()) + + @property + def memory_manager(self): + """ + The memory manager associated with the buffer. + + Returns + ------- + MemoryManager + """ + return MemoryManager.wrap(self.buffer.get().memory_manager()) + + @property + def device_type(self): + """ + The device type where the buffer resides. + + Returns + ------- + DeviceAllocationType + """ + return _wrap_device_allocation_type(self.buffer.get().device_type()) + + @property + def parent(self): + cdef shared_ptr[CBuffer] parent_buf = self.buffer.get().parent() + + if parent_buf.get() == NULL: + return None + else: + return pyarrow_wrap_buffer(parent_buf) + + def __getitem__(self, key): + if isinstance(key, slice): + if (key.step or 1) != 1: + raise IndexError('only slices with step 1 supported') + return _normalize_slice(self, key) + + return self.getitem(_normalize_index(key, self.size)) + + cdef getitem(self, int64_t i): + self._assert_cpu() + return self.buffer.get().data()[i] + + def slice(self, offset=0, length=None): + """ + Slice this buffer. Memory is not copied. + + You can also use the Python slice notation ``buffer[start:stop]``. 
+ + Parameters + ---------- + offset : int, default 0 + Offset from start of buffer to slice. + length : int, default None + Length of slice (default is until end of Buffer starting from + offset). + + Returns + ------- + sliced : Buffer + A logical view over this buffer. + """ + cdef shared_ptr[CBuffer] result + + if offset < 0: + raise IndexError('Offset must be non-negative') + + if length is None: + result = GetResultValue(SliceBufferSafe(self.buffer, offset)) + else: + result = GetResultValue(SliceBufferSafe(self.buffer, offset, + length)) + return pyarrow_wrap_buffer(result) + + def equals(self, Buffer other): + """ + Determine if two buffers contain exactly the same data. + + Parameters + ---------- + other : Buffer + + Returns + ------- + are_equal : bool + True if buffer contents and size are equal + """ + if self.device != other.device: + raise ValueError( + "Device on which the data resides differs between buffers: " + f"{self.device.type_name} and {other.device.type_name}." + ) + if not self.is_cpu: + if self.address != other.address: + raise NotImplementedError( + "Implemented only for data on CPU device or data with equal " + "addresses" + ) + + cdef c_bool result = False + with nogil: + result = self.buffer.get().Equals(deref(other.buffer.get())) + return result + + def __eq__(self, other): + if isinstance(other, Buffer): + return self.equals(other) + else: + return self.equals(py_buffer(other)) + + def __reduce_ex__(self, protocol): + self._assert_cpu() + + if protocol >= 5: + bufobj = pickle.PickleBuffer(self) + elif self.buffer.get().is_mutable(): + # Need to pass a bytearray to recreate a mutable buffer when + # unpickling. + bufobj = PyByteArray_FromStringAndSize( + self.buffer.get().data(), + self.buffer.get().size()) + else: + bufobj = self.to_pybytes() + return py_buffer, (bufobj,) + + def to_pybytes(self): + """ + Return this buffer as a Python bytes object. Memory is copied. 
+ """ + self._assert_cpu() + + return cp.PyBytes_FromStringAndSize( + self.buffer.get().data(), + self.buffer.get().size()) + + def __getbuffer__(self, cp.Py_buffer* buffer, int flags): + self._assert_cpu() + + if self.buffer.get().is_mutable(): + buffer.readonly = 0 + else: + if flags & cp.PyBUF_WRITABLE: + raise BufferError("Writable buffer requested but Arrow " + "buffer was not mutable") + buffer.readonly = 1 + buffer.buf = self.buffer.get().data() + buffer.len = self.size + if buffer.buf == NULL: + # ARROW-16048: Ensure we don't export a NULL address. + assert buffer.len == 0 + buffer.buf = cp.PyBytes_AS_STRING(b"") + buffer.format = 'b' + buffer.internal = NULL + buffer.itemsize = 1 + buffer.ndim = 1 + buffer.obj = self + buffer.shape = self.shape + buffer.strides = self.strides + buffer.suboffsets = NULL + + +cdef class ResizableBuffer(Buffer): + """ + A base class for buffers that can be resized. + """ + + cdef void init_rz(self, const shared_ptr[CResizableBuffer]& buffer): + self.init( buffer) + + def resize(self, int64_t new_size, shrink_to_fit=False): + """ + Resize buffer to indicated size. + + Parameters + ---------- + new_size : int + New size of buffer (padding may be added internally). + shrink_to_fit : bool, default False + If this is true, the buffer is shrunk when new_size is less + than the current size. + If this is false, the buffer is never shrunk. + """ + cdef c_bool c_shrink_to_fit = shrink_to_fit + with nogil: + check_status(( self.buffer.get()) + .Resize(new_size, c_shrink_to_fit)) + + +cdef shared_ptr[CResizableBuffer] _allocate_buffer(CMemoryPool* pool) except *: + with nogil: + return to_shared(GetResultValue(AllocateResizableBuffer(0, pool))) + + +def allocate_buffer(int64_t size, MemoryPool memory_pool=None, + resizable=False): + """ + Allocate a mutable buffer. + + Parameters + ---------- + size : int + Number of bytes to allocate (plus internal padding) + memory_pool : MemoryPool, optional + The pool to allocate memory from. 
+ If not given, the default memory pool is used. + resizable : bool, default False + If true, the returned buffer is resizable. + + Returns + ------- + buffer : Buffer or ResizableBuffer + """ + cdef: + CMemoryPool* cpool = maybe_unbox_memory_pool(memory_pool) + shared_ptr[CResizableBuffer] c_rz_buffer + shared_ptr[CBuffer] c_buffer + + if resizable: + with nogil: + c_rz_buffer = to_shared(GetResultValue( + AllocateResizableBuffer(size, cpool))) + return pyarrow_wrap_resizable_buffer(c_rz_buffer) + else: + with nogil: + c_buffer = to_shared(GetResultValue(AllocateBuffer(size, cpool))) + return pyarrow_wrap_buffer(c_buffer) + + +cdef class BufferOutputStream(NativeFile): + """ + An output stream that writes to a resizable buffer. + + The buffer is produced as a result when ``getvalue()`` is called. + + Examples + -------- + Create an output stream, write data to it and finalize it with + ``getvalue()``: + + >>> import pyarrow as pa + >>> f = pa.BufferOutputStream() + >>> f.write(b'pyarrow.Buffer') + 14 + >>> f.closed + False + >>> f.getvalue() + + >>> f.closed + True + """ + + cdef: + shared_ptr[CResizableBuffer] buffer + + def __cinit__(self, MemoryPool memory_pool=None): + self.buffer = _allocate_buffer(maybe_unbox_memory_pool(memory_pool)) + self.output_stream.reset(new CBufferOutputStream( + self.buffer)) + self.is_writable = True + + def getvalue(self): + """ + Finalize output stream and return result as pyarrow.Buffer. + + Returns + ------- + value : Buffer + """ + with nogil: + check_status(self.output_stream.get().Close()) + return pyarrow_wrap_buffer( self.buffer) + + +cdef class MockOutputStream(NativeFile): + + def __cinit__(self): + self.output_stream.reset(new CMockOutputStream()) + self.is_writable = True + + def size(self): + handle = self.output_stream.get() + return handle.GetExtentBytesWritten() + + +cdef class BufferReader(NativeFile): + """ + Zero-copy reader from objects convertible to Arrow buffer. 
+ + Parameters + ---------- + obj : Python bytes or pyarrow.Buffer + + Examples + -------- + Create an Arrow input stream and inspect it: + + >>> import pyarrow as pa + >>> data = b'reader data' + >>> buf = memoryview(data) + >>> with pa.input_stream(buf) as stream: + ... stream.size() + ... stream.read(6) + ... stream.seek(7) + ... stream.read(15) + ... + 11 + b'reader' + 7 + b'data' + """ + cdef: + Buffer buffer + + # XXX Needed to make numpydoc happy + def __init__(self, obj): + pass + + def __cinit__(self, object obj): + self.buffer = as_buffer(obj) + self.set_random_access_file(shared_ptr[CRandomAccessFile]( + new CBufferReader(self.buffer.buffer))) + self.is_readable = True + + +cdef class CompressedInputStream(NativeFile): + """ + An input stream wrapper which decompresses data on the fly. + + Parameters + ---------- + stream : string, path, pyarrow.NativeFile, or file-like object + Input stream object to wrap with the compression. + compression : str + The compression type ("bz2", "brotli", "gzip", "lz4" or "zstd"). + + Examples + -------- + Create an output stream wich compresses the data: + + >>> import pyarrow as pa + >>> data = b"Compressed stream" + >>> raw = pa.BufferOutputStream() + >>> with pa.CompressedOutputStream(raw, "gzip") as compressed: + ... compressed.write(data) + ... + 17 + + Create an input stream with decompression referencing the + buffer with compressed data: + + >>> cdata = raw.getvalue() + >>> with pa.input_stream(cdata, compression="gzip") as compressed: + ... compressed.read() + ... + b'Compressed stream' + + which actually translates to the use of ``BufferReader``and + ``CompressedInputStream``: + + >>> raw = pa.BufferReader(cdata) + >>> with pa.CompressedInputStream(raw, "gzip") as compressed: + ... compressed.read() + ... 
+ b'Compressed stream' + """ + + def __init__(self, object stream, str compression not None): + cdef: + NativeFile nf + Codec codec = Codec(compression) + shared_ptr[CInputStream] c_reader + shared_ptr[CCompressedInputStream] compressed_stream + nf = get_native_file(stream, False) + c_reader = nf.get_input_stream() + compressed_stream = GetResultValue( + CCompressedInputStream.Make(codec.unwrap(), c_reader) + ) + self.set_input_stream( compressed_stream) + self.is_readable = True + + +cdef class CompressedOutputStream(NativeFile): + """ + An output stream wrapper which compresses data on the fly. + + Parameters + ---------- + stream : string, path, pyarrow.NativeFile, or file-like object + Input stream object to wrap with the compression. + compression : str + The compression type ("bz2", "brotli", "gzip", "lz4" or "zstd"). + + Examples + -------- + Create an output stream wich compresses the data: + + >>> import pyarrow as pa + >>> data = b"Compressed stream" + >>> raw = pa.BufferOutputStream() + >>> with pa.CompressedOutputStream(raw, "gzip") as compressed: + ... compressed.write(data) + ... + 17 + """ + + def __init__(self, object stream, str compression not None): + cdef: + Codec codec = Codec(compression) + shared_ptr[COutputStream] c_writer + shared_ptr[CCompressedOutputStream] compressed_stream + get_writer(stream, &c_writer) + compressed_stream = GetResultValue( + CCompressedOutputStream.Make(codec.unwrap(), c_writer) + ) + self.set_output_stream( compressed_stream) + self.is_writable = True + + +ctypedef CBufferedInputStream* _CBufferedInputStreamPtr +ctypedef CBufferedOutputStream* _CBufferedOutputStreamPtr +ctypedef CRandomAccessFile* _RandomAccessFilePtr + + +cdef class BufferedInputStream(NativeFile): + """ + An input stream that performs buffered reads from + an unbuffered input stream, which can mitigate the overhead + of many small reads in some cases. 
+ + Parameters + ---------- + stream : NativeFile + The input stream to wrap with the buffer + buffer_size : int + Size of the temporary read buffer. + memory_pool : MemoryPool + The memory pool used to allocate the buffer. + """ + + def __init__(self, NativeFile stream, int buffer_size, + MemoryPool memory_pool=None): + cdef shared_ptr[CBufferedInputStream] buffered_stream + + if buffer_size <= 0: + raise ValueError('Buffer size must be larger than zero') + buffered_stream = GetResultValue(CBufferedInputStream.Create( + buffer_size, maybe_unbox_memory_pool(memory_pool), + stream.get_input_stream())) + + self.set_input_stream( buffered_stream) + self.is_readable = True + + def detach(self): + """ + Release the raw InputStream. + Further operations on this stream are invalid. + + Returns + ------- + raw : NativeFile + The underlying raw input stream + """ + cdef: + shared_ptr[CInputStream] c_raw + _CBufferedInputStreamPtr buffered + NativeFile raw + + buffered = dynamic_cast[_CBufferedInputStreamPtr]( + self.input_stream.get()) + assert buffered != nullptr + + with nogil: + c_raw = GetResultValue(buffered.Detach()) + + raw = NativeFile() + raw.is_readable = True + # Find out whether the raw stream is a RandomAccessFile + # or a mere InputStream. This helps us support seek() etc. + # selectively. + if dynamic_cast[_RandomAccessFilePtr](c_raw.get()) != nullptr: + raw.set_random_access_file( + static_pointer_cast[CRandomAccessFile, CInputStream](c_raw)) + else: + raw.set_input_stream(c_raw) + return raw + + +cdef class BufferedOutputStream(NativeFile): + """ + An output stream that performs buffered reads from + an unbuffered output stream, which can mitigate the overhead + of many small writes in some cases. + + Parameters + ---------- + stream : NativeFile + The writable output stream to wrap with the buffer + buffer_size : int + Size of the buffer that should be added. + memory_pool : MemoryPool + The memory pool used to allocate the buffer. 
+ """ + + def __init__(self, NativeFile stream, int buffer_size, + MemoryPool memory_pool=None): + cdef shared_ptr[CBufferedOutputStream] buffered_stream + + if buffer_size <= 0: + raise ValueError('Buffer size must be larger than zero') + buffered_stream = GetResultValue(CBufferedOutputStream.Create( + buffer_size, maybe_unbox_memory_pool(memory_pool), + stream.get_output_stream())) + + self.set_output_stream( buffered_stream) + self.is_writable = True + + def detach(self): + """ + Flush any buffered writes and release the raw OutputStream. + Further operations on this stream are invalid. + + Returns + ------- + raw : NativeFile + The underlying raw output stream. + """ + cdef: + shared_ptr[COutputStream] c_raw + _CBufferedOutputStreamPtr buffered + NativeFile raw + + buffered = dynamic_cast[_CBufferedOutputStreamPtr]( + self.output_stream.get()) + assert buffered != nullptr + + with nogil: + c_raw = GetResultValue(buffered.Detach()) + + raw = NativeFile() + raw.is_writable = True + raw.set_output_stream(c_raw) + return raw + + +cdef void _cb_transform(transform_func, const shared_ptr[CBuffer]& src, + shared_ptr[CBuffer]* dest) except *: + py_dest = transform_func(pyarrow_wrap_buffer(src)) + dest[0] = pyarrow_unwrap_buffer(py_buffer(py_dest)) + + +cdef class TransformInputStream(NativeFile): + """ + Transform an input stream. + + Parameters + ---------- + stream : NativeFile + The stream to transform. + transform_func : callable + The transformation to apply. 
+ """ + + def __init__(self, NativeFile stream, transform_func): + self.set_input_stream(TransformInputStream.make_native( + stream.get_input_stream(), transform_func)) + self.is_readable = True + + @staticmethod + cdef shared_ptr[CInputStream] make_native( + shared_ptr[CInputStream] stream, transform_func) except *: + cdef: + shared_ptr[CInputStream] transform_stream + CTransformInputStreamVTable vtable + + vtable.transform = _cb_transform + return MakeTransformInputStream(stream, move(vtable), + transform_func) + + +class Transcoder: + + def __init__(self, decoder, encoder): + self._decoder = decoder + self._encoder = encoder + + def __call__(self, buf): + final = len(buf) == 0 + return self._encoder.encode(self._decoder.decode(buf, final), final) + + +cdef shared_ptr[function[StreamWrapFunc]] make_streamwrap_func( + src_encoding, dest_encoding) except *: + """ + Create a function that will add a transcoding transformation to a stream. + Data from that stream will be decoded according to ``src_encoding`` and + then re-encoded according to ``dest_encoding``. + The created function can be used to wrap streams. + + Parameters + ---------- + src_encoding : str + The codec to use when reading data. + dest_encoding : str + The codec to use for emitted data. + """ + cdef: + shared_ptr[function[StreamWrapFunc]] empty_func + CTransformInputStreamVTable vtable + + vtable.transform = _cb_transform + src_codec = codecs.lookup(src_encoding) + dest_codec = codecs.lookup(dest_encoding) + return MakeStreamTransformFunc(move(vtable), + Transcoder(src_codec.incrementaldecoder(), + dest_codec.incrementalencoder())) + + +def transcoding_input_stream(stream, src_encoding, dest_encoding): + """ + Add a transcoding transformation to the stream. + Incoming data will be decoded according to ``src_encoding`` and + then re-encoded according to ``dest_encoding``. + + Parameters + ---------- + stream : NativeFile + The stream to which the transformation should be applied. 
+ src_encoding : str + The codec to use when reading data. + dest_encoding : str + The codec to use for emitted data. + """ + src_codec = codecs.lookup(src_encoding) + dest_codec = codecs.lookup(dest_encoding) + if src_codec.name == dest_codec.name: + # Avoid losing performance on no-op transcoding + # (encoding errors won't be detected) + return stream + return TransformInputStream(stream, + Transcoder(src_codec.incrementaldecoder(), + dest_codec.incrementalencoder())) + + +cdef shared_ptr[CInputStream] native_transcoding_input_stream( + shared_ptr[CInputStream] stream, src_encoding, + dest_encoding) except *: + src_codec = codecs.lookup(src_encoding) + dest_codec = codecs.lookup(dest_encoding) + if src_codec.name == dest_codec.name: + # Avoid losing performance on no-op transcoding + # (encoding errors won't be detected) + return stream + return TransformInputStream.make_native( + stream, Transcoder(src_codec.incrementaldecoder(), + dest_codec.incrementalencoder())) + + +def py_buffer(object obj): + """ + Construct an Arrow buffer from a Python bytes-like or buffer-like object + + Parameters + ---------- + obj : object + the object from which the buffer should be constructed. + """ + cdef shared_ptr[CBuffer] buf + buf = GetResultValue(PyBuffer.FromPyObject(obj)) + return pyarrow_wrap_buffer(buf) + + +def foreign_buffer(address, size, base=None): + """ + Construct an Arrow buffer with the given *address* and *size*. + + The buffer will be optionally backed by the Python *base* object, if given. + The *base* object will be kept alive as long as this buffer is alive, + including across language boundaries (for example if the buffer is + referenced by C++ code). + + Parameters + ---------- + address : int + The starting address of the buffer. The address can + refer to both device or host memory but it must be + accessible from device after mapping it with + `get_device_address` method. + size : int + The size of device buffer in bytes. 
+ base : {None, object} + Object that owns the referenced memory. + """ + cdef: + uintptr_t c_addr = address + int64_t c_size = size + shared_ptr[CBuffer] buf + + check_status(PyForeignBuffer.Make( c_addr, c_size, + base, &buf)) + return pyarrow_wrap_buffer(buf) + + +def as_buffer(object o): + if isinstance(o, Buffer): + return o + return py_buffer(o) + + +cdef shared_ptr[CBuffer] as_c_buffer(object o) except *: + cdef shared_ptr[CBuffer] buf + if isinstance(o, Buffer): + buf = ( o).buffer + if buf == nullptr: + raise ValueError("got null buffer") + else: + buf = GetResultValue(PyBuffer.FromPyObject(o)) + return buf + + +cdef NativeFile get_native_file(object source, c_bool use_memory_map): + try: + source_path = _stringify_path(source) + except TypeError: + if isinstance(source, Buffer): + source = BufferReader(source) + elif not isinstance(source, NativeFile) and hasattr(source, 'read'): + # Optimistically hope this is file-like + source = PythonFile(source, mode='r') + else: + if use_memory_map: + source = memory_map(source_path, mode='r') + else: + source = OSFile(source_path, mode='r') + + return source + + +cdef get_reader(object source, c_bool use_memory_map, + shared_ptr[CRandomAccessFile]* reader): + cdef NativeFile nf + + nf = get_native_file(source, use_memory_map) + reader[0] = nf.get_random_access_file() + + +cdef get_input_stream(object source, c_bool use_memory_map, + shared_ptr[CInputStream]* out): + """ + Like get_reader(), but can automatically decompress, and returns + an InputStream. 
+ """ + cdef: + NativeFile nf + Codec codec + shared_ptr[CInputStream] input_stream + + try: + codec = Codec.detect(source) + except TypeError: + codec = None + + nf = get_native_file(source, use_memory_map) + input_stream = nf.get_input_stream() + + # codec is None if compression can't be detected + if codec is not None: + input_stream = GetResultValue( + CCompressedInputStream.Make(codec.unwrap(), input_stream) + ) + + out[0] = input_stream + + +cdef get_writer(object source, shared_ptr[COutputStream]* writer): + cdef NativeFile nf + + try: + source_path = _stringify_path(source) + except TypeError: + if not isinstance(source, NativeFile) and hasattr(source, 'write'): + # Optimistically hope this is file-like + source = PythonFile(source, mode='w') + else: + source = OSFile(source_path, mode='w') + + if isinstance(source, NativeFile): + nf = source + writer[0] = nf.get_output_stream() + else: + raise TypeError('Unable to write to object of type: {0}' + .format(type(source))) + + +# --------------------------------------------------------------------- + + +def _detect_compression(path): + if isinstance(path, str): + if path.endswith('.bz2'): + return 'bz2' + elif path.endswith('.gz'): + return 'gzip' + elif path.endswith('.lz4'): + return 'lz4' + elif path.endswith('.zst'): + return 'zstd' + + +cdef CCompressionType _ensure_compression(str name) except *: + uppercase = name.upper() + if uppercase == 'BZ2': + return CCompressionType_BZ2 + elif uppercase == 'GZIP': + return CCompressionType_GZIP + elif uppercase == 'BROTLI': + return CCompressionType_BROTLI + elif uppercase == 'LZ4' or uppercase == 'LZ4_FRAME': + return CCompressionType_LZ4_FRAME + elif uppercase == 'LZ4_RAW': + return CCompressionType_LZ4 + elif uppercase == 'SNAPPY': + return CCompressionType_SNAPPY + elif uppercase == 'ZSTD': + return CCompressionType_ZSTD + else: + raise ValueError('Invalid value for compression: {!r}'.format(name)) + + +cdef class CacheOptions(_Weakrefable): + """ + Cache 
options for a pre-buffered fragment scan. + + Parameters + ---------- + hole_size_limit : int, default 8KiB + The maximum distance in bytes between two consecutive ranges; beyond + this value, ranges are not combined. + range_size_limit : int, default 32MiB + The maximum size in bytes of a combined range; if combining two + consecutive ranges would produce a range of a size greater than this, + they are not combined + lazy : bool, default True + lazy = false: request all byte ranges when PreBuffer or WillNeed is called. + lazy = True, prefetch_limit = 0: request merged byte ranges only after the reader + needs them. + lazy = True, prefetch_limit = k: prefetch up to k merged byte ranges ahead of the + range that is currently being read. + prefetch_limit : int, default 0 + The maximum number of ranges to be prefetched. This is only used for + lazy cache to asynchronously read some ranges after reading the target + range. + """ + + def __init__(self, *, hole_size_limit=None, range_size_limit=None, lazy=None, prefetch_limit=None): + self.wrapped = CCacheOptions.LazyDefaults() + if hole_size_limit is not None: + self.hole_size_limit = hole_size_limit + if range_size_limit is not None: + self.range_size_limit = range_size_limit + if lazy is not None: + self.lazy = lazy + if prefetch_limit is not None: + self.prefetch_limit = prefetch_limit + + cdef void init(self, CCacheOptions options): + self.wrapped = options + + cdef inline CCacheOptions unwrap(self): + return self.wrapped + + @staticmethod + cdef wrap(CCacheOptions options): + self = CacheOptions() + self.init(options) + return self + + @property + def hole_size_limit(self): + return self.wrapped.hole_size_limit + + @hole_size_limit.setter + def hole_size_limit(self, hole_size_limit): + self.wrapped.hole_size_limit = hole_size_limit + + @property + def range_size_limit(self): + return self.wrapped.range_size_limit + + @range_size_limit.setter + def range_size_limit(self, range_size_limit): + 
self.wrapped.range_size_limit = range_size_limit + + @property + def lazy(self): + return self.wrapped.lazy + + @lazy.setter + def lazy(self, lazy): + self.wrapped.lazy = lazy + + @property + def prefetch_limit(self): + return self.wrapped.prefetch_limit + + @prefetch_limit.setter + def prefetch_limit(self, prefetch_limit): + self.wrapped.prefetch_limit = prefetch_limit + + def __eq__(self, CacheOptions other): + try: + return self.unwrap().Equals(other.unwrap()) + except TypeError: + return False + + @staticmethod + def from_network_metrics(time_to_first_byte_millis, transfer_bandwidth_mib_per_sec, + ideal_bandwidth_utilization_frac=0.9, max_ideal_request_size_mib=64): + """ + Create suiteable CacheOptions based on provided network metrics. + + Typically this will be used with object storage solutions like Amazon S3, + Google Cloud Storage and Azure Blob Storage. + + Parameters + ---------- + time_to_first_byte_millis : int + Seek-time or Time-To-First-Byte (TTFB) in milliseconds, also called call + setup latency of a new read request. The value is a positive integer. + transfer_bandwidth_mib_per_sec : int + Data transfer Bandwidth (BW) in MiB/sec (per connection). The value is a positive + integer. + ideal_bandwidth_utilization_frac : int, default 0.9 + Transfer bandwidth utilization fraction (per connection) to maximize the net + data load. The value is a positive float less than 1. + max_ideal_request_size_mib : int, default 64 + The maximum single data request size (in MiB) to maximize the net data load. + + Returns + ------- + CacheOptions + """ + return CacheOptions.wrap(CCacheOptions.MakeFromNetworkMetrics( + time_to_first_byte_millis, transfer_bandwidth_mib_per_sec, + ideal_bandwidth_utilization_frac, max_ideal_request_size_mib)) + + @staticmethod + @binding(True) # Required for Cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. 
+ return CacheOptions(**kwargs) + + def __reduce__(self): + kwargs = dict( + hole_size_limit=self.hole_size_limit, + range_size_limit=self.range_size_limit, + lazy=self.lazy, + prefetch_limit=self.prefetch_limit, + ) + return CacheOptions._reconstruct, (kwargs,) + + +cdef class Codec(_Weakrefable): + """ + Compression codec. + + Parameters + ---------- + compression : str + Type of compression codec to initialize, valid values are: 'gzip', + 'bz2', 'brotli', 'lz4' (or 'lz4_frame'), 'lz4_raw', 'zstd' and + 'snappy'. + compression_level : int, None + Optional parameter specifying how aggressively to compress. The + possible ranges and effect of this parameter depend on the specific + codec chosen. Higher values compress more but typically use more + resources (CPU/RAM). Some codecs support negative values. + + gzip + The compression_level maps to the memlevel parameter of + deflateInit2. Higher levels use more RAM but are faster + and should have higher compression ratios. + + bz2 + The compression level maps to the blockSize100k parameter of + the BZ2_bzCompressInit function. Higher levels use more RAM + but are faster and should have higher compression ratios. + + brotli + The compression level maps to the BROTLI_PARAM_QUALITY + parameter. Higher values are slower and should have higher + compression ratios. + + lz4/lz4_frame/lz4_raw + The compression level parameter is not supported and must + be None + + zstd + The compression level maps to the compressionLevel parameter + of ZSTD_initCStream. Negative values are supported. Higher + values are slower and should have higher compression ratios. + + snappy + The compression level parameter is not supported and must + be None + + + Raises + ------ + ValueError + If invalid compression value is passed. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> pa.Codec.is_available('gzip') + True + >>> codec = pa.Codec('gzip') + >>> codec.name + 'gzip' + >>> codec.compression_level + 9 + """ + + def __init__(self, str compression not None, compression_level=None): + cdef CCompressionType typ = _ensure_compression(compression) + if compression_level is not None: + self.wrapped = shared_ptr[CCodec](move(GetResultValue( + CCodec.CreateWithLevel(typ, compression_level)))) + else: + self.wrapped = shared_ptr[CCodec](move(GetResultValue( + CCodec.Create(typ)))) + + cdef inline CCodec* unwrap(self) nogil: + return self.wrapped.get() + + @staticmethod + def detect(path): + """ + Detect and instantiate compression codec based on file extension. + + Parameters + ---------- + path : str, path-like + File-path to detect compression from. + + Raises + ------ + TypeError + If the passed value is not path-like. + ValueError + If the compression can't be detected from the path. + + Returns + ------- + Codec + """ + return Codec(_detect_compression(_stringify_path(path))) + + @staticmethod + def is_available(str compression not None): + """ + Returns whether the compression support has been built and enabled. + + Parameters + ---------- + compression : str + Type of compression codec, + refer to Codec docstring for a list of supported ones. + + Returns + ------- + bool + """ + cdef CCompressionType typ = _ensure_compression(compression) + return CCodec.IsAvailable(typ) + + @staticmethod + def supports_compression_level(str compression not None): + """ + Returns true if the compression level parameter is supported + for the given codec. + + Parameters + ---------- + compression : str + Type of compression codec, + refer to Codec docstring for a list of supported ones. 
+ """ + cdef CCompressionType typ = _ensure_compression(compression) + return CCodec.SupportsCompressionLevel(typ) + + @staticmethod + def default_compression_level(str compression not None): + """ + Returns the compression level that Arrow will use for the codec if + None is specified. + + Parameters + ---------- + compression : str + Type of compression codec, + refer to Codec docstring for a list of supported ones. + """ + cdef CCompressionType typ = _ensure_compression(compression) + return GetResultValue(CCodec.DefaultCompressionLevel(typ)) + + @staticmethod + def minimum_compression_level(str compression not None): + """ + Returns the smallest valid value for the compression level + + Parameters + ---------- + compression : str + Type of compression codec, + refer to Codec docstring for a list of supported ones. + """ + cdef CCompressionType typ = _ensure_compression(compression) + return GetResultValue(CCodec.MinimumCompressionLevel(typ)) + + @staticmethod + def maximum_compression_level(str compression not None): + """ + Returns the largest valid value for the compression level + + Parameters + ---------- + compression : str + Type of compression codec, + refer to Codec docstring for a list of supported ones. + """ + cdef CCompressionType typ = _ensure_compression(compression) + return GetResultValue(CCodec.MaximumCompressionLevel(typ)) + + @property + def name(self): + """Returns the name of the codec""" + return frombytes(self.unwrap().name()) + + @property + def compression_level(self): + """Returns the compression level parameter of the codec""" + if self.name == 'snappy': + return None + return self.unwrap().compression_level() + + def compress(self, object buf, asbytes=False, memory_pool=None): + """ + Compress data from buffer-like object. 
+ + Parameters + ---------- + buf : pyarrow.Buffer, bytes, or other object supporting buffer protocol + asbytes : bool, default False + Return result as Python bytes object, otherwise Buffer + memory_pool : MemoryPool, default None + Memory pool to use for buffer allocations, if any + + Returns + ------- + compressed : pyarrow.Buffer or bytes (if asbytes=True) + """ + cdef: + shared_ptr[CBuffer] owned_buf + CBuffer* c_buf + PyObject* pyobj + ResizableBuffer out_buf + int64_t max_output_size + int64_t output_length + uint8_t* output_buffer = NULL + + owned_buf = as_c_buffer(buf) + c_buf = owned_buf.get() + + max_output_size = self.wrapped.get().MaxCompressedLen( + c_buf.size(), c_buf.data() + ) + + if asbytes: + pyobj = PyBytes_FromStringAndSizeNative(NULL, max_output_size) + output_buffer = cp.PyBytes_AS_STRING( pyobj) + else: + out_buf = allocate_buffer( + max_output_size, memory_pool=memory_pool, resizable=True + ) + output_buffer = out_buf.buffer.get().mutable_data() + + with nogil: + output_length = GetResultValue( + self.unwrap().Compress( + c_buf.size(), + c_buf.data(), + max_output_size, + output_buffer + ) + ) + + if asbytes: + cp._PyBytes_Resize(&pyobj, output_length) + return PyObject_to_object(pyobj) + else: + out_buf.resize(output_length) + return out_buf + + def decompress(self, object buf, decompressed_size=None, asbytes=False, + memory_pool=None): + """ + Decompress data from buffer-like object. + + Parameters + ---------- + buf : pyarrow.Buffer, bytes, or memoryview-compatible object + decompressed_size : int, default None + Size of the decompressed result + asbytes : boolean, default False + Return result as Python bytes object, otherwise Buffer + memory_pool : MemoryPool, default None + Memory pool to use for buffer allocations, if any. 
+ + Returns + ------- + uncompressed : pyarrow.Buffer or bytes (if asbytes=True) + """ + cdef: + shared_ptr[CBuffer] owned_buf + CBuffer* c_buf + Buffer out_buf + int64_t output_size + uint8_t* output_buffer = NULL + + owned_buf = as_c_buffer(buf) + c_buf = owned_buf.get() + + if decompressed_size is None: + raise ValueError( + "Must pass decompressed_size" + ) + + output_size = decompressed_size + + if asbytes: + pybuf = cp.PyBytes_FromStringAndSize(NULL, output_size) + output_buffer = cp.PyBytes_AS_STRING(pybuf) + else: + out_buf = allocate_buffer(output_size, memory_pool=memory_pool) + output_buffer = out_buf.buffer.get().mutable_data() + + with nogil: + GetResultValue( + self.unwrap().Decompress( + c_buf.size(), + c_buf.data(), + output_size, + output_buffer + ) + ) + + return pybuf if asbytes else out_buf + + def __repr__(self): + name = f"pyarrow.{self.__class__.__name__}" + return (f"<{name} " + f"name={self.name} " + f"compression_level={self.compression_level}>") + + +def compress(object buf, codec='lz4', asbytes=False, memory_pool=None): + """ + Compress data from buffer-like object. + + Parameters + ---------- + buf : pyarrow.Buffer, bytes, or other object supporting buffer protocol + codec : str, default 'lz4' + Compression codec. + Supported types: {'brotli, 'gzip', 'lz4', 'lz4_raw', 'snappy', 'zstd'} + asbytes : bool, default False + Return result as Python bytes object, otherwise Buffer. + memory_pool : MemoryPool, default None + Memory pool to use for buffer allocations, if any. + + Returns + ------- + compressed : pyarrow.Buffer or bytes (if asbytes=True) + """ + cdef Codec coder = Codec(codec) + return coder.compress(buf, asbytes=asbytes, memory_pool=memory_pool) + + +def decompress(object buf, decompressed_size=None, codec='lz4', + asbytes=False, memory_pool=None): + """ + Decompress data from buffer-like object. + + Parameters + ---------- + buf : pyarrow.Buffer, bytes, or memoryview-compatible object + Input object to decompress data from. 
+ decompressed_size : int, default None + Size of the decompressed result + codec : str, default 'lz4' + Compression codec. + Supported types: {'brotli, 'gzip', 'lz4', 'lz4_raw', 'snappy', 'zstd'} + asbytes : bool, default False + Return result as Python bytes object, otherwise Buffer. + memory_pool : MemoryPool, default None + Memory pool to use for buffer allocations, if any. + + Returns + ------- + uncompressed : pyarrow.Buffer or bytes (if asbytes=True) + """ + cdef Codec decoder = Codec(codec) + return decoder.decompress(buf, asbytes=asbytes, memory_pool=memory_pool, + decompressed_size=decompressed_size) + + +def input_stream(source, compression='detect', buffer_size=None): + """ + Create an Arrow input stream. + + Parameters + ---------- + source : str, Path, buffer, or file-like object + The source to open for reading. + compression : str optional, default 'detect' + The compression algorithm to use for on-the-fly decompression. + If "detect" and source is a file path, then compression will be + chosen based on the file extension. + If None, no compression will be applied. + Otherwise, a well-known algorithm name must be supplied (e.g. "gzip"). + buffer_size : int, default None + If None or 0, no buffering will happen. Otherwise the size of the + temporary read buffer. + + Examples + -------- + Create a readable BufferReader (NativeFile) from a Buffer or a memoryview object: + + >>> import pyarrow as pa + >>> buf = memoryview(b"some data") + >>> with pa.input_stream(buf) as stream: + ... stream.read(4) + ... + b'some' + + Create a readable OSFile (NativeFile) from a string or file path: + + >>> import gzip + >>> with gzip.open('example.gz', 'wb') as f: + ... f.write(b'some data') + ... + 9 + >>> with pa.input_stream('example.gz') as stream: + ... stream.read() + ... + b'some data' + + Create a readable PythonFile (NativeFile) from a a Python file object: + + >>> with open('example.txt', mode='w') as f: + ... f.write('some text') + ... 
+ 9 + >>> with pa.input_stream('example.txt') as stream: + ... stream.read(6) + ... + b'some t' + """ + cdef NativeFile stream + + try: + source_path = _stringify_path(source) + except TypeError: + source_path = None + + if isinstance(source, NativeFile): + stream = source + elif source_path is not None: + stream = OSFile(source_path, 'r') + elif isinstance(source, (Buffer, memoryview)): + stream = BufferReader(as_buffer(source)) + elif (hasattr(source, 'read') and + hasattr(source, 'close') and + hasattr(source, 'closed')): + stream = PythonFile(source, 'r') + else: + raise TypeError("pa.input_stream() called with instance of '{}'" + .format(source.__class__)) + + if compression == 'detect': + # detect for OSFile too + compression = _detect_compression(source_path) + + if buffer_size is not None and buffer_size != 0: + stream = BufferedInputStream(stream, buffer_size) + + if compression is not None: + stream = CompressedInputStream(stream, compression) + + return stream + + +def output_stream(source, compression='detect', buffer_size=None): + """ + Create an Arrow output stream. + + Parameters + ---------- + source : str, Path, buffer, file-like object + The source to open for writing. + compression : str optional, default 'detect' + The compression algorithm to use for on-the-fly compression. + If "detect" and source is a file path, then compression will be + chosen based on the file extension. + If None, no compression will be applied. + Otherwise, a well-known algorithm name must be supplied (e.g. "gzip"). + buffer_size : int, default None + If None or 0, no buffering will happen. Otherwise the size of the + temporary write buffer. + + Examples + -------- + Create a writable NativeFile from a pyarrow Buffer: + + >>> import pyarrow as pa + >>> data = b"buffer data" + >>> empty_obj = bytearray(11) + >>> buf = pa.py_buffer(empty_obj) + >>> with pa.output_stream(buf) as stream: + ... stream.write(data) + ... + 11 + >>> with pa.input_stream(buf) as stream: + ... 
stream.read(6) + ... + b'buffer' + + or from a memoryview object: + + >>> buf = memoryview(empty_obj) + >>> with pa.output_stream(buf) as stream: + ... stream.write(data) + ... + 11 + >>> with pa.input_stream(buf) as stream: + ... stream.read() + ... + b'buffer data' + + Create a writable NativeFile from a string or file path: + + >>> with pa.output_stream('example_second.txt') as stream: + ... stream.write(b'Write some data') + ... + 15 + >>> with pa.input_stream('example_second.txt') as stream: + ... stream.read() + ... + b'Write some data' + """ + cdef NativeFile stream + + try: + source_path = _stringify_path(source) + except TypeError: + source_path = None + + if isinstance(source, NativeFile): + stream = source + elif source_path is not None: + stream = OSFile(source_path, 'w') + elif isinstance(source, (Buffer, memoryview)): + stream = FixedSizeBufferWriter(as_buffer(source)) + elif (hasattr(source, 'write') and + hasattr(source, 'close') and + hasattr(source, 'closed')): + stream = PythonFile(source, 'w') + else: + raise TypeError("pa.output_stream() called with instance of '{}'" + .format(source.__class__)) + + if compression == 'detect': + compression = _detect_compression(source_path) + + if buffer_size is not None and buffer_size != 0: + stream = BufferedOutputStream(stream, buffer_size) + + if compression is not None: + stream = CompressedOutputStream(stream, compression) + + return stream diff --git a/parrot/lib/python3.10/site-packages/pyarrow/ipc.py b/parrot/lib/python3.10/site-packages/pyarrow/ipc.py new file mode 100644 index 0000000000000000000000000000000000000000..523196e1e33894871319462cdd6c72bd85830cf0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/ipc.py @@ -0,0 +1,285 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Arrow file and stream reader/writer classes, and other messaging tools + +import os + +import pyarrow as pa + +from pyarrow.lib import (IpcReadOptions, IpcWriteOptions, ReadStats, WriteStats, # noqa + Message, MessageReader, + RecordBatchReader, _ReadPandasMixin, + MetadataVersion, + read_message, read_record_batch, read_schema, + read_tensor, write_tensor, + get_record_batch_size, get_tensor_size) +import pyarrow.lib as lib + + +class RecordBatchStreamReader(lib._RecordBatchStreamReader): + """ + Reader for the Arrow streaming binary format. + + Parameters + ---------- + source : bytes/buffer-like, pyarrow.NativeFile, or file-like Python object + Either an in-memory buffer, or a readable file object. + If you want to use memory map use MemoryMappedFile as source. + options : pyarrow.ipc.IpcReadOptions + Options for IPC deserialization. + If None, default values will be used. + memory_pool : MemoryPool, default None + If None, default memory pool is used. + """ + + def __init__(self, source, *, options=None, memory_pool=None): + options = _ensure_default_ipc_read_options(options) + self._open(source, options=options, memory_pool=memory_pool) + + +_ipc_writer_class_doc = """\ +Parameters +---------- +sink : str, pyarrow.NativeFile, or file-like Python object + Either a file path, or a writable file object. +schema : pyarrow.Schema + The Arrow schema for data to be written to the file. 
+use_legacy_format : bool, default None + Deprecated in favor of setting options. Cannot be provided with + options. + + If None, False will be used unless this default is overridden by + setting the environment variable ARROW_PRE_0_15_IPC_FORMAT=1 +options : pyarrow.ipc.IpcWriteOptions + Options for IPC serialization. + + If None, default values will be used: the legacy format will not + be used unless overridden by setting the environment variable + ARROW_PRE_0_15_IPC_FORMAT=1, and the V5 metadata version will be + used unless overridden by setting the environment variable + ARROW_PRE_1_0_METADATA_VERSION=1.""" + + +class RecordBatchStreamWriter(lib._RecordBatchStreamWriter): + __doc__ = """Writer for the Arrow streaming binary format + +{}""".format(_ipc_writer_class_doc) + + def __init__(self, sink, schema, *, use_legacy_format=None, options=None): + options = _get_legacy_format_default(use_legacy_format, options) + self._open(sink, schema, options=options) + + +class RecordBatchFileReader(lib._RecordBatchFileReader): + """ + Class for reading Arrow record batch data from the Arrow binary file format + + Parameters + ---------- + source : bytes/buffer-like, pyarrow.NativeFile, or file-like Python object + Either an in-memory buffer, or a readable file object. + If you want to use memory map use MemoryMappedFile as source. + footer_offset : int, default None + If the file is embedded in some larger file, this is the byte offset to + the very end of the file data + options : pyarrow.ipc.IpcReadOptions + Options for IPC serialization. + If None, default values will be used. + memory_pool : MemoryPool, default None + If None, default memory pool is used. 
+ """ + + def __init__(self, source, footer_offset=None, *, options=None, + memory_pool=None): + options = _ensure_default_ipc_read_options(options) + self._open(source, footer_offset=footer_offset, + options=options, memory_pool=memory_pool) + + +class RecordBatchFileWriter(lib._RecordBatchFileWriter): + + __doc__ = """Writer to create the Arrow binary file format + +{}""".format(_ipc_writer_class_doc) + + def __init__(self, sink, schema, *, use_legacy_format=None, options=None): + options = _get_legacy_format_default(use_legacy_format, options) + self._open(sink, schema, options=options) + + +def _get_legacy_format_default(use_legacy_format, options): + if use_legacy_format is not None and options is not None: + raise ValueError( + "Can provide at most one of options and use_legacy_format") + elif options: + if not isinstance(options, IpcWriteOptions): + raise TypeError("expected IpcWriteOptions, got {}" + .format(type(options))) + return options + + metadata_version = MetadataVersion.V5 + if use_legacy_format is None: + use_legacy_format = \ + bool(int(os.environ.get('ARROW_PRE_0_15_IPC_FORMAT', '0'))) + if bool(int(os.environ.get('ARROW_PRE_1_0_METADATA_VERSION', '0'))): + metadata_version = MetadataVersion.V4 + return IpcWriteOptions(use_legacy_format=use_legacy_format, + metadata_version=metadata_version) + + +def _ensure_default_ipc_read_options(options): + if options and not isinstance(options, IpcReadOptions): + raise TypeError( + "expected IpcReadOptions, got {}".format(type(options)) + ) + return options or IpcReadOptions() + + +def new_stream(sink, schema, *, use_legacy_format=None, options=None): + return RecordBatchStreamWriter(sink, schema, + use_legacy_format=use_legacy_format, + options=options) + + +new_stream.__doc__ = """\ +Create an Arrow columnar IPC stream writer instance + +{} + +Returns +------- +writer : RecordBatchStreamWriter + A writer for the given sink +""".format(_ipc_writer_class_doc) + + +def open_stream(source, *, options=None, 
memory_pool=None): + """ + Create reader for Arrow streaming format. + + Parameters + ---------- + source : bytes/buffer-like, pyarrow.NativeFile, or file-like Python object + Either an in-memory buffer, or a readable file object. + options : pyarrow.ipc.IpcReadOptions + Options for IPC serialization. + If None, default values will be used. + memory_pool : MemoryPool, default None + If None, default memory pool is used. + + Returns + ------- + reader : RecordBatchStreamReader + A reader for the given source + """ + return RecordBatchStreamReader(source, options=options, + memory_pool=memory_pool) + + +def new_file(sink, schema, *, use_legacy_format=None, options=None): + return RecordBatchFileWriter(sink, schema, + use_legacy_format=use_legacy_format, + options=options) + + +new_file.__doc__ = """\ +Create an Arrow columnar IPC file writer instance + +{} + +Returns +------- +writer : RecordBatchFileWriter + A writer for the given sink +""".format(_ipc_writer_class_doc) + + +def open_file(source, footer_offset=None, *, options=None, memory_pool=None): + """ + Create reader for Arrow file format. + + Parameters + ---------- + source : bytes/buffer-like, pyarrow.NativeFile, or file-like Python object + Either an in-memory buffer, or a readable file object. + footer_offset : int, default None + If the file is embedded in some larger file, this is the byte offset to + the very end of the file data. + options : pyarrow.ipc.IpcReadOptions + Options for IPC serialization. + If None, default values will be used. + memory_pool : MemoryPool, default None + If None, default memory pool is used. + + Returns + ------- + reader : RecordBatchFileReader + A reader for the given source + """ + return RecordBatchFileReader( + source, footer_offset=footer_offset, + options=options, memory_pool=memory_pool) + + +def serialize_pandas(df, *, nthreads=None, preserve_index=None): + """ + Serialize a pandas DataFrame into a buffer protocol compatible object. 
+ + Parameters + ---------- + df : pandas.DataFrame + nthreads : int, default None + Number of threads to use for conversion to Arrow, default all CPUs. + preserve_index : bool, default None + The default of None will store the index as a column, except for + RangeIndex which is stored as metadata only. If True, always + preserve the pandas index data as a column. If False, no index + information is saved and the result will have a default RangeIndex. + + Returns + ------- + buf : buffer + An object compatible with the buffer protocol. + """ + batch = pa.RecordBatch.from_pandas(df, nthreads=nthreads, + preserve_index=preserve_index) + sink = pa.BufferOutputStream() + with pa.RecordBatchStreamWriter(sink, batch.schema) as writer: + writer.write_batch(batch) + return sink.getvalue() + + +def deserialize_pandas(buf, *, use_threads=True): + """Deserialize a buffer protocol compatible object into a pandas DataFrame. + + Parameters + ---------- + buf : buffer + An object compatible with the buffer protocol. + use_threads : bool, default True + Whether to parallelize the conversion using multiple threads. 
+ + Returns + ------- + df : pandas.DataFrame + The buffer deserialized as pandas DataFrame + """ + buffer_reader = pa.BufferReader(buf) + with pa.RecordBatchStreamReader(buffer_reader) as reader: + table = reader.read_all() + return table.to_pandas(use_threads=use_threads) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/lib.h b/parrot/lib/python3.10/site-packages/pyarrow/lib.h new file mode 100644 index 0000000000000000000000000000000000000000..f32cbbe7cd6b8cc13f97b3839e68e54c69bea447 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/lib.h @@ -0,0 +1,83 @@ +/* Generated by Cython 3.0.10 */ + +#ifndef __PYX_HAVE__pyarrow__lib +#define __PYX_HAVE__pyarrow__lib + +#include "Python.h" + +#ifndef __PYX_HAVE_API__pyarrow__lib + +#ifdef CYTHON_EXTERN_C + #undef __PYX_EXTERN_C + #define __PYX_EXTERN_C CYTHON_EXTERN_C +#elif defined(__PYX_EXTERN_C) + #ifdef _MSC_VER + #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.") + #else + #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead. 
+ #endif +#else + #define __PYX_EXTERN_C extern "C++" +#endif + +#ifndef DL_IMPORT + #define DL_IMPORT(_T) _T +#endif + +__PYX_EXTERN_C PyObject *pyarrow_wrap_buffer(std::shared_ptr< arrow::Buffer> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_resizable_buffer(std::shared_ptr< arrow::ResizableBuffer> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_data_type(std::shared_ptr< arrow::DataType> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_field(std::shared_ptr< arrow::Field> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_schema(std::shared_ptr< arrow::Schema> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_scalar(std::shared_ptr< arrow::Scalar> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_array(std::shared_ptr< arrow::Array> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_chunked_array(std::shared_ptr< arrow::ChunkedArray> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_coo_tensor(std::shared_ptr< arrow::SparseCOOTensor> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csc_matrix(std::shared_ptr< arrow::SparseCSCMatrix> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csf_tensor(std::shared_ptr< arrow::SparseCSFTensor> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csr_matrix(std::shared_ptr< arrow::SparseCSRMatrix> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_tensor(std::shared_ptr< arrow::Tensor> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_batch(std::shared_ptr< arrow::RecordBatch> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_table(std::shared_ptr< arrow::Table> const &); +__PYX_EXTERN_C std::shared_ptr< arrow::Buffer> pyarrow_unwrap_buffer(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::DataType> pyarrow_unwrap_data_type(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Field> pyarrow_unwrap_field(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Schema> pyarrow_unwrap_schema(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Scalar> pyarrow_unwrap_scalar(PyObject *); +__PYX_EXTERN_C 
std::shared_ptr< arrow::Array> pyarrow_unwrap_array(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::ChunkedArray> pyarrow_unwrap_chunked_array(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::SparseCOOTensor> pyarrow_unwrap_sparse_coo_tensor(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSCMatrix> pyarrow_unwrap_sparse_csc_matrix(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSFTensor> pyarrow_unwrap_sparse_csf_tensor(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSRMatrix> pyarrow_unwrap_sparse_csr_matrix(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Tensor> pyarrow_unwrap_tensor(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::RecordBatch> pyarrow_unwrap_batch(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Table> pyarrow_unwrap_table(PyObject *); + +#endif /* !__PYX_HAVE_API__pyarrow__lib */ + +/* WARNING: the interface of the module init function changed in CPython 3.5. */ +/* It now returns a PyModuleDef instance instead of a PyModule instance. 
*/ + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC initlib(void); +#else +/* WARNING: Use PyImport_AppendInittab("lib", PyInit_lib) instead of calling PyInit_lib directly from Python 3.5 */ +PyMODINIT_FUNC PyInit_lib(void); + +#if PY_VERSION_HEX >= 0x03050000 && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 201402L)) +#if defined(__cplusplus) && __cplusplus >= 201402L +[[deprecated("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly.")]] inline +#elif defined(__GNUC__) || defined(__clang__) +__attribute__ ((__deprecated__("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly."), __unused__)) __inline__ +#elif defined(_MSC_VER) +__declspec(deprecated("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly.")) __inline +#endif +static PyObject* __PYX_WARN_IF_PyInit_lib_INIT_CALLED(PyObject* res) { + return res; +} +#define PyInit_lib() __PYX_WARN_IF_PyInit_lib_INIT_CALLED(PyInit_lib()) +#endif +#endif + +#endif /* !__PYX_HAVE__pyarrow__lib */ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/lib.pxd b/parrot/lib/python3.10/site-packages/pyarrow/lib.pxd new file mode 100644 index 0000000000000000000000000000000000000000..082d8470cdbb073a850d3418802d4fd179fd8478 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/lib.pxd @@ -0,0 +1,728 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from cpython cimport PyObject +from libcpp cimport nullptr, bool as c_bool +from libcpp.cast cimport dynamic_cast +from libcpp.memory cimport dynamic_pointer_cast +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport * + +# Will be available in Cython 3, not backported +# ref: https://github.com/cython/cython/issues/3293#issuecomment-1223058101 +cdef extern from "" namespace "std" nogil: + cdef cppclass nullopt_t: + nullopt_t() + + cdef nullopt_t nullopt + + cdef cppclass optional[T]: + ctypedef T value_type + optional() + optional(nullopt_t) + optional(optional&) except + + optional(T&) except + + c_bool has_value() + T& value() + T& value_or[U](U& default_value) + void swap(optional&) + void reset() + T& emplace(...) + T& operator*() + # T* operator->() # Not Supported + optional& operator=(optional&) + optional& operator=[U](U&) + c_bool operator bool() + c_bool operator!() + c_bool operator==[U](optional&, U&) + c_bool operator!=[U](optional&, U&) + c_bool operator<[U](optional&, U&) + c_bool operator>[U](optional&, U&) + c_bool operator<=[U](optional&, U&) + c_bool operator>=[U](optional&, U&) + + optional[T] make_optional[T](...) 
except + + +cdef extern from "Python.h": + int PySlice_Check(object) + + +cdef int check_status(const CStatus& status) except -1 nogil +cdef object convert_status(const CStatus& status) + + +cdef class _Weakrefable: + cdef object __weakref__ + + +cdef class IpcWriteOptions(_Weakrefable): + cdef: + CIpcWriteOptions c_options + + +cdef class IpcReadOptions(_Weakrefable): + cdef: + CIpcReadOptions c_options + + +cdef class Message(_Weakrefable): + cdef: + unique_ptr[CMessage] message + + +cdef class MemoryPool(_Weakrefable): + cdef: + CMemoryPool* pool + + cdef void init(self, CMemoryPool* pool) + + +cdef CMemoryPool* maybe_unbox_memory_pool(MemoryPool memory_pool) + + +cdef object box_memory_pool(CMemoryPool* pool) + + +cdef class DataType(_Weakrefable): + cdef: + shared_ptr[CDataType] sp_type + CDataType* type + bytes pep3118_format + + cdef void init(self, const shared_ptr[CDataType]& type) except * + cpdef Field field(self, i) + + +cdef class ListType(DataType): + cdef: + const CListType* list_type + + +cdef class LargeListType(DataType): + cdef: + const CLargeListType* list_type + + +cdef class ListViewType(DataType): + cdef: + const CListViewType* list_view_type + + +cdef class LargeListViewType(DataType): + cdef: + const CLargeListViewType* list_view_type + + +cdef class MapType(DataType): + cdef: + const CMapType* map_type + + +cdef class FixedSizeListType(DataType): + cdef: + const CFixedSizeListType* list_type + + +cdef class StructType(DataType): + cdef: + const CStructType* struct_type + + cdef Field field_by_name(self, name) + + +cdef class DictionaryMemo(_Weakrefable): + cdef: + # Even though the CDictionaryMemo instance is private, we allocate + # it on the heap so as to avoid C++ ABI issues with Python wheels. 
+ shared_ptr[CDictionaryMemo] sp_memo + CDictionaryMemo* memo + + +cdef class DictionaryType(DataType): + cdef: + const CDictionaryType* dict_type + + +cdef class TimestampType(DataType): + cdef: + const CTimestampType* ts_type + + +cdef class Time32Type(DataType): + cdef: + const CTime32Type* time_type + + +cdef class Time64Type(DataType): + cdef: + const CTime64Type* time_type + + +cdef class DurationType(DataType): + cdef: + const CDurationType* duration_type + + +cdef class FixedSizeBinaryType(DataType): + cdef: + const CFixedSizeBinaryType* fixed_size_binary_type + + +cdef class Decimal128Type(FixedSizeBinaryType): + cdef: + const CDecimal128Type* decimal128_type + + +cdef class Decimal256Type(FixedSizeBinaryType): + cdef: + const CDecimal256Type* decimal256_type + + +cdef class RunEndEncodedType(DataType): + cdef: + const CRunEndEncodedType* run_end_encoded_type + + +cdef class BaseExtensionType(DataType): + cdef: + const CExtensionType* ext_type + + +cdef class ExtensionType(BaseExtensionType): + cdef: + const CPyExtensionType* cpy_ext_type + + +cdef class FixedShapeTensorType(BaseExtensionType): + cdef: + const CFixedShapeTensorType* tensor_ext_type + + +cdef class PyExtensionType(ExtensionType): + pass + + +cdef class _Metadata(_Weakrefable): + # required because KeyValueMetadata also extends collections.abc.Mapping + # and the first parent class must be an extension type + pass + + +cdef class KeyValueMetadata(_Metadata): + cdef: + shared_ptr[const CKeyValueMetadata] wrapped + const CKeyValueMetadata* metadata + + cdef void init(self, const shared_ptr[const CKeyValueMetadata]& wrapped) + + @staticmethod + cdef wrap(const shared_ptr[const CKeyValueMetadata]& sp) + cdef inline shared_ptr[const CKeyValueMetadata] unwrap(self) nogil + + +cdef class Field(_Weakrefable): + cdef: + shared_ptr[CField] sp_field + CField* field + + cdef readonly: + DataType type + + cdef void init(self, const shared_ptr[CField]& field) + + +cdef class Schema(_Weakrefable): + cdef: 
+ shared_ptr[CSchema] sp_schema + CSchema* schema + + cdef void init(self, const vector[shared_ptr[CField]]& fields) + cdef void init_schema(self, const shared_ptr[CSchema]& schema) + + +cdef class Scalar(_Weakrefable): + cdef: + shared_ptr[CScalar] wrapped + + cdef void init(self, const shared_ptr[CScalar]& wrapped) + + @staticmethod + cdef wrap(const shared_ptr[CScalar]& wrapped) + + cdef inline shared_ptr[CScalar] unwrap(self) nogil + + +cdef class _PandasConvertible(_Weakrefable): + pass + + +cdef class Array(_PandasConvertible): + cdef: + shared_ptr[CArray] sp_array + CArray* ap + + cdef readonly: + DataType type + # To allow Table to propagate metadata to pandas.Series + object _name + + cdef void init(self, const shared_ptr[CArray]& sp_array) except * + cdef getitem(self, int64_t i) + cdef int64_t length(self) + cdef void _assert_cpu(self) except * + + +cdef class Tensor(_Weakrefable): + cdef: + shared_ptr[CTensor] sp_tensor + CTensor* tp + + cdef readonly: + DataType type + bytes _ssize_t_shape + bytes _ssize_t_strides + + cdef void init(self, const shared_ptr[CTensor]& sp_tensor) + + +cdef class SparseCSRMatrix(_Weakrefable): + cdef: + shared_ptr[CSparseCSRMatrix] sp_sparse_tensor + CSparseCSRMatrix* stp + + cdef readonly: + DataType type + + cdef void init(self, const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor) + + +cdef class SparseCSCMatrix(_Weakrefable): + cdef: + shared_ptr[CSparseCSCMatrix] sp_sparse_tensor + CSparseCSCMatrix* stp + + cdef readonly: + DataType type + + cdef void init(self, const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor) + + +cdef class SparseCOOTensor(_Weakrefable): + cdef: + shared_ptr[CSparseCOOTensor] sp_sparse_tensor + CSparseCOOTensor* stp + + cdef readonly: + DataType type + + cdef void init(self, const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor) + + +cdef class SparseCSFTensor(_Weakrefable): + cdef: + shared_ptr[CSparseCSFTensor] sp_sparse_tensor + CSparseCSFTensor* stp + + cdef readonly: + DataType type + + cdef 
void init(self, const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor) + + +cdef class NullArray(Array): + pass + + +cdef class BooleanArray(Array): + pass + + +cdef class NumericArray(Array): + pass + + +cdef class IntegerArray(NumericArray): + pass + + +cdef class FloatingPointArray(NumericArray): + pass + + +cdef class Int8Array(IntegerArray): + pass + + +cdef class UInt8Array(IntegerArray): + pass + + +cdef class Int16Array(IntegerArray): + pass + + +cdef class UInt16Array(IntegerArray): + pass + + +cdef class Int32Array(IntegerArray): + pass + + +cdef class UInt32Array(IntegerArray): + pass + + +cdef class Int64Array(IntegerArray): + pass + + +cdef class UInt64Array(IntegerArray): + pass + + +cdef class HalfFloatArray(FloatingPointArray): + pass + + +cdef class FloatArray(FloatingPointArray): + pass + + +cdef class DoubleArray(FloatingPointArray): + pass + + +cdef class FixedSizeBinaryArray(Array): + pass + + +cdef class Decimal128Array(FixedSizeBinaryArray): + pass + + +cdef class Decimal256Array(FixedSizeBinaryArray): + pass + + +cdef class StructArray(Array): + pass + + +cdef class BaseListArray(Array): + pass + + +cdef class ListArray(BaseListArray): + pass + + +cdef class LargeListArray(BaseListArray): + pass + + +cdef class ListViewArray(BaseListArray): + pass + + +cdef class LargeListViewArray(BaseListArray): + pass + + +cdef class MapArray(ListArray): + pass + + +cdef class FixedSizeListArray(BaseListArray): + pass + + +cdef class UnionArray(Array): + pass + + +cdef class StringArray(Array): + pass + + +cdef class BinaryArray(Array): + pass + + +cdef class StringViewArray(Array): + pass + + +cdef class BinaryViewArray(Array): + pass + + +cdef class DictionaryArray(Array): + cdef: + object _indices, _dictionary + + +cdef class ExtensionArray(Array): + pass + + +cdef class MonthDayNanoIntervalArray(Array): + pass + + +cdef wrap_array_output(PyObject* output) +cdef wrap_datum(const CDatum& datum) + + +cdef class ChunkedArray(_PandasConvertible): + cdef: + 
shared_ptr[CChunkedArray] sp_chunked_array + CChunkedArray* chunked_array + + cdef readonly: + # To allow Table to propagate metadata to pandas.Series + object _name + + cdef void init(self, const shared_ptr[CChunkedArray]& chunked_array) + cdef getitem(self, int64_t i) + + +cdef class _Tabular(_PandasConvertible): + pass + + +cdef class Table(_Tabular): + cdef: + shared_ptr[CTable] sp_table + CTable* table + + cdef void init(self, const shared_ptr[CTable]& table) + + +cdef class RecordBatch(_Tabular): + cdef: + shared_ptr[CRecordBatch] sp_batch + CRecordBatch* batch + Schema _schema + + cdef void init(self, const shared_ptr[CRecordBatch]& table) + + +cdef class Device(_Weakrefable): + cdef: + shared_ptr[CDevice] device + + cdef void init(self, const shared_ptr[CDevice]& device) + + @staticmethod + cdef wrap(const shared_ptr[CDevice]& device) + + +cdef class MemoryManager(_Weakrefable): + cdef: + shared_ptr[CMemoryManager] memory_manager + + cdef void init(self, const shared_ptr[CMemoryManager]& memory_manager) + + @staticmethod + cdef wrap(const shared_ptr[CMemoryManager]& mm) + + +cdef class Buffer(_Weakrefable): + cdef: + shared_ptr[CBuffer] buffer + Py_ssize_t shape[1] + Py_ssize_t strides[1] + + cdef void init(self, const shared_ptr[CBuffer]& buffer) + cdef getitem(self, int64_t i) + + +cdef class ResizableBuffer(Buffer): + + cdef void init_rz(self, const shared_ptr[CResizableBuffer]& buffer) + + +cdef class NativeFile(_Weakrefable): + cdef: + shared_ptr[CInputStream] input_stream + shared_ptr[CRandomAccessFile] random_access + shared_ptr[COutputStream] output_stream + bint is_readable + bint is_writable + bint is_seekable + bint _is_appending + bint own_file + + # By implementing these "virtual" functions (all functions in Cython + # extension classes are technically virtual in the C++ sense) we can expose + # the arrow::io abstract file interfaces to other components throughout the + # suite of Arrow C++ libraries + cdef set_random_access_file(self, 
shared_ptr[CRandomAccessFile] handle) + cdef set_input_stream(self, shared_ptr[CInputStream] handle) + cdef set_output_stream(self, shared_ptr[COutputStream] handle) + + cdef shared_ptr[CRandomAccessFile] get_random_access_file(self) except * + cdef shared_ptr[CInputStream] get_input_stream(self) except * + cdef shared_ptr[COutputStream] get_output_stream(self) except * + + +cdef class BufferedInputStream(NativeFile): + pass + + +cdef class BufferedOutputStream(NativeFile): + pass + + +cdef class CompressedInputStream(NativeFile): + pass + + +cdef class CompressedOutputStream(NativeFile): + pass + + +cdef class _CRecordBatchWriter(_Weakrefable): + cdef: + SharedPtrNoGIL[CRecordBatchWriter] writer + + +cdef class RecordBatchReader(_Weakrefable): + cdef: + SharedPtrNoGIL[CRecordBatchReader] reader + + +cdef class CacheOptions(_Weakrefable): + cdef: + CCacheOptions wrapped + + cdef void init(self, CCacheOptions options) + + cdef inline CCacheOptions unwrap(self) + + @staticmethod + cdef wrap(const CCacheOptions options) + + +cdef class Codec(_Weakrefable): + cdef: + shared_ptr[CCodec] wrapped + + cdef inline CCodec* unwrap(self) nogil + + +# This class is only used internally for now +cdef class StopToken: + cdef: + CStopToken stop_token + + cdef void init(self, CStopToken stop_token) + + +cdef get_input_stream(object source, c_bool use_memory_map, + shared_ptr[CInputStream]* reader) +cdef get_reader(object source, c_bool use_memory_map, + shared_ptr[CRandomAccessFile]* reader) +cdef get_writer(object source, shared_ptr[COutputStream]* writer) +cdef NativeFile get_native_file(object source, c_bool use_memory_map) + +cdef shared_ptr[CInputStream] native_transcoding_input_stream( + shared_ptr[CInputStream] stream, src_encoding, + dest_encoding) except * + +cdef shared_ptr[function[StreamWrapFunc]] make_streamwrap_func( + src_encoding, dest_encoding) except * + +# Default is allow_none=False +cpdef DataType ensure_type(object type, bint allow_none=*) + +cdef 
timeunit_to_string(TimeUnit unit) +cdef TimeUnit string_to_timeunit(unit) except * + +# Exceptions may be raised when converting dict values, so need to +# check exception state on return +cdef shared_ptr[const CKeyValueMetadata] pyarrow_unwrap_metadata( + object meta) except * +cdef object pyarrow_wrap_metadata( + const shared_ptr[const CKeyValueMetadata]& meta) + +# +# Public Cython API for 3rd party code +# +# If you add functions to this list, please also update +# `cpp/src/arrow/python/pyarrow.{h, cc}` +# + +# Wrapping C++ -> Python + +cdef public object pyarrow_wrap_buffer(const shared_ptr[CBuffer]& buf) +cdef public object pyarrow_wrap_resizable_buffer( + const shared_ptr[CResizableBuffer]& buf) + +cdef public object pyarrow_wrap_data_type(const shared_ptr[CDataType]& type) +cdef public object pyarrow_wrap_field(const shared_ptr[CField]& field) +cdef public object pyarrow_wrap_schema(const shared_ptr[CSchema]& type) + +cdef public object pyarrow_wrap_scalar(const shared_ptr[CScalar]& sp_scalar) + +cdef public object pyarrow_wrap_array(const shared_ptr[CArray]& sp_array) +cdef public object pyarrow_wrap_chunked_array( + const shared_ptr[CChunkedArray]& sp_array) + +cdef public object pyarrow_wrap_sparse_coo_tensor( + const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor) +cdef public object pyarrow_wrap_sparse_csc_matrix( + const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor) +cdef public object pyarrow_wrap_sparse_csf_tensor( + const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor) +cdef public object pyarrow_wrap_sparse_csr_matrix( + const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor) +cdef public object pyarrow_wrap_tensor(const shared_ptr[CTensor]& sp_tensor) + +cdef public object pyarrow_wrap_batch(const shared_ptr[CRecordBatch]& cbatch) +cdef public object pyarrow_wrap_table(const shared_ptr[CTable]& ctable) + +# Unwrapping Python -> C++ + +cdef public shared_ptr[CBuffer] pyarrow_unwrap_buffer(object buffer) + +cdef public shared_ptr[CDataType] 
pyarrow_unwrap_data_type(object data_type) +cdef public shared_ptr[CField] pyarrow_unwrap_field(object field) +cdef public shared_ptr[CSchema] pyarrow_unwrap_schema(object schema) + +cdef public shared_ptr[CScalar] pyarrow_unwrap_scalar(object scalar) + +cdef public shared_ptr[CArray] pyarrow_unwrap_array(object array) +cdef public shared_ptr[CChunkedArray] pyarrow_unwrap_chunked_array( + object array) + +cdef public shared_ptr[CSparseCOOTensor] pyarrow_unwrap_sparse_coo_tensor( + object sparse_tensor) +cdef public shared_ptr[CSparseCSCMatrix] pyarrow_unwrap_sparse_csc_matrix( + object sparse_tensor) +cdef public shared_ptr[CSparseCSFTensor] pyarrow_unwrap_sparse_csf_tensor( + object sparse_tensor) +cdef public shared_ptr[CSparseCSRMatrix] pyarrow_unwrap_sparse_csr_matrix( + object sparse_tensor) +cdef public shared_ptr[CTensor] pyarrow_unwrap_tensor(object tensor) + +cdef public shared_ptr[CRecordBatch] pyarrow_unwrap_batch(object batch) +cdef public shared_ptr[CTable] pyarrow_unwrap_table(object table) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/lib_api.h b/parrot/lib/python3.10/site-packages/pyarrow/lib_api.h new file mode 100644 index 0000000000000000000000000000000000000000..6c4fee277774dba421569dd4691b775ab73e283a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/lib_api.h @@ -0,0 +1,201 @@ +/* Generated by Cython 3.0.10 */ + +#ifndef __PYX_HAVE_API__pyarrow__lib +#define __PYX_HAVE_API__pyarrow__lib +#ifdef __MINGW64__ +#define MS_WIN64 +#endif +#include "Python.h" +#include "lib.h" + +static PyObject *(*__pyx_api_f_7pyarrow_3lib_box_memory_pool)( arrow::MemoryPool *) = 0; +#define box_memory_pool __pyx_api_f_7pyarrow_3lib_box_memory_pool +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer)(std::shared_ptr< arrow::Buffer> const &) = 0; +#define pyarrow_wrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer)(std::shared_ptr< 
arrow::ResizableBuffer> const &) = 0; +#define pyarrow_wrap_resizable_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type)(std::shared_ptr< arrow::DataType> const &) = 0; +#define pyarrow_wrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field)(std::shared_ptr< arrow::Field> const &) = 0; +#define pyarrow_wrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema)(std::shared_ptr< arrow::Schema> const &) = 0; +#define pyarrow_wrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar)(std::shared_ptr< arrow::Scalar> const &) = 0; +#define pyarrow_wrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array)(std::shared_ptr< arrow::Array> const &) = 0; +#define pyarrow_wrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array)(std::shared_ptr< arrow::ChunkedArray> const &) = 0; +#define pyarrow_wrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor)(std::shared_ptr< arrow::SparseCOOTensor> const &) = 0; +#define pyarrow_wrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix)(std::shared_ptr< arrow::SparseCSCMatrix> const &) = 0; +#define pyarrow_wrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor)(std::shared_ptr< arrow::SparseCSFTensor> const &) = 0; +#define pyarrow_wrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor +static PyObject 
*(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix)(std::shared_ptr< arrow::SparseCSRMatrix> const &) = 0; +#define pyarrow_wrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor)(std::shared_ptr< arrow::Tensor> const &) = 0; +#define pyarrow_wrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch)(std::shared_ptr< arrow::RecordBatch> const &) = 0; +#define pyarrow_wrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table)(std::shared_ptr< arrow::Table> const &) = 0; +#define pyarrow_wrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table +static std::shared_ptr< arrow::Buffer> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer)(PyObject *) = 0; +#define pyarrow_unwrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer +static std::shared_ptr< arrow::DataType> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type)(PyObject *) = 0; +#define pyarrow_unwrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type +static std::shared_ptr< arrow::Field> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field)(PyObject *) = 0; +#define pyarrow_unwrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field +static std::shared_ptr< arrow::Schema> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema)(PyObject *) = 0; +#define pyarrow_unwrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema +static std::shared_ptr< arrow::Scalar> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar)(PyObject *) = 0; +#define pyarrow_unwrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar +static std::shared_ptr< arrow::Array> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array)(PyObject *) = 0; +#define pyarrow_unwrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array +static std::shared_ptr< arrow::ChunkedArray> 
(*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array)(PyObject *) = 0; +#define pyarrow_unwrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array +static std::shared_ptr< arrow::SparseCOOTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor +static std::shared_ptr< arrow::SparseCSCMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix +static std::shared_ptr< arrow::SparseCSFTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor +static std::shared_ptr< arrow::SparseCSRMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix +static std::shared_ptr< arrow::Tensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor)(PyObject *) = 0; +#define pyarrow_unwrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor +static std::shared_ptr< arrow::RecordBatch> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch)(PyObject *) = 0; +#define pyarrow_unwrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch +static std::shared_ptr< arrow::Table> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table)(PyObject *) = 0; +#define pyarrow_unwrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status)(arrow::Status const &) = 0; +#define pyarrow_internal_check_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status)(arrow::Status const &) = 0; +#define pyarrow_internal_convert_status 
__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer)(PyObject *) = 0; +#define pyarrow_is_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type)(PyObject *) = 0; +#define pyarrow_is_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata)(PyObject *) = 0; +#define pyarrow_is_metadata __pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_field)(PyObject *) = 0; +#define pyarrow_is_field __pyx_api_f_7pyarrow_3lib_pyarrow_is_field +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema)(PyObject *) = 0; +#define pyarrow_is_schema __pyx_api_f_7pyarrow_3lib_pyarrow_is_schema +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_array)(PyObject *) = 0; +#define pyarrow_is_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_array +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array)(PyObject *) = 0; +#define pyarrow_is_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar)(PyObject *) = 0; +#define pyarrow_is_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor)(PyObject *) = 0; +#define pyarrow_is_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor)(PyObject *) = 0; +#define pyarrow_is_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix)(PyObject *) = 0; +#define pyarrow_is_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix)(PyObject *) = 0; +#define pyarrow_is_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix +static int 
(*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor)(PyObject *) = 0; +#define pyarrow_is_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_table)(PyObject *) = 0; +#define pyarrow_is_table __pyx_api_f_7pyarrow_3lib_pyarrow_is_table +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch)(PyObject *) = 0; +#define pyarrow_is_batch __pyx_api_f_7pyarrow_3lib_pyarrow_is_batch +#ifndef __PYX_HAVE_RT_ImportFunction_3_0_10 +#define __PYX_HAVE_RT_ImportFunction_3_0_10 +static int __Pyx_ImportFunction_3_0_10(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { + PyObject *d = 0; + PyObject *cobj = 0; + union { + void (*fp)(void); + void *p; + } tmp; + d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); + if (!d) + goto bad; + cobj = PyDict_GetItemString(d, funcname); + if (!cobj) { + PyErr_Format(PyExc_ImportError, + "%.200s does not export expected C function %.200s", + PyModule_GetName(module), funcname); + goto bad; + } + if (!PyCapsule_IsValid(cobj, sig)) { + PyErr_Format(PyExc_TypeError, + "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", + PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); + goto bad; + } + tmp.p = PyCapsule_GetPointer(cobj, sig); + *f = tmp.fp; + if (!(*f)) + goto bad; + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(d); + return -1; +} +#endif + + +static int import_pyarrow__lib(void) { + PyObject *module = 0; + module = PyImport_ImportModule("pyarrow.lib"); + if (!module) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "box_memory_pool", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_box_memory_pool, "PyObject *( arrow::MemoryPool *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer, "PyObject *(std::shared_ptr< arrow::Buffer> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, 
"pyarrow_wrap_resizable_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer, "PyObject *(std::shared_ptr< arrow::ResizableBuffer> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type, "PyObject *(std::shared_ptr< arrow::DataType> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field, "PyObject *(std::shared_ptr< arrow::Field> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema, "PyObject *(std::shared_ptr< arrow::Schema> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar, "PyObject *(std::shared_ptr< arrow::Scalar> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array, "PyObject *(std::shared_ptr< arrow::Array> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array, "PyObject *(std::shared_ptr< arrow::ChunkedArray> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor, "PyObject *(std::shared_ptr< arrow::SparseCOOTensor> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSCMatrix> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_sparse_csf_tensor", (void 
(**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor, "PyObject *(std::shared_ptr< arrow::SparseCSFTensor> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSRMatrix> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor, "PyObject *(std::shared_ptr< arrow::Tensor> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch, "PyObject *(std::shared_ptr< arrow::RecordBatch> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_wrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table, "PyObject *(std::shared_ptr< arrow::Table> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer, "std::shared_ptr< arrow::Buffer> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type, "std::shared_ptr< arrow::DataType> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field, "std::shared_ptr< arrow::Field> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema, "std::shared_ptr< arrow::Schema> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar, "std::shared_ptr< arrow::Scalar> (PyObject *)") < 0) goto bad; + if 
(__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array, "std::shared_ptr< arrow::Array> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array, "std::shared_ptr< arrow::ChunkedArray> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor, "std::shared_ptr< arrow::SparseCOOTensor> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix, "std::shared_ptr< arrow::SparseCSCMatrix> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor, "std::shared_ptr< arrow::SparseCSFTensor> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix, "std::shared_ptr< arrow::SparseCSRMatrix> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor, "std::shared_ptr< arrow::Tensor> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch, "std::shared_ptr< arrow::RecordBatch> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_unwrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table, "std::shared_ptr< arrow::Table> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_internal_check_status", (void 
(**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status, "int (arrow::Status const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_internal_convert_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status, "PyObject *(arrow::Status const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_metadata", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_field, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_array, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor, "int (PyObject *)") < 0) goto bad; + if 
(__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_table, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyarrow_is_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch, "int (PyObject *)") < 0) goto bad; + Py_DECREF(module); module = 0; + return 0; + bad: + Py_XDECREF(module); + return -1; +} + +#endif /* !__PYX_HAVE_API__pyarrow__lib */ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/orc.py b/parrot/lib/python3.10/site-packages/pyarrow/orc.py new file mode 100644 index 0000000000000000000000000000000000000000..6f5feafb3363eb35e642e5c98db07ba1908053d3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/orc.py @@ -0,0 +1,384 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +from numbers import Integral +import warnings + +from pyarrow.lib import Table +import pyarrow._orc as _orc +from pyarrow.fs import _resolve_filesystem_and_path + + +class ORCFile: + """ + Reader interface for a single ORC file + + Parameters + ---------- + source : str or pyarrow.NativeFile + Readable source. For passing Python file objects or byte buffers, + see pyarrow.io.PythonFileInterface or pyarrow.io.BufferReader. + """ + + def __init__(self, source): + self.reader = _orc.ORCReader() + self.reader.open(source) + + @property + def metadata(self): + """The file metadata, as an arrow KeyValueMetadata""" + return self.reader.metadata() + + @property + def schema(self): + """The file schema, as an arrow schema""" + return self.reader.schema() + + @property + def nrows(self): + """The number of rows in the file""" + return self.reader.nrows() + + @property + def nstripes(self): + """The number of stripes in the file""" + return self.reader.nstripes() + + @property + def file_version(self): + """Format version of the ORC file, must be 0.11 or 0.12""" + return self.reader.file_version() + + @property + def software_version(self): + """Software instance and version that wrote this file""" + return self.reader.software_version() + + @property + def compression(self): + """Compression codec of the file""" + return self.reader.compression() + + @property + def compression_size(self): + """Number of bytes to buffer for the compression codec in the file""" + return self.reader.compression_size() + + @property + def writer(self): + """Name of the writer that 
wrote this file. + If the writer is unknown then its Writer ID + (a number) is returned""" + return self.reader.writer() + + @property + def writer_version(self): + """Version of the writer""" + return self.reader.writer_version() + + @property + def row_index_stride(self): + """Number of rows per an entry in the row index or 0 + if there is no row index""" + return self.reader.row_index_stride() + + @property + def nstripe_statistics(self): + """Number of stripe statistics""" + return self.reader.nstripe_statistics() + + @property + def content_length(self): + """Length of the data stripes in the file in bytes""" + return self.reader.content_length() + + @property + def stripe_statistics_length(self): + """The number of compressed bytes in the file stripe statistics""" + return self.reader.stripe_statistics_length() + + @property + def file_footer_length(self): + """The number of compressed bytes in the file footer""" + return self.reader.file_footer_length() + + @property + def file_postscript_length(self): + """The number of bytes in the file postscript""" + return self.reader.file_postscript_length() + + @property + def file_length(self): + """The number of bytes in the file""" + return self.reader.file_length() + + def _select_names(self, columns=None): + if columns is None: + return None + + schema = self.schema + names = [] + for col in columns: + if isinstance(col, Integral): + col = int(col) + if 0 <= col < len(schema): + col = schema[col].name + names.append(col) + else: + raise ValueError("Column indices must be in 0 <= ind < %d," + " got %d" % (len(schema), col)) + else: + return columns + + return names + + def read_stripe(self, n, columns=None): + """Read a single stripe from the file. + + Parameters + ---------- + n : int + The stripe index + columns : list + If not None, only these columns will be read from the stripe. A + column name may be a prefix of a nested field, e.g. 
'a' will select + 'a.b', 'a.c', and 'a.d.e' + + Returns + ------- + pyarrow.RecordBatch + Content of the stripe as a RecordBatch. + """ + columns = self._select_names(columns) + return self.reader.read_stripe(n, columns=columns) + + def read(self, columns=None): + """Read the whole file. + + Parameters + ---------- + columns : list + If not None, only these columns will be read from the file. A + column name may be a prefix of a nested field, e.g. 'a' will select + 'a.b', 'a.c', and 'a.d.e'. Output always follows the + ordering of the file and not the `columns` list. + + Returns + ------- + pyarrow.Table + Content of the file as a Table. + """ + columns = self._select_names(columns) + return self.reader.read(columns=columns) + + +_orc_writer_args_docs = """file_version : {"0.11", "0.12"}, default "0.12" + Determine which ORC file version to use. + `Hive 0.11 / ORC v0 `_ + is the older version + while `Hive 0.12 / ORC v1 `_ + is the newer one. +batch_size : int, default 1024 + Number of rows the ORC writer writes at a time. +stripe_size : int, default 64 * 1024 * 1024 + Size of each ORC stripe in bytes. +compression : string, default 'uncompressed' + The compression codec. + Valid values: {'UNCOMPRESSED', 'SNAPPY', 'ZLIB', 'LZ4', 'ZSTD'} + Note that LZ0 is currently not supported. +compression_block_size : int, default 64 * 1024 + Size of each compression block in bytes. +compression_strategy : string, default 'speed' + The compression strategy i.e. speed vs size reduction. + Valid values: {'SPEED', 'COMPRESSION'} +row_index_stride : int, default 10000 + The row index stride i.e. the number of rows per + an entry in the row index. +padding_tolerance : double, default 0.0 + The padding tolerance. +dictionary_key_size_threshold : double, default 0.0 + The dictionary key size threshold. 0 to disable dictionary encoding. + 1 to always enable dictionary encoding. +bloom_filter_columns : None, set-like or list-like, default None + Columns that use the bloom filter. 
+bloom_filter_fpp : double, default 0.05 + Upper limit of the false-positive rate of the bloom filter. +""" + + +class ORCWriter: + __doc__ = """ +Writer interface for a single ORC file + +Parameters +---------- +where : str or pyarrow.io.NativeFile + Writable target. For passing Python file objects or byte buffers, + see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream + or pyarrow.io.FixedSizeBufferWriter. +{} +""".format(_orc_writer_args_docs) + + is_open = False + + def __init__(self, where, *, + file_version='0.12', + batch_size=1024, + stripe_size=64 * 1024 * 1024, + compression='uncompressed', + compression_block_size=65536, + compression_strategy='speed', + row_index_stride=10000, + padding_tolerance=0.0, + dictionary_key_size_threshold=0.0, + bloom_filter_columns=None, + bloom_filter_fpp=0.05, + ): + self.writer = _orc.ORCWriter() + self.writer.open( + where, + file_version=file_version, + batch_size=batch_size, + stripe_size=stripe_size, + compression=compression, + compression_block_size=compression_block_size, + compression_strategy=compression_strategy, + row_index_stride=row_index_stride, + padding_tolerance=padding_tolerance, + dictionary_key_size_threshold=dictionary_key_size_threshold, + bloom_filter_columns=bloom_filter_columns, + bloom_filter_fpp=bloom_filter_fpp + ) + self.is_open = True + + def __del__(self): + self.close() + + def __enter__(self): + return self + + def __exit__(self, *args, **kwargs): + self.close() + + def write(self, table): + """ + Write the table into an ORC file. The schema of the table must + be equal to the schema used when opening the ORC file. 
+ + Parameters + ---------- + table : pyarrow.Table + The table to be written into the ORC file + """ + assert self.is_open + self.writer.write(table) + + def close(self): + """ + Close the ORC file + """ + if self.is_open: + self.writer.close() + self.is_open = False + + +def read_table(source, columns=None, filesystem=None): + filesystem, path = _resolve_filesystem_and_path(source, filesystem) + if filesystem is not None: + source = filesystem.open_input_file(path) + + if columns is not None and len(columns) == 0: + result = ORCFile(source).read().select(columns) + else: + result = ORCFile(source).read(columns=columns) + + return result + + +read_table.__doc__ = """ +Read a Table from an ORC file. + +Parameters +---------- +source : str, pyarrow.NativeFile, or file-like object + If a string passed, can be a single file name. For file-like objects, + only read a single file. Use pyarrow.BufferReader to read a file + contained in a bytes or buffer-like object. +columns : list + If not None, only these columns will be read from the file. A column + name may be a prefix of a nested field, e.g. 'a' will select 'a.b', + 'a.c', and 'a.d.e'. Output always follows the ordering of the file and + not the `columns` list. If empty, no columns will be read. Note + that the table will still have the correct num_rows set despite having + no columns. +filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. 
+""" + + +def write_table(table, where, *, + file_version='0.12', + batch_size=1024, + stripe_size=64 * 1024 * 1024, + compression='uncompressed', + compression_block_size=65536, + compression_strategy='speed', + row_index_stride=10000, + padding_tolerance=0.0, + dictionary_key_size_threshold=0.0, + bloom_filter_columns=None, + bloom_filter_fpp=0.05): + if isinstance(where, Table): + warnings.warn( + "The order of the arguments has changed. Pass as " + "'write_table(table, where)' instead. The old order will raise " + "an error in the future.", FutureWarning, stacklevel=2 + ) + table, where = where, table + with ORCWriter( + where, + file_version=file_version, + batch_size=batch_size, + stripe_size=stripe_size, + compression=compression, + compression_block_size=compression_block_size, + compression_strategy=compression_strategy, + row_index_stride=row_index_stride, + padding_tolerance=padding_tolerance, + dictionary_key_size_threshold=dictionary_key_size_threshold, + bloom_filter_columns=bloom_filter_columns, + bloom_filter_fpp=bloom_filter_fpp + ) as writer: + writer.write(table) + + +write_table.__doc__ = """ +Write a table into an ORC file. + +Parameters +---------- +table : pyarrow.lib.Table + The table to be written into the ORC file +where : str or pyarrow.io.NativeFile + Writable target. For passing Python file objects or byte buffers, + see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream + or pyarrow.io.FixedSizeBufferWriter. +{} +""".format(_orc_writer_args_docs) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/public-api.pxi b/parrot/lib/python3.10/site-packages/pyarrow/public-api.pxi new file mode 100644 index 0000000000000000000000000000000000000000..966273b4bea84304a9f38ecc04a8ad99cd17209e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/public-api.pxi @@ -0,0 +1,430 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from libcpp.memory cimport shared_ptr +from pyarrow.includes.libarrow cimport (CArray, CDataType, CField, + CRecordBatch, CSchema, + CTable, CTensor, + CSparseCOOTensor, CSparseCSRMatrix, + CSparseCSCMatrix, CSparseCSFTensor) + +# You cannot assign something to a dereferenced pointer in Cython thus these +# methods don't use Status to indicate a successful operation. 
+ + +cdef api bint pyarrow_is_buffer(object buffer): + return isinstance(buffer, Buffer) + + +cdef api shared_ptr[CBuffer] pyarrow_unwrap_buffer(object buffer): + cdef Buffer buf + if pyarrow_is_buffer(buffer): + buf = (buffer) + return buf.buffer + + return shared_ptr[CBuffer]() + + +cdef api object pyarrow_wrap_buffer(const shared_ptr[CBuffer]& buf): + cdef Buffer result = Buffer.__new__(Buffer) + result.init(buf) + return result + + +cdef api object pyarrow_wrap_resizable_buffer( + const shared_ptr[CResizableBuffer]& buf): + cdef ResizableBuffer result = ResizableBuffer.__new__(ResizableBuffer) + result.init_rz(buf) + return result + + +cdef api bint pyarrow_is_data_type(object type_): + return isinstance(type_, DataType) + + +cdef api shared_ptr[CDataType] pyarrow_unwrap_data_type( + object data_type): + cdef DataType type_ + if pyarrow_is_data_type(data_type): + type_ = (data_type) + return type_.sp_type + + return shared_ptr[CDataType]() + + +# Workaround for Cython parsing bug +# https://github.com/cython/cython/issues/2143 +ctypedef const CPyExtensionType* _CPyExtensionTypePtr + + +cdef api object pyarrow_wrap_data_type( + const shared_ptr[CDataType]& type): + cdef: + const CExtensionType* ext_type + const CPyExtensionType* cpy_ext_type + DataType out + + if type.get() == NULL: + return None + + if type.get().id() == _Type_DICTIONARY: + out = DictionaryType.__new__(DictionaryType) + elif type.get().id() == _Type_LIST: + out = ListType.__new__(ListType) + elif type.get().id() == _Type_LARGE_LIST: + out = LargeListType.__new__(LargeListType) + elif type.get().id() == _Type_LIST_VIEW: + out = ListViewType.__new__(ListViewType) + elif type.get().id() == _Type_LARGE_LIST_VIEW: + out = LargeListViewType.__new__(LargeListViewType) + elif type.get().id() == _Type_MAP: + out = MapType.__new__(MapType) + elif type.get().id() == _Type_FIXED_SIZE_LIST: + out = FixedSizeListType.__new__(FixedSizeListType) + elif type.get().id() == _Type_STRUCT: + out = 
StructType.__new__(StructType) + elif type.get().id() == _Type_SPARSE_UNION: + out = SparseUnionType.__new__(SparseUnionType) + elif type.get().id() == _Type_DENSE_UNION: + out = DenseUnionType.__new__(DenseUnionType) + elif type.get().id() == _Type_TIME32: + out = Time32Type.__new__(Time32Type) + elif type.get().id() == _Type_TIME64: + out = Time64Type.__new__(Time64Type) + elif type.get().id() == _Type_TIMESTAMP: + out = TimestampType.__new__(TimestampType) + elif type.get().id() == _Type_DURATION: + out = DurationType.__new__(DurationType) + elif type.get().id() == _Type_FIXED_SIZE_BINARY: + out = FixedSizeBinaryType.__new__(FixedSizeBinaryType) + elif type.get().id() == _Type_DECIMAL128: + out = Decimal128Type.__new__(Decimal128Type) + elif type.get().id() == _Type_DECIMAL256: + out = Decimal256Type.__new__(Decimal256Type) + elif type.get().id() == _Type_RUN_END_ENCODED: + out = RunEndEncodedType.__new__(RunEndEncodedType) + elif type.get().id() == _Type_EXTENSION: + ext_type = type.get() + cpy_ext_type = dynamic_cast[_CPyExtensionTypePtr](ext_type) + if cpy_ext_type != nullptr: + return cpy_ext_type.GetInstance() + elif ext_type.extension_name() == b"arrow.fixed_shape_tensor": + out = FixedShapeTensorType.__new__(FixedShapeTensorType) + else: + out = BaseExtensionType.__new__(BaseExtensionType) + else: + out = DataType.__new__(DataType) + + out.init(type) + return out + + +cdef object pyarrow_wrap_metadata( + const shared_ptr[const CKeyValueMetadata]& meta): + if meta.get() == nullptr: + return None + else: + return KeyValueMetadata.wrap(meta) + + +cdef api bint pyarrow_is_metadata(object metadata): + return isinstance(metadata, KeyValueMetadata) + + +cdef shared_ptr[const CKeyValueMetadata] pyarrow_unwrap_metadata(object meta): + cdef shared_ptr[const CKeyValueMetadata] c_meta + if pyarrow_is_metadata(meta): + c_meta = (meta).unwrap() + return c_meta + + +cdef api bint pyarrow_is_field(object field): + return isinstance(field, Field) + + +cdef api 
shared_ptr[CField] pyarrow_unwrap_field(object field): + cdef Field field_ + if pyarrow_is_field(field): + field_ = (field) + return field_.sp_field + + return shared_ptr[CField]() + + +cdef api object pyarrow_wrap_field(const shared_ptr[CField]& field): + if field.get() == NULL: + return None + cdef Field out = Field.__new__(Field) + out.init(field) + return out + + +cdef api bint pyarrow_is_schema(object schema): + return isinstance(schema, Schema) + + +cdef api shared_ptr[CSchema] pyarrow_unwrap_schema(object schema): + cdef Schema sch + if pyarrow_is_schema(schema): + sch = (schema) + return sch.sp_schema + + return shared_ptr[CSchema]() + + +cdef api object pyarrow_wrap_schema(const shared_ptr[CSchema]& schema): + cdef Schema out = Schema.__new__(Schema) + out.init_schema(schema) + return out + + +cdef api bint pyarrow_is_array(object array): + return isinstance(array, Array) + + +cdef api shared_ptr[CArray] pyarrow_unwrap_array(object array): + cdef Array arr + if pyarrow_is_array(array): + arr = (array) + return arr.sp_array + + return shared_ptr[CArray]() + + +cdef api object pyarrow_wrap_array(const shared_ptr[CArray]& sp_array): + if sp_array.get() == NULL: + raise ValueError('Array was NULL') + + klass = get_array_class_from_type(sp_array.get().type()) + + cdef Array arr = klass.__new__(klass) + arr.init(sp_array) + return arr + + +cdef api bint pyarrow_is_chunked_array(object array): + return isinstance(array, ChunkedArray) + + +cdef api shared_ptr[CChunkedArray] pyarrow_unwrap_chunked_array(object array): + cdef ChunkedArray arr + if pyarrow_is_chunked_array(array): + arr = (array) + return arr.sp_chunked_array + + return shared_ptr[CChunkedArray]() + + +cdef api object pyarrow_wrap_chunked_array( + const shared_ptr[CChunkedArray]& sp_array): + if sp_array.get() == NULL: + raise ValueError('ChunkedArray was NULL') + + cdef CDataType* data_type = sp_array.get().type().get() + + if data_type == NULL: + raise ValueError('ChunkedArray data type was NULL') 
+ + cdef ChunkedArray arr = ChunkedArray.__new__(ChunkedArray) + arr.init(sp_array) + return arr + + +cdef api bint pyarrow_is_scalar(object value): + return isinstance(value, Scalar) + + +cdef api shared_ptr[CScalar] pyarrow_unwrap_scalar(object scalar): + if pyarrow_is_scalar(scalar): + return ( scalar).unwrap() + return shared_ptr[CScalar]() + + +cdef api object pyarrow_wrap_scalar(const shared_ptr[CScalar]& sp_scalar): + if sp_scalar.get() == NULL: + raise ValueError('Scalar was NULL') + + cdef CDataType* data_type = sp_scalar.get().type.get() + + if data_type == NULL: + raise ValueError('Scalar data type was NULL') + + if data_type.id() == _Type_NA: + return _NULL + + if data_type.id() not in _scalar_classes: + raise ValueError('Scalar type not supported') + + klass = get_scalar_class_from_type(sp_scalar.get().type) + + cdef Scalar scalar = klass.__new__(klass) + scalar.init(sp_scalar) + return scalar + + +cdef api bint pyarrow_is_tensor(object tensor): + return isinstance(tensor, Tensor) + + +cdef api shared_ptr[CTensor] pyarrow_unwrap_tensor(object tensor): + cdef Tensor ten + if pyarrow_is_tensor(tensor): + ten = (tensor) + return ten.sp_tensor + + return shared_ptr[CTensor]() + + +cdef api object pyarrow_wrap_tensor( + const shared_ptr[CTensor]& sp_tensor): + if sp_tensor.get() == NULL: + raise ValueError('Tensor was NULL') + + cdef Tensor tensor = Tensor.__new__(Tensor) + tensor.init(sp_tensor) + return tensor + + +cdef api bint pyarrow_is_sparse_coo_tensor(object sparse_tensor): + return isinstance(sparse_tensor, SparseCOOTensor) + +cdef api shared_ptr[CSparseCOOTensor] pyarrow_unwrap_sparse_coo_tensor( + object sparse_tensor): + cdef SparseCOOTensor sten + if pyarrow_is_sparse_coo_tensor(sparse_tensor): + sten = (sparse_tensor) + return sten.sp_sparse_tensor + + return shared_ptr[CSparseCOOTensor]() + +cdef api object pyarrow_wrap_sparse_coo_tensor( + const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor): + if sp_sparse_tensor.get() == NULL: + raise 
ValueError('SparseCOOTensor was NULL') + + cdef SparseCOOTensor sparse_tensor = SparseCOOTensor.__new__( + SparseCOOTensor) + sparse_tensor.init(sp_sparse_tensor) + return sparse_tensor + + +cdef api bint pyarrow_is_sparse_csr_matrix(object sparse_tensor): + return isinstance(sparse_tensor, SparseCSRMatrix) + +cdef api shared_ptr[CSparseCSRMatrix] pyarrow_unwrap_sparse_csr_matrix( + object sparse_tensor): + cdef SparseCSRMatrix sten + if pyarrow_is_sparse_csr_matrix(sparse_tensor): + sten = (sparse_tensor) + return sten.sp_sparse_tensor + + return shared_ptr[CSparseCSRMatrix]() + +cdef api object pyarrow_wrap_sparse_csr_matrix( + const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor): + if sp_sparse_tensor.get() == NULL: + raise ValueError('SparseCSRMatrix was NULL') + + cdef SparseCSRMatrix sparse_tensor = SparseCSRMatrix.__new__( + SparseCSRMatrix) + sparse_tensor.init(sp_sparse_tensor) + return sparse_tensor + + +cdef api bint pyarrow_is_sparse_csc_matrix(object sparse_tensor): + return isinstance(sparse_tensor, SparseCSCMatrix) + +cdef api shared_ptr[CSparseCSCMatrix] pyarrow_unwrap_sparse_csc_matrix( + object sparse_tensor): + cdef SparseCSCMatrix sten + if pyarrow_is_sparse_csc_matrix(sparse_tensor): + sten = (sparse_tensor) + return sten.sp_sparse_tensor + + return shared_ptr[CSparseCSCMatrix]() + +cdef api object pyarrow_wrap_sparse_csc_matrix( + const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor): + if sp_sparse_tensor.get() == NULL: + raise ValueError('SparseCSCMatrix was NULL') + + cdef SparseCSCMatrix sparse_tensor = SparseCSCMatrix.__new__( + SparseCSCMatrix) + sparse_tensor.init(sp_sparse_tensor) + return sparse_tensor + + +cdef api bint pyarrow_is_sparse_csf_tensor(object sparse_tensor): + return isinstance(sparse_tensor, SparseCSFTensor) + +cdef api shared_ptr[CSparseCSFTensor] pyarrow_unwrap_sparse_csf_tensor( + object sparse_tensor): + cdef SparseCSFTensor sten + if pyarrow_is_sparse_csf_tensor(sparse_tensor): + sten = (sparse_tensor) + return 
sten.sp_sparse_tensor + + return shared_ptr[CSparseCSFTensor]() + +cdef api object pyarrow_wrap_sparse_csf_tensor( + const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor): + if sp_sparse_tensor.get() == NULL: + raise ValueError('SparseCSFTensor was NULL') + + cdef SparseCSFTensor sparse_tensor = SparseCSFTensor.__new__( + SparseCSFTensor) + sparse_tensor.init(sp_sparse_tensor) + return sparse_tensor + + +cdef api bint pyarrow_is_table(object table): + return isinstance(table, Table) + + +cdef api shared_ptr[CTable] pyarrow_unwrap_table(object table): + cdef Table tab + if pyarrow_is_table(table): + tab = (table) + return tab.sp_table + + return shared_ptr[CTable]() + + +cdef api object pyarrow_wrap_table(const shared_ptr[CTable]& ctable): + cdef Table table = Table.__new__(Table) + table.init(ctable) + return table + + +cdef api bint pyarrow_is_batch(object batch): + return isinstance(batch, RecordBatch) + + +cdef api shared_ptr[CRecordBatch] pyarrow_unwrap_batch(object batch): + cdef RecordBatch bat + if pyarrow_is_batch(batch): + bat = (batch) + return bat.sp_batch + + return shared_ptr[CRecordBatch]() + + +cdef api object pyarrow_wrap_batch( + const shared_ptr[CRecordBatch]& cbatch): + cdef RecordBatch batch = RecordBatch.__new__(RecordBatch) + batch.init(cbatch) + return batch diff --git a/parrot/lib/python3.10/site-packages/pyarrow/scalar.pxi b/parrot/lib/python3.10/site-packages/pyarrow/scalar.pxi new file mode 100644 index 0000000000000000000000000000000000000000..41bfde39adb6fb0d468fcc6d85fd427294bd5845 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/scalar.pxi @@ -0,0 +1,1220 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import collections +from cython cimport binding + + +cdef class Scalar(_Weakrefable): + """ + The base class for scalars. + """ + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "pa.scalar() instead.".format(self.__class__.__name__)) + + cdef void init(self, const shared_ptr[CScalar]& wrapped): + self.wrapped = wrapped + + @staticmethod + cdef wrap(const shared_ptr[CScalar]& wrapped): + cdef: + Scalar self + Type type_id = wrapped.get().type.get().id() + shared_ptr[CDataType] sp_data_type = wrapped.get().type + + if type_id == _Type_NA: + return _NULL + + if type_id not in _scalar_classes: + raise NotImplementedError( + "Wrapping scalar of type " + frombytes(sp_data_type.get().ToString())) + + typ = get_scalar_class_from_type(sp_data_type) + self = typ.__new__(typ) + self.init(wrapped) + + return self + + cdef inline shared_ptr[CScalar] unwrap(self) nogil: + return self.wrapped + + @property + def type(self): + """ + Data type of the Scalar object. + """ + return pyarrow_wrap_data_type(self.wrapped.get().type) + + @property + def is_valid(self): + """ + Holds a valid (non-null) value. + """ + return self.wrapped.get().is_valid + + def cast(self, object target_type=None, safe=None, options=None, memory_pool=None): + """ + Cast scalar value to another data type. + + See :func:`pyarrow.compute.cast` for usage. 
+ + Parameters + ---------- + target_type : DataType, default None + Type to cast scalar to. + safe : boolean, default True + Whether to check for conversion errors such as overflow. + options : CastOptions, default None + Additional checks pass by CastOptions + memory_pool : MemoryPool, optional + memory pool to use for allocations during function execution. + + Returns + ------- + scalar : A Scalar of the given target data type. + """ + return _pc().cast(self, target_type, safe=safe, + options=options, memory_pool=memory_pool) + + def validate(self, *, full=False): + """ + Perform validation checks. An exception is raised if validation fails. + + By default only cheap validation checks are run. Pass `full=True` + for thorough validation checks (potentially O(n)). + + Parameters + ---------- + full : bool, default False + If True, run expensive checks, otherwise cheap checks only. + + Raises + ------ + ArrowInvalid + """ + if full: + with nogil: + check_status(self.wrapped.get().ValidateFull()) + else: + with nogil: + check_status(self.wrapped.get().Validate()) + + def __repr__(self): + return ''.format( + self.__class__.__name__, self.as_py() + ) + + def __str__(self): + return str(self.as_py()) + + def equals(self, Scalar other not None): + """ + Parameters + ---------- + other : pyarrow.Scalar + + Returns + ------- + bool + """ + return self.wrapped.get().Equals(other.unwrap().get()[0]) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def __hash__(self): + cdef CScalarHash hasher + return hasher(self.wrapped) + + def __reduce__(self): + return scalar, (self.as_py(), self.type) + + def as_py(self): + raise NotImplementedError() + + +_NULL = NA = None + + +cdef class NullScalar(Scalar): + """ + Concrete class for null scalars. 
+ """ + + def __cinit__(self): + global NA + if NA is not None: + raise RuntimeError('Cannot create multiple NullScalar instances') + self.init(shared_ptr[CScalar](new CNullScalar())) + + def __init__(self): + pass + + def as_py(self): + """ + Return this value as a Python None. + """ + return None + + +_NULL = NA = NullScalar() + + +cdef class BooleanScalar(Scalar): + """ + Concrete class for boolean scalars. + """ + + def as_py(self): + """ + Return this value as a Python bool. + """ + cdef CBooleanScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class UInt8Scalar(Scalar): + """ + Concrete class for uint8 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CUInt8Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Int8Scalar(Scalar): + """ + Concrete class for int8 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CInt8Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class UInt16Scalar(Scalar): + """ + Concrete class for uint16 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CUInt16Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Int16Scalar(Scalar): + """ + Concrete class for int16 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CInt16Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class UInt32Scalar(Scalar): + """ + Concrete class for uint32 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CUInt32Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Int32Scalar(Scalar): + """ + Concrete class for int32 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. 
+ """ + cdef CInt32Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class UInt64Scalar(Scalar): + """ + Concrete class for uint64 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CUInt64Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Int64Scalar(Scalar): + """ + Concrete class for int64 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CInt64Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class HalfFloatScalar(Scalar): + """ + Concrete class for float scalars. + """ + + def as_py(self): + """ + Return this value as a Python float. + """ + cdef CHalfFloatScalar* sp = self.wrapped.get() + return PyHalf_FromHalf(sp.value) if sp.is_valid else None + + +cdef class FloatScalar(Scalar): + """ + Concrete class for float scalars. + """ + + def as_py(self): + """ + Return this value as a Python float. + """ + cdef CFloatScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class DoubleScalar(Scalar): + """ + Concrete class for double scalars. + """ + + def as_py(self): + """ + Return this value as a Python float. + """ + cdef CDoubleScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Decimal128Scalar(Scalar): + """ + Concrete class for decimal128 scalars. + """ + + def as_py(self): + """ + Return this value as a Python Decimal. + """ + cdef: + CDecimal128Scalar* sp = self.wrapped.get() + CDecimal128Type* dtype = sp.type.get() + if sp.is_valid: + return _pydecimal.Decimal( + frombytes(sp.value.ToString(dtype.scale())) + ) + else: + return None + + +cdef class Decimal256Scalar(Scalar): + """ + Concrete class for decimal256 scalars. + """ + + def as_py(self): + """ + Return this value as a Python Decimal. 
+ """ + cdef: + CDecimal256Scalar* sp = self.wrapped.get() + CDecimal256Type* dtype = sp.type.get() + if sp.is_valid: + return _pydecimal.Decimal( + frombytes(sp.value.ToString(dtype.scale())) + ) + else: + return None + + +cdef class Date32Scalar(Scalar): + """ + Concrete class for date32 scalars. + """ + + @property + def value(self): + cdef CDate32Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python datetime.datetime instance. + """ + cdef CDate32Scalar* sp = self.wrapped.get() + + if sp.is_valid: + # shift to seconds since epoch + return ( + datetime.date(1970, 1, 1) + datetime.timedelta(days=sp.value) + ) + else: + return None + + +cdef class Date64Scalar(Scalar): + """ + Concrete class for date64 scalars. + """ + + @property + def value(self): + cdef CDate64Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python datetime.datetime instance. + """ + cdef CDate64Scalar* sp = self.wrapped.get() + + if sp.is_valid: + return ( + datetime.date(1970, 1, 1) + + datetime.timedelta(days=sp.value / 86400000) + ) + else: + return None + + +def _datetime_from_int(int64_t value, TimeUnit unit, tzinfo=None): + if unit == TimeUnit_SECOND: + delta = datetime.timedelta(seconds=value) + elif unit == TimeUnit_MILLI: + delta = datetime.timedelta(milliseconds=value) + elif unit == TimeUnit_MICRO: + delta = datetime.timedelta(microseconds=value) + else: + # TimeUnit_NANO: prefer pandas timestamps if available + if _pandas_api.have_pandas: + return _pandas_api.pd.Timestamp(value, tz=tzinfo, unit='ns') + # otherwise safely truncate to microsecond resolution datetime + if value % 1000 != 0: + raise ValueError( + "Nanosecond resolution temporal type {} is not safely " + "convertible to microseconds to convert to datetime.datetime. 
" + "Install pandas to return as Timestamp with nanosecond " + "support or access the .value attribute.".format(value) + ) + delta = datetime.timedelta(microseconds=value // 1000) + + dt = datetime.datetime(1970, 1, 1) + delta + # adjust timezone if set to the datatype + if tzinfo is not None: + dt = dt.replace(tzinfo=datetime.timezone.utc).astimezone(tzinfo) + + return dt + + +cdef class Time32Scalar(Scalar): + """ + Concrete class for time32 scalars. + """ + + @property + def value(self): + cdef CTime32Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python datetime.timedelta instance. + """ + cdef: + CTime32Scalar* sp = self.wrapped.get() + CTime32Type* dtype = sp.type.get() + + if sp.is_valid: + return _datetime_from_int(sp.value, unit=dtype.unit()).time() + else: + return None + + +cdef class Time64Scalar(Scalar): + """ + Concrete class for time64 scalars. + """ + + @property + def value(self): + cdef CTime64Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python datetime.timedelta instance. + """ + cdef: + CTime64Scalar* sp = self.wrapped.get() + CTime64Type* dtype = sp.type.get() + + if sp.is_valid: + return _datetime_from_int(sp.value, unit=dtype.unit()).time() + else: + return None + + +cdef class TimestampScalar(Scalar): + """ + Concrete class for timestamp scalars. + """ + + @property + def value(self): + cdef CTimestampScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Pandas Timestamp instance (if units are + nanoseconds and pandas is available), otherwise as a Python + datetime.datetime instance. 
+ """ + cdef: + CTimestampScalar* sp = self.wrapped.get() + CTimestampType* dtype = sp.type.get() + + if not sp.is_valid: + return None + + if not dtype.timezone().empty(): + tzinfo = string_to_tzinfo(frombytes(dtype.timezone())) + else: + tzinfo = None + + return _datetime_from_int(sp.value, unit=dtype.unit(), tzinfo=tzinfo) + + def __repr__(self): + """ + Return the representation of TimestampScalar using `strftime` to avoid + original repr datetime values being out of range. + """ + cdef: + CTimestampScalar* sp = self.wrapped.get() + CTimestampType* dtype = sp.type.get() + + if not dtype.timezone().empty(): + type_format = str(_pc().strftime(self, format="%Y-%m-%dT%H:%M:%S%z")) + else: + type_format = str(_pc().strftime(self)) + return ''.format( + self.__class__.__name__, type_format + ) + + +cdef class DurationScalar(Scalar): + """ + Concrete class for duration scalars. + """ + + @property + def value(self): + cdef CDurationScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Pandas Timedelta instance (if units are + nanoseconds and pandas is available), otherwise as a Python + datetime.timedelta instance. + """ + cdef: + CDurationScalar* sp = self.wrapped.get() + CDurationType* dtype = sp.type.get() + TimeUnit unit = dtype.unit() + + if not sp.is_valid: + return None + + if unit == TimeUnit_SECOND: + return datetime.timedelta(seconds=sp.value) + elif unit == TimeUnit_MILLI: + return datetime.timedelta(milliseconds=sp.value) + elif unit == TimeUnit_MICRO: + return datetime.timedelta(microseconds=sp.value) + else: + # TimeUnit_NANO: prefer pandas timestamps if available + if _pandas_api.have_pandas: + return _pandas_api.pd.Timedelta(sp.value, unit='ns') + # otherwise safely truncate to microsecond resolution timedelta + if sp.value % 1000 != 0: + raise ValueError( + "Nanosecond duration {} is not safely convertible to " + "microseconds to convert to datetime.timedelta. 
Install " + "pandas to return as Timedelta with nanosecond support or " + "access the .value attribute.".format(sp.value) + ) + return datetime.timedelta(microseconds=sp.value // 1000) + + +cdef class MonthDayNanoIntervalScalar(Scalar): + """ + Concrete class for month, day, nanosecond interval scalars. + """ + + @property + def value(self): + """ + Same as self.as_py() + """ + return self.as_py() + + def as_py(self): + """ + Return this value as a pyarrow.MonthDayNano. + """ + cdef: + PyObject* val + CMonthDayNanoIntervalScalar* scalar + scalar = self.wrapped.get() + val = GetResultValue(MonthDayNanoIntervalScalarToPyObject( + deref(scalar))) + return PyObject_to_object(val) + + +cdef class BinaryScalar(Scalar): + """ + Concrete class for binary-like scalars. + """ + + def as_buffer(self): + """ + Return a view over this value as a Buffer object. + """ + cdef CBaseBinaryScalar* sp = self.wrapped.get() + return pyarrow_wrap_buffer(sp.value) if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python bytes. + """ + buffer = self.as_buffer() + return None if buffer is None else buffer.to_pybytes() + + +cdef class LargeBinaryScalar(BinaryScalar): + pass + + +cdef class FixedSizeBinaryScalar(BinaryScalar): + pass + + +cdef class StringScalar(BinaryScalar): + """ + Concrete class for string-like (utf8) scalars. + """ + + def as_py(self): + """ + Return this value as a Python string. + """ + buffer = self.as_buffer() + return None if buffer is None else str(buffer, 'utf8') + + +cdef class LargeStringScalar(StringScalar): + pass + + +cdef class BinaryViewScalar(BinaryScalar): + pass + + +cdef class StringViewScalar(StringScalar): + pass + + +cdef class ListScalar(Scalar): + """ + Concrete class for list-like scalars. + """ + + @property + def values(self): + cdef CBaseListScalar* sp = self.wrapped.get() + if sp.is_valid: + return pyarrow_wrap_array(sp.value) + else: + return None + + def __len__(self): + """ + Return the number of values. 
+ """ + return len(self.values) + + def __getitem__(self, i): + """ + Return the value at the given index. + """ + return self.values[_normalize_index(i, len(self))] + + def __iter__(self): + """ + Iterate over this element's values. + """ + return iter(self.values) + + def as_py(self): + """ + Return this value as a Python list. + """ + arr = self.values + return None if arr is None else arr.to_pylist() + + +cdef class FixedSizeListScalar(ListScalar): + pass + + +cdef class LargeListScalar(ListScalar): + pass + + +cdef class ListViewScalar(ListScalar): + pass + + +cdef class LargeListViewScalar(ListScalar): + pass + + +cdef class StructScalar(Scalar, collections.abc.Mapping): + """ + Concrete class for struct scalars. + """ + + def __len__(self): + cdef CStructScalar* sp = self.wrapped.get() + return sp.value.size() + + def __iter__(self): + cdef: + CStructScalar* sp = self.wrapped.get() + CStructType* dtype = sp.type.get() + vector[shared_ptr[CField]] fields = dtype.fields() + + for i in range(dtype.num_fields()): + yield frombytes(fields[i].get().name()) + + def items(self): + return ((key, self[i]) for i, key in enumerate(self)) + + def __contains__(self, key): + return key in list(self) + + def __getitem__(self, key): + """ + Return the child value for the given field. + + Parameters + ---------- + index : Union[int, str] + Index / position or name of the field. + + Returns + ------- + result : Scalar + """ + cdef: + CFieldRef ref + CStructScalar* sp = self.wrapped.get() + + if isinstance(key, (bytes, str)): + ref = CFieldRef( tobytes(key)) + elif isinstance(key, int): + ref = CFieldRef( key) + else: + raise TypeError('Expected integer or string index') + + try: + return Scalar.wrap(GetResultValue(sp.field(ref))) + except ArrowInvalid as exc: + if isinstance(key, int): + raise IndexError(key) from exc + else: + raise KeyError(key) from exc + + def as_py(self): + """ + Return this value as a Python dict. 
+ """ + if self.is_valid: + try: + return {k: self[k].as_py() for k in self.keys()} + except KeyError: + raise ValueError( + "Converting to Python dictionary is not supported when " + "duplicate field names are present") + else: + return None + + def _as_py_tuple(self): + # a version that returns a tuple instead of dict to support repr/str + # with the presence of duplicate field names + if self.is_valid: + return [(key, self[i].as_py()) for i, key in enumerate(self)] + else: + return None + + def __repr__(self): + return ''.format( + self.__class__.__name__, self._as_py_tuple() + ) + + def __str__(self): + return str(self._as_py_tuple()) + + +cdef class MapScalar(ListScalar): + """ + Concrete class for map scalars. + """ + + def __getitem__(self, i): + """ + Return the value at the given index. + """ + arr = self.values + if arr is None: + raise IndexError(i) + dct = arr[_normalize_index(i, len(arr))] + return (dct[self.type.key_field.name], dct[self.type.item_field.name]) + + def __iter__(self): + """ + Iterate over this element's values. + """ + arr = self.values + if arr is None: + return + for k, v in zip(arr.field(self.type.key_field.name), arr.field(self.type.item_field.name)): + yield (k.as_py(), v.as_py()) + + def as_py(self): + """ + Return this value as a Python list. + """ + cdef CStructScalar* sp = self.wrapped.get() + return list(self) if sp.is_valid else None + + +cdef class DictionaryScalar(Scalar): + """ + Concrete class for dictionary-encoded scalars. 
+ """ + + @staticmethod + @binding(True) # Required for cython < 3 + def _reconstruct(type, is_valid, index, dictionary): + cdef: + CDictionaryScalarIndexAndDictionary value + shared_ptr[CDictionaryScalar] wrapped + DataType type_ + Scalar index_ + Array dictionary_ + + type_ = ensure_type(type, allow_none=False) + if not isinstance(type_, DictionaryType): + raise TypeError('Must pass a DictionaryType instance') + + if isinstance(index, Scalar): + if not index.type.equals(type.index_type): + raise TypeError("The Scalar value passed as index must have " + "identical type to the dictionary type's " + "index_type") + index_ = index + else: + index_ = scalar(index, type=type_.index_type) + + if isinstance(dictionary, Array): + if not dictionary.type.equals(type.value_type): + raise TypeError("The Array passed as dictionary must have " + "identical type to the dictionary type's " + "value_type") + dictionary_ = dictionary + else: + dictionary_ = array(dictionary, type=type_.value_type) + + value.index = pyarrow_unwrap_scalar(index_) + value.dictionary = pyarrow_unwrap_array(dictionary_) + + wrapped = make_shared[CDictionaryScalar]( + value, pyarrow_unwrap_data_type(type_), (is_valid) + ) + return Scalar.wrap( wrapped) + + def __reduce__(self): + return DictionaryScalar._reconstruct, ( + self.type, self.is_valid, self.index, self.dictionary + ) + + @property + def index(self): + """ + Return this value's underlying index as a scalar. + """ + cdef CDictionaryScalar* sp = self.wrapped.get() + return Scalar.wrap(sp.value.index) + + @property + def value(self): + """ + Return the encoded value as a scalar. + """ + cdef CDictionaryScalar* sp = self.wrapped.get() + return Scalar.wrap(GetResultValue(sp.GetEncodedValue())) + + @property + def dictionary(self): + cdef CDictionaryScalar* sp = self.wrapped.get() + return pyarrow_wrap_array(sp.value.dictionary) + + def as_py(self): + """ + Return this encoded value as a Python object. 
+ """ + return self.value.as_py() if self.is_valid else None + + +cdef class RunEndEncodedScalar(Scalar): + """ + Concrete class for RunEndEncoded scalars. + """ + @property + def value(self): + """ + Return underlying value as a scalar. + """ + cdef CRunEndEncodedScalar* sp = self.wrapped.get() + return Scalar.wrap(sp.value) + + def as_py(self): + """ + Return underlying value as a Python object. + """ + return self.value.as_py() + + +cdef class UnionScalar(Scalar): + """ + Concrete class for Union scalars. + """ + + @property + def value(self): + """ + Return underlying value as a scalar. + """ + cdef CSparseUnionScalar* sp + cdef CDenseUnionScalar* dp + if self.type.id == _Type_SPARSE_UNION: + sp = self.wrapped.get() + return Scalar.wrap(sp.value[sp.child_id]) if sp.is_valid else None + else: + dp = self.wrapped.get() + return Scalar.wrap(dp.value) if dp.is_valid else None + + def as_py(self): + """ + Return underlying value as a Python object. + """ + value = self.value + return None if value is None else value.as_py() + + @property + def type_code(self): + """ + Return the union type code for this scalar. + """ + cdef CUnionScalar* sp = self.wrapped.get() + return sp.type_code + + +cdef class ExtensionScalar(Scalar): + """ + Concrete class for Extension scalars. + """ + + @property + def value(self): + """ + Return storage value as a scalar. + """ + cdef CExtensionScalar* sp = self.wrapped.get() + return Scalar.wrap(sp.value) if sp.is_valid else None + + def as_py(self): + """ + Return this scalar as a Python object. + """ + return None if self.value is None else self.value.as_py() + + @staticmethod + def from_storage(BaseExtensionType typ, value): + """ + Construct ExtensionScalar from type and storage value. + + Parameters + ---------- + typ : DataType + The extension type for the result scalar. + value : object + The storage value for the result scalar. 
+ + Returns + ------- + ext_scalar : ExtensionScalar + """ + cdef: + shared_ptr[CExtensionScalar] sp_scalar + shared_ptr[CScalar] sp_storage + CExtensionScalar* ext_scalar + + if value is None: + storage = None + elif isinstance(value, Scalar): + if value.type != typ.storage_type: + raise TypeError("Incompatible storage type {0} " + "for extension type {1}" + .format(value.type, typ)) + storage = value + else: + storage = scalar(value, typ.storage_type) + + cdef c_bool is_valid = storage is not None and storage.is_valid + if is_valid: + sp_storage = pyarrow_unwrap_scalar(storage) + else: + sp_storage = MakeNullScalar(( typ.storage_type).sp_type) + sp_scalar = make_shared[CExtensionScalar](sp_storage, typ.sp_type, + is_valid) + with nogil: + check_status(sp_scalar.get().Validate()) + return pyarrow_wrap_scalar( sp_scalar) + + +cdef class FixedShapeTensorScalar(ExtensionScalar): + """ + Concrete class for fixed shape tensor extension scalar. + """ + + def to_numpy(self): + """ + Convert fixed shape tensor scalar to a numpy.ndarray. + + The resulting ndarray's shape matches the permuted shape of the + fixed shape tensor scalar. + The conversion is zero-copy. + + Returns + ------- + numpy.ndarray + """ + return self.to_tensor().to_numpy() + + def to_tensor(self): + """ + Convert fixed shape tensor extension scalar to a pyarrow.Tensor, using shape + and strides derived from corresponding FixedShapeTensorType. + + The conversion is zero-copy. + + Returns + ------- + pyarrow.Tensor + Tensor represented stored in FixedShapeTensorScalar. 
+ """ + cdef: + CFixedShapeTensorType* c_type = static_pointer_cast[CFixedShapeTensorType, CDataType]( + self.wrapped.get().type).get() + shared_ptr[CExtensionScalar] scalar = static_pointer_cast[CExtensionScalar, CScalar](self.wrapped) + shared_ptr[CTensor] ctensor + + with nogil: + ctensor = GetResultValue(c_type.MakeTensor(scalar)) + return pyarrow_wrap_tensor(ctensor) + + +cdef dict _scalar_classes = { + _Type_BOOL: BooleanScalar, + _Type_UINT8: UInt8Scalar, + _Type_UINT16: UInt16Scalar, + _Type_UINT32: UInt32Scalar, + _Type_UINT64: UInt64Scalar, + _Type_INT8: Int8Scalar, + _Type_INT16: Int16Scalar, + _Type_INT32: Int32Scalar, + _Type_INT64: Int64Scalar, + _Type_HALF_FLOAT: HalfFloatScalar, + _Type_FLOAT: FloatScalar, + _Type_DOUBLE: DoubleScalar, + _Type_DECIMAL128: Decimal128Scalar, + _Type_DECIMAL256: Decimal256Scalar, + _Type_DATE32: Date32Scalar, + _Type_DATE64: Date64Scalar, + _Type_TIME32: Time32Scalar, + _Type_TIME64: Time64Scalar, + _Type_TIMESTAMP: TimestampScalar, + _Type_DURATION: DurationScalar, + _Type_BINARY: BinaryScalar, + _Type_LARGE_BINARY: LargeBinaryScalar, + _Type_FIXED_SIZE_BINARY: FixedSizeBinaryScalar, + _Type_BINARY_VIEW: BinaryViewScalar, + _Type_STRING: StringScalar, + _Type_LARGE_STRING: LargeStringScalar, + _Type_STRING_VIEW: StringViewScalar, + _Type_LIST: ListScalar, + _Type_LARGE_LIST: LargeListScalar, + _Type_FIXED_SIZE_LIST: FixedSizeListScalar, + _Type_LIST_VIEW: ListViewScalar, + _Type_LARGE_LIST_VIEW: LargeListViewScalar, + _Type_STRUCT: StructScalar, + _Type_MAP: MapScalar, + _Type_DICTIONARY: DictionaryScalar, + _Type_RUN_END_ENCODED: RunEndEncodedScalar, + _Type_SPARSE_UNION: UnionScalar, + _Type_DENSE_UNION: UnionScalar, + _Type_INTERVAL_MONTH_DAY_NANO: MonthDayNanoIntervalScalar, + _Type_EXTENSION: ExtensionScalar, +} + + +cdef object get_scalar_class_from_type( + const shared_ptr[CDataType]& sp_data_type): + cdef CDataType* data_type = sp_data_type.get() + if data_type == NULL: + raise ValueError('Scalar data type was 
NULL') + + if data_type.id() == _Type_EXTENSION: + py_ext_data_type = pyarrow_wrap_data_type(sp_data_type) + return py_ext_data_type.__arrow_ext_scalar_class__() + else: + return _scalar_classes[data_type.id()] + + +def scalar(value, type=None, *, from_pandas=None, MemoryPool memory_pool=None): + """ + Create a pyarrow.Scalar instance from a Python object. + + Parameters + ---------- + value : Any + Python object coercible to arrow's type system. + type : pyarrow.DataType + Explicit type to attempt to coerce to, otherwise will be inferred from + the value. + from_pandas : bool, default None + Use pandas's semantics for inferring nulls from values in + ndarray-like data. Defaults to False if not passed explicitly by user, + or True if a pandas object is passed in. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the currently-set default + memory pool. + + Returns + ------- + scalar : pyarrow.Scalar + + Examples + -------- + >>> import pyarrow as pa + + >>> pa.scalar(42) + + + >>> pa.scalar("string") + + + >>> pa.scalar([1, 2]) + + + >>> pa.scalar([1, 2], type=pa.list_(pa.int16())) + + """ + cdef: + DataType ty + PyConversionOptions options + shared_ptr[CScalar] scalar + shared_ptr[CArray] array + shared_ptr[CChunkedArray] chunked + bint is_pandas_object = False + CMemoryPool* pool + + type = ensure_type(type, allow_none=True) + pool = maybe_unbox_memory_pool(memory_pool) + + if _is_array_like(value): + value = get_values(value, &is_pandas_object) + + options.size = 1 + + if type is not None: + ty = ensure_type(type) + options.type = ty.sp_type + + if from_pandas is None: + options.from_pandas = is_pandas_object + else: + options.from_pandas = from_pandas + + value = [value] + with nogil: + chunked = GetResultValue(ConvertPySequence(value, None, options, pool)) + + # get the first chunk + assert chunked.get().num_chunks() == 1 + array = chunked.get().chunk(0) + + # retrieve the scalar from the first position + scalar = 
GetResultValue(array.get().GetScalar(0)) + return Scalar.wrap(scalar) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/substrait.py b/parrot/lib/python3.10/site-packages/pyarrow/substrait.py new file mode 100644 index 0000000000000000000000000000000000000000..a2b217f4936c56238f8aefb88ae6ca3791c099e6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/substrait.py @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +try: + from pyarrow._substrait import ( # noqa + BoundExpressions, + get_supported_functions, + run_query, + deserialize_expressions, + serialize_expressions + ) +except ImportError as exc: + raise ImportError( + "The pyarrow installation is not built with support " + f"for 'substrait' ({str(exc)})" + ) from None diff --git a/parrot/lib/python3.10/site-packages/pyarrow/table.pxi b/parrot/lib/python3.10/site-packages/pyarrow/table.pxi new file mode 100644 index 0000000000000000000000000000000000000000..eb9ba650dbf60deba9a3fe452b3ebea46a2165be --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/table.pxi @@ -0,0 +1,6361 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from cpython.pycapsule cimport PyCapsule_CheckExact, PyCapsule_GetPointer, PyCapsule_New + +import warnings +from cython import sizeof + +cdef class ChunkedArray(_PandasConvertible): + """ + An array-like composed from a (possibly empty) collection of pyarrow.Arrays + + Warnings + -------- + Do not call this class's constructor directly. + + Examples + -------- + To construct a ChunkedArray object use :func:`pyarrow.chunked_array`: + + >>> import pyarrow as pa + >>> pa.chunked_array([], type=pa.int8()) + + [ + ... 
+ ] + + >>> pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> isinstance(pa.chunked_array([[2, 2, 4], [4, 5, 100]]), pa.ChunkedArray) + True + """ + + def __cinit__(self): + self.chunked_array = NULL + + def __init__(self): + raise TypeError("Do not call ChunkedArray's constructor directly, use " + "`chunked_array` function instead.") + + cdef void init(self, const shared_ptr[CChunkedArray]& chunked_array): + self.sp_chunked_array = chunked_array + self.chunked_array = chunked_array.get() + + def __reduce__(self): + return chunked_array, (self.chunks, self.type) + + @property + def data(self): + import warnings + warnings.warn("Calling .data on ChunkedArray is provided for " + "compatibility after Column was removed, simply drop " + "this attribute", FutureWarning) + return self + + @property + def type(self): + """ + Return data type of a ChunkedArray. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs.type + DataType(int64) + """ + return pyarrow_wrap_data_type(self.sp_chunked_array.get().type()) + + def length(self): + """ + Return length of a ChunkedArray. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs.length() + 6 + """ + return self.chunked_array.length() + + def __len__(self): + return self.length() + + def __repr__(self): + type_format = object.__repr__(self) + return '{0}\n{1}'.format(type_format, str(self)) + + def to_string(self, *, int indent=0, int window=5, int container_window=2, + c_bool skip_new_lines=False): + """ + Render a "pretty-printed" string representation of the ChunkedArray + + Parameters + ---------- + indent : int + How much to indent right the content of the array, + by default ``0``. + window : int + How many items to preview within each chunk at the begin and end + of the chunk when the chunk is bigger than the window. 
+ The other elements will be ellipsed. + container_window : int + How many chunks to preview at the begin and end + of the array when the array is bigger than the window. + The other elements will be ellipsed. + This setting also applies to list columns. + skip_new_lines : bool + If the array should be rendered as a single line of text + or if each element should be on its own line. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs.to_string(skip_new_lines=True) + '[[2,2,4],[4,5,100]]' + """ + cdef: + c_string result + PrettyPrintOptions options + + with nogil: + options = PrettyPrintOptions(indent, window) + options.skip_new_lines = skip_new_lines + options.container_window = container_window + check_status( + PrettyPrint( + deref(self.chunked_array), + options, + &result + ) + ) + + return frombytes(result, safe=True) + + def format(self, **kwargs): + """ + DEPRECATED, use pyarrow.ChunkedArray.to_string + + Parameters + ---------- + **kwargs : dict + + Returns + ------- + str + """ + import warnings + warnings.warn('ChunkedArray.format is deprecated, ' + 'use ChunkedArray.to_string') + return self.to_string(**kwargs) + + def __str__(self): + return self.to_string() + + def validate(self, *, full=False): + """ + Perform validation checks. An exception is raised if validation fails. + + By default only cheap validation checks are run. Pass `full=True` + for thorough validation checks (potentially O(n)). + + Parameters + ---------- + full : bool, default False + If True, run expensive checks, otherwise cheap checks only. 
+ + Raises + ------ + ArrowInvalid + """ + if full: + with nogil: + check_status(self.sp_chunked_array.get().ValidateFull()) + else: + with nogil: + check_status(self.sp_chunked_array.get().Validate()) + + @property + def null_count(self): + """ + Number of null entries + + Returns + ------- + int + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.null_count + 1 + """ + return self.chunked_array.null_count() + + @property + def nbytes(self): + """ + Total number of bytes consumed by the elements of the chunked array. + + In other words, the sum of bytes from all buffer ranges referenced. + + Unlike `get_total_buffer_size` this method will account for array + offsets. + + If buffers are shared between arrays then the shared + portion will only be counted multiple times. + + The dictionary of dictionary arrays will always be counted in their + entirety even if the array only references a portion of the dictionary. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.nbytes + 49 + """ + cdef: + CResult[int64_t] c_res_buffer + + with nogil: + c_res_buffer = ReferencedBufferSize(deref(self.chunked_array)) + size = GetResultValue(c_res_buffer) + return size + + def get_total_buffer_size(self): + """ + The sum of bytes in each buffer referenced by the chunked array. + + An array may only reference a portion of a buffer. + This method will overestimate in this case and return the + byte size of the entire buffer. + + If a buffer is referenced multiple times then it will + only be counted once. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.get_total_buffer_size() + 49 + """ + cdef: + int64_t total_buffer_size + + total_buffer_size = TotalBufferSize(deref(self.chunked_array)) + return total_buffer_size + + def __sizeof__(self): + return super(ChunkedArray, self).__sizeof__() + self.nbytes + + def __iter__(self): + for chunk in self.iterchunks(): + for item in chunk: + yield item + + def __getitem__(self, key): + """ + Slice or return value at given index + + Parameters + ---------- + key : integer or slice + Slices with step not equal to 1 (or None) will produce a copy + rather than a zero-copy view + + Returns + ------- + value : Scalar (index) or ChunkedArray (slice) + """ + + if isinstance(key, slice): + return _normalize_slice(self, key) + + return self.getitem(_normalize_index(key, self.chunked_array.length())) + + cdef getitem(self, int64_t i): + return Scalar.wrap(GetResultValue(self.chunked_array.GetScalar(i))) + + def is_null(self, *, nan_is_null=False): + """ + Return boolean array indicating the null values. + + Parameters + ---------- + nan_is_null : bool (optional, default False) + Whether floating-point NaN values should also be considered null. + + Returns + ------- + array : boolean Array or ChunkedArray + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.is_null() + + [ + [ + false, + false, + false, + false, + true, + false + ] + ] + """ + options = _pc().NullOptions(nan_is_null=nan_is_null) + return _pc().call_function('is_null', [self], options) + + def is_nan(self): + """ + Return boolean array indicating the NaN values. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> arr = pa.chunked_array([[2, np.nan, 4], [4, None, 100]]) + >>> arr.is_nan() + + [ + [ + false, + true, + false, + false, + null, + false + ] + ] + """ + return _pc().is_nan(self) + + def is_valid(self): + """ + Return boolean array indicating the non-null values. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.is_valid() + + [ + [ + true, + true, + true + ], + [ + true, + false, + true + ] + ] + """ + return _pc().is_valid(self) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def fill_null(self, fill_value): + """ + Replace each null element in values with fill_value. + + See :func:`pyarrow.compute.fill_null` for full usage. + + Parameters + ---------- + fill_value : any + The replacement value for null entries. + + Returns + ------- + result : Array or ChunkedArray + A new array with nulls replaced by the given value. + + Examples + -------- + >>> import pyarrow as pa + >>> fill_value = pa.scalar(5, type=pa.int8()) + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.fill_null(fill_value) + + [ + [ + 2, + 2, + 4, + 4, + 5, + 100 + ] + ] + """ + return _pc().fill_null(self, fill_value) + + def equals(self, ChunkedArray other): + """ + Return whether the contents of two chunked arrays are equal. + + Parameters + ---------- + other : pyarrow.ChunkedArray + Chunked array to compare against. + + Returns + ------- + are_equal : bool + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> animals = pa.chunked_array(( + ... ["Flamingo", "Parrot", "Dog"], + ... ["Horse", "Brittle stars", "Centipede"] + ... 
)) + >>> n_legs.equals(n_legs) + True + >>> n_legs.equals(animals) + False + """ + if other is None: + return False + + cdef: + CChunkedArray* this_arr = self.chunked_array + CChunkedArray* other_arr = other.chunked_array + c_bool result + + with nogil: + result = this_arr.Equals(deref(other_arr)) + + return result + + def _to_pandas(self, options, types_mapper=None, **kwargs): + return _array_like_to_pandas(self, options, types_mapper=types_mapper) + + def to_numpy(self, zero_copy_only=False): + """ + Return a NumPy copy of this array (experimental). + + Parameters + ---------- + zero_copy_only : bool, default False + Introduced for signature consistence with pyarrow.Array.to_numpy. + This must be False here since NumPy arrays' buffer must be contiguous. + + Returns + ------- + array : numpy.ndarray + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs.to_numpy() + array([ 2, 2, 4, 4, 5, 100]) + """ + if zero_copy_only: + raise ValueError( + "zero_copy_only must be False for pyarrow.ChunkedArray.to_numpy" + ) + cdef: + PyObject* out + PandasOptions c_options + object values + + c_options.to_numpy = True + + with nogil: + check_status( + ConvertChunkedArrayToPandas( + c_options, + self.sp_chunked_array, + self, + &out + ) + ) + + # wrap_array_output uses pandas to convert to Categorical, here + # always convert to numpy array + values = PyObject_to_object(out) + + if isinstance(values, dict): + values = np.take(values['dictionary'], values['indices']) + + return values + + def __array__(self, dtype=None, copy=None): + if copy is False: + raise ValueError( + "Unable to avoid a copy while creating a numpy array as requested " + "(converting a pyarrow.ChunkedArray always results in a copy).\n" + "If using `np.array(obj, copy=False)` replace it with " + "`np.asarray(obj)` to allow a copy when needed" + ) + # 'copy' can further be ignored because to_numpy() already returns a copy + values = 
self.to_numpy() + if dtype is None: + return values + return values.astype(dtype, copy=False) + + def cast(self, object target_type=None, safe=None, options=None): + """ + Cast array values to another data type + + See :func:`pyarrow.compute.cast` for usage. + + Parameters + ---------- + target_type : DataType, None + Type to cast array to. + safe : boolean, default True + Whether to check for conversion errors such as overflow. + options : CastOptions, default None + Additional checks pass by CastOptions + + Returns + ------- + cast : Array or ChunkedArray + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs.type + DataType(int64) + + Change the data type of an array: + + >>> n_legs_seconds = n_legs.cast(pa.duration('s')) + >>> n_legs_seconds.type + DurationType(duration[s]) + """ + return _pc().cast(self, target_type, safe=safe, options=options) + + def dictionary_encode(self, null_encoding='mask'): + """ + Compute dictionary-encoded representation of array. + + See :func:`pyarrow.compute.dictionary_encode` for full usage. + + Parameters + ---------- + null_encoding : str, default "mask" + How to handle null entries. + + Returns + ------- + encoded : ChunkedArray + A dictionary-encoded version of this array. + + Examples + -------- + >>> import pyarrow as pa + >>> animals = pa.chunked_array(( + ... ["Flamingo", "Parrot", "Dog"], + ... ["Horse", "Brittle stars", "Centipede"] + ... )) + >>> animals.dictionary_encode() + + [ + ... + -- dictionary: + [ + "Flamingo", + "Parrot", + "Dog", + "Horse", + "Brittle stars", + "Centipede" + ] + -- indices: + [ + 0, + 1, + 2 + ], + ... 
+ -- dictionary: + [ + "Flamingo", + "Parrot", + "Dog", + "Horse", + "Brittle stars", + "Centipede" + ] + -- indices: + [ + 3, + 4, + 5 + ] + ] + """ + options = _pc().DictionaryEncodeOptions(null_encoding) + return _pc().call_function('dictionary_encode', [self], options) + + def flatten(self, MemoryPool memory_pool=None): + """ + Flatten this ChunkedArray. If it has a struct type, the column is + flattened into one array per struct field. + + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool + + Returns + ------- + result : list of ChunkedArray + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> c_arr = pa.chunked_array(n_legs.value_counts()) + >>> c_arr + + [ + -- is_valid: all not null + -- child 0 type: int64 + [ + 2, + 4, + 5, + 100 + ] + -- child 1 type: int64 + [ + 2, + 2, + 1, + 1 + ] + ] + >>> c_arr.flatten() + [ + [ + [ + 2, + 4, + 5, + 100 + ] + ], + [ + [ + 2, + 2, + 1, + 1 + ] + ]] + >>> c_arr.type + StructType(struct) + >>> n_legs.type + DataType(int64) + """ + cdef: + vector[shared_ptr[CChunkedArray]] flattened + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + with nogil: + flattened = GetResultValue(self.chunked_array.Flatten(pool)) + + return [pyarrow_wrap_chunked_array(col) for col in flattened] + + def combine_chunks(self, MemoryPool memory_pool=None): + """ + Flatten this ChunkedArray into a single non-chunked array. 
+ + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool + + Returns + ------- + result : Array + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.combine_chunks() + + [ + 2, + 2, + 4, + 4, + 5, + 100 + ] + """ + if self.num_chunks == 0: + return array([], type=self.type) + else: + return concat_arrays(self.chunks) + + def unique(self): + """ + Compute distinct elements in array + + Returns + ------- + pyarrow.Array + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.unique() + + [ + 2, + 4, + 5, + 100 + ] + """ + return _pc().call_function('unique', [self]) + + def value_counts(self): + """ + Compute counts of unique elements in array. 
+ + Returns + ------- + An array of structs + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.value_counts() + + -- is_valid: all not null + -- child 0 type: int64 + [ + 2, + 4, + 5, + 100 + ] + -- child 1 type: int64 + [ + 2, + 2, + 1, + 1 + ] + """ + return _pc().call_function('value_counts', [self]) + + def slice(self, offset=0, length=None): + """ + Compute zero-copy slice of this ChunkedArray + + Parameters + ---------- + offset : int, default 0 + Offset from start of array to slice + length : int, default None + Length of slice (default is until end of batch starting from + offset) + + Returns + ------- + sliced : ChunkedArray + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.slice(2,2) + + [ + [ + 4 + ], + [ + 4 + ] + ] + """ + cdef shared_ptr[CChunkedArray] result + + if offset < 0: + raise IndexError('Offset must be non-negative') + + offset = min(len(self), offset) + if length is None: + result = self.chunked_array.Slice(offset) + else: + result = self.chunked_array.Slice(offset, length) + + return pyarrow_wrap_chunked_array(result) + + def filter(self, mask, object null_selection_behavior="drop"): + """ + Select values from the chunked array. + + See :func:`pyarrow.compute.filter` for full usage. + + Parameters + ---------- + mask : Array or array-like + The boolean mask to filter the chunked array with. + null_selection_behavior : str, default "drop" + How nulls in the mask should be handled. + + Returns + ------- + filtered : Array or ChunkedArray + An array of the same type, with only the elements selected by + the boolean mask. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> mask = pa.array([True, False, None, True, False, True]) + >>> n_legs.filter(mask) + + [ + [ + 2 + ], + [ + 4, + 100 + ] + ] + >>> n_legs.filter(mask, null_selection_behavior="emit_null") + + [ + [ + 2, + null + ], + [ + 4, + 100 + ] + ] + """ + return _pc().filter(self, mask, null_selection_behavior) + + def index(self, value, start=None, end=None, *, memory_pool=None): + """ + Find the first index of a value. + + See :func:`pyarrow.compute.index` for full usage. + + Parameters + ---------- + value : Scalar or object + The value to look for in the array. + start : int, optional + The start index where to look for `value`. + end : int, optional + The end index where to look for `value`. + memory_pool : MemoryPool, optional + A memory pool for potential memory allocations. + + Returns + ------- + index : Int64Scalar + The index of the value in the array (-1 if not found). + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.index(4) + + >>> n_legs.index(4, start=3) + + """ + return _pc().index(self, value, start, end, memory_pool=memory_pool) + + def take(self, object indices): + """ + Select values from the chunked array. + + See :func:`pyarrow.compute.take` for full usage. + + Parameters + ---------- + indices : Array or array-like + The indices in the array whose values will be returned. + + Returns + ------- + taken : Array or ChunkedArray + An array with the same datatype, containing the taken values. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.take([1,4,5]) + + [ + [ + 2, + 5, + 100 + ] + ] + """ + return _pc().take(self, indices) + + def drop_null(self): + """ + Remove missing values from a chunked array. + See :func:`pyarrow.compute.drop_null` for full description. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, None], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + null + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.drop_null() + + [ + [ + 2, + 2 + ], + [ + 4, + 5, + 100 + ] + ] + """ + return _pc().drop_null(self) + + def sort(self, order="ascending", **kwargs): + """ + Sort the ChunkedArray + + Parameters + ---------- + order : str, default "ascending" + Which order to sort values in. + Accepted values are "ascending", "descending". + **kwargs : dict, optional + Additional sorting options. + As allowed by :class:`SortOptions` + + Returns + ------- + result : ChunkedArray + """ + indices = _pc().sort_indices( + self, + options=_pc().SortOptions(sort_keys=[("", order)], **kwargs) + ) + return self.take(indices) + + def unify_dictionaries(self, MemoryPool memory_pool=None): + """ + Unify dictionaries across all chunks. + + This method returns an equivalent chunked array, but where all + chunks share the same dictionary values. Dictionary indices are + transposed accordingly. + + If there are no dictionaries in the chunked array, it is returned + unchanged. 
+ + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool + + Returns + ------- + result : ChunkedArray + + Examples + -------- + >>> import pyarrow as pa + >>> arr_1 = pa.array(["Flamingo", "Parrot", "Dog"]).dictionary_encode() + >>> arr_2 = pa.array(["Horse", "Brittle stars", "Centipede"]).dictionary_encode() + >>> c_arr = pa.chunked_array([arr_1, arr_2]) + >>> c_arr + + [ + ... + -- dictionary: + [ + "Flamingo", + "Parrot", + "Dog" + ] + -- indices: + [ + 0, + 1, + 2 + ], + ... + -- dictionary: + [ + "Horse", + "Brittle stars", + "Centipede" + ] + -- indices: + [ + 0, + 1, + 2 + ] + ] + >>> c_arr.unify_dictionaries() + + [ + ... + -- dictionary: + [ + "Flamingo", + "Parrot", + "Dog", + "Horse", + "Brittle stars", + "Centipede" + ] + -- indices: + [ + 0, + 1, + 2 + ], + ... + -- dictionary: + [ + "Flamingo", + "Parrot", + "Dog", + "Horse", + "Brittle stars", + "Centipede" + ] + -- indices: + [ + 3, + 4, + 5 + ] + ] + """ + cdef: + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + shared_ptr[CChunkedArray] c_result + + with nogil: + c_result = GetResultValue(CDictionaryUnifier.UnifyChunkedArray( + self.sp_chunked_array, pool)) + + return pyarrow_wrap_chunked_array(c_result) + + @property + def num_chunks(self): + """ + Number of underlying chunks. + + Returns + ------- + int + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, None], [4, 5, 100]]) + >>> n_legs.num_chunks + 2 + """ + return self.chunked_array.num_chunks() + + def chunk(self, i): + """ + Select a chunk by its index. 
+ + Parameters + ---------- + i : int + + Returns + ------- + pyarrow.Array + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, None], [4, 5, 100]]) + >>> n_legs.chunk(1) + + [ + 4, + 5, + 100 + ] + """ + if i >= self.num_chunks or i < 0: + raise IndexError('Chunk index out of range.') + + return pyarrow_wrap_array(self.chunked_array.chunk(i)) + + @property + def chunks(self): + """ + Convert to a list of single-chunked arrays. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, None], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + null + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.chunks + [ + [ + 2, + 2, + null + ], + [ + 4, + 5, + 100 + ]] + """ + return list(self.iterchunks()) + + def iterchunks(self): + """ + Convert to an iterator of ChunkArrays. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> for i in n_legs.iterchunks(): + ... print(i.null_count) + ... + 0 + 1 + + """ + for i in range(self.num_chunks): + yield self.chunk(i) + + def to_pylist(self): + """ + Convert to a list of native Python objects. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.to_pylist() + [2, 2, 4, 4, None, 100] + """ + result = [] + for i in range(self.num_chunks): + result += self.chunk(i).to_pylist() + return result + + def __arrow_c_stream__(self, requested_schema=None): + """ + Export to a C ArrowArrayStream PyCapsule. + + Parameters + ---------- + requested_schema : PyCapsule, default None + The schema to which the stream should be casted, passed as a + PyCapsule containing a C ArrowSchema representation of the + requested schema. + + Returns + ------- + PyCapsule + A capsule containing a C ArrowArrayStream struct. 
+ """ + cdef: + ChunkedArray chunked + ArrowArrayStream* c_stream = NULL + + if requested_schema is not None: + target_type = DataType._import_from_c_capsule(requested_schema) + + if target_type != self.type: + try: + chunked = self.cast(target_type, safe=True) + except ArrowInvalid as e: + raise ValueError( + f"Could not cast {self.type} to requested type {target_type}: {e}" + ) + else: + chunked = self + else: + chunked = self + + stream_capsule = alloc_c_stream(&c_stream) + + with nogil: + check_status(ExportChunkedArray(chunked.sp_chunked_array, c_stream)) + + return stream_capsule + + @staticmethod + def _import_from_c_capsule(stream): + """ + Import ChunkedArray from a C ArrowArrayStream PyCapsule. + + Parameters + ---------- + stream: PyCapsule + A capsule containing a C ArrowArrayStream PyCapsule. + + Returns + ------- + ChunkedArray + """ + cdef: + ArrowArrayStream* c_stream + shared_ptr[CChunkedArray] c_chunked_array + ChunkedArray self + + c_stream = PyCapsule_GetPointer( + stream, 'arrow_array_stream' + ) + + with nogil: + c_chunked_array = GetResultValue(ImportChunkedArray(c_stream)) + + self = ChunkedArray.__new__(ChunkedArray) + self.init(c_chunked_array) + return self + + +def chunked_array(arrays, type=None): + """ + Construct chunked array from list of array-like objects + + Parameters + ---------- + arrays : Array, list of Array, or array-like + Must all be the same data type. Can be empty only if type also passed. + Any Arrow-compatible array that implements the Arrow PyCapsule Protocol + (has an ``__arrow_c_array__`` or ``__arrow_c_stream__`` method) can be + passed as well. + type : DataType or string coercible to DataType + + Returns + ------- + ChunkedArray + + Examples + -------- + >>> import pyarrow as pa + >>> pa.chunked_array([], type=pa.int8()) + + [ + ... 
+ ] + + >>> pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + """ + cdef: + Array arr + vector[shared_ptr[CArray]] c_arrays + shared_ptr[CChunkedArray] c_result + shared_ptr[CDataType] c_type + + type = ensure_type(type, allow_none=True) + + if isinstance(arrays, Array): + arrays = [arrays] + elif hasattr(arrays, "__arrow_c_stream__"): + if type is not None: + requested_type = type.__arrow_c_schema__() + else: + requested_type = None + capsule = arrays.__arrow_c_stream__(requested_type) + result = ChunkedArray._import_from_c_capsule(capsule) + if type is not None and result.type != type: + # __arrow_c_stream__ coerces schema with best effort, so we might + # need to cast it if the producer wasn't able to cast to exact schema. + result = result.cast(type) + return result + elif hasattr(arrays, "__arrow_c_array__"): + arr = array(arrays, type=type) + arrays = [arr] + + for x in arrays: + arr = x if isinstance(x, Array) else array(x, type=type) + + if type is None: + # it allows more flexible chunked array construction from to coerce + # subsequent arrays to the firstly inferred array type + # it also spares the inference overhead after the first chunk + type = arr.type + + c_arrays.push_back(arr.sp_array) + + c_type = pyarrow_unwrap_data_type(type) + with nogil: + c_result = GetResultValue(CChunkedArray.Make(c_arrays, c_type)) + return pyarrow_wrap_chunked_array(c_result) + + +cdef _schema_from_arrays(arrays, names, metadata, shared_ptr[CSchema]* schema): + cdef: + Py_ssize_t K = len(arrays) + c_string c_name + shared_ptr[CDataType] c_type + shared_ptr[const CKeyValueMetadata] c_meta + vector[shared_ptr[CField]] c_fields + + if metadata is not None: + c_meta = KeyValueMetadata(metadata).unwrap() + + if K == 0: + if names is None or len(names) == 0: + schema.reset(new CSchema(c_fields, c_meta)) + return arrays + else: + raise ValueError('Length of names ({}) does not match ' + 'length of arrays 
({})'.format(len(names), K)) + + c_fields.resize(K) + + if names is None: + raise ValueError('Must pass names or schema when constructing ' + 'Table or RecordBatch.') + + if len(names) != K: + raise ValueError('Length of names ({}) does not match ' + 'length of arrays ({})'.format(len(names), K)) + + converted_arrays = [] + for i in range(K): + val = arrays[i] + if not isinstance(val, (Array, ChunkedArray)): + val = array(val) + + c_type = ( val.type).sp_type + + if names[i] is None: + c_name = b'None' + else: + c_name = tobytes(names[i]) + c_fields[i].reset(new CField(c_name, c_type, True)) + converted_arrays.append(val) + + schema.reset(new CSchema(c_fields, c_meta)) + return converted_arrays + + +cdef _sanitize_arrays(arrays, names, schema, metadata, + shared_ptr[CSchema]* c_schema): + cdef Schema cy_schema + if schema is None: + converted_arrays = _schema_from_arrays(arrays, names, metadata, + c_schema) + else: + if names is not None: + raise ValueError('Cannot pass both schema and names') + if metadata is not None: + raise ValueError('Cannot pass both schema and metadata') + cy_schema = schema + + if len(schema) != len(arrays): + raise ValueError('Schema and number of arrays unequal') + + c_schema[0] = cy_schema.sp_schema + converted_arrays = [] + for i, item in enumerate(arrays): + item = asarray(item, type=schema[i].type) + converted_arrays.append(item) + return converted_arrays + +cdef class _Tabular(_PandasConvertible): + """Internal: An interface for common operations on tabular objects.""" + + def __init__(self): + raise TypeError(f"Do not call {self.__class__.__name__}'s constructor directly, use " + f"one of the `{self.__class__.__name__}.from_*` functions instead.") + + def __array__(self, dtype=None, copy=None): + if copy is False: + raise ValueError( + "Unable to avoid a copy while creating a numpy array as requested " + f"(converting a pyarrow.{self.__class__.__name__} always results " + "in a copy).\n" + "If using `np.array(obj, copy=False)` 
replace it with " + "`np.asarray(obj)` to allow a copy when needed" + ) + # 'copy' can further be ignored because stacking will result in a copy + column_arrays = [ + np.asarray(self.column(i), dtype=dtype) for i in range(self.num_columns) + ] + if column_arrays: + arr = np.stack(column_arrays, axis=1) + else: + arr = np.empty((self.num_rows, 0), dtype=dtype) + return arr + + def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True): + """ + Return the dataframe interchange object implementing the interchange protocol. + + Parameters + ---------- + nan_as_null : bool, default False + Whether to tell the DataFrame to overwrite null values in the data + with ``NaN`` (or ``NaT``). + allow_copy : bool, default True + Whether to allow memory copying when exporting. If set to False + it would cause non-zero-copy exports to fail. + + Returns + ------- + DataFrame interchange object + The object which consuming library can use to ingress the dataframe. + + Notes + ----- + Details on the interchange protocol: + https://data-apis.org/dataframe-protocol/latest/index.html + `nan_as_null` currently has no effect; once support for nullable extension + dtypes is added, this value should be propagated to columns. + """ + + from pyarrow.interchange.dataframe import _PyArrowDataFrame + + return _PyArrowDataFrame(self, nan_as_null, allow_copy) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def __getitem__(self, key): + """ + Slice or return column at given index or column name + + Parameters + ---------- + key : integer, str, or slice + Slices with step not equal to 1 (or None) will produce a copy + rather than a zero-copy view + + Returns + ------- + Array (from RecordBatch) or ChunkedArray (from Table) for column input. + RecordBatch or Table for slice input. 
+ """ + if isinstance(key, slice): + return _normalize_slice(self, key) + + return self.column(key) + + def __len__(self): + return self.num_rows + + def __repr__(self): + if not self._is_initialized(): + raise ValueError("This object's internal pointer is NULL, do not " + "use any methods or attributes on this object") + return self.to_string(preview_cols=10) + + def _column(self, int i): + raise NotImplementedError + + def _ensure_integer_index(self, i): + """ + Ensure integer index (convert string column name to integer if needed). + """ + if isinstance(i, (bytes, str)): + field_indices = self.schema.get_all_field_indices(i) + + if len(field_indices) == 0: + raise KeyError("Field \"{}\" does not exist in schema" + .format(i)) + elif len(field_indices) > 1: + raise KeyError("Field \"{}\" exists {} times in schema" + .format(i, len(field_indices))) + else: + return field_indices[0] + elif isinstance(i, int): + return i + else: + raise TypeError("Index must either be string or integer") + + def _is_initialized(self): + raise NotImplementedError + + def column(self, i): + """ + Select single column from Table or RecordBatch. + + Parameters + ---------- + i : int or string + The index or name of the column to retrieve. + + Returns + ------- + column : Array (for RecordBatch) or ChunkedArray (for Table) + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Select a column by numeric index: + + >>> table.column(0) + + [ + [ + 2, + 4, + 5, + 100 + ] + ] + + Select a column by its name: + + >>> table.column("animals") + + [ + [ + "Flamingo", + "Horse", + "Brittle stars", + "Centipede" + ] + ] + """ + return self._column(self._ensure_integer_index(i)) + + @property + def column_names(self): + """ + Names of the Table or RecordBatch columns. 
+ + Returns + ------- + list of str + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> table = pa.Table.from_arrays([[2, 4, 5, 100], + ... ["Flamingo", "Horse", "Brittle stars", "Centipede"]], + ... names=['n_legs', 'animals']) + >>> table.column_names + ['n_legs', 'animals'] + """ + return [self.field(i).name for i in range(self.num_columns)] + + @property + def columns(self): + """ + List of all columns in numerical order. + + Returns + ------- + columns : list of Array (for RecordBatch) or list of ChunkedArray (for Table) + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.columns + [ + [ + [ + null, + 4, + 5, + null + ] + ], + [ + [ + "Flamingo", + "Horse", + null, + "Centipede" + ] + ]] + """ + return [self._column(i) for i in range(self.num_columns)] + + def drop_null(self): + """ + Remove rows that contain missing values from a Table or RecordBatch. + + See :func:`pyarrow.compute.drop_null` for full usage. + + Returns + ------- + Table or RecordBatch + A tabular object with the same schema, with rows containing + no missing values. + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [None, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.drop_null() + pyarrow.Table + year: double + n_legs: int64 + animals: string + ---- + year: [[2022,2021]] + n_legs: [[4,100]] + animals: [["Horse","Centipede"]] + """ + return _pc().drop_null(self) + + def field(self, i): + """ + Select a schema field by its column name or numeric index. 
+ + Parameters + ---------- + i : int or string + The index or name of the field to retrieve. + + Returns + ------- + Field + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.field(0) + pyarrow.Field + >>> table.field(1) + pyarrow.Field + """ + return self.schema.field(i) + + @classmethod + def from_pydict(cls, mapping, schema=None, metadata=None): + """ + Construct a Table or RecordBatch from Arrow arrays or columns. + + Parameters + ---------- + mapping : dict or Mapping + A mapping of strings to Arrays or Python lists. + schema : Schema, default None + If not passed, will be inferred from the Mapping values. + metadata : dict or Mapping, default None + Optional metadata for the schema (if inferred). + + Returns + ------- + Table or RecordBatch + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> pydict = {'n_legs': n_legs, 'animals': animals} + + Construct a Table from a dictionary of arrays: + + >>> pa.Table.from_pydict(pydict) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + >>> pa.Table.from_pydict(pydict).schema + n_legs: int64 + animals: string + + Construct a Table from a dictionary of arrays with metadata: + + >>> my_metadata={"n_legs": "Number of legs per animal"} + >>> pa.Table.from_pydict(pydict, metadata=my_metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Construct a Table from a dictionary of arrays with pyarrow schema: + + >>> my_schema = pa.schema([ + ... 
pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> pa.Table.from_pydict(pydict, schema=my_schema).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + """ + + return _from_pydict(cls=cls, + mapping=mapping, + schema=schema, + metadata=metadata) + + @classmethod + def from_pylist(cls, mapping, schema=None, metadata=None): + """ + Construct a Table or RecordBatch from list of rows / dictionaries. + + Parameters + ---------- + mapping : list of dicts of rows + A mapping of strings to row values. + schema : Schema, default None + If not passed, will be inferred from the first row of the + mapping values. + metadata : dict or Mapping, default None + Optional metadata for the schema (if inferred). + + Returns + ------- + Table or RecordBatch + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> pylist = [{'n_legs': 2, 'animals': 'Flamingo'}, + ... {'n_legs': 4, 'animals': 'Dog'}] + + Construct a Table from a list of rows: + + >>> pa.Table.from_pylist(pylist) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4]] + animals: [["Flamingo","Dog"]] + + Construct a Table from a list of rows with metadata: + + >>> my_metadata={"n_legs": "Number of legs per animal"} + >>> pa.Table.from_pylist(pylist, metadata=my_metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Construct a Table from a list of rows with pyarrow schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... 
metadata={"n_legs": "Number of legs per animal"}) + >>> pa.Table.from_pylist(pylist, schema=my_schema).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + """ + + return _from_pylist(cls=cls, + mapping=mapping, + schema=schema, + metadata=metadata) + + def itercolumns(self): + """ + Iterator over all columns in their numerical order. + + Yields + ------ + Array (for RecordBatch) or ChunkedArray (for Table) + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> for i in table.itercolumns(): + ... print(i.null_count) + ... + 2 + 1 + """ + for i in range(self.num_columns): + yield self._column(i) + + @property + def num_columns(self): + raise NotImplementedError + + @property + def num_rows(self): + raise NotImplementedError + + @property + def shape(self): + """ + Dimensions of the table or record batch: (#rows, #columns). + + Returns + ------- + (int, int) + Number of rows and number of columns. + + Examples + -------- + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table.shape + (4, 2) + """ + return (self.num_rows, self.num_columns) + + @property + def schema(self): + raise NotImplementedError + + def sort_by(self, sorting, **kwargs): + """ + Sort the Table or RecordBatch by one or multiple columns. + + Parameters + ---------- + sorting : str or list[tuple(name, order)] + Name of the column to use to sort (ascending), or + a list of multiple sorting conditions where + each entry is a tuple with column name + and sorting order ("ascending" or "descending") + **kwargs : dict, optional + Additional sorting options. 
+ As allowed by :class:`SortOptions` + + Returns + ------- + Table or RecordBatch + A new tabular object sorted according to the sort keys. + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pandas as pd + >>> import pyarrow as pa + >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.sort_by('animal') + pyarrow.Table + year: int64 + n_legs: int64 + animal: string + ---- + year: [[2019,2021,2021,2020,2022,2022]] + n_legs: [[5,100,4,2,4,2]] + animal: [["Brittle stars","Centipede","Dog","Flamingo","Horse","Parrot"]] + """ + if isinstance(sorting, str): + sorting = [(sorting, "ascending")] + + indices = _pc().sort_indices( + self, + options=_pc().SortOptions(sort_keys=sorting, **kwargs) + ) + return self.take(indices) + + def take(self, object indices): + """ + Select rows from a Table or RecordBatch. + + See :func:`pyarrow.compute.take` for full usage. + + Parameters + ---------- + indices : Array or array-like + The indices in the tabular object whose rows will be returned. + + Returns + ------- + Table or RecordBatch + A tabular object with the same schema, containing the taken rows. + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.take([1,3]) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2022,2021]] + n_legs: [[4,100]] + animals: [["Horse","Centipede"]] + """ + return _pc().take(self, indices) + + def filter(self, mask, object null_selection_behavior="drop"): + """ + Select rows from the table or record batch based on a boolean mask. + + The Table can be filtered based on a mask, which will be passed to + :func:`pyarrow.compute.filter` to perform the filtering, or it can + be filtered through a boolean :class:`.Expression` + + Parameters + ---------- + mask : Array or array-like or .Expression + The boolean mask or the :class:`.Expression` to filter the table with. + null_selection_behavior : str, default "drop" + How nulls in the mask should be handled, does nothing if + an :class:`.Expression` is used. + + Returns + ------- + filtered : Table or RecordBatch + A tabular object of the same schema, with only the rows selected + by applied filtering + + Examples + -------- + Using a Table (works similarly for RecordBatch): + + >>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + + Define an expression and select rows: + + >>> import pyarrow.compute as pc + >>> expr = pc.field("year") <= 2020 + >>> table.filter(expr) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2020,2019]] + n_legs: [[2,5]] + animals: [["Flamingo","Brittle stars"]] + + Define a mask and select rows: + + >>> mask=[True, True, False, None] + >>> table.filter(mask) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2020,2022]] + n_legs: [[2,4]] + animals: [["Flamingo","Horse"]] + >>> table.filter(mask, null_selection_behavior='emit_null') + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2020,2022,null]] + n_legs: [[2,4,null]] + animals: [["Flamingo","Horse",null]] + """ + if isinstance(mask, _pc().Expression): + return _pac()._filter_table(self, mask) + else: + return _pc().filter(self, mask, null_selection_behavior) + + def to_pydict(self): + """ + Convert the Table or RecordBatch to a dict or OrderedDict. + + Returns + ------- + dict + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> table = pa.Table.from_arrays([n_legs, animals], names=["n_legs", "animals"]) + >>> table.to_pydict() + {'n_legs': [2, 2, 4, 4, 5, 100], 'animals': ['Flamingo', 'Parrot', ..., 'Centipede']} + """ + entries = [] + for i in range(self.num_columns): + name = self.field(i).name + column = self[i].to_pylist() + entries.append((name, column)) + return ordered_dict(entries) + + def to_pylist(self): + """ + Convert the Table or RecordBatch to a list of rows / dictionaries. + + Returns + ------- + list + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> data = [[2, 4, 5, 100], + ... 
["Flamingo", "Horse", "Brittle stars", "Centipede"]] + >>> table = pa.table(data, names=["n_legs", "animals"]) + >>> table.to_pylist() + [{'n_legs': 2, 'animals': 'Flamingo'}, {'n_legs': 4, 'animals': 'Horse'}, ... + """ + pydict = self.to_pydict() + names = self.schema.names + pylist = [{column: pydict[column][row] for column in names} + for row in range(self.num_rows)] + return pylist + + def to_string(self, *, show_metadata=False, preview_cols=0): + """ + Return human-readable string representation of Table or RecordBatch. + + Parameters + ---------- + show_metadata : bool, default False + Display Field-level and Schema-level KeyValueMetadata. + preview_cols : int, default 0 + Display values of the columns for the first N columns. + + Returns + ------- + str + """ + # Use less verbose schema output. + schema_as_string = self.schema.to_string( + show_field_metadata=show_metadata, + show_schema_metadata=show_metadata + ) + title = 'pyarrow.{}\n{}'.format(type(self).__name__, schema_as_string) + pieces = [title] + if preview_cols: + pieces.append('----') + for i in range(min(self.num_columns, preview_cols)): + pieces.append('{}: {}'.format( + self.field(i).name, + self.column(i).to_string(indent=0, skip_new_lines=True) + )) + if preview_cols < self.num_columns: + pieces.append('...') + return '\n'.join(pieces) + + def remove_column(self, int i): + # implemented in RecordBatch/Table subclasses + raise NotImplementedError + + def drop_columns(self, columns): + """ + Drop one or more columns and return a new Table or RecordBatch. + + Parameters + ---------- + columns : str or list[str] + Field name(s) referencing existing column(s). + + Raises + ------ + KeyError + If any of the passed column names do not exist. + + Returns + ------- + Table or RecordBatch + A tabular object without the column(s). 
+ + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Drop one column: + + >>> table.drop_columns("animals") + pyarrow.Table + n_legs: int64 + ---- + n_legs: [[2,4,5,100]] + + Drop one or more columns: + + >>> table.drop_columns(["n_legs", "animals"]) + pyarrow.Table + ... + ---- + """ + if isinstance(columns, str): + columns = [columns] + + indices = [] + for col in columns: + idx = self.schema.get_field_index(col) + if idx == -1: + raise KeyError("Column {!r} not found".format(col)) + indices.append(idx) + + indices.sort() + indices.reverse() + + res = self + for idx in indices: + res = res.remove_column(idx) + + return res + + def add_column(self, int i, field_, column): + # implemented in RecordBatch/Table subclasses + raise NotImplementedError + + def append_column(self, field_, column): + """ + Append column at end of columns. + + Parameters + ---------- + field_ : str or Field + If a string is passed then the type is deduced from the column + data. + column : Array or value coercible to array + Column data. + + Returns + ------- + Table or RecordBatch + New table or record batch with the passed column added. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Append column at the end: + + >>> year = [2021, 2022, 2019, 2021] + >>> table.append_column('year', [year]) + pyarrow.Table + n_legs: int64 + animals: string + year: int64 + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + year: [[2021,2022,2019,2021]] + """ + return self.add_column(self.num_columns, field_, column) + + +cdef class RecordBatch(_Tabular): + """ + Batch of rows of columns of equal length + + Warnings + -------- + Do not call this class's constructor directly, use one of the + ``RecordBatch.from_*`` functions instead. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + + Constructing a RecordBatch from arrays: + + >>> pa.RecordBatch.from_arrays([n_legs, animals], names=names) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,2,4,4,5,100] + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + >>> pa.RecordBatch.from_arrays([n_legs, animals], names=names).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + + Constructing a RecordBatch from pandas DataFrame: + + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022], + ... 'month': [3, 5, 7, 9], + ... 'day': [1, 5, 9, 13], + ... 'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> pa.RecordBatch.from_pandas(df) + pyarrow.RecordBatch + year: int64 + month: int64 + day: int64 + n_legs: int64 + animals: string + ---- + year: [2020,2022,2021,2022] + month: [3,5,7,9] + day: [1,5,9,13] + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + >>> pa.RecordBatch.from_pandas(df).to_pandas() + year month day n_legs animals + 0 2020 3 1 2 Flamingo + 1 2022 5 5 4 Horse + 2 2021 7 9 5 Brittle stars + 3 2022 9 13 100 Centipede + + Constructing a RecordBatch from pylist: + + >>> pylist = [{'n_legs': 2, 'animals': 'Flamingo'}, + ... {'n_legs': 4, 'animals': 'Dog'}] + >>> pa.RecordBatch.from_pylist(pylist).to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Dog + + You can also construct a RecordBatch using :func:`pyarrow.record_batch`: + + >>> pa.record_batch([n_legs, animals], names=names).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + + >>> pa.record_batch(df) + pyarrow.RecordBatch + year: int64 + month: int64 + day: int64 + n_legs: int64 + animals: string + ---- + year: [2020,2022,2021,2022] + month: [3,5,7,9] + day: [1,5,9,13] + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + """ + + def __cinit__(self): + self.batch = NULL + self._schema = None + + cdef void init(self, const shared_ptr[CRecordBatch]& batch): + self.sp_batch = batch + self.batch = batch.get() + + def _is_initialized(self): + return self.batch != NULL + + def __reduce__(self): + return _reconstruct_record_batch, (self.columns, self.schema) + + def validate(self, *, full=False): + """ + Perform validation checks. An exception is raised if validation fails. + + By default only cheap validation checks are run. Pass `full=True` + for thorough validation checks (potentially O(n)). 
+ + Parameters + ---------- + full : bool, default False + If True, run expensive checks, otherwise cheap checks only. + + Raises + ------ + ArrowInvalid + """ + if full: + with nogil: + check_status(self.batch.ValidateFull()) + else: + with nogil: + check_status(self.batch.Validate()) + + def replace_schema_metadata(self, metadata=None): + """ + Create shallow copy of record batch by replacing schema + key-value metadata with the indicated new metadata (which may be None, + which deletes any existing metadata + + Parameters + ---------- + metadata : dict, default None + + Returns + ------- + shallow_copy : RecordBatch + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + + Constructing a RecordBatch with schema and metadata: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> batch = pa.RecordBatch.from_arrays([n_legs], schema=my_schema) + >>> batch.schema + n_legs: int64 + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Shallow copy of a RecordBatch with deleted schema metadata: + + >>> batch.replace_schema_metadata().schema + n_legs: int64 + """ + cdef: + shared_ptr[const CKeyValueMetadata] c_meta + shared_ptr[CRecordBatch] c_batch + + metadata = ensure_metadata(metadata, allow_none=True) + c_meta = pyarrow_unwrap_metadata(metadata) + with nogil: + c_batch = self.batch.ReplaceSchemaMetadata(c_meta) + + return pyarrow_wrap_batch(c_batch) + + @property + def num_columns(self): + """ + Number of columns + + Returns + ------- + int + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... 
names=["n_legs", "animals"]) + >>> batch.num_columns + 2 + """ + return self.batch.num_columns() + + @property + def num_rows(self): + """ + Number of rows + + Due to the definition of a RecordBatch, all columns have the same + number of rows. + + Returns + ------- + int + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch.num_rows + 6 + """ + return self.batch.num_rows() + + @property + def schema(self): + """ + Schema of the RecordBatch and its columns + + Returns + ------- + pyarrow.Schema + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch.schema + n_legs: int64 + animals: string + """ + if self._schema is None: + self._schema = pyarrow_wrap_schema(self.batch.schema()) + + return self._schema + + def _column(self, int i): + """ + Select single column from record batch by its numeric index. + + Parameters + ---------- + i : int + The index of the column to retrieve. + + Returns + ------- + column : pyarrow.Array + """ + cdef int index = _normalize_index(i, self.num_columns) + cdef Array result = pyarrow_wrap_array(self.batch.column(index)) + result._name = self.schema[index].name + return result + + @property + def nbytes(self): + """ + Total number of bytes consumed by the elements of the record batch. + + In other words, the sum of bytes from all buffer ranges referenced. + + Unlike `get_total_buffer_size` this method will account for array + offsets. + + If buffers are shared between arrays then the shared + portion will only be counted multiple times. 
+ + The dictionary of dictionary arrays will always be counted in their + entirety even if the array only references a portion of the dictionary. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch.nbytes + 116 + """ + cdef: + CResult[int64_t] c_res_buffer + + with nogil: + c_res_buffer = ReferencedBufferSize(deref(self.batch)) + size = GetResultValue(c_res_buffer) + return size + + def get_total_buffer_size(self): + """ + The sum of bytes in each buffer referenced by the record batch + + An array may only reference a portion of a buffer. + This method will overestimate in this case and return the + byte size of the entire buffer. + + If a buffer is referenced multiple times then it will + only be counted once. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch.get_total_buffer_size() + 120 + """ + cdef: + int64_t total_buffer_size + + total_buffer_size = TotalBufferSize(deref(self.batch)) + return total_buffer_size + + def __sizeof__(self): + return super(RecordBatch, self).__sizeof__() + self.nbytes + + def add_column(self, int i, field_, column): + """ + Add column to RecordBatch at position i. + + A new record batch is returned with the column added, the original record batch + object is left unchanged. + + Parameters + ---------- + i : int + Index to place the column at. + field_ : str or Field + If a string is passed then the type is deduced from the column + data. + column : Array or value coercible to array + Column data. 
+ + Returns + ------- + RecordBatch + New record batch with the passed column added. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> batch = pa.RecordBatch.from_pandas(df) + + Add column: + + >>> year = [2021, 2022, 2019, 2021] + >>> batch.add_column(0,"year", year) + pyarrow.RecordBatch + year: int64 + n_legs: int64 + animals: string + ---- + year: [2021,2022,2019,2021] + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + + Original record batch is left unchanged: + + >>> batch + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + """ + cdef: + shared_ptr[CRecordBatch] c_batch + Field c_field + Array c_arr + + if isinstance(column, Array): + c_arr = column + else: + c_arr = array(column) + + if isinstance(field_, Field): + c_field = field_ + else: + c_field = field(field_, c_arr.type) + + with nogil: + c_batch = GetResultValue(self.batch.AddColumn( + i, c_field.sp_field, c_arr.sp_array)) + + return pyarrow_wrap_batch(c_batch) + + def remove_column(self, int i): + """ + Create new RecordBatch with the indicated column removed. + + Parameters + ---------- + i : int + Index of column to remove. + + Returns + ------- + Table + New record batch without the column. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> batch = pa.RecordBatch.from_pandas(df) + >>> batch.remove_column(1) + pyarrow.RecordBatch + n_legs: int64 + ---- + n_legs: [2,4,5,100] + """ + cdef shared_ptr[CRecordBatch] c_batch + + with nogil: + c_batch = GetResultValue(self.batch.RemoveColumn(i)) + + return pyarrow_wrap_batch(c_batch) + + def set_column(self, int i, field_, column): + """ + Replace column in RecordBatch at position. + + Parameters + ---------- + i : int + Index to place the column at. + field_ : str or Field + If a string is passed then the type is deduced from the column + data. + column : Array or value coercible to array + Column data. + + Returns + ------- + RecordBatch + New record batch with the passed column set. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> batch = pa.RecordBatch.from_pandas(df) + + Replace a column: + + >>> year = [2021, 2022, 2019, 2021] + >>> batch.set_column(1,'year', year) + pyarrow.RecordBatch + n_legs: int64 + year: int64 + ---- + n_legs: [2,4,5,100] + year: [2021,2022,2019,2021] + """ + cdef: + shared_ptr[CRecordBatch] c_batch + Field c_field + Array c_arr + + if isinstance(column, Array): + c_arr = column + else: + c_arr = array(column) + + if isinstance(field_, Field): + c_field = field_ + else: + c_field = field(field_, c_arr.type) + + with nogil: + c_batch = GetResultValue(self.batch.SetColumn( + i, c_field.sp_field, c_arr.sp_array)) + + return pyarrow_wrap_batch(c_batch) + + def rename_columns(self, names): + """ + Create new record batch with columns renamed to provided names. + + Parameters + ---------- + names : list[str] or dict[str, str] + List of new column names or mapping of old column names to new column names. 
+ + If a mapping of old to new column names is passed, then all columns which are + found to match a provided old column name will be renamed to the new column name. + If any column names are not found in the mapping, a KeyError will be raised. + + Raises + ------ + KeyError + If any of the column names passed in the names mapping do not exist. + + Returns + ------- + RecordBatch + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> batch = pa.RecordBatch.from_pandas(df) + >>> new_names = ["n", "name"] + >>> batch.rename_columns(new_names) + pyarrow.RecordBatch + n: int64 + name: string + ---- + n: [2,4,5,100] + name: ["Flamingo","Horse","Brittle stars","Centipede"] + >>> new_names = {"n_legs": "n", "animals": "name"} + >>> batch.rename_columns(new_names) + pyarrow.RecordBatch + n: int64 + name: string + ---- + n: [2,4,5,100] + name: ["Flamingo","Horse","Brittle stars","Centipede"] + """ + cdef: + shared_ptr[CRecordBatch] c_batch + vector[c_string] c_names + + if isinstance(names, list): + for name in names: + c_names.push_back(tobytes(name)) + elif isinstance(names, dict): + idx_to_new_name = {} + for name, new_name in names.items(): + indices = self.schema.get_all_field_indices(name) + + if not indices: + raise KeyError("Column {!r} not found".format(name)) + + for index in indices: + idx_to_new_name[index] = new_name + + for i in range(self.num_columns): + new_name = idx_to_new_name.get(i, self.column_names[i]) + c_names.push_back(tobytes(new_name)) + else: + raise TypeError(f"names must be a list or dict not {type(names)!r}") + + with nogil: + c_batch = GetResultValue(self.batch.RenameColumns(move(c_names))) + + return pyarrow_wrap_batch(c_batch) + + def serialize(self, memory_pool=None): + """ + Write RecordBatch to Buffer as encapsulated IPC message, which does not + include a Schema. 
+ + To reconstruct a RecordBatch from the encapsulated IPC message Buffer + returned by this function, a Schema must be passed separately. See + Examples. + + Parameters + ---------- + memory_pool : MemoryPool, default None + Uses default memory pool if not specified + + Returns + ------- + serialized : Buffer + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> buf = batch.serialize() + >>> buf + + + Reconstruct RecordBatch from IPC message Buffer and original Schema + + >>> pa.ipc.read_record_batch(buf, batch.schema) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,2,4,4,5,100] + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + """ + cdef shared_ptr[CBuffer] buffer + cdef CIpcWriteOptions options = CIpcWriteOptions.Defaults() + options.memory_pool = maybe_unbox_memory_pool(memory_pool) + + with nogil: + buffer = GetResultValue( + SerializeRecordBatch(deref(self.batch), options)) + return pyarrow_wrap_buffer(buffer) + + def slice(self, offset=0, length=None): + """ + Compute zero-copy slice of this RecordBatch + + Parameters + ---------- + offset : int, default 0 + Offset from start of record batch to slice + length : int, default None + Length of slice (default is until end of batch starting from + offset) + + Returns + ------- + sliced : RecordBatch + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... 
names=["n_legs", "animals"]) + >>> batch.to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + >>> batch.slice(offset=3).to_pandas() + n_legs animals + 0 4 Horse + 1 5 Brittle stars + 2 100 Centipede + >>> batch.slice(length=2).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + >>> batch.slice(offset=3, length=1).to_pandas() + n_legs animals + 0 4 Horse + """ + cdef shared_ptr[CRecordBatch] result + + if offset < 0: + raise IndexError('Offset must be non-negative') + + offset = min(len(self), offset) + if length is None: + result = self.batch.Slice(offset) + else: + result = self.batch.Slice(offset, length) + + return pyarrow_wrap_batch(result) + + def equals(self, object other, bint check_metadata=False): + """ + Check if contents of two record batches are equal. + + Parameters + ---------- + other : pyarrow.RecordBatch + RecordBatch to compare against. + check_metadata : bool, default False + Whether schema metadata equality should be checked as well. + + Returns + ------- + are_equal : bool + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch_0 = pa.record_batch([]) + >>> batch_1 = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"], + ... 
metadata={"n_legs": "Number of legs per animal"}) + >>> batch.equals(batch) + True + >>> batch.equals(batch_0) + False + >>> batch.equals(batch_1) + True + >>> batch.equals(batch_1, check_metadata=True) + False + """ + cdef: + CRecordBatch* this_batch = self.batch + shared_ptr[CRecordBatch] other_batch = pyarrow_unwrap_batch(other) + c_bool result + + if not other_batch: + return False + + with nogil: + result = this_batch.Equals(deref(other_batch), check_metadata) + + return result + + def select(self, object columns): + """ + Select columns of the RecordBatch. + + Returns a new RecordBatch with the specified columns, and metadata + preserved. + + Parameters + ---------- + columns : list-like + The column names or integer indices to select. + + Returns + ------- + RecordBatch + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.record_batch([n_legs, animals], + ... names=["n_legs", "animals"]) + + Select columns my indices: + + >>> batch.select([1]) + pyarrow.RecordBatch + animals: string + ---- + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + + Select columns by names: + + >>> batch.select(["n_legs"]) + pyarrow.RecordBatch + n_legs: int64 + ---- + n_legs: [2,2,4,4,5,100] + """ + cdef: + shared_ptr[CRecordBatch] c_batch + vector[int] c_indices + + for idx in columns: + idx = self._ensure_integer_index(idx) + idx = _normalize_index(idx, self.num_columns) + c_indices.push_back( idx) + + with nogil: + c_batch = GetResultValue(self.batch.SelectColumns(move(c_indices))) + + return pyarrow_wrap_batch(c_batch) + + def cast(self, Schema target_schema, safe=None, options=None): + """ + Cast record batch values to another schema. + + Parameters + ---------- + target_schema : Schema + Schema to cast to, the names and order of fields must match. 
+ safe : bool, default True + Check for overflows or other unsafe conversions. + options : CastOptions, default None + Additional checks pass by CastOptions + + Returns + ------- + RecordBatch + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> batch = pa.RecordBatch.from_pandas(df) + >>> batch.schema + n_legs: int64 + animals: string + -- schema metadata -- + pandas: '{"index_columns": [{"kind": "range", "name": null, "start": 0, ... + + Define new schema and cast batch values: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.duration('s')), + ... pa.field('animals', pa.string())] + ... ) + >>> batch.cast(target_schema=my_schema) + pyarrow.RecordBatch + n_legs: duration[s] + animals: string + ---- + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + """ + cdef: + Array column, casted + Field field + list newcols = [] + + if self.schema.names != target_schema.names: + raise ValueError("Target schema's field names are not matching " + "the record batch's field names: {!r}, {!r}" + .format(self.schema.names, target_schema.names)) + + for column, field in zip(self.itercolumns(), target_schema): + if not field.nullable and column.null_count > 0: + raise ValueError("Casting field {!r} with null values to non-nullable" + .format(field.name)) + casted = column.cast(field.type, safe=safe, options=options) + newcols.append(casted) + + return RecordBatch.from_arrays(newcols, schema=target_schema) + + def _to_pandas(self, options, **kwargs): + return Table.from_batches([self])._to_pandas(options, **kwargs) + + @classmethod + def from_pandas(cls, df, Schema schema=None, preserve_index=None, + nthreads=None, columns=None): + """ + Convert pandas.DataFrame to an Arrow RecordBatch + + Parameters + ---------- + df : pandas.DataFrame + schema : pyarrow.Schema, optional + The expected schema 
of the RecordBatch. This can be used to + indicate the type of columns if we cannot infer it automatically. + If passed, the output will have exactly this schema. Columns + specified in the schema that are not found in the DataFrame columns + or its index will raise an error. Additional columns or index + levels in the DataFrame which are not specified in the schema will + be ignored. + preserve_index : bool, optional + Whether to store the index as an additional column in the resulting + ``RecordBatch``. The default of None will store the index as a + column, except for RangeIndex which is stored as metadata only. Use + ``preserve_index=True`` to force it to be stored as a column. + nthreads : int, default None + If greater than 1, convert columns to Arrow in parallel using + indicated number of threads. By default, this follows + :func:`pyarrow.cpu_count` (may use up to system CPU count threads). + columns : list, optional + List of column to be converted. If None, use all columns. + + Returns + ------- + pyarrow.RecordBatch + + + Examples + -------- + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022], + ... 'month': [3, 5, 7, 9], + ... 'day': [1, 5, 9, 13], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + + Convert pandas DataFrame to RecordBatch: + + >>> import pyarrow as pa + >>> pa.RecordBatch.from_pandas(df) + pyarrow.RecordBatch + year: int64 + month: int64 + day: int64 + n_legs: int64 + animals: string + ---- + year: [2020,2022,2021,2022] + month: [3,5,7,9] + day: [1,5,9,13] + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + + Convert pandas DataFrame to RecordBatch using schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... 
metadata={"n_legs": "Number of legs per animal"}) + >>> pa.RecordBatch.from_pandas(df, schema=my_schema) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + + Convert pandas DataFrame to RecordBatch specifying columns: + + >>> pa.RecordBatch.from_pandas(df, columns=["n_legs"]) + pyarrow.RecordBatch + n_legs: int64 + ---- + n_legs: [2,4,5,100] + """ + from pyarrow.pandas_compat import dataframe_to_arrays + arrays, schema, n_rows = dataframe_to_arrays( + df, schema, preserve_index, nthreads=nthreads, columns=columns + ) + + # If df is empty but row index is not, create empty RecordBatch with rows >0 + cdef vector[shared_ptr[CArray]] c_arrays + if n_rows: + return pyarrow_wrap_batch(CRecordBatch.Make(( schema).sp_schema, + n_rows, c_arrays)) + else: + return cls.from_arrays(arrays, schema=schema) + + @staticmethod + def from_arrays(list arrays, names=None, schema=None, metadata=None): + """ + Construct a RecordBatch from multiple pyarrow.Arrays + + Parameters + ---------- + arrays : list of pyarrow.Array + One for each field in RecordBatch + names : list of str, optional + Names for the batch fields. If not passed, schema must be passed + schema : Schema, default None + Schema for the created batch. If not passed, names must be passed + metadata : dict or Mapping, default None + Optional metadata for the schema (if inferred). 
+ + Returns + ------- + pyarrow.RecordBatch + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + + Construct a RecordBatch from pyarrow Arrays using names: + + >>> pa.RecordBatch.from_arrays([n_legs, animals], names=names) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,2,4,4,5,100] + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + >>> pa.RecordBatch.from_arrays([n_legs, animals], names=names).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + + Construct a RecordBatch from pyarrow Arrays using schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> pa.RecordBatch.from_arrays([n_legs, animals], schema=my_schema).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + >>> pa.RecordBatch.from_arrays([n_legs, animals], schema=my_schema).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + """ + cdef: + Array arr + shared_ptr[CSchema] c_schema + vector[shared_ptr[CArray]] c_arrays + int64_t num_rows + + if len(arrays) > 0: + num_rows = len(arrays[0]) + else: + num_rows = 0 + + if isinstance(names, Schema): + import warnings + warnings.warn("Schema passed to names= option, please " + "pass schema= explicitly. 
" + "Will raise exception in future", FutureWarning) + schema = names + names = None + + converted_arrays = _sanitize_arrays(arrays, names, schema, metadata, + &c_schema) + + c_arrays.reserve(len(arrays)) + for arr in converted_arrays: + if len(arr) != num_rows: + raise ValueError('Arrays were not all the same length: ' + '{0} vs {1}'.format(len(arr), num_rows)) + c_arrays.push_back(arr.sp_array) + + result = pyarrow_wrap_batch(CRecordBatch.Make(c_schema, num_rows, + c_arrays)) + result.validate() + return result + + @staticmethod + def from_struct_array(StructArray struct_array): + """ + Construct a RecordBatch from a StructArray. + + Each field in the StructArray will become a column in the resulting + ``RecordBatch``. + + Parameters + ---------- + struct_array : StructArray + Array to construct the record batch from. + + Returns + ------- + pyarrow.RecordBatch + + Examples + -------- + >>> import pyarrow as pa + >>> struct = pa.array([{'n_legs': 2, 'animals': 'Parrot'}, + ... {'year': 2022, 'n_legs': 4}]) + >>> pa.RecordBatch.from_struct_array(struct).to_pandas() + animals n_legs year + 0 Parrot 2 NaN + 1 None 4 2022.0 + """ + cdef: + shared_ptr[CRecordBatch] c_record_batch + with nogil: + c_record_batch = GetResultValue( + CRecordBatch.FromStructArray(struct_array.sp_array)) + return pyarrow_wrap_batch(c_record_batch) + + def to_struct_array(self): + """ + Convert to a struct array. + """ + cdef: + shared_ptr[CRecordBatch] c_record_batch + shared_ptr[CArray] c_array + + c_record_batch = pyarrow_unwrap_batch(self) + with nogil: + c_array = GetResultValue( + deref(c_record_batch).ToStructArray()) + return pyarrow_wrap_array(c_array) + + def to_tensor(self, c_bool null_to_nan=False, c_bool row_major=True, MemoryPool memory_pool=None): + """ + Convert to a :class:`~pyarrow.Tensor`. + + RecordBatches that can be converted have fields of type signed or unsigned + integer or float, including all bit-widths. 
+ + ``null_to_nan`` is ``False`` by default and this method will raise an error in case + any nulls are present. RecordBatches with nulls can be converted with ``null_to_nan`` + set to ``True``. In this case null values are converted to ``NaN`` and integer type + arrays are promoted to the appropriate float type. + + Parameters + ---------- + null_to_nan : bool, default False + Whether to write null values in the result as ``NaN``. + row_major : bool, default True + Whether resulting Tensor is row-major or column-major + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool + + Examples + -------- + >>> import pyarrow as pa + >>> batch = pa.record_batch( + ... [ + ... pa.array([1, 2, 3, 4, None], type=pa.int32()), + ... pa.array([10, 20, 30, 40, None], type=pa.float32()), + ... ], names = ["a", "b"] + ... ) + + >>> batch + pyarrow.RecordBatch + a: int32 + b: float + ---- + a: [1,2,3,4,null] + b: [10,20,30,40,null] + + Convert a RecordBatch to row-major Tensor with null values + written as ``NaN``s + + >>> batch.to_tensor(null_to_nan=True) + + type: double + shape: (5, 2) + strides: (16, 8) + >>> batch.to_tensor(null_to_nan=True).to_numpy() + array([[ 1., 10.], + [ 2., 20.], + [ 3., 30.], + [ 4., 40.], + [nan, nan]]) + + Convert a RecordBatch to column-major Tensor + + >>> batch.to_tensor(null_to_nan=True, row_major=False) + + type: double + shape: (5, 2) + strides: (8, 40) + >>> batch.to_tensor(null_to_nan=True, row_major=False).to_numpy() + array([[ 1., 10.], + [ 2., 20.], + [ 3., 30.], + [ 4., 40.], + [nan, nan]]) + """ + cdef: + shared_ptr[CRecordBatch] c_record_batch + shared_ptr[CTensor] c_tensor + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + c_record_batch = pyarrow_unwrap_batch(self) + with nogil: + c_tensor = GetResultValue( + deref(c_record_batch).ToTensor(null_to_nan, + row_major, pool)) + return pyarrow_wrap_tensor(c_tensor) + + def _export_to_c(self, out_ptr, out_schema_ptr=0): + """ + 
Export to a C ArrowArray struct, given its pointer. + + If a C ArrowSchema struct pointer is also given, the record batch + schema is exported to it at the same time. + + Parameters + ---------- + out_ptr: int + The raw pointer to a C ArrowArray struct. + out_schema_ptr: int (optional) + The raw pointer to a C ArrowSchema struct. + + Be careful: if you don't pass the ArrowArray struct to a consumer, + array memory will leak. This is a low-level function intended for + expert users. + """ + cdef: + void* c_ptr = _as_c_pointer(out_ptr) + void* c_schema_ptr = _as_c_pointer(out_schema_ptr, + allow_null=True) + with nogil: + check_status(ExportRecordBatch(deref(self.sp_batch), + c_ptr, + c_schema_ptr)) + + @staticmethod + def _import_from_c(in_ptr, schema): + """ + Import RecordBatch from a C ArrowArray struct, given its pointer + and the imported schema. + + Parameters + ---------- + in_ptr: int + The raw pointer to a C ArrowArray struct. + type: Schema or int + Either a Schema object, or the raw pointer to a C ArrowSchema + struct. + + This is a low-level function intended for expert users. + """ + cdef: + void* c_ptr = _as_c_pointer(in_ptr) + void* c_schema_ptr + shared_ptr[CRecordBatch] c_batch + + c_schema = pyarrow_unwrap_schema(schema) + if c_schema == nullptr: + # Not a Schema object, perhaps a raw ArrowSchema pointer + c_schema_ptr = _as_c_pointer(schema, allow_null=True) + with nogil: + c_batch = GetResultValue(ImportRecordBatch( + c_ptr, c_schema_ptr)) + else: + with nogil: + c_batch = GetResultValue(ImportRecordBatch( + c_ptr, c_schema)) + return pyarrow_wrap_batch(c_batch) + + def __arrow_c_array__(self, requested_schema=None): + """ + Get a pair of PyCapsules containing a C ArrowArray representation of the object. + + Parameters + ---------- + requested_schema : PyCapsule | None + A PyCapsule containing a C ArrowSchema representation of a requested + schema. PyArrow will attempt to cast the batch to this schema. 
+ If None, the batch will be returned as-is, with a schema matching the + one returned by :meth:`__arrow_c_schema__()`. + + Returns + ------- + Tuple[PyCapsule, PyCapsule] + A pair of PyCapsules containing a C ArrowSchema and ArrowArray, + respectively. + """ + cdef: + ArrowArray* c_array + ArrowSchema* c_schema + + if requested_schema is not None: + target_schema = Schema._import_from_c_capsule(requested_schema) + + if target_schema != self.schema: + try: + casted_batch = self.cast(target_schema, safe=True) + inner_batch = pyarrow_unwrap_batch(casted_batch) + except ArrowInvalid as e: + raise ValueError( + f"Could not cast {self.schema} to requested schema {target_schema}: {e}" + ) + else: + inner_batch = self.sp_batch + else: + inner_batch = self.sp_batch + + schema_capsule = alloc_c_schema(&c_schema) + array_capsule = alloc_c_array(&c_array) + + with nogil: + check_status(ExportRecordBatch(deref(inner_batch), c_array, c_schema)) + + return schema_capsule, array_capsule + + def __arrow_c_stream__(self, requested_schema=None): + """ + Export the batch as an Arrow C stream PyCapsule. + + Parameters + ---------- + requested_schema : PyCapsule, default None + The schema to which the stream should be casted, passed as a + PyCapsule containing a C ArrowSchema representation of the + requested schema. + Currently, this is not supported and will raise a + NotImplementedError if the schema doesn't match the current schema. + + Returns + ------- + PyCapsule + """ + return Table.from_batches([self]).__arrow_c_stream__(requested_schema) + + @staticmethod + def _import_from_c_capsule(schema_capsule, array_capsule): + """ + Import RecordBatch from a pair of PyCapsules containing a C ArrowSchema + and ArrowArray, respectively. + + Parameters + ---------- + schema_capsule : PyCapsule + A PyCapsule containing a C ArrowSchema representation of the schema. + array_capsule : PyCapsule + A PyCapsule containing a C ArrowArray representation of the array. 
+ + Returns + ------- + pyarrow.RecordBatch + """ + cdef: + ArrowSchema* c_schema + ArrowArray* c_array + shared_ptr[CRecordBatch] c_batch + + c_schema = PyCapsule_GetPointer(schema_capsule, 'arrow_schema') + c_array = PyCapsule_GetPointer(array_capsule, 'arrow_array') + + with nogil: + c_batch = GetResultValue(ImportRecordBatch(c_array, c_schema)) + + return pyarrow_wrap_batch(c_batch) + + def _export_to_c_device(self, out_ptr, out_schema_ptr=0): + """ + Export to a C ArrowDeviceArray struct, given its pointer. + + If a C ArrowSchema struct pointer is also given, the record batch + schema is exported to it at the same time. + + Parameters + ---------- + out_ptr: int + The raw pointer to a C ArrowDeviceArray struct. + out_schema_ptr: int (optional) + The raw pointer to a C ArrowSchema struct. + + Be careful: if you don't pass the ArrowDeviceArray struct to a consumer, + array memory will leak. This is a low-level function intended for + expert users. + """ + cdef: + void* c_ptr = _as_c_pointer(out_ptr) + void* c_schema_ptr = _as_c_pointer(out_schema_ptr, + allow_null=True) + with nogil: + check_status(ExportDeviceRecordBatch( + deref(self.sp_batch), NULL, + c_ptr, c_schema_ptr) + ) + + @staticmethod + def _import_from_c_device(in_ptr, schema): + """ + Import RecordBatch from a C ArrowDeviceArray struct, given its pointer + and the imported schema. + + Parameters + ---------- + in_ptr: int + The raw pointer to a C ArrowDeviceArray struct. + type: Schema or int + Either a Schema object, or the raw pointer to a C ArrowSchema + struct. + + This is a low-level function intended for expert users. 
+ """ + cdef: + ArrowDeviceArray* c_device_array = _as_c_pointer(in_ptr) + void* c_schema_ptr + shared_ptr[CRecordBatch] c_batch + + if c_device_array.device_type == ARROW_DEVICE_CUDA: + _ensure_cuda_loaded() + + c_schema = pyarrow_unwrap_schema(schema) + if c_schema == nullptr: + # Not a Schema object, perhaps a raw ArrowSchema pointer + c_schema_ptr = _as_c_pointer(schema, allow_null=True) + with nogil: + c_batch = GetResultValue(ImportDeviceRecordBatch( + c_device_array, c_schema_ptr)) + else: + with nogil: + c_batch = GetResultValue(ImportDeviceRecordBatch( + c_device_array, c_schema)) + return pyarrow_wrap_batch(c_batch) + + def __arrow_c_device_array__(self, requested_schema=None, **kwargs): + """ + Get a pair of PyCapsules containing a C ArrowDeviceArray representation + of the object. + + Parameters + ---------- + requested_schema : PyCapsule | None + A PyCapsule containing a C ArrowSchema representation of a requested + schema. PyArrow will attempt to cast the batch to this data type. + If None, the batch will be returned as-is, with a type matching the + one returned by :meth:`__arrow_c_schema__()`. + kwargs + Currently no additional keyword arguments are supported, but + this method will accept any keyword with a value of ``None`` + for compatibility with future keywords. + + Returns + ------- + Tuple[PyCapsule, PyCapsule] + A pair of PyCapsules containing a C ArrowSchema and ArrowDeviceArray, + respectively. 
+ """ + cdef: + ArrowDeviceArray* c_array + ArrowSchema* c_schema + shared_ptr[CRecordBatch] inner_batch + + non_default_kwargs = [ + name for name, value in kwargs.items() if value is not None + ] + if non_default_kwargs: + raise NotImplementedError( + f"Received unsupported keyword argument(s): {non_default_kwargs}" + ) + + if requested_schema is not None: + target_schema = Schema._import_from_c_capsule(requested_schema) + + if target_schema != self.schema: + if not self.is_cpu: + raise NotImplementedError( + "Casting to a requested schema is only supported for CPU data" + ) + try: + casted_batch = self.cast(target_schema, safe=True) + inner_batch = pyarrow_unwrap_batch(casted_batch) + except ArrowInvalid as e: + raise ValueError( + f"Could not cast {self.schema} to requested schema {target_schema}: {e}" + ) + else: + inner_batch = self.sp_batch + else: + inner_batch = self.sp_batch + + schema_capsule = alloc_c_schema(&c_schema) + array_capsule = alloc_c_device_array(&c_array) + + with nogil: + check_status(ExportDeviceRecordBatch( + deref(inner_batch), NULL, c_array, c_schema)) + + return schema_capsule, array_capsule + + @staticmethod + def _import_from_c_device_capsule(schema_capsule, array_capsule): + """ + Import RecordBatch from a pair of PyCapsules containing a + C ArrowSchema and ArrowDeviceArray, respectively. + + Parameters + ---------- + schema_capsule : PyCapsule + A PyCapsule containing a C ArrowSchema representation of the schema. + array_capsule : PyCapsule + A PyCapsule containing a C ArrowDeviceArray representation of the array. 
+ + Returns + ------- + pyarrow.RecordBatch + """ + cdef: + ArrowSchema* c_schema + ArrowDeviceArray* c_array + shared_ptr[CRecordBatch] batch + + c_schema = PyCapsule_GetPointer(schema_capsule, 'arrow_schema') + c_array = PyCapsule_GetPointer( + array_capsule, 'arrow_device_array' + ) + + with nogil: + batch = GetResultValue(ImportDeviceRecordBatch(c_array, c_schema)) + + return pyarrow_wrap_batch(batch) + + @property + def device_type(self): + """ + The device type where the arrays in the RecordBatch reside. + + Returns + ------- + DeviceAllocationType + """ + return _wrap_device_allocation_type(self.sp_batch.get().device_type()) + + @property + def is_cpu(self): + """ + Whether the RecordBatch's arrays are CPU-accessible. + """ + return self.device_type == DeviceAllocationType.CPU + + +def _reconstruct_record_batch(columns, schema): + """ + Internal: reconstruct RecordBatch from pickled components. + """ + return RecordBatch.from_arrays(columns, schema=schema) + + +def table_to_blocks(options, Table table, categories, extension_columns): + cdef: + PyObject* result_obj + shared_ptr[CTable] c_table + CMemoryPool* pool + PandasOptions c_options = _convert_pandas_options(options) + + if categories is not None: + c_options.categorical_columns = {tobytes(cat) for cat in categories} + if extension_columns is not None: + c_options.extension_columns = {tobytes(col) + for col in extension_columns} + + if pandas_api.is_v1(): + # ARROW-3789: Coerce date/timestamp types to datetime64[ns] + c_options.coerce_temporal_nanoseconds = True + + if c_options.self_destruct: + # Move the shared_ptr, table is now unsafe to use further + c_table = move(table.sp_table) + table.table = NULL + else: + c_table = table.sp_table + + with nogil: + check_status( + libarrow_python.ConvertTableToPandas(c_options, move(c_table), + &result_obj) + ) + + return PyObject_to_object(result_obj) + + +cdef class Table(_Tabular): + """ + A collection of top-level named, equal length Arrow arrays. 
+ + Warnings + -------- + Do not call this class's constructor directly, use one of the ``from_*`` + methods instead. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + + Construct a Table from arrays: + + >>> pa.Table.from_arrays([n_legs, animals], names=names) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from a RecordBatch: + + >>> batch = pa.record_batch([n_legs, animals], names=names) + >>> pa.Table.from_batches([batch]) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from pandas DataFrame: + + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> pa.Table.from_pandas(df) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2020,2022,2019,2021]] + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from a dictionary of arrays: + + >>> pydict = {'n_legs': n_legs, 'animals': animals} + >>> pa.Table.from_pydict(pydict) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + >>> pa.Table.from_pydict(pydict).schema + n_legs: int64 + animals: string + + Construct a Table from a dictionary of arrays with metadata: + + >>> my_metadata={"n_legs": "Number of legs per animal"} + >>> pa.Table.from_pydict(pydict, metadata=my_metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Construct a Table from a list of rows: + + >>> pylist = [{'n_legs': 2, 'animals': 'Flamingo'}, {'year': 2021, 'animals': 'Centipede'}] + >>> pa.Table.from_pylist(pylist) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,null]] + animals: [["Flamingo","Centipede"]] + + Construct a Table from a list of rows with pyarrow schema: + + >>> my_schema = pa.schema([ + ... pa.field('year', pa.int64()), + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... 
metadata={"year": "Year of entry"}) + >>> pa.Table.from_pylist(pylist, schema=my_schema).schema + year: int64 + n_legs: int64 + animals: string + -- schema metadata -- + year: 'Year of entry' + + Construct a Table with :func:`pyarrow.table`: + + >>> pa.table([n_legs, animals], names=names) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + """ + + def __cinit__(self): + self.table = NULL + + cdef void init(self, const shared_ptr[CTable]& table): + self.sp_table = table + self.table = table.get() + + def _is_initialized(self): + return self.table != NULL + + def validate(self, *, full=False): + """ + Perform validation checks. An exception is raised if validation fails. + + By default only cheap validation checks are run. Pass `full=True` + for thorough validation checks (potentially O(n)). + + Parameters + ---------- + full : bool, default False + If True, run expensive checks, otherwise cheap checks only. + + Raises + ------ + ArrowInvalid + """ + if full: + with nogil: + check_status(self.table.ValidateFull()) + else: + with nogil: + check_status(self.table.Validate()) + + def __reduce__(self): + # Reduce the columns as ChunkedArrays to avoid serializing schema + # data twice + columns = [col for col in self.columns] + return _reconstruct_table, (columns, self.schema) + + def slice(self, offset=0, length=None): + """ + Compute zero-copy slice of this Table. + + Parameters + ---------- + offset : int, default 0 + Offset from start of table to slice. + length : int, default None + Length of slice (default is until end of table starting from + offset). + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.slice(length=3) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2020,2022,2019]] + n_legs: [[2,4,5]] + animals: [["Flamingo","Horse","Brittle stars"]] + >>> table.slice(offset=2) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2019,2021]] + n_legs: [[5,100]] + animals: [["Brittle stars","Centipede"]] + >>> table.slice(offset=2, length=1) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2019]] + n_legs: [[5]] + animals: [["Brittle stars"]] + """ + cdef shared_ptr[CTable] result + + if offset < 0: + raise IndexError('Offset must be non-negative') + + offset = min(len(self), offset) + if length is None: + result = self.table.Slice(offset) + else: + result = self.table.Slice(offset, length) + + return pyarrow_wrap_table(result) + + def select(self, object columns): + """ + Select columns of the Table. + + Returns a new Table with the specified columns, and metadata + preserved. + + Parameters + ---------- + columns : list-like + The column names or integer indices to select. + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.select([0,1]) + pyarrow.Table + year: int64 + n_legs: int64 + ---- + year: [[2020,2022,2019,2021]] + n_legs: [[2,4,5,100]] + >>> table.select(["year"]) + pyarrow.Table + year: int64 + ---- + year: [[2020,2022,2019,2021]] + """ + cdef: + shared_ptr[CTable] c_table + vector[int] c_indices + + for idx in columns: + idx = self._ensure_integer_index(idx) + idx = _normalize_index(idx, self.num_columns) + c_indices.push_back( idx) + + with nogil: + c_table = GetResultValue(self.table.SelectColumns(move(c_indices))) + + return pyarrow_wrap_table(c_table) + + def replace_schema_metadata(self, metadata=None): + """ + Create shallow copy of table by replacing schema + key-value metadata with the indicated new metadata (which may be None), + which deletes any existing metadata. + + Parameters + ---------- + metadata : dict, default None + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Constructing a Table with pyarrow schema and metadata: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> table= pa.table(df, my_schema) + >>> table.schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + pandas: ... 
+ + Create a shallow copy of a Table with deleted schema metadata: + + >>> table.replace_schema_metadata().schema + n_legs: int64 + animals: string + + Create a shallow copy of a Table with new schema metadata: + + >>> metadata={"animals": "Which animal"} + >>> table.replace_schema_metadata(metadata = metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + animals: 'Which animal' + """ + cdef: + shared_ptr[const CKeyValueMetadata] c_meta + shared_ptr[CTable] c_table + + metadata = ensure_metadata(metadata, allow_none=True) + c_meta = pyarrow_unwrap_metadata(metadata) + with nogil: + c_table = self.table.ReplaceSchemaMetadata(c_meta) + + return pyarrow_wrap_table(c_table) + + def flatten(self, MemoryPool memory_pool=None): + """ + Flatten this Table. + + Each column with a struct type is flattened + into one column per struct field. Other columns are left unchanged. + + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> struct = pa.array([{'n_legs': 2, 'animals': 'Parrot'}, + ... {'year': 2022, 'n_legs': 4}]) + >>> month = pa.array([4, 6]) + >>> table = pa.Table.from_arrays([struct,month], + ... 
names = ["a", "month"]) + >>> table + pyarrow.Table + a: struct + child 0, animals: string + child 1, n_legs: int64 + child 2, year: int64 + month: int64 + ---- + a: [ + -- is_valid: all not null + -- child 0 type: string + ["Parrot",null] + -- child 1 type: int64 + [2,4] + -- child 2 type: int64 + [null,2022]] + month: [[4,6]] + + Flatten the columns with struct field: + + >>> table.flatten() + pyarrow.Table + a.animals: string + a.n_legs: int64 + a.year: int64 + month: int64 + ---- + a.animals: [["Parrot",null]] + a.n_legs: [[2,4]] + a.year: [[null,2022]] + month: [[4,6]] + """ + cdef: + shared_ptr[CTable] flattened + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + with nogil: + flattened = GetResultValue(self.table.Flatten(pool)) + + return pyarrow_wrap_table(flattened) + + def combine_chunks(self, MemoryPool memory_pool=None): + """ + Make a new table by combining the chunks this table has. + + All the underlying chunks in the ChunkedArray of each column are + concatenated into zero or one chunk. + + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool. 
+ + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> animals = pa.chunked_array([["Flamingo", "Parrot", "Dog"], ["Horse", "Brittle stars", "Centipede"]]) + >>> names = ["n_legs", "animals"] + >>> table = pa.table([n_legs, animals], names=names) + >>> table + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,2,4],[4,5,100]] + animals: [["Flamingo","Parrot","Dog"],["Horse","Brittle stars","Centipede"]] + >>> table.combine_chunks() + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,2,4,4,5,100]] + animals: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]] + """ + cdef: + shared_ptr[CTable] combined + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + with nogil: + combined = GetResultValue(self.table.CombineChunks(pool)) + + return pyarrow_wrap_table(combined) + + def unify_dictionaries(self, MemoryPool memory_pool=None): + """ + Unify dictionaries across all chunks. + + This method returns an equivalent table, but where all chunks of + each column share the same dictionary values. Dictionary indices + are transposed accordingly. + + Columns without dictionaries are returned unchanged. 
+ + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> arr_1 = pa.array(["Flamingo", "Parrot", "Dog"]).dictionary_encode() + >>> arr_2 = pa.array(["Horse", "Brittle stars", "Centipede"]).dictionary_encode() + >>> c_arr = pa.chunked_array([arr_1, arr_2]) + >>> table = pa.table([c_arr], names=["animals"]) + >>> table + pyarrow.Table + animals: dictionary + ---- + animals: [ -- dictionary: + ["Flamingo","Parrot","Dog"] -- indices: + [0,1,2], -- dictionary: + ["Horse","Brittle stars","Centipede"] -- indices: + [0,1,2]] + + Unify dictionaries across both chunks: + + >>> table.unify_dictionaries() + pyarrow.Table + animals: dictionary + ---- + animals: [ -- dictionary: + ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] -- indices: + [0,1,2], -- dictionary: + ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] -- indices: + [3,4,5]] + """ + cdef: + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + shared_ptr[CTable] c_result + + with nogil: + c_result = GetResultValue(CDictionaryUnifier.UnifyTable( + deref(self.table), pool)) + + return pyarrow_wrap_table(c_result) + + def equals(self, Table other, bint check_metadata=False): + """ + Check if contents of two tables are equal. + + Parameters + ---------- + other : pyarrow.Table + Table to compare against. + check_metadata : bool, default False + Whether schema metadata equality should be checked as well. + + Returns + ------- + bool + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> names=["n_legs", "animals"] + >>> table = pa.Table.from_arrays([n_legs, animals], names=names) + >>> table_0 = pa.Table.from_arrays([]) + >>> table_1 = pa.Table.from_arrays([n_legs, animals], + ... 
names=names, + ... metadata={"n_legs": "Number of legs per animal"}) + >>> table.equals(table) + True + >>> table.equals(table_0) + False + >>> table.equals(table_1) + True + >>> table.equals(table_1, check_metadata=True) + False + """ + if other is None: + return False + + cdef: + CTable* this_table = self.table + CTable* other_table = other.table + c_bool result + + with nogil: + result = this_table.Equals(deref(other_table), check_metadata) + + return result + + def cast(self, Schema target_schema, safe=None, options=None): + """ + Cast table values to another schema. + + Parameters + ---------- + target_schema : Schema + Schema to cast to, the names and order of fields must match. + safe : bool, default True + Check for overflows or other unsafe conversions. + options : CastOptions, default None + Additional checks pass by CastOptions + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.schema + n_legs: int64 + animals: string + -- schema metadata -- + pandas: '{"index_columns": [{"kind": "range", "name": null, "start": 0, ... + + Define new schema and cast table values: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.duration('s')), + ... pa.field('animals', pa.string())] + ... 
) + >>> table.cast(target_schema=my_schema) + pyarrow.Table + n_legs: duration[s] + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + """ + cdef: + ChunkedArray column, casted + Field field + list newcols = [] + + if self.schema.names != target_schema.names: + raise ValueError("Target schema's field names are not matching " + "the table's field names: {!r}, {!r}" + .format(self.schema.names, target_schema.names)) + + for column, field in zip(self.itercolumns(), target_schema): + if not field.nullable and column.null_count > 0: + raise ValueError("Casting field {!r} with null values to non-nullable" + .format(field.name)) + casted = column.cast(field.type, safe=safe, options=options) + newcols.append(casted) + + return Table.from_arrays(newcols, schema=target_schema) + + @classmethod + def from_pandas(cls, df, Schema schema=None, preserve_index=None, + nthreads=None, columns=None, bint safe=True): + """ + Convert pandas.DataFrame to an Arrow Table. + + The column types in the resulting Arrow Table are inferred from the + dtypes of the pandas.Series in the DataFrame. In the case of non-object + Series, the NumPy dtype is translated to its Arrow equivalent. In the + case of `object`, we need to guess the datatype by looking at the + Python objects in this Series. + + Be aware that Series of the `object` dtype don't carry enough + information to always lead to a meaningful Arrow type. In the case that + we cannot infer a type, e.g. because the DataFrame is of length 0 or + the Series only contains None/nan objects, the type is set to + null. This behavior can be avoided by constructing an explicit schema + and passing it to this function. + + Parameters + ---------- + df : pandas.DataFrame + schema : pyarrow.Schema, optional + The expected schema of the Arrow Table. This can be used to + indicate the type of columns if we cannot infer it automatically. + If passed, the output will have exactly this schema. 
Columns + specified in the schema that are not found in the DataFrame columns + or its index will raise an error. Additional columns or index + levels in the DataFrame which are not specified in the schema will + be ignored. + preserve_index : bool, optional + Whether to store the index as an additional column in the resulting + ``Table``. The default of None will store the index as a column, + except for RangeIndex which is stored as metadata only. Use + ``preserve_index=True`` to force it to be stored as a column. + nthreads : int, default None + If greater than 1, convert columns to Arrow in parallel using + indicated number of threads. By default, this follows + :func:`pyarrow.cpu_count` (may use up to system CPU count threads). + columns : list, optional + List of column to be converted. If None, use all columns. + safe : bool, default True + Check for overflows or other unsafe conversions. + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> pa.Table.from_pandas(df) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + """ + from pyarrow.pandas_compat import dataframe_to_arrays + arrays, schema, n_rows = dataframe_to_arrays( + df, + schema=schema, + preserve_index=preserve_index, + nthreads=nthreads, + columns=columns, + safe=safe + ) + + # If df is empty but row index is not, create empty Table with rows >0 + cdef vector[shared_ptr[CChunkedArray]] c_arrays + if n_rows: + return pyarrow_wrap_table( + CTable.MakeWithRows(( schema).sp_schema, c_arrays, n_rows)) + else: + return cls.from_arrays(arrays, schema=schema) + + @staticmethod + def from_arrays(arrays, names=None, schema=None, metadata=None): + """ + Construct a Table from Arrow arrays. 
+ + Parameters + ---------- + arrays : list of pyarrow.Array or pyarrow.ChunkedArray + Equal-length arrays that should form the table. + names : list of str, optional + Names for the table columns. If not passed, schema must be passed. + schema : Schema, default None + Schema for the created table. If not passed, names must be passed. + metadata : dict or Mapping, default None + Optional metadata for the schema (if inferred). + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + + Construct a Table from arrays: + + >>> pa.Table.from_arrays([n_legs, animals], names=names) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from arrays with metadata: + + >>> my_metadata={"n_legs": "Number of legs per animal"} + >>> pa.Table.from_arrays([n_legs, animals], + ... names=names, + ... metadata=my_metadata) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + >>> pa.Table.from_arrays([n_legs, animals], + ... names=names, + ... metadata=my_metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Construct a Table from arrays with pyarrow schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"animals": "Name of the animal species"}) + >>> pa.Table.from_arrays([n_legs, animals], + ... schema=my_schema) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + >>> pa.Table.from_arrays([n_legs, animals], + ... 
schema=my_schema).schema + n_legs: int64 + animals: string + -- schema metadata -- + animals: 'Name of the animal species' + """ + cdef: + vector[shared_ptr[CChunkedArray]] columns + shared_ptr[CSchema] c_schema + int i, K = len(arrays) + + converted_arrays = _sanitize_arrays(arrays, names, schema, metadata, + &c_schema) + + columns.reserve(K) + for item in converted_arrays: + if isinstance(item, Array): + columns.push_back( + make_shared[CChunkedArray]( + ( item).sp_array + ) + ) + elif isinstance(item, ChunkedArray): + columns.push_back(( item).sp_chunked_array) + else: + raise TypeError(type(item)) + + result = pyarrow_wrap_table(CTable.Make(c_schema, columns)) + result.validate() + return result + + @staticmethod + def from_struct_array(struct_array): + """ + Construct a Table from a StructArray. + + Each field in the StructArray will become a column in the resulting + ``Table``. + + Parameters + ---------- + struct_array : StructArray or ChunkedArray + Array to construct the table from. + + Returns + ------- + pyarrow.Table + + Examples + -------- + >>> import pyarrow as pa + >>> struct = pa.array([{'n_legs': 2, 'animals': 'Parrot'}, + ... {'year': 2022, 'n_legs': 4}]) + >>> pa.Table.from_struct_array(struct).to_pandas() + animals n_legs year + 0 Parrot 2 NaN + 1 None 4 2022.0 + """ + if isinstance(struct_array, Array): + return Table.from_batches([RecordBatch.from_struct_array(struct_array)]) + else: + return Table.from_batches([ + RecordBatch.from_struct_array(chunk) + for chunk in struct_array.chunks + ]) + + def to_struct_array(self, max_chunksize=None): + """ + Convert to a chunked array of struct type. + + Parameters + ---------- + max_chunksize : int, default None + Maximum number of rows for ChunkedArray chunks. Individual chunks + may be smaller depending on the chunk layout of individual columns. 
+ + Returns + ------- + ChunkedArray + """ + return chunked_array([ + batch.to_struct_array() + for batch in self.to_batches(max_chunksize=max_chunksize) + ]) + + @staticmethod + def from_batches(batches, Schema schema=None): + """ + Construct a Table from a sequence or iterator of Arrow RecordBatches. + + Parameters + ---------- + batches : sequence or iterator of RecordBatch + Sequence of RecordBatch to be converted, all schemas must be equal. + schema : Schema, default None + If not passed, will be inferred from the first RecordBatch. + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + >>> batch = pa.record_batch([n_legs, animals], names=names) + >>> batch.to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Horse + 2 5 Brittle stars + 3 100 Centipede + + Construct a Table from a RecordBatch: + + >>> pa.Table.from_batches([batch]) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from a sequence of RecordBatches: + + >>> pa.Table.from_batches([batch, batch]) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100],[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"],["Flamingo","Horse","Brittle stars","Centipede"]] + """ + cdef: + vector[shared_ptr[CRecordBatch]] c_batches + shared_ptr[CTable] c_table + shared_ptr[CSchema] c_schema + RecordBatch batch + + for batch in batches: + c_batches.push_back(batch.sp_batch) + + if schema is None: + if c_batches.size() == 0: + raise ValueError('Must pass schema, or at least ' + 'one RecordBatch') + c_schema = c_batches[0].get().schema() + else: + c_schema = schema.sp_schema + + with nogil: + c_table = GetResultValue( + CTable.FromRecordBatches(c_schema, move(c_batches))) + + return 
pyarrow_wrap_table(c_table) + + def to_batches(self, max_chunksize=None): + """ + Convert Table to a list of RecordBatch objects. + + Note that this method is zero-copy, it merely exposes the same data + under a different API. + + Parameters + ---------- + max_chunksize : int, default None + Maximum number of rows for each RecordBatch chunk. Individual chunks + may be smaller depending on the chunk layout of individual columns. + + Returns + ------- + list[RecordBatch] + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Convert a Table to a RecordBatch: + + >>> table.to_batches()[0].to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Horse + 2 5 Brittle stars + 3 100 Centipede + + Convert a Table to a list of RecordBatches: + + >>> table.to_batches(max_chunksize=2)[0].to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Horse + >>> table.to_batches(max_chunksize=2)[1].to_pandas() + n_legs animals + 0 5 Brittle stars + 1 100 Centipede + """ + cdef: + unique_ptr[TableBatchReader] reader + int64_t c_max_chunksize + list result = [] + shared_ptr[CRecordBatch] batch + + reader.reset(new TableBatchReader(deref(self.table))) + + if max_chunksize is not None: + if not max_chunksize > 0: + raise ValueError("'max_chunksize' should be strictly positive") + c_max_chunksize = max_chunksize + reader.get().set_chunksize(c_max_chunksize) + + while True: + with nogil: + check_status(reader.get().ReadNext(&batch)) + + if batch.get() == NULL: + break + + result.append(pyarrow_wrap_batch(batch)) + + return result + + def to_reader(self, max_chunksize=None): + """ + Convert the Table to a RecordBatchReader. + + Note that this method is zero-copy, it merely exposes the same data + under a different API. 
+ + Parameters + ---------- + max_chunksize : int, default None + Maximum number of rows for each RecordBatch chunk. Individual chunks + may be smaller depending on the chunk layout of individual columns. + + Returns + ------- + RecordBatchReader + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Convert a Table to a RecordBatchReader: + + >>> table.to_reader() + + + >>> reader = table.to_reader() + >>> reader.schema + n_legs: int64 + animals: string + -- schema metadata -- + pandas: '{"index_columns": [{"kind": "range", "name": null, "start": 0, ... + >>> reader.read_all() + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + """ + cdef: + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader reader + shared_ptr[TableBatchReader] t_reader + t_reader = make_shared[TableBatchReader](self.sp_table) + + if max_chunksize is not None: + t_reader.get().set_chunksize(max_chunksize) + + c_reader = dynamic_pointer_cast[CRecordBatchReader, TableBatchReader]( + t_reader) + reader = RecordBatchReader.__new__(RecordBatchReader) + reader.reader = c_reader + return reader + + def _to_pandas(self, options, categories=None, ignore_metadata=False, + types_mapper=None): + from pyarrow.pandas_compat import table_to_dataframe + df = table_to_dataframe( + options, self, categories, + ignore_metadata=ignore_metadata, + types_mapper=types_mapper) + return df + + @property + def schema(self): + """ + Schema of the table and its columns. + + Returns + ------- + Schema + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.schema + n_legs: int64 + animals: string + -- schema metadata -- + pandas: '{"index_columns": [{"kind": "range", "name": null, "start": 0, "' ... + """ + return pyarrow_wrap_schema(self.table.schema()) + + def _column(self, int i): + """ + Select a column by its numeric index. + + Parameters + ---------- + i : int + The index of the column to retrieve. + + Returns + ------- + ChunkedArray + """ + cdef int index = _normalize_index(i, self.num_columns) + cdef ChunkedArray result = pyarrow_wrap_chunked_array( + self.table.column(index)) + result._name = self.schema[index].name + return result + + @property + def num_columns(self): + """ + Number of columns in this table. + + Returns + ------- + int + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.num_columns + 2 + """ + return self.table.num_columns() + + @property + def num_rows(self): + """ + Number of rows in this table. + + Due to the definition of a table, all columns have the same number of + rows. + + Returns + ------- + int + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.num_rows + 4 + """ + return self.table.num_rows() + + @property + def nbytes(self): + """ + Total number of bytes consumed by the elements of the table. + + In other words, the sum of bytes from all buffer ranges referenced. + + Unlike `get_total_buffer_size` this method will account for array + offsets. + + If buffers are shared between arrays then the shared + portion will only be counted multiple times. 
+ + The dictionary of dictionary arrays will always be counted in their + entirety even if the array only references a portion of the dictionary. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.nbytes + 72 + """ + cdef: + CResult[int64_t] c_res_buffer + + with nogil: + c_res_buffer = ReferencedBufferSize(deref(self.table)) + size = GetResultValue(c_res_buffer) + return size + + def get_total_buffer_size(self): + """ + The sum of bytes in each buffer referenced by the table. + + An array may only reference a portion of a buffer. + This method will overestimate in this case and return the + byte size of the entire buffer. + + If a buffer is referenced multiple times then it will + only be counted once. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.get_total_buffer_size() + 76 + """ + cdef: + int64_t total_buffer_size + + total_buffer_size = TotalBufferSize(deref(self.table)) + return total_buffer_size + + def __sizeof__(self): + return super(Table, self).__sizeof__() + self.nbytes + + def add_column(self, int i, field_, column): + """ + Add column to Table at position. + + A new table is returned with the column added, the original table + object is left unchanged. + + Parameters + ---------- + i : int + Index to place the column at. + field_ : str or Field + If a string is passed then the type is deduced from the column + data. + column : Array, list of Array, or values coercible to arrays + Column data. + + Returns + ------- + Table + New table with the passed column added. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Add column: + + >>> year = [2021, 2022, 2019, 2021] + >>> table.add_column(0,"year", [year]) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2021,2022,2019,2021]] + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Original table is left unchanged: + + >>> table + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + """ + cdef: + shared_ptr[CTable] c_table + Field c_field + ChunkedArray c_arr + + if isinstance(column, ChunkedArray): + c_arr = column + else: + c_arr = chunked_array(column) + + if isinstance(field_, Field): + c_field = field_ + else: + c_field = field(field_, c_arr.type) + + with nogil: + c_table = GetResultValue(self.table.AddColumn( + i, c_field.sp_field, c_arr.sp_chunked_array)) + + return pyarrow_wrap_table(c_table) + + def remove_column(self, int i): + """ + Create new Table with the indicated column removed. + + Parameters + ---------- + i : int + Index of column to remove. + + Returns + ------- + Table + New table without the column. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.remove_column(1) + pyarrow.Table + n_legs: int64 + ---- + n_legs: [[2,4,5,100]] + """ + cdef shared_ptr[CTable] c_table + + with nogil: + c_table = GetResultValue(self.table.RemoveColumn(i)) + + return pyarrow_wrap_table(c_table) + + def set_column(self, int i, field_, column): + """ + Replace column in Table at position. 
+ + Parameters + ---------- + i : int + Index to place the column at. + field_ : str or Field + If a string is passed then the type is deduced from the column + data. + column : Array, list of Array, or values coercible to arrays + Column data. + + Returns + ------- + Table + New table with the passed column set. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Replace a column: + + >>> year = [2021, 2022, 2019, 2021] + >>> table.set_column(1,'year', [year]) + pyarrow.Table + n_legs: int64 + year: int64 + ---- + n_legs: [[2,4,5,100]] + year: [[2021,2022,2019,2021]] + """ + cdef: + shared_ptr[CTable] c_table + Field c_field + ChunkedArray c_arr + + if isinstance(column, ChunkedArray): + c_arr = column + else: + c_arr = chunked_array(column) + + if isinstance(field_, Field): + c_field = field_ + else: + c_field = field(field_, c_arr.type) + + with nogil: + c_table = GetResultValue(self.table.SetColumn( + i, c_field.sp_field, c_arr.sp_chunked_array)) + + return pyarrow_wrap_table(c_table) + + def rename_columns(self, names): + """ + Create new table with columns renamed to provided names. + + Parameters + ---------- + names : list[str] or dict[str, str] + List of new column names or mapping of old column names to new column names. + + If a mapping of old to new column names is passed, then all columns which are + found to match a provided old column name will be renamed to the new column name. + If any column names are not found in the mapping, a KeyError will be raised. + + Raises + ------ + KeyError + If any of the column names passed in the names mapping do not exist. + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> new_names = ["n", "name"] + >>> table.rename_columns(new_names) + pyarrow.Table + n: int64 + name: string + ---- + n: [[2,4,5,100]] + name: [["Flamingo","Horse","Brittle stars","Centipede"]] + >>> new_names = {"n_legs": "n", "animals": "name"} + >>> table.rename_columns(new_names) + pyarrow.Table + n: int64 + name: string + ---- + n: [[2,4,5,100]] + name: [["Flamingo","Horse","Brittle stars","Centipede"]] + """ + cdef: + shared_ptr[CTable] c_table + vector[c_string] c_names + + if isinstance(names, list): + for name in names: + c_names.push_back(tobytes(name)) + elif isinstance(names, dict): + idx_to_new_name = {} + for name, new_name in names.items(): + indices = self.schema.get_all_field_indices(name) + + if not indices: + raise KeyError("Column {!r} not found".format(name)) + + for index in indices: + idx_to_new_name[index] = new_name + + for i in range(self.num_columns): + c_names.push_back(tobytes(idx_to_new_name.get(i, self.schema[i].name))) + else: + raise TypeError(f"names must be a list or dict not {type(names)!r}") + + with nogil: + c_table = GetResultValue(self.table.RenameColumns(move(c_names))) + + return pyarrow_wrap_table(c_table) + + def drop(self, columns): + """ + Drop one or more columns and return a new table. + + Alias of Table.drop_columns, but kept for backwards compatibility. + + Parameters + ---------- + columns : str or list[str] + Field name(s) referencing existing column(s). + + Returns + ------- + Table + New table without the column(s). + """ + return self.drop_columns(columns) + + def group_by(self, keys, use_threads=True): + """ + Declare a grouping over the columns of the table. + + Resulting grouping can then be used to perform aggregations + with a subsequent ``aggregate()`` method. + + Parameters + ---------- + keys : str or list[str] + Name of the columns that should be used as the grouping key. 
+ use_threads : bool, default True + Whether to use multithreading or not. When set to True (the + default), no stable ordering of the output is guaranteed. + + Returns + ------- + TableGroupBy + + See Also + -------- + TableGroupBy.aggregate + + Examples + -------- + >>> import pandas as pd + >>> import pyarrow as pa + >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.group_by('year').aggregate([('n_legs', 'sum')]) + pyarrow.Table + year: int64 + n_legs_sum: int64 + ---- + year: [[2020,2022,2021,2019]] + n_legs_sum: [[2,6,104,5]] + """ + return TableGroupBy(self, keys, use_threads=use_threads) + + def join(self, right_table, keys, right_keys=None, join_type="left outer", + left_suffix=None, right_suffix=None, coalesce_keys=True, + use_threads=True): + """ + Perform a join between this table and another one. + + Result of the join will be a new Table, where further + operations can be applied. + + Parameters + ---------- + right_table : Table + The table to join to the current one, acting as the right table + in the join operation. + keys : str or list[str] + The columns from current table that should be used as keys + of the join operation left side. + right_keys : str or list[str], default None + The columns from the right_table that should be used as keys + on the join operation right side. + When ``None`` use the same key names as the left table. + join_type : str, default "left outer" + The kind of join that should be performed, one of + ("left semi", "right semi", "left anti", "right anti", + "inner", "left outer", "right outer", "full outer") + left_suffix : str, default None + Which suffix to add to left column names. This prevents confusion + when the columns in left and right tables have colliding names. 
+ right_suffix : str, default None + Which suffix to add to the right column names. This prevents confusion + when the columns in left and right tables have colliding names. + coalesce_keys : bool, default True + If the duplicated keys should be omitted from one of the sides + in the join result. + use_threads : bool, default True + Whether to use multithreading or not. + + Returns + ------- + Table + + Examples + -------- + >>> import pandas as pd + >>> import pyarrow as pa + >>> df1 = pd.DataFrame({'id': [1, 2, 3], + ... 'year': [2020, 2022, 2019]}) + >>> df2 = pd.DataFrame({'id': [3, 4], + ... 'n_legs': [5, 100], + ... 'animal': ["Brittle stars", "Centipede"]}) + >>> t1 = pa.Table.from_pandas(df1) + >>> t2 = pa.Table.from_pandas(df2) + + Left outer join: + + >>> t1.join(t2, 'id').combine_chunks().sort_by('year') + pyarrow.Table + id: int64 + year: int64 + n_legs: int64 + animal: string + ---- + id: [[3,1,2]] + year: [[2019,2020,2022]] + n_legs: [[5,null,null]] + animal: [["Brittle stars",null,null]] + + Full outer join: + + >>> t1.join(t2, 'id', join_type="full outer").combine_chunks().sort_by('year') + pyarrow.Table + id: int64 + year: int64 + n_legs: int64 + animal: string + ---- + id: [[3,1,2,4]] + year: [[2019,2020,2022,null]] + n_legs: [[5,null,null,100]] + animal: [["Brittle stars",null,null,"Centipede"]] + + Right outer join: + + >>> t1.join(t2, 'id', join_type="right outer").combine_chunks().sort_by('year') + pyarrow.Table + year: int64 + id: int64 + n_legs: int64 + animal: string + ---- + year: [[2019,null]] + id: [[3,4]] + n_legs: [[5,100]] + animal: [["Brittle stars","Centipede"]] + + Right anti join + + >>> t1.join(t2, 'id', join_type="right anti") + pyarrow.Table + id: int64 + n_legs: int64 + animal: string + ---- + id: [[4]] + n_legs: [[100]] + animal: [["Centipede"]] + """ + if right_keys is None: + right_keys = keys + return _pac()._perform_join( + join_type, self, keys, right_table, right_keys, + left_suffix=left_suffix, 
right_suffix=right_suffix, + use_threads=use_threads, coalesce_keys=coalesce_keys, + output_type=Table + ) + + def join_asof(self, right_table, on, by, tolerance, right_on=None, right_by=None): + """ + Perform an asof join between this table and another one. + + This is similar to a left-join except that we match on nearest key rather + than equal keys. Both tables must be sorted by the key. This type of join + is most useful for time series data that are not perfectly aligned. + + Optionally match on equivalent keys with "by" before searching with "on". + + Result of the join will be a new Table, where further + operations can be applied. + + Parameters + ---------- + right_table : Table + The table to join to the current one, acting as the right table + in the join operation. + on : str + The column from current table that should be used as the "on" key + of the join operation left side. + + An inexact match is used on the "on" key, i.e. a row is considered a + match if and only if left_on - tolerance <= right_on <= left_on. + + The input dataset must be sorted by the "on" key. Must be a single + field of a common type. + + Currently, the "on" key must be an integer, date, or timestamp type. + by : str or list[str] + The columns from current table that should be used as the keys + of the join operation left side. The join operation is then done + only for the matches in these columns. + tolerance : int + The tolerance for inexact "on" key matching. A right row is considered + a match with the left row ``right.on - left.on <= tolerance``. The + ``tolerance`` may be: + + - negative, in which case a past-as-of-join occurs; + - or positive, in which case a future-as-of-join occurs; + - or zero, in which case an exact-as-of-join occurs. + + The tolerance is interpreted in the same units as the "on" key. + right_on : str or list[str], default None + The columns from the right_table that should be used as the on key + on the join operation right side. 
            When ``None`` use the same key name as the left table.
        right_by : str or list[str], default None
            The columns from the right_table that should be used as keys
            on the join operation right side.
            When ``None`` use the same key names as the left table.

        Returns
        -------
        Table

        Examples
        --------
        >>> import pyarrow as pa
        >>> t1 = pa.table({'id': [1, 3, 2, 3, 3],
        ...                'year': [2020, 2021, 2022, 2022, 2023]})
        >>> t2 = pa.table({'id': [3, 4],
        ...                'year': [2020, 2021],
        ...                'n_legs': [5, 100],
        ...                'animal': ["Brittle stars", "Centipede"]})

        >>> t1.join_asof(t2, on='year', by='id', tolerance=-2)
        pyarrow.Table
        id: int64
        year: int64
        n_legs: int64
        animal: string
        ----
        id: [[1,3,2,3,3]]
        year: [[2020,2021,2022,2022,2023]]
        n_legs: [[null,5,null,5,null]]
        animal: [[null,"Brittle stars",null,"Brittle stars",null]]
        """
        # Default the right-side keys to the left-side names when not given,
        # mirroring the behavior documented above.
        if right_on is None:
            right_on = on
        if right_by is None:
            right_by = by
        # `_pac()` resolves a helper module defined elsewhere in this file
        # (not visible in this chunk) that implements the actual join.
        return _pac()._perform_join_asof(self, on, by,
                                         right_table, right_on, right_by,
                                         tolerance, output_type=Table)

    def __arrow_c_stream__(self, requested_schema=None):
        """
        Export the table as an Arrow C stream PyCapsule.

        Parameters
        ----------
        requested_schema : PyCapsule, default None
            The schema to which the stream should be casted, passed as a
            PyCapsule containing a C ArrowSchema representation of the
            requested schema.
            Currently, this is not supported and will raise a
            NotImplementedError if the schema doesn't match the current schema.

        Returns
        -------
        PyCapsule
        """
        # Delegates to the RecordBatchReader produced by to_reader(), which
        # implements the PyCapsule stream export.
        return self.to_reader().__arrow_c_stream__(requested_schema)


def _reconstruct_table(arrays, schema):
    """
    Internal: reconstruct pa.Table from pickled components.
    """
    return Table.from_arrays(arrays, schema=schema)


def record_batch(data, names=None, schema=None, metadata=None):
    """
    Create a pyarrow.RecordBatch from another Python data structure or sequence
    of arrays.
+ + Parameters + ---------- + data : dict, list, pandas.DataFrame, Arrow-compatible table + A mapping of strings to Arrays or Python lists, a list of Arrays, + a pandas DataFame, or any tabular object implementing the + Arrow PyCapsule Protocol (has an ``__arrow_c_array__`` or + ``__arrow_c_device_array__`` method). + names : list, default None + Column names if list of arrays passed as data. Mutually exclusive with + 'schema' argument. + schema : Schema, default None + The expected schema of the RecordBatch. If not passed, will be inferred + from the data. Mutually exclusive with 'names' argument. + metadata : dict or Mapping, default None + Optional metadata for the schema (if schema not passed). + + Returns + ------- + RecordBatch + + See Also + -------- + RecordBatch.from_arrays, RecordBatch.from_pandas, table + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + + Construct a RecordBatch from a python dictionary: + + >>> pa.record_batch({"n_legs": n_legs, "animals": animals}) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,2,4,4,5,100] + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + >>> pa.record_batch({"n_legs": n_legs, "animals": animals}).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + + Creating a RecordBatch from a list of arrays with names: + + >>> pa.record_batch([n_legs, animals], names=names) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,2,4,4,5,100] + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + + Creating a RecordBatch from a list of arrays with names and metadata: + + >>> my_metadata={"n_legs": "How many legs does an animal have?"} + >>> pa.record_batch([n_legs, animals], + ... 
names=names, + ... metadata = my_metadata) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,2,4,4,5,100] + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + >>> pa.record_batch([n_legs, animals], + ... names=names, + ... metadata = my_metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'How many legs does an animal have?' + + Creating a RecordBatch from a pandas DataFrame: + + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022], + ... 'month': [3, 5, 7, 9], + ... 'day': [1, 5, 9, 13], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> pa.record_batch(df) + pyarrow.RecordBatch + year: int64 + month: int64 + day: int64 + n_legs: int64 + animals: string + ---- + year: [2020,2022,2021,2022] + month: [3,5,7,9] + day: [1,5,9,13] + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + + >>> pa.record_batch(df).to_pandas() + year month day n_legs animals + 0 2020 3 1 2 Flamingo + 1 2022 5 5 4 Horse + 2 2021 7 9 5 Brittle stars + 3 2022 9 13 100 Centipede + + Creating a RecordBatch from a pandas DataFrame with schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> pa.record_batch(df, my_schema).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + pandas: ... 
+ >>> pa.record_batch(df, my_schema).to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Horse + 2 5 Brittle stars + 3 100 Centipede + """ + # accept schema as first argument for backwards compatibility / usability + if isinstance(names, Schema) and schema is None: + schema = names + names = None + + if isinstance(data, (list, tuple)): + return RecordBatch.from_arrays(data, names=names, schema=schema, + metadata=metadata) + elif isinstance(data, dict): + if names is not None: + raise ValueError( + "The 'names' argument is not valid when passing a dictionary") + return RecordBatch.from_pydict(data, schema=schema, metadata=metadata) + elif hasattr(data, "__arrow_c_device_array__"): + if schema is not None: + requested_schema = schema.__arrow_c_schema__() + else: + requested_schema = None + schema_capsule, array_capsule = data.__arrow_c_device_array__(requested_schema) + batch = RecordBatch._import_from_c_device_capsule(schema_capsule, array_capsule) + if schema is not None and batch.schema != schema: + # __arrow_c_device_array__ coerces schema with best effort, so we might + # need to cast it if the producer wasn't able to cast to exact schema. + batch = batch.cast(schema) + return batch + elif hasattr(data, "__arrow_c_array__"): + if schema is not None: + requested_schema = schema.__arrow_c_schema__() + else: + requested_schema = None + schema_capsule, array_capsule = data.__arrow_c_array__(requested_schema) + batch = RecordBatch._import_from_c_capsule(schema_capsule, array_capsule) + if schema is not None and batch.schema != schema: + # __arrow_c_array__ coerces schema with best effort, so we might + # need to cast it if the producer wasn't able to cast to exact schema. 
+ batch = batch.cast(schema) + return batch + + elif _pandas_api.is_data_frame(data): + return RecordBatch.from_pandas(data, schema=schema) + + else: + raise TypeError("Expected pandas DataFrame or list of arrays") + + +def table(data, names=None, schema=None, metadata=None, nthreads=None): + """ + Create a pyarrow.Table from a Python data structure or sequence of arrays. + + Parameters + ---------- + data : dict, list, pandas.DataFrame, Arrow-compatible table + A mapping of strings to Arrays or Python lists, a list of arrays or + chunked arrays, a pandas DataFame, or any tabular object implementing + the Arrow PyCapsule Protocol (has an ``__arrow_c_array__``, + ``__arrow_c_device_array__`` or ``__arrow_c_stream__`` method). + names : list, default None + Column names if list of arrays passed as data. Mutually exclusive with + 'schema' argument. + schema : Schema, default None + The expected schema of the Arrow Table. If not passed, will be inferred + from the data. Mutually exclusive with 'names' argument. + If passed, the output will have exactly this schema (raising an error + when columns are not found in the data and ignoring additional data not + specified in the schema, when data is a dict or DataFrame). + metadata : dict or Mapping, default None + Optional metadata for the schema (if schema not passed). + nthreads : int, default None + For pandas.DataFrame inputs: if greater than 1, convert columns to + Arrow in parallel using indicated number of threads. By default, + this follows :func:`pyarrow.cpu_count` (may use up to system CPU count + threads). 
+ + Returns + ------- + Table + + See Also + -------- + Table.from_arrays, Table.from_pandas, Table.from_pydict + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + + Construct a Table from a python dictionary: + + >>> pa.table({"n_legs": n_legs, "animals": animals}) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from arrays: + + >>> pa.table([n_legs, animals], names=names) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from arrays with metadata: + + >>> my_metadata={"n_legs": "Number of legs per animal"} + >>> pa.table([n_legs, animals], names=names, metadata = my_metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Construct a Table from pandas DataFrame: + + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> pa.table(df) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2020,2022,2019,2021]] + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from pandas DataFrame with pyarrow schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> pa.table(df, my_schema).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + pandas: '{"index_columns": [], "column_indexes": [{"name": null, ... 
+ + Construct a Table from chunked arrays: + + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> animals = pa.chunked_array([["Flamingo", "Parrot", "Dog"], ["Horse", "Brittle stars", "Centipede"]]) + >>> table = pa.table([n_legs, animals], names=names) + >>> table + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,2,4],[4,5,100]] + animals: [["Flamingo","Parrot","Dog"],["Horse","Brittle stars","Centipede"]] + """ + # accept schema as first argument for backwards compatibility / usability + if isinstance(names, Schema) and schema is None: + schema = names + names = None + + if isinstance(data, (list, tuple)): + return Table.from_arrays(data, names=names, schema=schema, + metadata=metadata) + elif isinstance(data, dict): + if names is not None: + raise ValueError( + "The 'names' argument is not valid when passing a dictionary") + return Table.from_pydict(data, schema=schema, metadata=metadata) + elif _pandas_api.is_data_frame(data): + if names is not None or metadata is not None: + raise ValueError( + "The 'names' and 'metadata' arguments are not valid when " + "passing a pandas DataFrame") + return Table.from_pandas(data, schema=schema, nthreads=nthreads) + elif hasattr(data, "__arrow_c_stream__"): + if names is not None or metadata is not None: + raise ValueError( + "The 'names' and 'metadata' arguments are not valid when " + "using Arrow PyCapsule Interface") + if schema is not None: + requested = schema.__arrow_c_schema__() + else: + requested = None + capsule = data.__arrow_c_stream__(requested) + reader = RecordBatchReader._import_from_c_capsule(capsule) + table = reader.read_all() + if schema is not None and table.schema != schema: + # __arrow_c_array__ coerces schema with best effort, so we might + # need to cast it if the producer wasn't able to cast to exact schema. 
+ table = table.cast(schema) + return table + elif hasattr(data, "__arrow_c_array__") or hasattr(data, "__arrow_c_device_array__"): + if names is not None or metadata is not None: + raise ValueError( + "The 'names' and 'metadata' arguments are not valid when " + "using Arrow PyCapsule Interface") + batch = record_batch(data, schema) + return Table.from_batches([batch]) + else: + raise TypeError( + "Expected pandas DataFrame, python dictionary or list of arrays") + + +def concat_tables(tables, MemoryPool memory_pool=None, str promote_options="none", **kwargs): + """ + Concatenate pyarrow.Table objects. + + If promote_options="none", a zero-copy concatenation will be performed. The schemas + of all the Tables must be the same (except the metadata), otherwise an + exception will be raised. The result Table will share the metadata with the + first table. + + If promote_options="default", any null type arrays will be casted to the type of other + arrays in the column of the same name. If a table is missing a particular + field, null values of the appropriate type will be generated to take the + place of the missing field. The new schema will share the metadata with the + first table. Each field in the new schema will share the metadata with the + first table which has the field defined. Note that type promotions may + involve additional allocations on the given ``memory_pool``. + + If promote_options="permissive", the behavior of default plus types will be promoted + to the common denominator that fits all the fields. + + Parameters + ---------- + tables : iterable of pyarrow.Table objects + Pyarrow tables to concatenate into a single Table. + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool. + promote_options : str, default none + Accepts strings "none", "default" and "permissive". + **kwargs : dict, optional + + Examples + -------- + >>> import pyarrow as pa + >>> t1 = pa.table([ + ... 
pa.array([2, 4, 5, 100]), + ... pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + ... ], names=['n_legs', 'animals']) + >>> t2 = pa.table([ + ... pa.array([2, 4]), + ... pa.array(["Parrot", "Dog"]) + ... ], names=['n_legs', 'animals']) + >>> pa.concat_tables([t1,t2]) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100],[2,4]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"],["Parrot","Dog"]] + + """ + cdef: + vector[shared_ptr[CTable]] c_tables + shared_ptr[CTable] c_result_table + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + Table table + CConcatenateTablesOptions options = ( + CConcatenateTablesOptions.Defaults()) + + if "promote" in kwargs: + warnings.warn( + "promote has been superseded by promote_options='default'.", + FutureWarning, stacklevel=2) + if kwargs['promote'] is True: + promote_options = "default" + + for table in tables: + c_tables.push_back(table.sp_table) + + if promote_options == "permissive": + options.field_merge_options = CField.CMergeOptions.Permissive() + elif promote_options in {"default", "none"}: + options.field_merge_options = CField.CMergeOptions.Defaults() + else: + raise ValueError(f"Invalid promote options: {promote_options}") + + with nogil: + options.unify_schemas = promote_options != "none" + c_result_table = GetResultValue( + ConcatenateTables(c_tables, options, pool)) + + return pyarrow_wrap_table(c_result_table) + + +def _from_pydict(cls, mapping, schema, metadata): + """ + Construct a Table/RecordBatch from Arrow arrays or columns. + + Parameters + ---------- + cls : Class Table/RecordBatch + mapping : dict or Mapping + A mapping of strings to Arrays or Python lists. + schema : Schema, default None + If not passed, will be inferred from the Mapping values. + metadata : dict or Mapping, default None + Optional metadata for the schema (if inferred). 

    Returns
    -------
    Table/RecordBatch
    """

    arrays = []
    if schema is None:
        # No schema: take the column names and order from the mapping itself
        # and let asarray() infer each column's type.
        names = []
        for k, v in mapping.items():
            names.append(k)
            arrays.append(asarray(v))
        return cls.from_arrays(arrays, names, metadata=metadata)
    elif isinstance(schema, Schema):
        for field in schema:
            try:
                v = mapping[field.name]
            except KeyError:
                # Retry with the bytes-encoded field name: the mapping's keys
                # may be bytes rather than str.
                try:
                    v = mapping[tobytes(field.name)]
                except KeyError:
                    # Report every schema field absent from the mapping at
                    # once, not just the first one that failed.
                    present = mapping.keys()
                    missing = [n for n in schema.names if n not in present]
                    raise KeyError(
                        "The passed mapping doesn't contain the "
                        "following field(s) of the schema: {}".
                        format(', '.join(missing))
                    )
            # Coerce each value to an Arrow array of the declared field type.
            arrays.append(asarray(v, type=field.type))
        # Will raise if metadata is not None
        return cls.from_arrays(arrays, schema=schema, metadata=metadata)
    else:
        raise TypeError('Schema must be an instance of pyarrow.Schema')


def _from_pylist(cls, mapping, schema, metadata):
    """
    Construct a Table/RecordBatch from list of rows / dictionaries.

    Parameters
    ----------
    cls : Class Table/RecordBatch
    mapping : list of dicts of rows
        A mapping of strings to row values.
    schema : Schema, default None
        If not passed, will be inferred from the first row of the
        mapping values.
    metadata : dict or Mapping, default None
        Optional metadata for the schema (if inferred).

    Returns
    -------
    Table/RecordBatch
    """

    arrays = []
    if schema is None:
        # Infer the column names from the FIRST row only; rows missing a key
        # contribute None for that column.
        names = []
        if mapping:
            names = list(mapping[0].keys())
        for n in names:
            v = [row[n] if n in row else None for row in mapping]
            arrays.append(v)
        return cls.from_arrays(arrays, names, metadata=metadata)
    else:
        if isinstance(schema, Schema):
            # With an explicit schema, columns are driven by the schema's
            # field names; extra row keys are ignored, missing ones yield None.
            for n in schema.names:
                v = [row[n] if n in row else None for row in mapping]
                arrays.append(v)
            # Will raise if metadata is not None
            return cls.from_arrays(arrays, schema=schema, metadata=metadata)
        else:
            raise TypeError('Schema must be an instance of pyarrow.Schema')


class TableGroupBy:
    """
    A grouping of columns in a table on which to perform aggregations.

    Parameters
    ----------
    table : pyarrow.Table
        Input table to execute the aggregation on.
    keys : str or list[str]
        Name of the grouped columns.
    use_threads : bool, default True
        Whether to use multithreading or not. When set to True (the default),
        no stable ordering of the output is guaranteed.

    Examples
    --------
    >>> import pyarrow as pa
    >>> t = pa.table([
    ...       pa.array(["a", "a", "b", "b", "c"]),
    ...       pa.array([1, 2, 3, 4, 5]),
    ... ], names=["keys", "values"])

    Grouping of columns:

    >>> pa.TableGroupBy(t,"keys")

    Perform aggregations:

    >>> pa.TableGroupBy(t,"keys").aggregate([("values", "sum")])
    pyarrow.Table
    keys: string
    values_sum: int64
    ----
    keys: [["a","b","c"]]
    values_sum: [[3,7,5]]
    """

    def __init__(self, table, keys, use_threads=True):
        # Normalize a single key name to a one-element list so the rest of
        # the class can assume `keys` is always a list.
        if isinstance(keys, str):
            keys = [keys]

        self._table = table
        self.keys = keys
        self._use_threads = use_threads

    def aggregate(self, aggregations):
        """
        Perform an aggregation over the grouped columns of the table.
+ + Parameters + ---------- + aggregations : list[tuple(str, str)] or \ +list[tuple(str, str, FunctionOptions)] + List of tuples, where each tuple is one aggregation specification + and consists of: aggregation column name followed + by function name and optionally aggregation function option. + Pass empty list to get a single row for each group. + The column name can be a string, an empty list or a list of + column names, for unary, nullary and n-ary aggregation functions + respectively. + + For the list of function names and respective aggregation + function options see :ref:`py-grouped-aggrs`. + + Returns + ------- + Table + Results of the aggregation functions. + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.table([ + ... pa.array(["a", "a", "b", "b", "c"]), + ... pa.array([1, 2, 3, 4, 5]), + ... ], names=["keys", "values"]) + + Sum the column "values" over the grouped column "keys": + + >>> t.group_by("keys").aggregate([("values", "sum")]) + pyarrow.Table + keys: string + values_sum: int64 + ---- + keys: [["a","b","c"]] + values_sum: [[3,7,5]] + + Count the rows over the grouped column "keys": + + >>> t.group_by("keys").aggregate([([], "count_all")]) + pyarrow.Table + keys: string + count_all: int64 + ---- + keys: [["a","b","c"]] + count_all: [[2,2,1]] + + Do multiple aggregations: + + >>> t.group_by("keys").aggregate([ + ... ("values", "sum"), + ... ("keys", "count") + ... ]) + pyarrow.Table + keys: string + values_sum: int64 + keys_count: int64 + ---- + keys: [["a","b","c"]] + values_sum: [[3,7,5]] + keys_count: [[2,2,1]] + + Count the number of non-null values for column "values" + over the grouped column "keys": + + >>> import pyarrow.compute as pc + >>> t.group_by(["keys"]).aggregate([ + ... ("values", "count", pc.CountOptions(mode="only_valid")) + ... 
]) + pyarrow.Table + keys: string + values_count: int64 + ---- + keys: [["a","b","c"]] + values_count: [[2,2,1]] + + Get a single row for each group in column "keys": + + >>> t.group_by("keys").aggregate([]) + pyarrow.Table + keys: string + ---- + keys: [["a","b","c"]] + """ + group_by_aggrs = [] + for aggr in aggregations: + # Set opt to None if not specified + if len(aggr) == 2: + target, func = aggr + opt = None + else: + target, func, opt = aggr + # Ensure target is a list + if not isinstance(target, (list, tuple)): + target = [target] + # Ensure aggregate function is hash_ if needed + if len(self.keys) > 0 and not func.startswith("hash_"): + func = "hash_" + func + if len(self.keys) == 0 and func.startswith("hash_"): + func = func[5:] + # Determine output field name + func_nohash = func if not func.startswith("hash_") else func[5:] + if len(target) == 0: + aggr_name = func_nohash + else: + aggr_name = "_".join(target) + "_" + func_nohash + group_by_aggrs.append((target, func, opt, aggr_name)) + + return _pac()._group_by( + self._table, group_by_aggrs, self.keys, use_threads=self._use_threads + ) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/tensor.pxi b/parrot/lib/python3.10/site-packages/pyarrow/tensor.pxi new file mode 100644 index 0000000000000000000000000000000000000000..6fb4fc99d7cbc3e096979d3eec2ed2028b011d41 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/tensor.pxi @@ -0,0 +1,1296 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Avoid name clash with `pa.struct` function +import struct as _struct + + +cdef class Tensor(_Weakrefable): + """ + A n-dimensional array a.k.a Tensor. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + + type: int32 + shape: (2, 3) + strides: (12, 4) + """ + + def __init__(self): + raise TypeError("Do not call Tensor's constructor directly, use one " + "of the `pyarrow.Tensor.from_*` functions instead.") + + cdef void init(self, const shared_ptr[CTensor]& sp_tensor): + self.sp_tensor = sp_tensor + self.tp = sp_tensor.get() + self.type = pyarrow_wrap_data_type(self.tp.type()) + self._ssize_t_shape = self._make_shape_or_strides_buffer(self.shape) + self._ssize_t_strides = self._make_shape_or_strides_buffer(self.strides) + + def _make_shape_or_strides_buffer(self, values): + """ + Make a bytes object holding an array of `values` cast to `Py_ssize_t`. + """ + return _struct.pack(f"{len(values)}n", *values) + + def __repr__(self): + return """ +type: {0.type} +shape: {0.shape} +strides: {0.strides}""".format(self) + + @staticmethod + def from_numpy(obj, dim_names=None): + """ + Create a Tensor from a numpy array. + + Parameters + ---------- + obj : numpy.ndarray + The source numpy array + dim_names : list, optional + Names of each dimension of the Tensor. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + + type: int32 + shape: (2, 3) + strides: (12, 4) + """ + cdef: + vector[c_string] c_dim_names + shared_ptr[CTensor] ctensor + + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + check_status(NdarrayToTensor(c_default_memory_pool(), obj, + c_dim_names, &ctensor)) + return pyarrow_wrap_tensor(ctensor) + + def to_numpy(self): + """ + Convert arrow::Tensor to numpy.ndarray with zero copy + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.to_numpy() + array([[ 2, 2, 4], + [ 4, 5, 100]], dtype=int32) + """ + cdef PyObject* out + + check_status(TensorToNdarray(self.sp_tensor, self, &out)) + return PyObject_to_object(out) + + def equals(self, Tensor other): + """ + Return true if the tensors contains exactly equal data. + + Parameters + ---------- + other : Tensor + The other tensor to compare for equality. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> y = np.array([[2, 2, 4], [4, 5, 10]], np.int32) + >>> tensor2 = pa.Tensor.from_numpy(y, dim_names=["a","b"]) + >>> tensor.equals(tensor) + True + >>> tensor.equals(tensor2) + False + """ + return self.tp.Equals(deref(other.tp)) + + def __eq__(self, other): + if isinstance(other, Tensor): + return self.equals(other) + else: + return NotImplemented + + def dim_name(self, i): + """ + Returns the name of the i-th tensor dimension. + + Parameters + ---------- + i : int + The physical index of the tensor dimension. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.dim_name(0) + 'dim1' + >>> tensor.dim_name(1) + 'dim2' + """ + return frombytes(self.tp.dim_name(i)) + + @property + def dim_names(self): + """ + Names of this tensor dimensions. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.dim_names + ['dim1', 'dim2'] + """ + return [frombytes(x) for x in tuple(self.tp.dim_names())] + + @property + def is_mutable(self): + """ + Is this tensor mutable or immutable. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.is_mutable + True + """ + return self.tp.is_mutable() + + @property + def is_contiguous(self): + """ + Is this tensor contiguous in memory. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.is_contiguous + True + """ + return self.tp.is_contiguous() + + @property + def ndim(self): + """ + The dimension (n) of this tensor. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.ndim + 2 + """ + return self.tp.ndim() + + @property + def size(self): + """ + The size of this tensor. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.size + 6 + """ + return self.tp.size() + + @property + def shape(self): + """ + The shape of this tensor. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.shape + (2, 3) + """ + # Cython knows how to convert a vector[T] to a Python list + return tuple(self.tp.shape()) + + @property + def strides(self): + """ + Strides of this tensor. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.strides + (12, 4) + """ + return tuple(self.tp.strides()) + + def __getbuffer__(self, cp.Py_buffer* buffer, int flags): + buffer.buf = self.tp.data().get().data() + pep3118_format = self.type.pep3118_format + if pep3118_format is None: + raise NotImplementedError("type %s not supported for buffer " + "protocol" % (self.type,)) + buffer.format = pep3118_format + buffer.itemsize = self.type.bit_width // 8 + buffer.internal = NULL + buffer.len = self.tp.size() * buffer.itemsize + buffer.ndim = self.tp.ndim() + buffer.obj = self + if self.tp.is_mutable(): + buffer.readonly = 0 + else: + buffer.readonly = 1 + buffer.shape = cp.PyBytes_AsString(self._ssize_t_shape) + buffer.strides = cp.PyBytes_AsString(self._ssize_t_strides) + buffer.suboffsets = NULL + + +ctypedef CSparseCOOIndex* _CSparseCOOIndexPtr + + +cdef class SparseCOOTensor(_Weakrefable): + """ + A sparse COO tensor. 
+ """ + + def __init__(self): + raise TypeError("Do not call SparseCOOTensor's constructor directly, " + "use one of the `pyarrow.SparseCOOTensor.from_*` " + "functions instead.") + + cdef void init(self, const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor): + self.sp_sparse_tensor = sp_sparse_tensor + self.stp = sp_sparse_tensor.get() + self.type = pyarrow_wrap_data_type(self.stp.type()) + + def __repr__(self): + return """ +type: {0.type} +shape: {0.shape}""".format(self) + + @classmethod + def from_dense_numpy(cls, obj, dim_names=None): + """ + Convert numpy.ndarray to arrow::SparseCOOTensor + + Parameters + ---------- + obj : numpy.ndarray + Data used to populate the rows. + dim_names : list[str], optional + Names of the dimensions. + + Returns + ------- + pyarrow.SparseCOOTensor + """ + return cls.from_tensor(Tensor.from_numpy(obj, dim_names=dim_names)) + + @staticmethod + def from_numpy(data, coords, shape, dim_names=None): + """ + Create arrow::SparseCOOTensor from numpy.ndarrays + + Parameters + ---------- + data : numpy.ndarray + Data used to populate the rows. + coords : numpy.ndarray + Coordinates of the data. + shape : tuple + Shape of the tensor. + dim_names : list, optional + Names of the dimensions. 
+ """ + cdef shared_ptr[CSparseCOOTensor] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + # Enforce precondition for SparseCOOTensor indices + coords = np.require(coords, dtype='i8', requirements='C') + if coords.ndim != 2: + raise ValueError("Expected 2-dimensional array for " + "SparseCOOTensor indices") + + check_status(NdarraysToSparseCOOTensor(c_default_memory_pool(), + data, coords, c_shape, + c_dim_names, &csparse_tensor)) + return pyarrow_wrap_sparse_coo_tensor(csparse_tensor) + + @staticmethod + def from_scipy(obj, dim_names=None): + """ + Convert scipy.sparse.coo_matrix to arrow::SparseCOOTensor + + Parameters + ---------- + obj : scipy.sparse.csr_matrix + The scipy matrix that should be converted. + dim_names : list, optional + Names of the dimensions. + """ + import scipy.sparse + if not isinstance(obj, scipy.sparse.coo_matrix): + raise TypeError( + "Expected scipy.sparse.coo_matrix, got {}".format(type(obj))) + + cdef shared_ptr[CSparseCOOTensor] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in obj.shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + row = obj.row + col = obj.col + + # When SciPy's coo_matrix has canonical format, its indices matrix is + # sorted in column-major order. As Arrow's SparseCOOIndex is sorted + # in row-major order if it is canonical, we must sort indices matrix + # into row-major order to keep its canonicalness, here. 
+ if obj.has_canonical_format: + order = np.lexsort((col, row)) # sort in row-major order + row = row[order] + col = col[order] + coords = np.vstack([row, col]).T + coords = np.require(coords, dtype='i8', requirements='C') + + check_status(NdarraysToSparseCOOTensor(c_default_memory_pool(), + obj.data, coords, c_shape, + c_dim_names, &csparse_tensor)) + return pyarrow_wrap_sparse_coo_tensor(csparse_tensor) + + @staticmethod + def from_pydata_sparse(obj, dim_names=None): + """ + Convert pydata/sparse.COO to arrow::SparseCOOTensor. + + Parameters + ---------- + obj : pydata.sparse.COO + The sparse multidimensional array that should be converted. + dim_names : list, optional + Names of the dimensions. + """ + import sparse + if not isinstance(obj, sparse.COO): + raise TypeError( + "Expected sparse.COO, got {}".format(type(obj))) + + cdef shared_ptr[CSparseCOOTensor] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in obj.shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + coords = np.require(obj.coords.T, dtype='i8', requirements='C') + + check_status(NdarraysToSparseCOOTensor(c_default_memory_pool(), + obj.data, coords, c_shape, + c_dim_names, &csparse_tensor)) + return pyarrow_wrap_sparse_coo_tensor(csparse_tensor) + + @staticmethod + def from_tensor(obj): + """ + Convert arrow::Tensor to arrow::SparseCOOTensor. + + Parameters + ---------- + obj : Tensor + The tensor that should be converted. + """ + cdef shared_ptr[CSparseCOOTensor] csparse_tensor + cdef shared_ptr[CTensor] ctensor = pyarrow_unwrap_tensor(obj) + + with nogil: + check_status(TensorToSparseCOOTensor(ctensor, &csparse_tensor)) + + return pyarrow_wrap_sparse_coo_tensor(csparse_tensor) + + def to_numpy(self): + """ + Convert arrow::SparseCOOTensor to numpy.ndarrays with zero copy. 
+ """ + cdef PyObject* out_data + cdef PyObject* out_coords + + check_status(SparseCOOTensorToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_coords)) + return PyObject_to_object(out_data), PyObject_to_object(out_coords) + + def to_scipy(self): + """ + Convert arrow::SparseCOOTensor to scipy.sparse.coo_matrix. + """ + from scipy.sparse import coo_matrix + cdef PyObject* out_data + cdef PyObject* out_coords + + check_status(SparseCOOTensorToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_coords)) + data = PyObject_to_object(out_data) + coords = PyObject_to_object(out_coords) + row, col = coords[:, 0], coords[:, 1] + result = coo_matrix((data[:, 0], (row, col)), shape=self.shape) + + # As the description in from_scipy above, we sorted indices matrix + # in row-major order if SciPy's coo_matrix has canonical format. + # So, we must call sum_duplicates() to make the result coo_matrix + # has canonical format. + if self.has_canonical_format: + result.sum_duplicates() + return result + + def to_pydata_sparse(self): + """ + Convert arrow::SparseCOOTensor to pydata/sparse.COO. + """ + from sparse import COO + cdef PyObject* out_data + cdef PyObject* out_coords + + check_status(SparseCOOTensorToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_coords)) + data = PyObject_to_object(out_data) + coords = PyObject_to_object(out_coords) + result = COO(data=data[:, 0], coords=coords.T, shape=self.shape) + return result + + def to_tensor(self): + """ + Convert arrow::SparseCOOTensor to arrow::Tensor. + """ + + cdef shared_ptr[CTensor] ctensor + with nogil: + ctensor = GetResultValue(self.stp.ToTensor()) + + return pyarrow_wrap_tensor(ctensor) + + def equals(self, SparseCOOTensor other): + """ + Return true if sparse tensors contains exactly equal data. + + Parameters + ---------- + other : SparseCOOTensor + The other tensor to compare for equality. 
+ """ + return self.stp.Equals(deref(other.stp)) + + def __eq__(self, other): + if isinstance(other, SparseCOOTensor): + return self.equals(other) + else: + return NotImplemented + + @property + def is_mutable(self): + return self.stp.is_mutable() + + @property + def ndim(self): + return self.stp.ndim() + + @property + def shape(self): + # Cython knows how to convert a vector[T] to a Python list + return tuple(self.stp.shape()) + + @property + def size(self): + return self.stp.size() + + def dim_name(self, i): + """ + Returns the name of the i-th tensor dimension. + + Parameters + ---------- + i : int + The physical index of the tensor dimension. + + Returns + ------- + str + """ + return frombytes(self.stp.dim_name(i)) + + @property + def dim_names(self): + names_tuple = tuple(self.stp.dim_names()) + return tuple(frombytes(x) for x in names_tuple) + + @property + def non_zero_length(self): + return self.stp.non_zero_length() + + @property + def has_canonical_format(self): + cdef: + _CSparseCOOIndexPtr csi + + csi = <_CSparseCOOIndexPtr>(self.stp.sparse_index().get()) + if csi != nullptr: + return csi.is_canonical() + return True + +cdef class SparseCSRMatrix(_Weakrefable): + """ + A sparse CSR matrix. + """ + + def __init__(self): + raise TypeError("Do not call SparseCSRMatrix's constructor directly, " + "use one of the `pyarrow.SparseCSRMatrix.from_*` " + "functions instead.") + + cdef void init(self, const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor): + self.sp_sparse_tensor = sp_sparse_tensor + self.stp = sp_sparse_tensor.get() + self.type = pyarrow_wrap_data_type(self.stp.type()) + + def __repr__(self): + return """ +type: {0.type} +shape: {0.shape}""".format(self) + + @classmethod + def from_dense_numpy(cls, obj, dim_names=None): + """ + Convert numpy.ndarray to arrow::SparseCSRMatrix + + Parameters + ---------- + obj : numpy.ndarray + The dense numpy array that should be converted. + dim_names : list, optional + The names of the dimensions. 
+ + Returns + ------- + pyarrow.SparseCSRMatrix + """ + return cls.from_tensor(Tensor.from_numpy(obj, dim_names=dim_names)) + + @staticmethod + def from_numpy(data, indptr, indices, shape, dim_names=None): + """ + Create arrow::SparseCSRMatrix from numpy.ndarrays. + + Parameters + ---------- + data : numpy.ndarray + Data used to populate the sparse matrix. + indptr : numpy.ndarray + Range of the rows, + The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data. + indices : numpy.ndarray + Column indices of the corresponding non-zero values. + shape : tuple + Shape of the matrix. + dim_names : list, optional + Names of the dimensions. + """ + cdef shared_ptr[CSparseCSRMatrix] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + # Enforce precondition for SparseCSRMatrix indices + indptr = np.require(indptr, dtype='i8') + indices = np.require(indices, dtype='i8') + if indptr.ndim != 1: + raise ValueError("Expected 1-dimensional array for " + "SparseCSRMatrix indptr") + if indices.ndim != 1: + raise ValueError("Expected 1-dimensional array for " + "SparseCSRMatrix indices") + + check_status(NdarraysToSparseCSRMatrix(c_default_memory_pool(), + data, indptr, indices, c_shape, + c_dim_names, &csparse_tensor)) + return pyarrow_wrap_sparse_csr_matrix(csparse_tensor) + + @staticmethod + def from_scipy(obj, dim_names=None): + """ + Convert scipy.sparse.csr_matrix to arrow::SparseCSRMatrix. + + Parameters + ---------- + obj : scipy.sparse.csr_matrix + The scipy matrix that should be converted. + dim_names : list, optional + Names of the dimensions. 
+ """ + import scipy.sparse + if not isinstance(obj, scipy.sparse.csr_matrix): + raise TypeError( + "Expected scipy.sparse.csr_matrix, got {}".format(type(obj))) + + cdef shared_ptr[CSparseCSRMatrix] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in obj.shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + # Enforce precondition for CSparseCSRMatrix indices + indptr = np.require(obj.indptr, dtype='i8') + indices = np.require(obj.indices, dtype='i8') + + check_status(NdarraysToSparseCSRMatrix(c_default_memory_pool(), + obj.data, indptr, indices, + c_shape, c_dim_names, + &csparse_tensor)) + return pyarrow_wrap_sparse_csr_matrix(csparse_tensor) + + @staticmethod + def from_tensor(obj): + """ + Convert arrow::Tensor to arrow::SparseCSRMatrix. + + Parameters + ---------- + obj : Tensor + The dense tensor that should be converted. + """ + cdef shared_ptr[CSparseCSRMatrix] csparse_tensor + cdef shared_ptr[CTensor] ctensor = pyarrow_unwrap_tensor(obj) + + with nogil: + check_status(TensorToSparseCSRMatrix(ctensor, &csparse_tensor)) + + return pyarrow_wrap_sparse_csr_matrix(csparse_tensor) + + def to_numpy(self): + """ + Convert arrow::SparseCSRMatrix to numpy.ndarrays with zero copy. + """ + cdef PyObject* out_data + cdef PyObject* out_indptr + cdef PyObject* out_indices + + check_status(SparseCSRMatrixToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_indptr, + &out_indices)) + return (PyObject_to_object(out_data), PyObject_to_object(out_indptr), + PyObject_to_object(out_indices)) + + def to_scipy(self): + """ + Convert arrow::SparseCSRMatrix to scipy.sparse.csr_matrix. 
+ """ + from scipy.sparse import csr_matrix + cdef PyObject* out_data + cdef PyObject* out_indptr + cdef PyObject* out_indices + + check_status(SparseCSRMatrixToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_indptr, + &out_indices)) + + data = PyObject_to_object(out_data) + indptr = PyObject_to_object(out_indptr) + indices = PyObject_to_object(out_indices) + result = csr_matrix((data[:, 0], indices, indptr), shape=self.shape) + return result + + def to_tensor(self): + """ + Convert arrow::SparseCSRMatrix to arrow::Tensor. + """ + cdef shared_ptr[CTensor] ctensor + with nogil: + ctensor = GetResultValue(self.stp.ToTensor()) + + return pyarrow_wrap_tensor(ctensor) + + def equals(self, SparseCSRMatrix other): + """ + Return true if sparse tensors contains exactly equal data. + + Parameters + ---------- + other : SparseCSRMatrix + The other tensor to compare for equality. + """ + return self.stp.Equals(deref(other.stp)) + + def __eq__(self, other): + if isinstance(other, SparseCSRMatrix): + return self.equals(other) + else: + return NotImplemented + + @property + def is_mutable(self): + return self.stp.is_mutable() + + @property + def ndim(self): + return self.stp.ndim() + + @property + def shape(self): + # Cython knows how to convert a vector[T] to a Python list + return tuple(self.stp.shape()) + + @property + def size(self): + return self.stp.size() + + def dim_name(self, i): + """ + Returns the name of the i-th tensor dimension. + + Parameters + ---------- + i : int + The physical index of the tensor dimension. + + Returns + ------- + str + """ + return frombytes(self.stp.dim_name(i)) + + @property + def dim_names(self): + names_tuple = tuple(self.stp.dim_names()) + return tuple(frombytes(x) for x in names_tuple) + + @property + def non_zero_length(self): + return self.stp.non_zero_length() + +cdef class SparseCSCMatrix(_Weakrefable): + """ + A sparse CSC matrix. 
+ """ + + def __init__(self): + raise TypeError("Do not call SparseCSCMatrix's constructor directly, " + "use one of the `pyarrow.SparseCSCMatrix.from_*` " + "functions instead.") + + cdef void init(self, const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor): + self.sp_sparse_tensor = sp_sparse_tensor + self.stp = sp_sparse_tensor.get() + self.type = pyarrow_wrap_data_type(self.stp.type()) + + def __repr__(self): + return """ +type: {0.type} +shape: {0.shape}""".format(self) + + @classmethod + def from_dense_numpy(cls, obj, dim_names=None): + """ + Convert numpy.ndarray to arrow::SparseCSCMatrix + + Parameters + ---------- + obj : numpy.ndarray + Data used to populate the rows. + dim_names : list[str], optional + Names of the dimensions. + + Returns + ------- + pyarrow.SparseCSCMatrix + """ + return cls.from_tensor(Tensor.from_numpy(obj, dim_names=dim_names)) + + @staticmethod + def from_numpy(data, indptr, indices, shape, dim_names=None): + """ + Create arrow::SparseCSCMatrix from numpy.ndarrays + + Parameters + ---------- + data : numpy.ndarray + Data used to populate the sparse matrix. + indptr : numpy.ndarray + Range of the rows, + The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data. + indices : numpy.ndarray + Column indices of the corresponding non-zero values. + shape : tuple + Shape of the matrix. + dim_names : list, optional + Names of the dimensions. 
+ """ + cdef shared_ptr[CSparseCSCMatrix] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + # Enforce precondition for SparseCSCMatrix indices + indptr = np.require(indptr, dtype='i8') + indices = np.require(indices, dtype='i8') + if indptr.ndim != 1: + raise ValueError("Expected 1-dimensional array for " + "SparseCSCMatrix indptr") + if indices.ndim != 1: + raise ValueError("Expected 1-dimensional array for " + "SparseCSCMatrix indices") + + check_status(NdarraysToSparseCSCMatrix(c_default_memory_pool(), + data, indptr, indices, c_shape, + c_dim_names, &csparse_tensor)) + return pyarrow_wrap_sparse_csc_matrix(csparse_tensor) + + @staticmethod + def from_scipy(obj, dim_names=None): + """ + Convert scipy.sparse.csc_matrix to arrow::SparseCSCMatrix + + Parameters + ---------- + obj : scipy.sparse.csc_matrix + The scipy matrix that should be converted. + dim_names : list, optional + Names of the dimensions. 
+ """ + import scipy.sparse + if not isinstance(obj, scipy.sparse.csc_matrix): + raise TypeError( + "Expected scipy.sparse.csc_matrix, got {}".format(type(obj))) + + cdef shared_ptr[CSparseCSCMatrix] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in obj.shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + # Enforce precondition for CSparseCSCMatrix indices + indptr = np.require(obj.indptr, dtype='i8') + indices = np.require(obj.indices, dtype='i8') + + check_status(NdarraysToSparseCSCMatrix(c_default_memory_pool(), + obj.data, indptr, indices, + c_shape, c_dim_names, + &csparse_tensor)) + return pyarrow_wrap_sparse_csc_matrix(csparse_tensor) + + @staticmethod + def from_tensor(obj): + """ + Convert arrow::Tensor to arrow::SparseCSCMatrix + + Parameters + ---------- + obj : Tensor + The dense tensor that should be converted. + """ + cdef shared_ptr[CSparseCSCMatrix] csparse_tensor + cdef shared_ptr[CTensor] ctensor = pyarrow_unwrap_tensor(obj) + + with nogil: + check_status(TensorToSparseCSCMatrix(ctensor, &csparse_tensor)) + + return pyarrow_wrap_sparse_csc_matrix(csparse_tensor) + + def to_numpy(self): + """ + Convert arrow::SparseCSCMatrix to numpy.ndarrays with zero copy + """ + cdef PyObject* out_data + cdef PyObject* out_indptr + cdef PyObject* out_indices + + check_status(SparseCSCMatrixToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_indptr, + &out_indices)) + return (PyObject_to_object(out_data), PyObject_to_object(out_indptr), + PyObject_to_object(out_indices)) + + def to_scipy(self): + """ + Convert arrow::SparseCSCMatrix to scipy.sparse.csc_matrix + """ + from scipy.sparse import csc_matrix + cdef PyObject* out_data + cdef PyObject* out_indptr + cdef PyObject* out_indices + + check_status(SparseCSCMatrixToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_indptr, + &out_indices)) + + data = PyObject_to_object(out_data) + indptr = 
PyObject_to_object(out_indptr) + indices = PyObject_to_object(out_indices) + result = csc_matrix((data[:, 0], indices, indptr), shape=self.shape) + return result + + def to_tensor(self): + """ + Convert arrow::SparseCSCMatrix to arrow::Tensor + """ + + cdef shared_ptr[CTensor] ctensor + with nogil: + ctensor = GetResultValue(self.stp.ToTensor()) + + return pyarrow_wrap_tensor(ctensor) + + def equals(self, SparseCSCMatrix other): + """ + Return true if sparse tensors contains exactly equal data + + Parameters + ---------- + other : SparseCSCMatrix + The other tensor to compare for equality. + """ + return self.stp.Equals(deref(other.stp)) + + def __eq__(self, other): + if isinstance(other, SparseCSCMatrix): + return self.equals(other) + else: + return NotImplemented + + @property + def is_mutable(self): + return self.stp.is_mutable() + + @property + def ndim(self): + return self.stp.ndim() + + @property + def shape(self): + # Cython knows how to convert a vector[T] to a Python list + return tuple(self.stp.shape()) + + @property + def size(self): + return self.stp.size() + + def dim_name(self, i): + """ + Returns the name of the i-th tensor dimension. + + Parameters + ---------- + i : int + The physical index of the tensor dimension. + + Returns + ------- + str + """ + return frombytes(self.stp.dim_name(i)) + + @property + def dim_names(self): + names_tuple = tuple(self.stp.dim_names()) + return tuple(frombytes(x) for x in names_tuple) + + @property + def non_zero_length(self): + return self.stp.non_zero_length() + + +cdef class SparseCSFTensor(_Weakrefable): + """ + A sparse CSF tensor. + + CSF is a generalization of compressed sparse row (CSR) index. + + CSF index recursively compresses each dimension of a tensor into a set + of prefix trees. Each path from a root to leaf forms one tensor + non-zero index. CSF is implemented with two arrays of buffers and one + arrays of integers. 
+ """ + + def __init__(self): + raise TypeError("Do not call SparseCSFTensor's constructor directly, " + "use one of the `pyarrow.SparseCSFTensor.from_*` " + "functions instead.") + + cdef void init(self, const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor): + self.sp_sparse_tensor = sp_sparse_tensor + self.stp = sp_sparse_tensor.get() + self.type = pyarrow_wrap_data_type(self.stp.type()) + + def __repr__(self): + return """ +type: {0.type} +shape: {0.shape}""".format(self) + + @classmethod + def from_dense_numpy(cls, obj, dim_names=None): + """ + Convert numpy.ndarray to arrow::SparseCSFTensor + + Parameters + ---------- + obj : numpy.ndarray + Data used to populate the rows. + dim_names : list[str], optional + Names of the dimensions. + + Returns + ------- + pyarrow.SparseCSFTensor + """ + return cls.from_tensor(Tensor.from_numpy(obj, dim_names=dim_names)) + + @staticmethod + def from_numpy(data, indptr, indices, shape, axis_order=None, + dim_names=None): + """ + Create arrow::SparseCSFTensor from numpy.ndarrays + + Parameters + ---------- + data : numpy.ndarray + Data used to populate the sparse tensor. + indptr : numpy.ndarray + The sparsity structure. + Each two consecutive dimensions in a tensor correspond to + a buffer in indices. + A pair of consecutive values at `indptr[dim][i]` + `indptr[dim][i + 1]` signify a range of nodes in + `indices[dim + 1]` who are children of `indices[dim][i]` node. + indices : numpy.ndarray + Stores values of nodes. + Each tensor dimension corresponds to a buffer in indptr. + shape : tuple + Shape of the matrix. + axis_order : list, optional + the sequence in which dimensions were traversed to + produce the prefix tree. + dim_names : list, optional + Names of the dimensions. 
+ """ + cdef shared_ptr[CSparseCSFTensor] csparse_tensor + cdef vector[int64_t] c_axis_order + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in shape: + c_shape.push_back(x) + if not axis_order: + axis_order = np.argsort(shape) + for x in axis_order: + c_axis_order.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + # Enforce preconditions for SparseCSFTensor indices + if not (isinstance(indptr, (list, tuple)) and + isinstance(indices, (list, tuple))): + raise TypeError("Expected list or tuple, got {}, {}" + .format(type(indptr), type(indices))) + if len(indptr) != len(shape) - 1: + raise ValueError("Expected list of {ndim} np.arrays for " + "SparseCSFTensor.indptr".format(ndim=len(shape))) + if len(indices) != len(shape): + raise ValueError("Expected list of {ndim} np.arrays for " + "SparseCSFTensor.indices".format(ndim=len(shape))) + if any([x.ndim != 1 for x in indptr]): + raise ValueError("Expected a list of 1-dimensional arrays for " + "SparseCSFTensor.indptr") + if any([x.ndim != 1 for x in indices]): + raise ValueError("Expected a list of 1-dimensional arrays for " + "SparseCSFTensor.indices") + indptr = [np.require(arr, dtype='i8') for arr in indptr] + indices = [np.require(arr, dtype='i8') for arr in indices] + + check_status(NdarraysToSparseCSFTensor(c_default_memory_pool(), data, + indptr, indices, c_shape, + c_axis_order, c_dim_names, + &csparse_tensor)) + return pyarrow_wrap_sparse_csf_tensor(csparse_tensor) + + @staticmethod + def from_tensor(obj): + """ + Convert arrow::Tensor to arrow::SparseCSFTensor + + Parameters + ---------- + obj : Tensor + The dense tensor that should be converted. 
+ """ + cdef shared_ptr[CSparseCSFTensor] csparse_tensor + cdef shared_ptr[CTensor] ctensor = pyarrow_unwrap_tensor(obj) + + with nogil: + check_status(TensorToSparseCSFTensor(ctensor, &csparse_tensor)) + + return pyarrow_wrap_sparse_csf_tensor(csparse_tensor) + + def to_numpy(self): + """ + Convert arrow::SparseCSFTensor to numpy.ndarrays with zero copy + """ + cdef PyObject* out_data + cdef PyObject* out_indptr + cdef PyObject* out_indices + + check_status(SparseCSFTensorToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_indptr, + &out_indices)) + return (PyObject_to_object(out_data), PyObject_to_object(out_indptr), + PyObject_to_object(out_indices)) + + def to_tensor(self): + """ + Convert arrow::SparseCSFTensor to arrow::Tensor + """ + + cdef shared_ptr[CTensor] ctensor + with nogil: + ctensor = GetResultValue(self.stp.ToTensor()) + + return pyarrow_wrap_tensor(ctensor) + + def equals(self, SparseCSFTensor other): + """ + Return true if sparse tensors contains exactly equal data + + Parameters + ---------- + other : SparseCSFTensor + The other tensor to compare for equality. + """ + return self.stp.Equals(deref(other.stp)) + + def __eq__(self, other): + if isinstance(other, SparseCSFTensor): + return self.equals(other) + else: + return NotImplemented + + @property + def is_mutable(self): + return self.stp.is_mutable() + + @property + def ndim(self): + return self.stp.ndim() + + @property + def shape(self): + # Cython knows how to convert a vector[T] to a Python list + return tuple(self.stp.shape()) + + @property + def size(self): + return self.stp.size() + + def dim_name(self, i): + """ + Returns the name of the i-th tensor dimension. + + Parameters + ---------- + i : int + The physical index of the tensor dimension. 
+ + Returns + ------- + str + """ + return frombytes(self.stp.dim_name(i)) + + @property + def dim_names(self): + names_tuple = tuple(self.stp.dim_names()) + return tuple(frombytes(x) for x in names_tuple) + + @property + def non_zero_length(self): + return self.stp.non_zero_length() diff --git a/parrot/lib/python3.10/site-packages/pyarrow/types.pxi b/parrot/lib/python3.10/site-packages/pyarrow/types.pxi new file mode 100644 index 0000000000000000000000000000000000000000..4343d7ea300b0a658a57c2a43b542195d3ecb9bc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/types.pxi @@ -0,0 +1,5565 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from cpython.pycapsule cimport ( + PyCapsule_CheckExact, + PyCapsule_GetPointer, + PyCapsule_GetName, + PyCapsule_New, + PyCapsule_IsValid +) + +import atexit +from collections.abc import Mapping +import pickle +import re +import sys +import warnings +from cython import sizeof + +# These are imprecise because the type (in pandas 0.x) depends on the presence +# of nulls +cdef dict _pandas_type_map = { + _Type_NA: np.object_, # NaNs + _Type_BOOL: np.bool_, + _Type_INT8: np.int8, + _Type_INT16: np.int16, + _Type_INT32: np.int32, + _Type_INT64: np.int64, + _Type_UINT8: np.uint8, + _Type_UINT16: np.uint16, + _Type_UINT32: np.uint32, + _Type_UINT64: np.uint64, + _Type_HALF_FLOAT: np.float16, + _Type_FLOAT: np.float32, + _Type_DOUBLE: np.float64, + # Pandas does not support [D]ay, so default to [ms] for date32 + _Type_DATE32: np.dtype('datetime64[ms]'), + _Type_DATE64: np.dtype('datetime64[ms]'), + _Type_TIMESTAMP: { + 's': np.dtype('datetime64[s]'), + 'ms': np.dtype('datetime64[ms]'), + 'us': np.dtype('datetime64[us]'), + 'ns': np.dtype('datetime64[ns]'), + }, + _Type_DURATION: { + 's': np.dtype('timedelta64[s]'), + 'ms': np.dtype('timedelta64[ms]'), + 'us': np.dtype('timedelta64[us]'), + 'ns': np.dtype('timedelta64[ns]'), + }, + _Type_BINARY: np.object_, + _Type_FIXED_SIZE_BINARY: np.object_, + _Type_STRING: np.object_, + _Type_LIST: np.object_, + _Type_MAP: np.object_, + _Type_DECIMAL128: np.object_, +} + +cdef dict _pep3118_type_map = { + _Type_INT8: b'b', + _Type_INT16: b'h', + _Type_INT32: b'i', + _Type_INT64: b'q', + _Type_UINT8: b'B', + _Type_UINT16: b'H', + _Type_UINT32: b'I', + _Type_UINT64: b'Q', + _Type_HALF_FLOAT: b'e', + _Type_FLOAT: b'f', + _Type_DOUBLE: b'd', +} + + +cdef bytes _datatype_to_pep3118(CDataType* type): + """ + Construct a PEP 3118 format string describing the given datatype. + None is returned for unsupported types. 
+ """ + try: + char = _pep3118_type_map[type.id()] + except KeyError: + return None + else: + if char in b'bBhHiIqQ': + # Use "standard" int widths, not native + return b'=' + char + else: + return char + + +cdef void* _as_c_pointer(v, allow_null=False) except *: + """ + Convert a Python object to a raw C pointer. + + Used mainly for the C data interface. + Integers are accepted as well as capsule objects with a NULL name. + (the latter for compatibility with raw pointers exported by reticulate) + """ + cdef void* c_ptr + cdef const char* capsule_name + if isinstance(v, int): + c_ptr = v + elif isinstance(v, float): + warnings.warn( + "Passing a pointer value as a float is unsafe and only " + "supported for compatibility with older versions of the R " + "Arrow library", UserWarning, stacklevel=2) + c_ptr = v + elif PyCapsule_CheckExact(v): + # An R external pointer was how the R bindings passed pointer values to + # Python from versions 7 to 15 (inclusive); however, the reticulate 1.35.0 + # update changed the name of the capsule from NULL to "r_extptr". + # Newer versions of the R package pass a Python integer; however, this + # workaround ensures that old versions of the R package continue to work + # with newer versions of pyarrow. + capsule_name = PyCapsule_GetName(v) + if capsule_name == NULL or capsule_name == b"r_extptr": + c_ptr = PyCapsule_GetPointer(v, capsule_name) + else: + capsule_name_str = capsule_name.decode() + raise ValueError( + f"Can't convert PyCapsule with name '{capsule_name_str}' to pointer address" + ) + else: + raise TypeError(f"Expected a pointer value, got {type(v)!r}") + if not allow_null and c_ptr == NULL: + raise ValueError(f"Null pointer (value before cast = {v!r})") + return c_ptr + + +def _is_primitive(Type type): + # This is simply a redirect, the official API is in pyarrow.types. 
+ return is_primitive(type) + + +def _get_pandas_type(arrow_type, coerce_to_ns=False): + cdef Type type_id = arrow_type.id + if type_id not in _pandas_type_map: + return None + if coerce_to_ns: + # ARROW-3789: Coerce date/timestamp types to datetime64[ns] + if type_id == _Type_DURATION: + return np.dtype('timedelta64[ns]') + return np.dtype('datetime64[ns]') + pandas_type = _pandas_type_map[type_id] + if isinstance(pandas_type, dict): + unit = getattr(arrow_type, 'unit', None) + pandas_type = pandas_type.get(unit, None) + return pandas_type + + +def _get_pandas_tz_type(arrow_type, coerce_to_ns=False): + from pyarrow.pandas_compat import make_datetimetz + unit = 'ns' if coerce_to_ns else arrow_type.unit + return make_datetimetz(unit, arrow_type.tz) + + +def _to_pandas_dtype(arrow_type, options=None): + coerce_to_ns = (options and options.get('coerce_temporal_nanoseconds', False)) or ( + _pandas_api.is_v1() and arrow_type.id in + [_Type_DATE32, _Type_DATE64, _Type_TIMESTAMP, _Type_DURATION]) + + if getattr(arrow_type, 'tz', None): + dtype = _get_pandas_tz_type(arrow_type, coerce_to_ns) + else: + dtype = _get_pandas_type(arrow_type, coerce_to_ns) + + if not dtype: + raise NotImplementedError(str(arrow_type)) + + return dtype + + +# Workaround for Cython parsing bug +# https://github.com/cython/cython/issues/2143 +ctypedef CFixedWidthType* _CFixedWidthTypePtr + + +cdef class DataType(_Weakrefable): + """ + Base class of all Arrow data types. + + Each data type is an *instance* of this class. + + Examples + -------- + Instance of int64 type: + + >>> import pyarrow as pa + >>> pa.int64() + DataType(int64) + """ + + def __cinit__(self): + pass + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use public " + "functions like pyarrow.int64, pyarrow.list_, etc. 
" + "instead.".format(self.__class__.__name__)) + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + assert type != nullptr + self.sp_type = type + self.type = type.get() + self.pep3118_format = _datatype_to_pep3118(self.type) + + cpdef Field field(self, i): + """ + Parameters + ---------- + i : int + + Returns + ------- + pyarrow.Field + """ + if not isinstance(i, int): + raise TypeError(f"Expected int index, got type '{type(i)}'") + cdef int index = _normalize_index(i, self.type.num_fields()) + return pyarrow_wrap_field(self.type.field(index)) + + @property + def id(self): + return self.type.id() + + @property + def bit_width(self): + """ + Bit width for fixed width type. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.int64() + DataType(int64) + >>> pa.int64().bit_width + 64 + """ + cdef _CFixedWidthTypePtr ty + ty = dynamic_cast[_CFixedWidthTypePtr](self.type) + if ty == nullptr: + raise ValueError("Non-fixed width type") + return ty.bit_width() + + @property + def byte_width(self): + """ + Byte width for fixed width type. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.int64() + DataType(int64) + >>> pa.int64().byte_width + 8 + """ + cdef _CFixedWidthTypePtr ty + ty = dynamic_cast[_CFixedWidthTypePtr](self.type) + if ty == nullptr: + raise ValueError("Non-fixed width type") + byte_width = ty.byte_width() + if byte_width == 0 and self.bit_width != 0: + raise ValueError("Less than one byte") + return byte_width + + @property + def num_fields(self): + """ + The number of child fields. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> pa.int64() + DataType(int64) + >>> pa.int64().num_fields + 0 + >>> pa.list_(pa.string()) + ListType(list) + >>> pa.list_(pa.string()).num_fields + 1 + >>> struct = pa.struct({'x': pa.int32(), 'y': pa.string()}) + >>> struct.num_fields + 2 + """ + return self.type.num_fields() + + @property + def num_buffers(self): + """ + Number of data buffers required to construct Array type + excluding children. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.int64().num_buffers + 2 + >>> pa.string().num_buffers + 3 + """ + return self.type.layout().buffers.size() + + def __str__(self): + return frombytes(self.type.ToString(), safe=True) + + def __hash__(self): + return hash(str(self)) + + def __reduce__(self): + return type_for_alias, (str(self),) + + def __repr__(self): + return '{0.__class__.__name__}({0})'.format(self) + + def __eq__(self, other): + try: + return self.equals(other) + except (TypeError, ValueError): + return NotImplemented + + def equals(self, other, *, check_metadata=False): + """ + Return true if type is equivalent to passed value. + + Parameters + ---------- + other : DataType or string convertible to DataType + check_metadata : bool + Whether nested Field metadata equality should be checked as well. + + Returns + ------- + is_equal : bool + + Examples + -------- + >>> import pyarrow as pa + >>> pa.int64().equals(pa.string()) + False + >>> pa.int64().equals(pa.int64()) + True + """ + cdef: + DataType other_type + c_bool c_check_metadata + + other_type = ensure_type(other) + c_check_metadata = check_metadata + return self.type.Equals(deref(other_type.type), c_check_metadata) + + def to_pandas_dtype(self): + """ + Return the equivalent NumPy / Pandas dtype. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.int64().to_pandas_dtype() + + """ + return _to_pandas_dtype(self) + + def _export_to_c(self, out_ptr): + """ + Export to a C ArrowSchema struct, given its pointer. 
+ + Be careful: if you don't pass the ArrowSchema struct to a consumer, + its memory will leak. This is a low-level function intended for + expert users. + """ + check_status(ExportType(deref(self.type), + _as_c_pointer(out_ptr))) + + @staticmethod + def _import_from_c(in_ptr): + """ + Import DataType from a C ArrowSchema struct, given its pointer. + + This is a low-level function intended for expert users. + """ + result = GetResultValue(ImportType( + _as_c_pointer(in_ptr))) + return pyarrow_wrap_data_type(result) + + def __arrow_c_schema__(self): + """ + Export to a ArrowSchema PyCapsule + + Unlike _export_to_c, this will not leak memory if the capsule is not used. + """ + cdef ArrowSchema* c_schema + capsule = alloc_c_schema(&c_schema) + + with nogil: + check_status(ExportType(deref(self.type), c_schema)) + + return capsule + + @staticmethod + def _import_from_c_capsule(schema): + """ + Import a DataType from a ArrowSchema PyCapsule + + Parameters + ---------- + schema : PyCapsule + A valid PyCapsule with name 'arrow_schema' containing an + ArrowSchema pointer. + """ + cdef: + ArrowSchema* c_schema + shared_ptr[CDataType] c_type + + if not PyCapsule_IsValid(schema, 'arrow_schema'): + raise TypeError( + "Not an ArrowSchema object" + ) + c_schema = PyCapsule_GetPointer(schema, 'arrow_schema') + + with nogil: + c_type = GetResultValue(ImportType(c_schema)) + + return pyarrow_wrap_data_type(c_type) + + +cdef class DictionaryMemo(_Weakrefable): + """ + Tracking container for dictionary-encoded fields. + """ + + def __cinit__(self): + self.sp_memo.reset(new CDictionaryMemo()) + self.memo = self.sp_memo.get() + + +cdef class DictionaryType(DataType): + """ + Concrete class for dictionary data types. 
+ + Examples + -------- + Create an instance of dictionary type: + + >>> import pyarrow as pa + >>> pa.dictionary(pa.int64(), pa.utf8()) + DictionaryType(dictionary) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.dict_type = type.get() + + def __reduce__(self): + return dictionary, (self.index_type, self.value_type, self.ordered) + + @property + def ordered(self): + """ + Whether the dictionary is ordered, i.e. whether the ordering of values + in the dictionary is important. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.dictionary(pa.int64(), pa.utf8()).ordered + False + """ + return self.dict_type.ordered() + + @property + def index_type(self): + """ + The data type of dictionary indices (a signed integer type). + + Examples + -------- + >>> import pyarrow as pa + >>> pa.dictionary(pa.int16(), pa.utf8()).index_type + DataType(int16) + """ + return pyarrow_wrap_data_type(self.dict_type.index_type()) + + @property + def value_type(self): + """ + The dictionary value type. + + The dictionary values are found in an instance of DictionaryArray. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.dictionary(pa.int16(), pa.utf8()).value_type + DataType(string) + """ + return pyarrow_wrap_data_type(self.dict_type.value_type()) + + +cdef class ListType(DataType): + """ + Concrete class for list data types. + + Examples + -------- + Create an instance of ListType: + + >>> import pyarrow as pa + >>> pa.list_(pa.string()) + ListType(list) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.list_type = type.get() + + def __reduce__(self): + return list_, (self.value_field,) + + @property + def value_field(self): + """ + The field for list values. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> pa.list_(pa.string()).value_field + pyarrow.Field + """ + return pyarrow_wrap_field(self.list_type.value_field()) + + @property + def value_type(self): + """ + The data type of list values. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.list_(pa.string()).value_type + DataType(string) + """ + return pyarrow_wrap_data_type(self.list_type.value_type()) + + +cdef class LargeListType(DataType): + """ + Concrete class for large list data types + (like ListType, but with 64-bit offsets). + + Examples + -------- + Create an instance of LargeListType: + + >>> import pyarrow as pa + >>> pa.large_list(pa.string()) + LargeListType(large_list) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.list_type = type.get() + + def __reduce__(self): + return large_list, (self.value_field,) + + @property + def value_field(self): + return pyarrow_wrap_field(self.list_type.value_field()) + + @property + def value_type(self): + """ + The data type of large list values. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.large_list(pa.string()).value_type + DataType(string) + """ + return pyarrow_wrap_data_type(self.list_type.value_type()) + + +cdef class ListViewType(DataType): + """ + Concrete class for list view data types. + + Examples + -------- + Create an instance of ListViewType: + + >>> import pyarrow as pa + >>> pa.list_view(pa.string()) + ListViewType(list_view) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.list_view_type = type.get() + + def __reduce__(self): + return list_view, (self.value_field,) + + @property + def value_field(self): + """ + The field for list view values. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> pa.list_view(pa.string()).value_field + pyarrow.Field + """ + return pyarrow_wrap_field(self.list_view_type.value_field()) + + @property + def value_type(self): + """ + The data type of list view values. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.list_view(pa.string()).value_type + DataType(string) + """ + return pyarrow_wrap_data_type(self.list_view_type.value_type()) + + +cdef class LargeListViewType(DataType): + """ + Concrete class for large list view data types + (like ListViewType, but with 64-bit offsets). + + Examples + -------- + Create an instance of LargeListViewType: + + >>> import pyarrow as pa + >>> pa.large_list_view(pa.string()) + LargeListViewType(large_list_view) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.list_view_type = type.get() + + def __reduce__(self): + return large_list_view, (self.value_field,) + + @property + def value_field(self): + """ + The field for large list view values. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.large_list_view(pa.string()).value_field + pyarrow.Field + """ + return pyarrow_wrap_field(self.list_view_type.value_field()) + + @property + def value_type(self): + """ + The data type of large list view values. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.large_list_view(pa.string()).value_type + DataType(string) + """ + return pyarrow_wrap_data_type(self.list_view_type.value_type()) + + +cdef class MapType(DataType): + """ + Concrete class for map data types. 
+ + Examples + -------- + Create an instance of MapType: + + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32()) + MapType(map) + >>> pa.map_(pa.string(), pa.int32(), keys_sorted=True) + MapType(map) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.map_type = type.get() + + def __reduce__(self): + return map_, (self.key_field, self.item_field) + + @property + def key_field(self): + """ + The field for keys in the map entries. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32()).key_field + pyarrow.Field + """ + return pyarrow_wrap_field(self.map_type.key_field()) + + @property + def key_type(self): + """ + The data type of keys in the map entries. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32()).key_type + DataType(string) + """ + return pyarrow_wrap_data_type(self.map_type.key_type()) + + @property + def item_field(self): + """ + The field for items in the map entries. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32()).item_field + pyarrow.Field + """ + return pyarrow_wrap_field(self.map_type.item_field()) + + @property + def item_type(self): + """ + The data type of items in the map entries. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32()).item_type + DataType(int32) + """ + return pyarrow_wrap_data_type(self.map_type.item_type()) + + @property + def keys_sorted(self): + """ + Should the entries be sorted according to keys. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32(), keys_sorted=True).keys_sorted + True + """ + return self.map_type.keys_sorted() + + +cdef class FixedSizeListType(DataType): + """ + Concrete class for fixed size list data types. 
+ + Examples + -------- + Create an instance of FixedSizeListType: + + >>> import pyarrow as pa + >>> pa.list_(pa.int32(), 2) + FixedSizeListType(fixed_size_list[2]) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.list_type = type.get() + + def __reduce__(self): + return list_, (self.value_type, self.list_size) + + @property + def value_field(self): + """ + The field for list values. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.list_(pa.int32(), 2).value_field + pyarrow.Field + """ + return pyarrow_wrap_field(self.list_type.value_field()) + + @property + def value_type(self): + """ + The data type of large list values. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.list_(pa.int32(), 2).value_type + DataType(int32) + """ + return pyarrow_wrap_data_type(self.list_type.value_type()) + + @property + def list_size(self): + """ + The size of the fixed size lists. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.list_(pa.int32(), 2).list_size + 2 + """ + return self.list_type.list_size() + + +cdef class StructType(DataType): + """ + Concrete class for struct data types. + + ``StructType`` supports direct indexing using ``[...]`` (implemented via + ``__getitem__``) to access its fields. + It will return the struct field with the given index or name. 
+ + Examples + -------- + >>> import pyarrow as pa + + Accessing fields using direct indexing: + + >>> struct_type = pa.struct({'x': pa.int32(), 'y': pa.string()}) + >>> struct_type[0] + pyarrow.Field + >>> struct_type['y'] + pyarrow.Field + + Accessing fields using ``field()``: + + >>> struct_type.field(1) + pyarrow.Field + >>> struct_type.field('x') + pyarrow.Field + + # Creating a schema from the struct type's fields: + >>> pa.schema(list(struct_type)) + x: int32 + y: string + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.struct_type = type.get() + + cdef Field field_by_name(self, name): + """ + Return a child field by its name. + + Parameters + ---------- + name : str + The name of the field to look up. + + Returns + ------- + field : Field + The child field with the given name. + + Raises + ------ + KeyError + If the name isn't found, or if several fields have the given + name. + """ + cdef vector[shared_ptr[CField]] fields + + fields = self.struct_type.GetAllFieldsByName(tobytes(name)) + if fields.size() == 0: + raise KeyError(name) + elif fields.size() > 1: + warnings.warn("Struct field name corresponds to more " + "than one field", UserWarning) + raise KeyError(name) + else: + return pyarrow_wrap_field(fields[0]) + + def get_field_index(self, name): + """ + Return index of the unique field with the given name. + + Parameters + ---------- + name : str + The name of the field to look up. + + Returns + ------- + index : int + The index of the field with the given name; -1 if the + name isn't found or there are several fields with the given + name. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> struct_type = pa.struct({'x': pa.int32(), 'y': pa.string()}) + + Index of the field with a name 'y': + + >>> struct_type.get_field_index('y') + 1 + + Index of the field that does not exist: + + >>> struct_type.get_field_index('z') + -1 + """ + return self.struct_type.GetFieldIndex(tobytes(name)) + + cpdef Field field(self, i): + """ + Select a field by its column name or numeric index. + + Parameters + ---------- + i : int or str + + Returns + ------- + pyarrow.Field + + Examples + -------- + + >>> import pyarrow as pa + >>> struct_type = pa.struct({'x': pa.int32(), 'y': pa.string()}) + + Select the second field: + + >>> struct_type.field(1) + pyarrow.Field + + Select the field named 'x': + + >>> struct_type.field('x') + pyarrow.Field + """ + if isinstance(i, (bytes, str)): + return self.field_by_name(i) + elif isinstance(i, int): + return DataType.field(self, i) + else: + raise TypeError('Expected integer or string index') + + def get_all_field_indices(self, name): + """ + Return sorted list of indices for the fields with the given name. + + Parameters + ---------- + name : str + The name of the field to look up. + + Returns + ------- + indices : List[int] + + Examples + -------- + >>> import pyarrow as pa + >>> struct_type = pa.struct({'x': pa.int32(), 'y': pa.string()}) + >>> struct_type.get_all_field_indices('x') + [0] + """ + return self.struct_type.GetAllFieldIndices(tobytes(name)) + + def __len__(self): + """ + Like num_fields(). + """ + return self.type.num_fields() + + def __iter__(self): + """ + Iterate over struct fields, in order. + """ + for i in range(len(self)): + yield self[i] + + def __getitem__(self, i): + """ + Return the struct field with the given index or name. + + Alias of ``field``. + """ + return self.field(i) + + def __reduce__(self): + return struct, (list(self),) + + +cdef class UnionType(DataType): + """ + Base class for union data types. 
+ + Examples + -------- + Create an instance of a dense UnionType using ``pa.union``: + + >>> import pyarrow as pa + >>> pa.union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())], + ... mode=pa.lib.UnionMode_DENSE), + (DenseUnionType(dense_union),) + + Create an instance of a dense UnionType using ``pa.dense_union``: + + >>> pa.dense_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + DenseUnionType(dense_union) + + Create an instance of a sparse UnionType using ``pa.union``: + + >>> pa.union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())], + ... mode=pa.lib.UnionMode_SPARSE), + (SparseUnionType(sparse_union),) + + Create an instance of a sparse UnionType using ``pa.sparse_union``: + + >>> pa.sparse_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + SparseUnionType(sparse_union) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + + @property + def mode(self): + """ + The mode of the union ("dense" or "sparse"). + + Examples + -------- + >>> import pyarrow as pa + >>> union = pa.sparse_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + >>> union.mode + 'sparse' + """ + cdef CUnionType* type = self.sp_type.get() + cdef int mode = type.mode() + if mode == _UnionMode_DENSE: + return 'dense' + if mode == _UnionMode_SPARSE: + return 'sparse' + assert 0 + + @property + def type_codes(self): + """ + The type code to indicate each data type in this union. + + Examples + -------- + >>> import pyarrow as pa + >>> union = pa.sparse_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + >>> union.type_codes + [0, 1] + """ + cdef CUnionType* type = self.sp_type.get() + return type.type_codes() + + def __len__(self): + """ + Like num_fields(). + """ + return self.type.num_fields() + + def __iter__(self): + """ + Iterate over union members, in order. 
+ """ + for i in range(len(self)): + yield self[i] + + cpdef Field field(self, i): + """ + Return a child field by its numeric index. + + Parameters + ---------- + i : int + + Returns + ------- + pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> union = pa.sparse_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + >>> union[0] + pyarrow.Field + """ + if isinstance(i, int): + return DataType.field(self, i) + else: + raise TypeError('Expected integer') + + def __getitem__(self, i): + """ + Return a child field by its index. + + Alias of ``field``. + """ + return self.field(i) + + def __reduce__(self): + return union, (list(self), self.mode, self.type_codes) + + +cdef class SparseUnionType(UnionType): + """ + Concrete class for sparse union types. + + Examples + -------- + Create an instance of a sparse UnionType using ``pa.union``: + + >>> import pyarrow as pa + >>> pa.union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())], + ... mode=pa.lib.UnionMode_SPARSE), + (SparseUnionType(sparse_union),) + + Create an instance of a sparse UnionType using ``pa.sparse_union``: + + >>> pa.sparse_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + SparseUnionType(sparse_union) + """ + + +cdef class DenseUnionType(UnionType): + """ + Concrete class for dense union types. + + Examples + -------- + Create an instance of a dense UnionType using ``pa.union``: + + >>> import pyarrow as pa + >>> pa.union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())], + ... mode=pa.lib.UnionMode_DENSE), + (DenseUnionType(dense_union),) + + Create an instance of a dense UnionType using ``pa.dense_union``: + + >>> pa.dense_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + DenseUnionType(dense_union) + """ + + +cdef class TimestampType(DataType): + """ + Concrete class for timestamp data types. 
+ + Examples + -------- + >>> import pyarrow as pa + + Create an instance of timestamp type: + + >>> pa.timestamp('us') + TimestampType(timestamp[us]) + + Create an instance of timestamp type with timezone: + + >>> pa.timestamp('s', tz='UTC') + TimestampType(timestamp[s, tz=UTC]) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.ts_type = type.get() + + @property + def unit(self): + """ + The timestamp unit ('s', 'ms', 'us' or 'ns'). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.timestamp('us') + >>> t.unit + 'us' + """ + return timeunit_to_string(self.ts_type.unit()) + + @property + def tz(self): + """ + The timestamp time zone, if any, or None. + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.timestamp('s', tz='UTC') + >>> t.tz + 'UTC' + """ + if self.ts_type.timezone().size() > 0: + return frombytes(self.ts_type.timezone()) + else: + return None + + def __reduce__(self): + return timestamp, (self.unit, self.tz) + + +cdef class Time32Type(DataType): + """ + Concrete class for time32 data types. + + Supported time unit resolutions are 's' [second] + and 'ms' [millisecond]. + + Examples + -------- + Create an instance of time32 type: + + >>> import pyarrow as pa + >>> pa.time32('ms') + Time32Type(time32[ms]) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.time_type = type.get() + + @property + def unit(self): + """ + The time unit ('s' or 'ms'). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.time32('ms') + >>> t.unit + 'ms' + """ + return timeunit_to_string(self.time_type.unit()) + + +cdef class Time64Type(DataType): + """ + Concrete class for time64 data types. + + Supported time unit resolutions are 'us' [microsecond] + and 'ns' [nanosecond]. 
+ + Examples + -------- + Create an instance of time64 type: + + >>> import pyarrow as pa + >>> pa.time64('us') + Time64Type(time64[us]) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.time_type = type.get() + + @property + def unit(self): + """ + The time unit ('us' or 'ns'). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.time64('us') + >>> t.unit + 'us' + """ + return timeunit_to_string(self.time_type.unit()) + + +cdef class DurationType(DataType): + """ + Concrete class for duration data types. + + Examples + -------- + Create an instance of duration type: + + >>> import pyarrow as pa + >>> pa.duration('s') + DurationType(duration[s]) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.duration_type = type.get() + + @property + def unit(self): + """ + The duration unit ('s', 'ms', 'us' or 'ns'). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.duration('s') + >>> t.unit + 's' + """ + return timeunit_to_string(self.duration_type.unit()) + + +cdef class FixedSizeBinaryType(DataType): + """ + Concrete class for fixed-size binary data types. + + Examples + -------- + Create an instance of fixed-size binary type: + + >>> import pyarrow as pa + >>> pa.binary(3) + FixedSizeBinaryType(fixed_size_binary[3]) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.fixed_size_binary_type = ( + type.get()) + + def __reduce__(self): + return binary, (self.byte_width,) + + +cdef class Decimal128Type(FixedSizeBinaryType): + """ + Concrete class for decimal128 data types. 
+ + Examples + -------- + Create an instance of decimal128 type: + + >>> import pyarrow as pa + >>> pa.decimal128(5, 2) + Decimal128Type(decimal128(5, 2)) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + FixedSizeBinaryType.init(self, type) + self.decimal128_type = type.get() + + def __reduce__(self): + return decimal128, (self.precision, self.scale) + + @property + def precision(self): + """ + The decimal precision, in number of decimal digits (an integer). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.decimal128(5, 2) + >>> t.precision + 5 + """ + return self.decimal128_type.precision() + + @property + def scale(self): + """ + The decimal scale (an integer). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.decimal128(5, 2) + >>> t.scale + 2 + """ + return self.decimal128_type.scale() + + +cdef class Decimal256Type(FixedSizeBinaryType): + """ + Concrete class for decimal256 data types. + + Examples + -------- + Create an instance of decimal256 type: + + >>> import pyarrow as pa + >>> pa.decimal256(76, 38) + Decimal256Type(decimal256(76, 38)) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + FixedSizeBinaryType.init(self, type) + self.decimal256_type = type.get() + + def __reduce__(self): + return decimal256, (self.precision, self.scale) + + @property + def precision(self): + """ + The decimal precision, in number of decimal digits (an integer). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.decimal256(76, 38) + >>> t.precision + 76 + """ + return self.decimal256_type.precision() + + @property + def scale(self): + """ + The decimal scale (an integer). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.decimal256(76, 38) + >>> t.scale + 38 + """ + return self.decimal256_type.scale() + + +cdef class RunEndEncodedType(DataType): + """ + Concrete class for run-end encoded types. 
+ """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.run_end_encoded_type = type.get() + + def __reduce__(self): + return run_end_encoded, (self.run_end_type, self.value_type) + + @property + def run_end_type(self): + return pyarrow_wrap_data_type(self.run_end_encoded_type.run_end_type()) + + @property + def value_type(self): + return pyarrow_wrap_data_type(self.run_end_encoded_type.value_type()) + + +cdef class BaseExtensionType(DataType): + """ + Concrete base class for extension types. + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.ext_type = type.get() + + def __arrow_ext_class__(self): + """ + The associated array extension class + """ + return ExtensionArray + + def __arrow_ext_scalar_class__(self): + """ + The associated scalar class + """ + return ExtensionScalar + + @property + def extension_name(self): + """ + The extension type name. + """ + return frombytes(self.ext_type.extension_name()) + + @property + def storage_type(self): + """ + The underlying storage type. + """ + return pyarrow_wrap_data_type(self.ext_type.storage_type()) + + @property + def byte_width(self): + """ + The byte width of the extension type. + """ + if self.ext_type.byte_width() == -1: + raise ValueError("Non-fixed width type") + return self.ext_type.byte_width() + + @property + def bit_width(self): + """ + The bit width of the extension type. + """ + if self.ext_type.bit_width() == -1: + raise ValueError("Non-fixed width type") + return self.ext_type.bit_width() + + def wrap_array(self, storage): + """ + Wrap the given storage array as an extension array. 
+ + Parameters + ---------- + storage : Array or ChunkedArray + + Returns + ------- + array : Array or ChunkedArray + Extension array wrapping the storage array + """ + cdef: + shared_ptr[CDataType] c_storage_type + + if isinstance(storage, Array): + c_storage_type = ( storage).ap.type() + elif isinstance(storage, ChunkedArray): + c_storage_type = ( storage).chunked_array.type() + else: + raise TypeError( + f"Expected array or chunked array, got {storage.__class__}") + + if not c_storage_type.get().Equals(deref(self.ext_type) + .storage_type(), False): + raise TypeError( + f"Incompatible storage type for {self}: " + f"expected {self.storage_type}, got {storage.type}") + + if isinstance(storage, Array): + return pyarrow_wrap_array( + self.ext_type.WrapArray( + self.sp_type, ( storage).sp_array)) + else: + return pyarrow_wrap_chunked_array( + self.ext_type.WrapArray( + self.sp_type, ( storage).sp_chunked_array)) + + +cdef class ExtensionType(BaseExtensionType): + """ + Concrete base class for Python-defined extension types. + + Parameters + ---------- + storage_type : DataType + The underlying storage type for the extension type. + extension_name : str + A unique name distinguishing this extension type. The name will be + used when deserializing IPC data. + + Examples + -------- + Define a UuidType extension type subclassing ExtensionType: + + >>> import pyarrow as pa + >>> class UuidType(pa.ExtensionType): + ... def __init__(self): + ... pa.ExtensionType.__init__(self, pa.binary(16), "my_package.uuid") + ... def __arrow_ext_serialize__(self): + ... # since we don't have a parameterized type, we don't need extra + ... # metadata to be deserialized + ... return b'' + ... @classmethod + ... def __arrow_ext_deserialize__(self, storage_type, serialized): + ... # return an instance of this subclass given the serialized + ... # metadata. + ... return UuidType() + ... 
+ + Register the extension type: + + >>> pa.register_extension_type(UuidType()) + + Create an instance of UuidType extension type: + + >>> uuid_type = UuidType() + + Inspect the extension type: + + >>> uuid_type.extension_name + 'my_package.uuid' + >>> uuid_type.storage_type + FixedSizeBinaryType(fixed_size_binary[16]) + + Wrap an array as an extension array: + + >>> import uuid + >>> storage_array = pa.array([uuid.uuid4().bytes for _ in range(4)], pa.binary(16)) + >>> uuid_type.wrap_array(storage_array) + + [ + ... + ] + + Or do the same with creating an ExtensionArray: + + >>> pa.ExtensionArray.from_storage(uuid_type, storage_array) + + [ + ... + ] + + Unregister the extension type: + + >>> pa.unregister_extension_type("my_package.uuid") + """ + + def __cinit__(self): + if type(self) is ExtensionType: + raise TypeError("Can only instantiate subclasses of " + "ExtensionType") + + def __init__(self, DataType storage_type, extension_name): + """ + Initialize an extension type instance. + + This should be called at the end of the subclass' + ``__init__`` method. 
+ """ + cdef: + shared_ptr[CExtensionType] cpy_ext_type + c_string c_extension_name + + c_extension_name = tobytes(extension_name) + + assert storage_type is not None + check_status(CPyExtensionType.FromClass( + storage_type.sp_type, c_extension_name, type(self), + &cpy_ext_type)) + self.init( cpy_ext_type) + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + BaseExtensionType.init(self, type) + self.cpy_ext_type = type.get() + # Store weakref and serialized version of self on C++ type instance + check_status(self.cpy_ext_type.SetInstance(self)) + + def __eq__(self, other): + # Default implementation to avoid infinite recursion through + # DataType.__eq__ -> ExtensionType::ExtensionEquals -> DataType.__eq__ + if isinstance(other, ExtensionType): + return (type(self) == type(other) and + self.extension_name == other.extension_name and + self.storage_type == other.storage_type) + else: + return NotImplemented + + def __repr__(self): + fmt = '{0.__class__.__name__}({1})' + return fmt.format(self, repr(self.storage_type)) + + def __arrow_ext_serialize__(self): + """ + Serialized representation of metadata to reconstruct the type object. + + This method should return a bytes object, and those serialized bytes + are stored in the custom metadata of the Field holding an extension + type in an IPC message. + The bytes are passed to ``__arrow_ext_deserialize`` and should hold + sufficient information to reconstruct the data type instance. + """ + return NotImplementedError + + @classmethod + def __arrow_ext_deserialize__(self, storage_type, serialized): + """ + Return an extension type instance from the storage type and serialized + metadata. + + This method should return an instance of the ExtensionType subclass + that matches the passed storage type and serialized metadata (the + return value of ``__arrow_ext_serialize__``). 
+ """ + return NotImplementedError + + def __reduce__(self): + return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__()) + + def __arrow_ext_class__(self): + """Return an extension array class to be used for building or + deserializing arrays with this extension type. + + This method should return a subclass of the ExtensionArray class. By + default, if not specialized in the extension implementation, an + extension type array will be a built-in ExtensionArray instance. + """ + return ExtensionArray + + def __arrow_ext_scalar_class__(self): + """Return an extension scalar class for building scalars with this + extension type. + + This method should return subclass of the ExtensionScalar class. By + default, if not specialized in the extension implementation, an + extension type scalar will be a built-in ExtensionScalar instance. + """ + return ExtensionScalar + + +cdef class FixedShapeTensorType(BaseExtensionType): + """ + Concrete class for fixed shape tensor extension type. + + Examples + -------- + Create an instance of fixed shape tensor extension type: + + >>> import pyarrow as pa + >>> pa.fixed_shape_tensor(pa.int32(), [2, 2]) + FixedShapeTensorType(extension) + + Create an instance of fixed shape tensor extension type with + permutation: + + >>> tensor_type = pa.fixed_shape_tensor(pa.int8(), (2, 2, 3), + ... permutation=[0, 2, 1]) + >>> tensor_type.permutation + [0, 2, 1] + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + BaseExtensionType.init(self, type) + self.tensor_ext_type = type.get() + + @property + def value_type(self): + """ + Data type of an individual tensor. + """ + return pyarrow_wrap_data_type(self.tensor_ext_type.value_type()) + + @property + def shape(self): + """ + Shape of the tensors. + """ + return self.tensor_ext_type.shape() + + @property + def dim_names(self): + """ + Explicit names of the dimensions. 
+ """ + list_of_bytes = self.tensor_ext_type.dim_names() + if len(list_of_bytes) != 0: + return [frombytes(x) for x in list_of_bytes] + else: + return None + + @property + def permutation(self): + """ + Indices of the dimensions ordering. + """ + indices = self.tensor_ext_type.permutation() + if len(indices) != 0: + return indices + else: + return None + + def __arrow_ext_class__(self): + return FixedShapeTensorArray + + def __reduce__(self): + return fixed_shape_tensor, (self.value_type, self.shape, + self.dim_names, self.permutation) + + def __arrow_ext_scalar_class__(self): + return FixedShapeTensorScalar + + +_py_extension_type_auto_load = False + + +cdef class PyExtensionType(ExtensionType): + """ + Concrete base class for Python-defined extension types based on pickle + for (de)serialization. + + .. warning:: + This class is deprecated and its deserialization is disabled by default. + :class:`ExtensionType` is recommended instead. + + Parameters + ---------- + storage_type : DataType + The storage type for which the extension is built. + """ + + def __cinit__(self): + if type(self) is PyExtensionType: + raise TypeError("Can only instantiate subclasses of " + "PyExtensionType") + + def __init__(self, DataType storage_type): + warnings.warn( + "pyarrow.PyExtensionType is deprecated " + "and will refuse deserialization by default. 
" + "Instead, please derive from pyarrow.ExtensionType and implement " + "your own serialization mechanism.", + FutureWarning) + ExtensionType.__init__(self, storage_type, "arrow.py_extension_type") + + def __reduce__(self): + raise NotImplementedError("Please implement {0}.__reduce__" + .format(type(self).__name__)) + + def __arrow_ext_serialize__(self): + return pickle.dumps(self) + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + if not _py_extension_type_auto_load: + warnings.warn( + "pickle-based deserialization of pyarrow.PyExtensionType subclasses " + "is disabled by default; if you only ingest " + "trusted data files, you may re-enable this using " + "`pyarrow.PyExtensionType.set_auto_load(True)`.\n" + "In the future, Python-defined extension subclasses should " + "derive from pyarrow.ExtensionType (not pyarrow.PyExtensionType) " + "and implement their own serialization mechanism.\n", + RuntimeWarning) + return UnknownExtensionType(storage_type, serialized) + try: + ty = pickle.loads(serialized) + except Exception: + # For some reason, it's impossible to deserialize the + # ExtensionType instance. Perhaps the serialized data is + # corrupt, or more likely the type is being deserialized + # in an environment where the original Python class or module + # is not available. Fall back on a generic BaseExtensionType. + return UnknownExtensionType(storage_type, serialized) + + if ty.storage_type != storage_type: + raise TypeError("Expected storage type {0} but got {1}" + .format(ty.storage_type, storage_type)) + return ty + + # XXX Cython marks extension types as immutable, so cannot expose this + # as a writable class attribute. + @classmethod + def set_auto_load(cls, value): + """ + Enable or disable auto-loading of serialized PyExtensionType instances. + + Parameters + ---------- + value : bool + Whether to enable auto-loading. 
+ """ + global _py_extension_type_auto_load + assert isinstance(value, bool) + _py_extension_type_auto_load = value + + +cdef class UnknownExtensionType(PyExtensionType): + """ + A concrete class for Python-defined extension types that refer to + an unknown Python implementation. + + Parameters + ---------- + storage_type : DataType + The storage type for which the extension is built. + serialized : bytes + The serialised output. + """ + + cdef: + bytes serialized + + def __init__(self, DataType storage_type, serialized): + self.serialized = serialized + PyExtensionType.__init__(self, storage_type) + + def __arrow_ext_serialize__(self): + return self.serialized + + +_python_extension_types_registry = [] + + +def register_extension_type(ext_type): + """ + Register a Python extension type. + + Registration is based on the extension name (so different registered types + need unique extension names). Registration needs an extension type + instance, but then works for any instance of the same subclass regardless + of parametrization of the type. + + Parameters + ---------- + ext_type : BaseExtensionType instance + The ExtensionType subclass to register. + + Examples + -------- + Define a UuidType extension type subclassing ExtensionType: + + >>> import pyarrow as pa + >>> class UuidType(pa.ExtensionType): + ... def __init__(self): + ... pa.ExtensionType.__init__(self, pa.binary(16), "my_package.uuid") + ... def __arrow_ext_serialize__(self): + ... # since we don't have a parameterized type, we don't need extra + ... # metadata to be deserialized + ... return b'' + ... @classmethod + ... def __arrow_ext_deserialize__(self, storage_type, serialized): + ... # return an instance of this subclass given the serialized + ... # metadata. + ... return UuidType() + ... 
+ + Register the extension type: + + >>> pa.register_extension_type(UuidType()) + + Unregister the extension type: + + >>> pa.unregister_extension_type("my_package.uuid") + """ + cdef: + DataType _type = ensure_type(ext_type, allow_none=False) + + if not isinstance(_type, BaseExtensionType): + raise TypeError("Only extension types can be registered") + + # register on the C++ side + check_status( + RegisterPyExtensionType( _type.sp_type)) + + # register on the python side + _python_extension_types_registry.append(_type) + + +def unregister_extension_type(type_name): + """ + Unregister a Python extension type. + + Parameters + ---------- + type_name : str + The name of the ExtensionType subclass to unregister. + + Examples + -------- + Define a UuidType extension type subclassing ExtensionType: + + >>> import pyarrow as pa + >>> class UuidType(pa.ExtensionType): + ... def __init__(self): + ... pa.ExtensionType.__init__(self, pa.binary(16), "my_package.uuid") + ... def __arrow_ext_serialize__(self): + ... # since we don't have a parameterized type, we don't need extra + ... # metadata to be deserialized + ... return b'' + ... @classmethod + ... def __arrow_ext_deserialize__(self, storage_type, serialized): + ... # return an instance of this subclass given the serialized + ... # metadata. + ... return UuidType() + ... 
+ + Register the extension type: + + >>> pa.register_extension_type(UuidType()) + + Unregister the extension type: + + >>> pa.unregister_extension_type("my_package.uuid") + """ + cdef: + c_string c_type_name = tobytes(type_name) + check_status(UnregisterPyExtensionType(c_type_name)) + + +cdef class KeyValueMetadata(_Metadata, Mapping): + """ + KeyValueMetadata + + Parameters + ---------- + __arg0__ : dict + A dict of the key-value metadata + **kwargs : optional + additional key-value metadata + """ + + def __init__(self, __arg0__=None, **kwargs): + cdef: + vector[c_string] keys, values + shared_ptr[const CKeyValueMetadata] result + + items = [] + if __arg0__ is not None: + other = (__arg0__.items() if isinstance(__arg0__, Mapping) + else __arg0__) + items.extend((tobytes(k), v) for k, v in other) + + prior_keys = {k for k, v in items} + for k, v in kwargs.items(): + k = tobytes(k) + if k in prior_keys: + raise KeyError("Duplicate key {}, " + "use pass all items as list of tuples if you " + "intend to have duplicate keys") + items.append((k, v)) + + keys.reserve(len(items)) + for key, value in items: + keys.push_back(tobytes(key)) + values.push_back(tobytes(value)) + result.reset(new CKeyValueMetadata(move(keys), move(values))) + self.init(result) + + cdef void init(self, const shared_ptr[const CKeyValueMetadata]& wrapped): + self.wrapped = wrapped + self.metadata = wrapped.get() + + @staticmethod + cdef wrap(const shared_ptr[const CKeyValueMetadata]& sp): + cdef KeyValueMetadata self = KeyValueMetadata.__new__(KeyValueMetadata) + self.init(sp) + return self + + cdef inline shared_ptr[const CKeyValueMetadata] unwrap(self) nogil: + return self.wrapped + + def equals(self, KeyValueMetadata other): + """ + Parameters + ---------- + other : pyarrow.KeyValueMetadata + + Returns + ------- + bool + """ + return self.metadata.Equals(deref(other.wrapped)) + + def __repr__(self): + return str(self) + + def __str__(self): + return frombytes(self.metadata.ToString(), safe=True) 

    def __eq__(self, other):
        # First try comparing directly as KeyValueMetadata; equals() raises
        # TypeError for operands that are not KeyValueMetadata.
        try:
            return self.equals(other)
        except TypeError:
            pass

        # Fall back: coerce any Mapping to KeyValueMetadata and retry.
        if isinstance(other, Mapping):
            try:
                other = KeyValueMetadata(other)
                return self.equals(other)
            except TypeError:
                pass

        return NotImplemented

    def __len__(self):
        return self.metadata.size()

    def __contains__(self, key):
        return self.metadata.Contains(tobytes(key))

    def __getitem__(self, key):
        # Returns the value for the first occurrence of the key.
        return GetResultValue(self.metadata.Get(tobytes(key)))

    def __iter__(self):
        return self.keys()

    def __reduce__(self):
        # Pickle as a list of (key, value) tuples, which preserves duplicate
        # keys and insertion order.
        return KeyValueMetadata, (list(self.items()),)

    def key(self, i):
        """
        Return the metadata key (as bytes) at position *i*.

        Parameters
        ----------
        i : int

        Returns
        -------
        bytes
        """
        return self.metadata.key(i)

    def value(self, i):
        """
        Return the metadata value (as bytes) at position *i*.

        Parameters
        ----------
        i : int

        Returns
        -------
        bytes
        """
        return self.metadata.value(i)

    def keys(self):
        # Yield each key (bytes) in insertion order, duplicates included.
        for i in range(self.metadata.size()):
            yield self.metadata.key(i)

    def values(self):
        # Yield each value (bytes) in insertion order.
        for i in range(self.metadata.size()):
            yield self.metadata.value(i)

    def items(self):
        # Yield (key, value) byte pairs in insertion order.
        for i in range(self.metadata.size()):
            yield (self.metadata.key(i), self.metadata.value(i))

    def get_all(self, key):
        """
        Return every value stored under *key* (duplicate keys allowed).

        Parameters
        ----------
        key : str

        Returns
        -------
        list[bytes]
        """
        key = tobytes(key)
        return [v for k, v in self.items() if k == key]

    def to_dict(self):
        """
        Convert KeyValueMetadata to dict.
If a key occurs twice, the value for + the first one is returned + """ + cdef object key # to force coercion to Python + result = ordered_dict() + for i in range(self.metadata.size()): + key = self.metadata.key(i) + if key not in result: + result[key] = self.metadata.value(i) + return result + + +cpdef KeyValueMetadata ensure_metadata(object meta, c_bool allow_none=False): + if allow_none and meta is None: + return None + elif isinstance(meta, KeyValueMetadata): + return meta + else: + return KeyValueMetadata(meta) + + +cdef class Field(_Weakrefable): + """ + A named field, with a data type, nullability, and optional metadata. + + Notes + ----- + Do not use this class's constructor directly; use pyarrow.field + + Examples + -------- + Create an instance of pyarrow.Field: + + >>> import pyarrow as pa + >>> pa.field('key', pa.int32()) + pyarrow.Field + >>> pa.field('key', pa.int32(), nullable=False) + pyarrow.Field + >>> field = pa.field('key', pa.int32(), + ... metadata={"key": "Something important"}) + >>> field + pyarrow.Field + >>> field.metadata + {b'key': b'Something important'} + + Use the field to create a struct type: + + >>> pa.struct([field]) + StructType(struct) + """ + + def __cinit__(self): + pass + + def __init__(self): + raise TypeError("Do not call Field's constructor directly, use " + "`pyarrow.field` instead.") + + cdef void init(self, const shared_ptr[CField]& field): + self.sp_field = field + self.field = field.get() + self.type = pyarrow_wrap_data_type(field.get().type()) + + def equals(self, Field other, bint check_metadata=False): + """ + Test if this field is equal to the other + + Parameters + ---------- + other : pyarrow.Field + check_metadata : bool, default False + Whether Field metadata equality should be checked as well. 
+ + Returns + ------- + is_equal : bool + + Examples + -------- + >>> import pyarrow as pa + >>> f1 = pa.field('key', pa.int32()) + >>> f2 = pa.field('key', pa.int32(), nullable=False) + >>> f1.equals(f2) + False + >>> f1.equals(f1) + True + """ + return self.field.Equals(deref(other.field), check_metadata) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def __reduce__(self): + return field, (self.name, self.type, self.nullable, self.metadata) + + def __str__(self): + return 'pyarrow.Field<{0}>'.format( + frombytes(self.field.ToString(), safe=True)) + + def __repr__(self): + return self.__str__() + + def __hash__(self): + return hash((self.field.name(), self.type, self.field.nullable())) + + @property + def nullable(self): + """ + The field nullability. + + Examples + -------- + >>> import pyarrow as pa + >>> f1 = pa.field('key', pa.int32()) + >>> f2 = pa.field('key', pa.int32(), nullable=False) + >>> f1.nullable + True + >>> f2.nullable + False + """ + return self.field.nullable() + + @property + def name(self): + """ + The field name. + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32()) + >>> field.name + 'key' + """ + return frombytes(self.field.name()) + + @property + def metadata(self): + """ + The field metadata. + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32(), + ... 
metadata={"key": "Something important"}) + >>> field.metadata + {b'key': b'Something important'} + """ + wrapped = pyarrow_wrap_metadata(self.field.metadata()) + if wrapped is not None: + return wrapped.to_dict() + else: + return wrapped + + def with_metadata(self, metadata): + """ + Add metadata as dict of string keys and values to Field + + Parameters + ---------- + metadata : dict + Keys and values must be string-like / coercible to bytes + + Returns + ------- + field : pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32()) + + Create new field by adding metadata to existing one: + + >>> field_new = field.with_metadata({"key": "Something important"}) + >>> field_new + pyarrow.Field + >>> field_new.metadata + {b'key': b'Something important'} + """ + cdef shared_ptr[CField] c_field + + meta = ensure_metadata(metadata, allow_none=False) + with nogil: + c_field = self.field.WithMetadata(meta.unwrap()) + + return pyarrow_wrap_field(c_field) + + def remove_metadata(self): + """ + Create new field without metadata, if any + + Returns + ------- + field : pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32(), + ... 
metadata={"key": "Something important"}) + >>> field.metadata + {b'key': b'Something important'} + + Create new field by removing the metadata from the existing one: + + >>> field_new = field.remove_metadata() + >>> field_new.metadata + """ + cdef shared_ptr[CField] new_field + with nogil: + new_field = self.field.RemoveMetadata() + return pyarrow_wrap_field(new_field) + + def with_type(self, DataType new_type): + """ + A copy of this field with the replaced type + + Parameters + ---------- + new_type : pyarrow.DataType + + Returns + ------- + field : pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32()) + >>> field + pyarrow.Field + + Create new field by replacing type of an existing one: + + >>> field_new = field.with_type(pa.int64()) + >>> field_new + pyarrow.Field + """ + cdef: + shared_ptr[CField] c_field + shared_ptr[CDataType] c_datatype + + c_datatype = pyarrow_unwrap_data_type(new_type) + with nogil: + c_field = self.field.WithType(c_datatype) + + return pyarrow_wrap_field(c_field) + + def with_name(self, name): + """ + A copy of this field with the replaced name + + Parameters + ---------- + name : str + + Returns + ------- + field : pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32()) + >>> field + pyarrow.Field + + Create new field by replacing the name of an existing one: + + >>> field_new = field.with_name('lock') + >>> field_new + pyarrow.Field + """ + cdef: + shared_ptr[CField] c_field + + c_field = self.field.WithName(tobytes(name)) + + return pyarrow_wrap_field(c_field) + + def with_nullable(self, nullable): + """ + A copy of this field with the replaced nullability + + Parameters + ---------- + nullable : bool + + Returns + ------- + field: pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32()) + >>> field + pyarrow.Field + >>> field.nullable + True + + Create new field by replacing the 
nullability of an existing one: + + >>> field_new = field.with_nullable(False) + >>> field_new + pyarrow.Field + >>> field_new.nullable + False + """ + cdef: + shared_ptr[CField] field + c_bool c_nullable + + c_nullable = bool(nullable) + with nogil: + c_field = self.field.WithNullable(c_nullable) + + return pyarrow_wrap_field(c_field) + + def flatten(self): + """ + Flatten this field. If a struct field, individual child fields + will be returned with their names prefixed by the parent's name. + + Returns + ------- + fields : List[pyarrow.Field] + + Examples + -------- + >>> import pyarrow as pa + >>> f1 = pa.field('bar', pa.float64(), nullable=False) + >>> f2 = pa.field('foo', pa.int32()).with_metadata({"key": "Something important"}) + >>> ff = pa.field('ff', pa.struct([f1, f2]), nullable=False) + + Flatten a struct field: + + >>> ff + pyarrow.Field not null> + >>> ff.flatten() + [pyarrow.Field, pyarrow.Field] + """ + cdef vector[shared_ptr[CField]] flattened + with nogil: + flattened = self.field.Flatten() + return [pyarrow_wrap_field(f) for f in flattened] + + def _export_to_c(self, out_ptr): + """ + Export to a C ArrowSchema struct, given its pointer. + + Be careful: if you don't pass the ArrowSchema struct to a consumer, + its memory will leak. This is a low-level function intended for + expert users. + """ + check_status(ExportField(deref(self.field), + _as_c_pointer(out_ptr))) + + @staticmethod + def _import_from_c(in_ptr): + """ + Import Field from a C ArrowSchema struct, given its pointer. + + This is a low-level function intended for expert users. + """ + cdef void* c_ptr = _as_c_pointer(in_ptr) + with nogil: + result = GetResultValue(ImportField( c_ptr)) + return pyarrow_wrap_field(result) + + def __arrow_c_schema__(self): + """ + Export to a ArrowSchema PyCapsule + + Unlike _export_to_c, this will not leak memory if the capsule is not used. 
+ """ + cdef ArrowSchema* c_schema + capsule = alloc_c_schema(&c_schema) + + with nogil: + check_status(ExportField(deref(self.field), c_schema)) + + return capsule + + @staticmethod + def _import_from_c_capsule(schema): + """ + Import a Field from a ArrowSchema PyCapsule + + Parameters + ---------- + schema : PyCapsule + A valid PyCapsule with name 'arrow_schema' containing an + ArrowSchema pointer. + """ + cdef: + ArrowSchema* c_schema + shared_ptr[CField] c_field + + if not PyCapsule_IsValid(schema, 'arrow_schema'): + raise ValueError( + "Not an ArrowSchema object" + ) + c_schema = PyCapsule_GetPointer(schema, 'arrow_schema') + + with nogil: + c_field = GetResultValue(ImportField(c_schema)) + + return pyarrow_wrap_field(c_field) + + +cdef class Schema(_Weakrefable): + """ + A named collection of types a.k.a schema. A schema defines the + column names and types in a record batch or table data structure. + They also contain metadata about the columns. For example, schemas + converted from Pandas contain metadata about their original Pandas + types so they can be converted back to the same types. + + Warnings + -------- + Do not call this class's constructor directly. Instead use + :func:`pyarrow.schema` factory function which makes a new Arrow + Schema object. + + Examples + -------- + Create a new Arrow Schema object: + + >>> import pyarrow as pa + >>> pa.schema([ + ... ('some_int', pa.int32()), + ... ('some_string', pa.string()) + ... ]) + some_int: int32 + some_string: string + + Create Arrow Schema with metadata: + + >>> pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... 
metadata={"n_legs": "Number of legs per animal"}) + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + """ + + def __cinit__(self): + pass + + def __init__(self): + raise TypeError("Do not call Schema's constructor directly, use " + "`pyarrow.schema` instead.") + + def __len__(self): + return self.schema.num_fields() + + def __getitem__(self, key): + # access by integer index + return self._field(key) + + def __iter__(self): + for i in range(len(self)): + yield self[i] + + cdef void init(self, const vector[shared_ptr[CField]]& fields): + self.schema = new CSchema(fields) + self.sp_schema.reset(self.schema) + + cdef void init_schema(self, const shared_ptr[CSchema]& schema): + self.schema = schema.get() + self.sp_schema = schema + + def __reduce__(self): + return schema, (list(self), self.metadata) + + def __hash__(self): + return hash((tuple(self), self.metadata)) + + def __sizeof__(self): + size = 0 + if self.metadata: + for key, value in self.metadata.items(): + size += sys.getsizeof(key) + size += sys.getsizeof(value) + + return size + super(Schema, self).__sizeof__() + + @property + def pandas_metadata(self): + """ + Return deserialized-from-JSON pandas metadata field (if it exists) + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> schema = pa.Table.from_pandas(df).schema + + Select pandas metadata field from Arrow Schema: + + >>> schema.pandas_metadata + {'index_columns': [{'kind': 'range', 'name': None, 'start': 0, 'stop': 4, 'step': 1}], ... + """ + metadata = self.metadata + key = b'pandas' + if metadata is None or key not in metadata: + return None + + import json + return json.loads(metadata[key].decode('utf8')) + + @property + def names(self): + """ + The schema's field names. 
+ + Returns + ------- + list of str + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Get the names of the schema's fields: + + >>> schema.names + ['n_legs', 'animals'] + """ + cdef int i + result = [] + for i in range(self.schema.num_fields()): + name = frombytes(self.schema.field(i).get().name()) + result.append(name) + return result + + @property + def types(self): + """ + The schema's field types. + + Returns + ------- + list of DataType + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Get the types of the schema's fields: + + >>> schema.types + [DataType(int64), DataType(string)] + """ + return [field.type for field in self] + + @property + def metadata(self): + """ + The schema's metadata. + + Returns + ------- + metadata: dict + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + + Get the metadata of the schema's fields: + + >>> schema.metadata + {b'n_legs': b'Number of legs per animal'} + """ + wrapped = pyarrow_wrap_metadata(self.schema.metadata()) + if wrapped is not None: + return wrapped.to_dict() + else: + return wrapped + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def empty_table(self): + """ + Provide an empty table according to the schema. + + Returns + ------- + table: pyarrow.Table + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... 
pa.field('animals', pa.string())]) + + Create an empty table with schema's fields: + + >>> schema.empty_table() + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[]] + animals: [[]] + """ + arrays = [_empty_array(field.type) for field in self] + return Table.from_arrays(arrays, schema=self) + + def equals(self, Schema other not None, bint check_metadata=False): + """ + Test if this schema is equal to the other + + Parameters + ---------- + other : pyarrow.Schema + check_metadata : bool, default False + Key/value metadata must be equal too + + Returns + ------- + is_equal : bool + + Examples + -------- + >>> import pyarrow as pa + >>> schema1 = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> schema2 = pa.schema([ + ... ('some_int', pa.int32()), + ... ('some_string', pa.string()) + ... ]) + + Test two equal schemas: + + >>> schema1.equals(schema1) + True + + Test two unequal schemas: + + >>> schema1.equals(schema2) + False + """ + return self.sp_schema.get().Equals(deref(other.schema), + check_metadata) + + @classmethod + def from_pandas(cls, df, preserve_index=None): + """ + Returns implied schema from dataframe + + Parameters + ---------- + df : pandas.DataFrame + preserve_index : bool, default True + Whether to store the index as an additional column (or columns, for + MultiIndex) in the resulting `Table`. + The default of None will store the index as a column, except for + RangeIndex which is stored as metadata only. Use + ``preserve_index=True`` to force it to be stored as a column. + + Returns + ------- + pyarrow.Schema + + Examples + -------- + >>> import pandas as pd + >>> import pyarrow as pa + >>> df = pd.DataFrame({ + ... 'int': [1, 2], + ... 'str': ['a', 'b'] + ... 
}) + + Create an Arrow Schema from the schema of a pandas dataframe: + + >>> pa.Schema.from_pandas(df) + int: int64 + str: string + -- schema metadata -- + pandas: '{"index_columns": [{"kind": "range", "name": null, ... + """ + from pyarrow.pandas_compat import dataframe_to_types + names, types, metadata = dataframe_to_types( + df, + preserve_index=preserve_index + ) + fields = [] + for name, type_ in zip(names, types): + fields.append(field(name, type_)) + return schema(fields, metadata) + + def field(self, i): + """ + Select a field by its column name or numeric index. + + Parameters + ---------- + i : int or string + + Returns + ------- + pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Select the second field: + + >>> schema.field(1) + pyarrow.Field + + Select the field of the column named 'n_legs': + + >>> schema.field('n_legs') + pyarrow.Field + """ + if isinstance(i, (bytes, str)): + field_index = self.get_field_index(i) + if field_index < 0: + raise KeyError("Column {} does not exist in schema".format(i)) + else: + return self._field(field_index) + elif isinstance(i, int): + return self._field(i) + else: + raise TypeError("Index must either be string or integer") + + def _field(self, int i): + """ + Select a field by its numeric index. 
+ + Parameters + ---------- + i : int + + Returns + ------- + pyarrow.Field + """ + cdef int index = _normalize_index(i, self.schema.num_fields()) + return pyarrow_wrap_field(self.schema.field(index)) + + def field_by_name(self, name): + """ + DEPRECATED + + Parameters + ---------- + name : str + + Returns + ------- + field: pyarrow.Field + """ + cdef: + vector[shared_ptr[CField]] results + + warnings.warn( + "The 'field_by_name' method is deprecated, use 'field' instead", + FutureWarning, stacklevel=2) + + results = self.schema.GetAllFieldsByName(tobytes(name)) + if results.size() == 0: + return None + elif results.size() > 1: + warnings.warn("Schema field name corresponds to more " + "than one field", UserWarning) + return None + else: + return pyarrow_wrap_field(results[0]) + + def get_field_index(self, name): + """ + Return index of the unique field with the given name. + + Parameters + ---------- + name : str + The name of the field to look up. + + Returns + ------- + index : int + The index of the field with the given name; -1 if the + name isn't found or there are several fields with the given + name. + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Get the index of the field named 'animals': + + >>> schema.get_field_index("animals") + 1 + + Index in case of several fields with the given name: + + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string()), + ... pa.field('animals', pa.bool_())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> schema.get_field_index("animals") + -1 + """ + return self.schema.GetFieldIndex(tobytes(name)) + + def get_all_field_indices(self, name): + """ + Return sorted list of indices for the fields with the given name. + + Parameters + ---------- + name : str + The name of the field to look up. 
+ + Returns + ------- + indices : List[int] + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string()), + ... pa.field('animals', pa.bool_())]) + + Get the indexes of the fields named 'animals': + + >>> schema.get_all_field_indices("animals") + [1, 2] + """ + return self.schema.GetAllFieldIndices(tobytes(name)) + + def append(self, Field field): + """ + Append a field at the end of the schema. + + In contrast to Python's ``list.append()`` it does return a new + object, leaving the original Schema unmodified. + + Parameters + ---------- + field : Field + + Returns + ------- + schema: Schema + New object with appended field. + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Append a field 'extra' at the end of the schema: + + >>> schema_new = schema.append(pa.field('extra', pa.bool_())) + >>> schema_new + n_legs: int64 + animals: string + extra: bool + + Original schema is unmodified: + + >>> schema + n_legs: int64 + animals: string + """ + return self.insert(self.schema.num_fields(), field) + + def insert(self, int i, Field field): + """ + Add a field at position i to the schema. + + Parameters + ---------- + i : int + field : Field + + Returns + ------- + schema: Schema + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... 
pa.field('animals', pa.string())]) + + Insert a new field on the second position: + + >>> schema.insert(1, pa.field('extra', pa.bool_())) + n_legs: int64 + extra: bool + animals: string + """ + cdef: + shared_ptr[CSchema] new_schema + shared_ptr[CField] c_field + + c_field = field.sp_field + + with nogil: + new_schema = GetResultValue(self.schema.AddField(i, c_field)) + + return pyarrow_wrap_schema(new_schema) + + def remove(self, int i): + """ + Remove the field at index i from the schema. + + Parameters + ---------- + i : int + + Returns + ------- + schema: Schema + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Remove the second field of the schema: + + >>> schema.remove(1) + n_legs: int64 + """ + cdef shared_ptr[CSchema] new_schema + + with nogil: + new_schema = GetResultValue(self.schema.RemoveField(i)) + + return pyarrow_wrap_schema(new_schema) + + def set(self, int i, Field field): + """ + Replace a field at position i in the schema. + + Parameters + ---------- + i : int + field : Field + + Returns + ------- + schema: Schema + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... 
pa.field('animals', pa.string())]) + + Replace the second field of the schema with a new field 'extra': + + >>> schema.set(1, pa.field('replaced', pa.bool_())) + n_legs: int64 + replaced: bool + """ + cdef: + shared_ptr[CSchema] new_schema + shared_ptr[CField] c_field + + c_field = field.sp_field + + with nogil: + new_schema = GetResultValue(self.schema.SetField(i, c_field)) + + return pyarrow_wrap_schema(new_schema) + + def add_metadata(self, metadata): + """ + DEPRECATED + + Parameters + ---------- + metadata : dict + Keys and values must be string-like / coercible to bytes + """ + warnings.warn("The 'add_metadata' method is deprecated, use " + "'with_metadata' instead", FutureWarning, stacklevel=2) + return self.with_metadata(metadata) + + def with_metadata(self, metadata): + """ + Add metadata as dict of string keys and values to Schema + + Parameters + ---------- + metadata : dict + Keys and values must be string-like / coercible to bytes + + Returns + ------- + schema : pyarrow.Schema + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Add metadata to existing schema field: + + >>> schema.with_metadata({"n_legs": "Number of legs per animal"}) + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + """ + cdef shared_ptr[CSchema] c_schema + + meta = ensure_metadata(metadata, allow_none=False) + with nogil: + c_schema = self.schema.WithMetadata(meta.unwrap()) + + return pyarrow_wrap_schema(c_schema) + + def serialize(self, memory_pool=None): + """ + Write Schema to Buffer as encapsulated IPC message + + Parameters + ---------- + memory_pool : MemoryPool, default None + Uses default memory pool if not specified + + Returns + ------- + serialized : Buffer + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... 
pa.field('animals', pa.string())]) + + Write schema to Buffer: + + >>> schema.serialize() + + """ + cdef: + shared_ptr[CBuffer] buffer + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + with nogil: + buffer = GetResultValue(SerializeSchema(deref(self.schema), + pool)) + return pyarrow_wrap_buffer(buffer) + + def remove_metadata(self): + """ + Create new schema without metadata, if any + + Returns + ------- + schema : pyarrow.Schema + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Create a new schema with removing the metadata from the original: + + >>> schema.remove_metadata() + n_legs: int64 + animals: string + """ + cdef shared_ptr[CSchema] new_schema + with nogil: + new_schema = self.schema.RemoveMetadata() + return pyarrow_wrap_schema(new_schema) + + def to_string(self, truncate_metadata=True, show_field_metadata=True, + show_schema_metadata=True): + """ + Return human-readable representation of Schema + + Parameters + ---------- + truncate_metadata : boolean, default True + Limit metadata key/value display to a single line of ~80 characters + or less + show_field_metadata : boolean, default True + Display Field-level KeyValueMetadata + show_schema_metadata : boolean, default True + Display Schema-level KeyValueMetadata + + Returns + ------- + str : the formatted output + """ + cdef: + c_string result + PrettyPrintOptions options = PrettyPrintOptions.Defaults() + + options.indent = 0 + options.truncate_metadata = truncate_metadata + options.show_field_metadata = show_field_metadata + options.show_schema_metadata = show_schema_metadata + + with nogil: + check_status( + PrettyPrint( + deref(self.schema), + options, + &result + ) + ) + + return frombytes(result, safe=True) + + def 
_export_to_c(self, out_ptr): + """ + Export to a C ArrowSchema struct, given its pointer. + + Be careful: if you don't pass the ArrowSchema struct to a consumer, + its memory will leak. This is a low-level function intended for + expert users. + """ + check_status(ExportSchema(deref(self.schema), + _as_c_pointer(out_ptr))) + + @staticmethod + def _import_from_c(in_ptr): + """ + Import Schema from a C ArrowSchema struct, given its pointer. + + This is a low-level function intended for expert users. + """ + cdef void* c_ptr = _as_c_pointer(in_ptr) + with nogil: + result = GetResultValue(ImportSchema( c_ptr)) + return pyarrow_wrap_schema(result) + + def __str__(self): + return self.to_string() + + def __repr__(self): + return self.__str__() + + def __arrow_c_schema__(self): + """ + Export to a ArrowSchema PyCapsule + + Unlike _export_to_c, this will not leak memory if the capsule is not used. + """ + cdef ArrowSchema* c_schema + capsule = alloc_c_schema(&c_schema) + + with nogil: + check_status(ExportSchema(deref(self.schema), c_schema)) + + return capsule + + @staticmethod + def _import_from_c_capsule(schema): + """ + Import a Schema from a ArrowSchema PyCapsule + + Parameters + ---------- + schema : PyCapsule + A valid PyCapsule with name 'arrow_schema' containing an + ArrowSchema pointer. + """ + cdef: + ArrowSchema* c_schema + + if not PyCapsule_IsValid(schema, 'arrow_schema'): + raise ValueError( + "Not an ArrowSchema object" + ) + c_schema = PyCapsule_GetPointer(schema, 'arrow_schema') + + with nogil: + result = GetResultValue(ImportSchema(c_schema)) + + return pyarrow_wrap_schema(result) + + +def unify_schemas(schemas, *, promote_options="default"): + """ + Unify schemas by merging fields by name. + + The resulting schema will contain the union of fields from all schemas. + Fields with the same name will be merged. Note that two fields with + different types will fail merging by default. 
+ + - The unified field will inherit the metadata from the schema where + that field is first defined. + - The first N fields in the schema will be ordered the same as the + N fields in the first schema. + + The resulting schema will inherit its metadata from the first input + schema. + + Parameters + ---------- + schemas : list of Schema + Schemas to merge into a single one. + promote_options : str, default default + Accepts strings "default" and "permissive". + Default: null and only null can be unified with another type. + Permissive: types are promoted to the greater common denominator. + + Returns + ------- + Schema + + Raises + ------ + ArrowInvalid : + If any input schema contains fields with duplicate names. + If Fields of the same name are not mergeable. + """ + cdef: + Schema schema + CField.CMergeOptions c_options + vector[shared_ptr[CSchema]] c_schemas + for schema in schemas: + if not isinstance(schema, Schema): + raise TypeError("Expected Schema, got {}".format(type(schema))) + c_schemas.push_back(pyarrow_unwrap_schema(schema)) + + if promote_options == "default": + c_options = CField.CMergeOptions.Defaults() + elif promote_options == "permissive": + c_options = CField.CMergeOptions.Permissive() + else: + raise ValueError(f"Invalid merge mode: {promote_options}") + + return pyarrow_wrap_schema( + GetResultValue(UnifySchemas(c_schemas, c_options))) + + +cdef dict _type_cache = {} + + +cdef DataType primitive_type(Type type): + if type in _type_cache: + return _type_cache[type] + + cdef DataType out = DataType.__new__(DataType) + out.init(GetPrimitiveType(type)) + + _type_cache[type] = out + return out + + +# ----------------------------------------------------------- +# Type factory functions + + +def field(name, type=None, nullable=None, metadata=None): + """ + Create a pyarrow.Field instance. + + Parameters + ---------- + name : str or bytes + Name of the field. 
+ Alternatively, you can also pass an object that implements the Arrow + PyCapsule Protocol for schemas (has an ``__arrow_c_schema__`` method). + type : pyarrow.DataType + Arrow datatype of the field. + nullable : bool, default True + Whether the field's values are nullable. + metadata : dict, default None + Optional field metadata, the keys and values must be coercible to + bytes. + + Returns + ------- + field : pyarrow.Field + + Examples + -------- + Create an instance of pyarrow.Field: + + >>> import pyarrow as pa + >>> pa.field('key', pa.int32()) + pyarrow.Field + >>> pa.field('key', pa.int32(), nullable=False) + pyarrow.Field + + >>> field = pa.field('key', pa.int32(), + ... metadata={"key": "Something important"}) + >>> field + pyarrow.Field + >>> field.metadata + {b'key': b'Something important'} + + Use the field to create a struct type: + + >>> pa.struct([field]) + StructType(struct) + """ + if hasattr(name, "__arrow_c_schema__"): + if type is not None: + raise ValueError( + "cannot specify 'type' when creating a Field from an ArrowSchema" + ) + field = Field._import_from_c_capsule(name.__arrow_c_schema__()) + if metadata is not None: + field = field.with_metadata(metadata) + if nullable is not None: + field = field.with_nullable(nullable) + return field + + cdef: + Field result = Field.__new__(Field) + DataType _type = ensure_type(type, allow_none=False) + shared_ptr[const CKeyValueMetadata] c_meta + + nullable = True if nullable is None else nullable + + metadata = ensure_metadata(metadata, allow_none=True) + c_meta = pyarrow_unwrap_metadata(metadata) + + if _type.type.id() == _Type_NA and not nullable: + raise ValueError("A null type field may not be non-nullable") + + result.sp_field.reset( + new CField(tobytes(name), _type.sp_type, nullable, c_meta) + ) + result.field = result.sp_field.get() + result.type = _type + + return result + + +cdef set PRIMITIVE_TYPES = set([ + _Type_NA, _Type_BOOL, + _Type_UINT8, _Type_INT8, + _Type_UINT16, _Type_INT16, + 
_Type_UINT32, _Type_INT32, + _Type_UINT64, _Type_INT64, + _Type_TIMESTAMP, _Type_DATE32, + _Type_TIME32, _Type_TIME64, + _Type_DATE64, + _Type_HALF_FLOAT, + _Type_FLOAT, + _Type_DOUBLE]) + + +def null(): + """ + Create instance of null type. + + Examples + -------- + Create an instance of a null type: + + >>> import pyarrow as pa + >>> pa.null() + DataType(null) + >>> print(pa.null()) + null + + Create a ``Field`` type with a null type and a name: + + >>> pa.field('null_field', pa.null()) + pyarrow.Field + """ + return primitive_type(_Type_NA) + + +def bool_(): + """ + Create instance of boolean type. + + Examples + -------- + Create an instance of a boolean type: + + >>> import pyarrow as pa + >>> pa.bool_() + DataType(bool) + >>> print(pa.bool_()) + bool + + Create a ``Field`` type with a boolean type + and a name: + + >>> pa.field('bool_field', pa.bool_()) + pyarrow.Field + """ + return primitive_type(_Type_BOOL) + + +def uint8(): + """ + Create instance of unsigned int8 type. + + Examples + -------- + Create an instance of unsigned int8 type: + + >>> import pyarrow as pa + >>> pa.uint8() + DataType(uint8) + >>> print(pa.uint8()) + uint8 + + Create an array with unsigned int8 type: + + >>> pa.array([0, 1, 2], type=pa.uint8()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_UINT8) + + +def int8(): + """ + Create instance of signed int8 type. + + Examples + -------- + Create an instance of int8 type: + + >>> import pyarrow as pa + >>> pa.int8() + DataType(int8) + >>> print(pa.int8()) + int8 + + Create an array with int8 type: + + >>> pa.array([0, 1, 2], type=pa.int8()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_INT8) + + +def uint16(): + """ + Create instance of unsigned uint16 type. 
+ + Examples + -------- + Create an instance of unsigned int16 type: + + >>> import pyarrow as pa + >>> pa.uint16() + DataType(uint16) + >>> print(pa.uint16()) + uint16 + + Create an array with unsigned int16 type: + + >>> pa.array([0, 1, 2], type=pa.uint16()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_UINT16) + + +def int16(): + """ + Create instance of signed int16 type. + + Examples + -------- + Create an instance of int16 type: + + >>> import pyarrow as pa + >>> pa.int16() + DataType(int16) + >>> print(pa.int16()) + int16 + + Create an array with int16 type: + + >>> pa.array([0, 1, 2], type=pa.int16()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_INT16) + + +def uint32(): + """ + Create instance of unsigned uint32 type. + + Examples + -------- + Create an instance of unsigned int32 type: + + >>> import pyarrow as pa + >>> pa.uint32() + DataType(uint32) + >>> print(pa.uint32()) + uint32 + + Create an array with unsigned int32 type: + + >>> pa.array([0, 1, 2], type=pa.uint32()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_UINT32) + + +def int32(): + """ + Create instance of signed int32 type. + + Examples + -------- + Create an instance of int32 type: + + >>> import pyarrow as pa + >>> pa.int32() + DataType(int32) + >>> print(pa.int32()) + int32 + + Create an array with int32 type: + + >>> pa.array([0, 1, 2], type=pa.int32()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_INT32) + + +def uint64(): + """ + Create instance of unsigned uint64 type. + + Examples + -------- + Create an instance of unsigned int64 type: + + >>> import pyarrow as pa + >>> pa.uint64() + DataType(uint64) + >>> print(pa.uint64()) + uint64 + + Create an array with unsigned uint64 type: + + >>> pa.array([0, 1, 2], type=pa.uint64()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_UINT64) + + +def int64(): + """ + Create instance of signed int64 type. 
+ + Examples + -------- + Create an instance of int64 type: + + >>> import pyarrow as pa + >>> pa.int64() + DataType(int64) + >>> print(pa.int64()) + int64 + + Create an array with int64 type: + + >>> pa.array([0, 1, 2], type=pa.int64()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_INT64) + + +cdef dict _timestamp_type_cache = {} +cdef dict _time_type_cache = {} +cdef dict _duration_type_cache = {} + + +cdef timeunit_to_string(TimeUnit unit): + if unit == TimeUnit_SECOND: + return 's' + elif unit == TimeUnit_MILLI: + return 'ms' + elif unit == TimeUnit_MICRO: + return 'us' + elif unit == TimeUnit_NANO: + return 'ns' + + +cdef TimeUnit string_to_timeunit(unit) except *: + if unit == 's': + return TimeUnit_SECOND + elif unit == 'ms': + return TimeUnit_MILLI + elif unit == 'us': + return TimeUnit_MICRO + elif unit == 'ns': + return TimeUnit_NANO + else: + raise ValueError(f"Invalid time unit: {unit!r}") + + +def tzinfo_to_string(tz): + """ + Converts a time zone object into a string indicating the name of a time + zone, one of: + * As used in the Olson time zone database (the "tz database" or + "tzdata"), such as "America/New_York" + * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30 + + Parameters + ---------- + tz : datetime.tzinfo + Time zone object + + Returns + ------- + name : str + Time zone name + """ + return frombytes(GetResultValue(TzinfoToString(tz))) + + +def string_to_tzinfo(name): + """ + Convert a time zone name into a time zone object. + + Supported input strings are: + * As used in the Olson time zone database (the "tz database" or + "tzdata"), such as "America/New_York" + * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30 + + Parameters + ---------- + name: str + Time zone name. 
+ + Returns + ------- + tz : datetime.tzinfo + Time zone object + """ + cdef PyObject* tz = GetResultValue(StringToTzinfo(name.encode('utf-8'))) + return PyObject_to_object(tz) + + +def timestamp(unit, tz=None): + """ + Create instance of timestamp type with resolution and optional time zone. + + Parameters + ---------- + unit : str + one of 's' [second], 'ms' [millisecond], 'us' [microsecond], or 'ns' + [nanosecond] + tz : str, default None + Time zone name. None indicates time zone naive + + Examples + -------- + Create an instance of timestamp type: + + >>> import pyarrow as pa + >>> pa.timestamp('us') + TimestampType(timestamp[us]) + >>> pa.timestamp('s', tz='America/New_York') + TimestampType(timestamp[s, tz=America/New_York]) + >>> pa.timestamp('s', tz='+07:30') + TimestampType(timestamp[s, tz=+07:30]) + + Use timestamp type when creating a scalar object: + + >>> from datetime import datetime + >>> pa.scalar(datetime(2012, 1, 1), type=pa.timestamp('s', tz='UTC')) + + >>> pa.scalar(datetime(2012, 1, 1), type=pa.timestamp('us')) + + + Returns + ------- + timestamp_type : TimestampType + """ + cdef: + TimeUnit unit_code + c_string c_timezone + + unit_code = string_to_timeunit(unit) + + cdef TimestampType out = TimestampType.__new__(TimestampType) + + if tz is None: + out.init(ctimestamp(unit_code)) + if unit_code in _timestamp_type_cache: + return _timestamp_type_cache[unit_code] + _timestamp_type_cache[unit_code] = out + else: + if not isinstance(tz, (bytes, str)): + tz = tzinfo_to_string(tz) + + c_timezone = tobytes(tz) + out.init(ctimestamp(unit_code, c_timezone)) + + return out + + +def time32(unit): + """ + Create instance of 32-bit time (time of day) type with unit resolution. 
+ + Parameters + ---------- + unit : str + one of 's' [second], or 'ms' [millisecond] + + Returns + ------- + type : pyarrow.Time32Type + + Examples + -------- + >>> import pyarrow as pa + >>> pa.time32('s') + Time32Type(time32[s]) + >>> pa.time32('ms') + Time32Type(time32[ms]) + """ + cdef: + TimeUnit unit_code + c_string c_timezone + + if unit == 's': + unit_code = TimeUnit_SECOND + elif unit == 'ms': + unit_code = TimeUnit_MILLI + else: + raise ValueError(f"Invalid time unit for time32: {unit!r}") + + if unit_code in _time_type_cache: + return _time_type_cache[unit_code] + + cdef Time32Type out = Time32Type.__new__(Time32Type) + + out.init(ctime32(unit_code)) + _time_type_cache[unit_code] = out + + return out + + +def time64(unit): + """ + Create instance of 64-bit time (time of day) type with unit resolution. + + Parameters + ---------- + unit : str + One of 'us' [microsecond], or 'ns' [nanosecond]. + + Returns + ------- + type : pyarrow.Time64Type + + Examples + -------- + >>> import pyarrow as pa + >>> pa.time64('us') + Time64Type(time64[us]) + >>> pa.time64('ns') + Time64Type(time64[ns]) + """ + cdef: + TimeUnit unit_code + c_string c_timezone + + if unit == 'us': + unit_code = TimeUnit_MICRO + elif unit == 'ns': + unit_code = TimeUnit_NANO + else: + raise ValueError(f"Invalid time unit for time64: {unit!r}") + + if unit_code in _time_type_cache: + return _time_type_cache[unit_code] + + cdef Time64Type out = Time64Type.__new__(Time64Type) + + out.init(ctime64(unit_code)) + _time_type_cache[unit_code] = out + + return out + + +def duration(unit): + """ + Create instance of a duration type with unit resolution. + + Parameters + ---------- + unit : str + One of 's' [second], 'ms' [millisecond], 'us' [microsecond], or + 'ns' [nanosecond]. 
+ + Returns + ------- + type : pyarrow.DurationType + + Examples + -------- + Create an instance of duration type: + + >>> import pyarrow as pa + >>> pa.duration('us') + DurationType(duration[us]) + >>> pa.duration('s') + DurationType(duration[s]) + + Create an array with duration type: + + >>> pa.array([0, 1, 2], type=pa.duration('s')) + + [ + 0, + 1, + 2 + ] + """ + cdef: + TimeUnit unit_code + + unit_code = string_to_timeunit(unit) + + if unit_code in _duration_type_cache: + return _duration_type_cache[unit_code] + + cdef DurationType out = DurationType.__new__(DurationType) + + out.init(cduration(unit_code)) + _duration_type_cache[unit_code] = out + + return out + + +def month_day_nano_interval(): + """ + Create instance of an interval type representing months, days and + nanoseconds between two dates. + + Examples + -------- + Create an instance of an month_day_nano_interval type: + + >>> import pyarrow as pa + >>> pa.month_day_nano_interval() + DataType(month_day_nano_interval) + + Create a scalar with month_day_nano_interval type: + + >>> pa.scalar((1, 15, -30), type=pa.month_day_nano_interval()) + + """ + return primitive_type(_Type_INTERVAL_MONTH_DAY_NANO) + + +def date32(): + """ + Create instance of 32-bit date (days since UNIX epoch 1970-01-01). + + Examples + -------- + Create an instance of 32-bit date type: + + >>> import pyarrow as pa + >>> pa.date32() + DataType(date32[day]) + + Create a scalar with 32-bit date type: + + >>> from datetime import date + >>> pa.scalar(date(2012, 1, 1), type=pa.date32()) + + """ + return primitive_type(_Type_DATE32) + + +def date64(): + """ + Create instance of 64-bit date (milliseconds since UNIX epoch 1970-01-01). 
+ + Examples + -------- + Create an instance of 64-bit date type: + + >>> import pyarrow as pa + >>> pa.date64() + DataType(date64[ms]) + + Create a scalar with 64-bit date type: + + >>> from datetime import datetime + >>> pa.scalar(datetime(2012, 1, 1), type=pa.date64()) + + """ + return primitive_type(_Type_DATE64) + + +def float16(): + """ + Create half-precision floating point type. + + Examples + -------- + Create an instance of float16 type: + + >>> import pyarrow as pa + >>> pa.float16() + DataType(halffloat) + >>> print(pa.float16()) + halffloat + + Create an array with float16 type: + + >>> arr = np.array([1.5, np.nan], dtype=np.float16) + >>> a = pa.array(arr, type=pa.float16()) + >>> a + + [ + 15872, + 32256 + ] + >>> a.to_pylist() + [np.float16(1.5), np.float16(nan)] + """ + return primitive_type(_Type_HALF_FLOAT) + + +def float32(): + """ + Create single-precision floating point type. + + Examples + -------- + Create an instance of float32 type: + + >>> import pyarrow as pa + >>> pa.float32() + DataType(float) + >>> print(pa.float32()) + float + + Create an array with float32 type: + + >>> pa.array([0.0, 1.0, 2.0], type=pa.float32()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_FLOAT) + + +def float64(): + """ + Create double-precision floating point type. + + Examples + -------- + Create an instance of float64 type: + + >>> import pyarrow as pa + >>> pa.float64() + DataType(double) + >>> print(pa.float64()) + double + + Create an array with float64 type: + + >>> pa.array([0.0, 1.0, 2.0], type=pa.float64()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_DOUBLE) + + +cpdef DataType decimal128(int precision, int scale=0): + """ + Create decimal type with precision and scale and 128-bit width. + + Arrow decimals are fixed-point decimal numbers encoded as a scaled + integer. 
The precision is the number of significant digits that the + decimal type can represent; the scale is the number of digits after + the decimal point (note the scale can be negative). + + As an example, ``decimal128(7, 3)`` can exactly represent the numbers + 1234.567 and -1234.567 (encoded internally as the 128-bit integers + 1234567 and -1234567, respectively), but neither 12345.67 nor 123.4567. + + ``decimal128(5, -3)`` can exactly represent the number 12345000 + (encoded internally as the 128-bit integer 12345), but neither + 123450000 nor 1234500. + + If you need a precision higher than 38 significant digits, consider + using ``decimal256``. + + Parameters + ---------- + precision : int + Must be between 1 and 38 + scale : int + + Returns + ------- + decimal_type : Decimal128Type + + Examples + -------- + Create an instance of decimal type: + + >>> import pyarrow as pa + >>> pa.decimal128(5, 2) + Decimal128Type(decimal128(5, 2)) + + Create an array with decimal type: + + >>> import decimal + >>> a = decimal.Decimal('123.45') + >>> pa.array([a], pa.decimal128(5, 2)) + + [ + 123.45 + ] + """ + cdef shared_ptr[CDataType] decimal_type + if precision < 1 or precision > 38: + raise ValueError("precision should be between 1 and 38") + decimal_type.reset(new CDecimal128Type(precision, scale)) + return pyarrow_wrap_data_type(decimal_type) + + +cpdef DataType decimal256(int precision, int scale=0): + """ + Create decimal type with precision and scale and 256-bit width. + + Arrow decimals are fixed-point decimal numbers encoded as a scaled + integer. The precision is the number of significant digits that the + decimal type can represent; the scale is the number of digits after + the decimal point (note the scale can be negative). + + For most use cases, the maximum precision offered by ``decimal128`` + is sufficient, and it will result in a more compact and more efficient + encoding. ``decimal256`` is useful if you need a precision higher + than 38 significant digits. 
+ + Parameters + ---------- + precision : int + Must be between 1 and 76 + scale : int + + Returns + ------- + decimal_type : Decimal256Type + """ + cdef shared_ptr[CDataType] decimal_type + if precision < 1 or precision > 76: + raise ValueError("precision should be between 1 and 76") + decimal_type.reset(new CDecimal256Type(precision, scale)) + return pyarrow_wrap_data_type(decimal_type) + + +def string(): + """ + Create UTF8 variable-length string type. + + Examples + -------- + Create an instance of a string type: + + >>> import pyarrow as pa + >>> pa.string() + DataType(string) + + and use the string type to create an array: + + >>> pa.array(['foo', 'bar', 'baz'], type=pa.string()) + + [ + "foo", + "bar", + "baz" + ] + """ + return primitive_type(_Type_STRING) + + +def utf8(): + """ + Alias for string(). + + Examples + -------- + Create an instance of a string type: + + >>> import pyarrow as pa + >>> pa.utf8() + DataType(string) + + and use the string type to create an array: + + >>> pa.array(['foo', 'bar', 'baz'], type=pa.utf8()) + + [ + "foo", + "bar", + "baz" + ] + """ + return string() + + +def binary(int length=-1): + """ + Create variable-length or fixed size binary type. + + Parameters + ---------- + length : int, optional, default -1 + If length == -1 then return a variable length binary type. If length is + greater than or equal to 0 then return a fixed size binary type of + width `length`. 
+ + Examples + -------- + Create an instance of a variable-length binary type: + + >>> import pyarrow as pa + >>> pa.binary() + DataType(binary) + + and use the variable-length binary type to create an array: + + >>> pa.array(['foo', 'bar', 'baz'], type=pa.binary()) + + [ + 666F6F, + 626172, + 62617A + ] + + Create an instance of a fixed-size binary type: + + >>> pa.binary(3) + FixedSizeBinaryType(fixed_size_binary[3]) + + and use the fixed-length binary type to create an array: + + >>> pa.array(['foo', 'bar', 'baz'], type=pa.binary(3)) + + [ + 666F6F, + 626172, + 62617A + ] + """ + if length == -1: + return primitive_type(_Type_BINARY) + + cdef shared_ptr[CDataType] fixed_size_binary_type + fixed_size_binary_type.reset(new CFixedSizeBinaryType(length)) + return pyarrow_wrap_data_type(fixed_size_binary_type) + + +def large_binary(): + """ + Create large variable-length binary type. + + This data type may not be supported by all Arrow implementations. Unless + you need to represent data larger than 2GB, you should prefer binary(). + + Examples + -------- + Create an instance of large variable-length binary type: + + >>> import pyarrow as pa + >>> pa.large_binary() + DataType(large_binary) + + and use the type to create an array: + + >>> pa.array(['foo', 'bar', 'baz'], type=pa.large_binary()) + + [ + 666F6F, + 626172, + 62617A + ] + """ + return primitive_type(_Type_LARGE_BINARY) + + +def large_string(): + """ + Create large UTF8 variable-length string type. + + This data type may not be supported by all Arrow implementations. Unless + you need to represent data larger than 2GB, you should prefer string(). + + Examples + -------- + Create an instance of large UTF8 variable-length binary type: + + >>> import pyarrow as pa + >>> pa.large_string() + DataType(large_string) + + and use the type to create an array: + + >>> pa.array(['foo', 'bar'] * 50, type=pa.large_string()) + + [ + "foo", + "bar", + ... 
+ "foo", + "bar" + ] + """ + return primitive_type(_Type_LARGE_STRING) + + +def large_utf8(): + """ + Alias for large_string(). + + Examples + -------- + Create an instance of large UTF8 variable-length binary type: + + >>> import pyarrow as pa + >>> pa.large_utf8() + DataType(large_string) + + and use the type to create an array: + + >>> pa.array(['foo', 'bar'] * 50, type=pa.large_utf8()) + + [ + "foo", + "bar", + ... + "foo", + "bar" + ] + """ + return large_string() + + +def binary_view(): + """ + Create a variable-length binary view type. + + Examples + -------- + Create an instance of a string type: + + >>> import pyarrow as pa + >>> pa.binary_view() + DataType(binary_view) + """ + return primitive_type(_Type_BINARY_VIEW) + + +def string_view(): + """ + Create UTF8 variable-length string view type. + + Examples + -------- + Create an instance of a string type: + + >>> import pyarrow as pa + >>> pa.string_view() + DataType(string_view) + """ + return primitive_type(_Type_STRING_VIEW) + + +def list_(value_type, int list_size=-1): + """ + Create ListType instance from child data type or field. + + Parameters + ---------- + value_type : DataType or Field + list_size : int, optional, default -1 + If length == -1 then return a variable length list type. If length is + greater than or equal to 0 then return a fixed size list type. 
+ + Returns + ------- + list_type : DataType + + Examples + -------- + Create an instance of ListType: + + >>> import pyarrow as pa + >>> pa.list_(pa.string()) + ListType(list) + >>> pa.list_(pa.int32(), 2) + FixedSizeListType(fixed_size_list[2]) + + Use the ListType to create a scalar: + + >>> pa.scalar(['foo', None], type=pa.list_(pa.string(), 2)) + + + or an array: + + >>> pa.array([[1, 2], [3, 4]], pa.list_(pa.int32(), 2)) + + [ + [ + 1, + 2 + ], + [ + 3, + 4 + ] + ] + """ + cdef: + Field _field + shared_ptr[CDataType] list_type + + if isinstance(value_type, DataType): + _field = field('item', value_type) + elif isinstance(value_type, Field): + _field = value_type + else: + raise TypeError('List requires DataType or Field') + + if list_size == -1: + list_type.reset(new CListType(_field.sp_field)) + else: + if list_size < 0: + raise ValueError("list_size should be a positive integer") + list_type.reset(new CFixedSizeListType(_field.sp_field, list_size)) + + return pyarrow_wrap_data_type(list_type) + + +cpdef LargeListType large_list(value_type): + """ + Create LargeListType instance from child data type or field. + + This data type may not be supported by all Arrow implementations. + Unless you need to represent data larger than 2**31 elements, you should + prefer list_(). + + Parameters + ---------- + value_type : DataType or Field + + Returns + ------- + list_type : DataType + + Examples + -------- + Create an instance of LargeListType: + + >>> import pyarrow as pa + >>> pa.large_list(pa.int8()) + LargeListType(large_list) + + Use the LargeListType to create an array: + + >>> pa.array([[-1, 3]] * 5, type=pa.large_list(pa.int8())) + + [ + [ + -1, + 3 + ], + [ + -1, + 3 + ], + ... 
+ """ + cdef: + DataType data_type + Field _field + shared_ptr[CDataType] list_type + LargeListType out = LargeListType.__new__(LargeListType) + + if isinstance(value_type, DataType): + _field = field('item', value_type) + elif isinstance(value_type, Field): + _field = value_type + else: + raise TypeError('List requires DataType or Field') + + list_type.reset(new CLargeListType(_field.sp_field)) + out.init(list_type) + return out + + +cpdef ListViewType list_view(value_type): + """ + Create ListViewType instance from child data type or field. + + This data type may not be supported by all Arrow implementations + because it is an alternative to the ListType. + + Parameters + ---------- + value_type : DataType or Field + + Returns + ------- + list_view_type : DataType + + Examples + -------- + Create an instance of ListViewType: + + >>> import pyarrow as pa + >>> pa.list_view(pa.string()) + ListViewType(list_view) + """ + cdef: + Field _field + shared_ptr[CDataType] list_view_type + + if isinstance(value_type, DataType): + _field = field('item', value_type) + elif isinstance(value_type, Field): + _field = value_type + else: + raise TypeError('ListView requires DataType or Field') + + list_view_type = CMakeListViewType(_field.sp_field) + return pyarrow_wrap_data_type(list_view_type) + + +cpdef LargeListViewType large_list_view(value_type): + """ + Create LargeListViewType instance from child data type or field. + + This data type may not be supported by all Arrow implementations + because it is an alternative to the ListType. 
+ + Parameters + ---------- + value_type : DataType or Field + + Returns + ------- + list_view_type : DataType + + Examples + -------- + Create an instance of LargeListViewType: + + >>> import pyarrow as pa + >>> pa.large_list_view(pa.int8()) + LargeListViewType(large_list_view) + """ + cdef: + Field _field + shared_ptr[CDataType] list_view_type + + if isinstance(value_type, DataType): + _field = field('item', value_type) + elif isinstance(value_type, Field): + _field = value_type + else: + raise TypeError('LargeListView requires DataType or Field') + + list_view_type = CMakeLargeListViewType(_field.sp_field) + return pyarrow_wrap_data_type(list_view_type) + + +cpdef MapType map_(key_type, item_type, keys_sorted=False): + """ + Create MapType instance from key and item data types or fields. + + Parameters + ---------- + key_type : DataType or Field + item_type : DataType or Field + keys_sorted : bool + + Returns + ------- + map_type : DataType + + Examples + -------- + Create an instance of MapType: + + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32()) + MapType(map) + >>> pa.map_(pa.string(), pa.int32(), keys_sorted=True) + MapType(map) + + Use MapType to create an array: + + >>> data = [[{'key': 'a', 'value': 1}, {'key': 'b', 'value': 2}], [{'key': 'c', 'value': 3}]] + >>> pa.array(data, type=pa.map_(pa.string(), pa.int32(), keys_sorted=True)) + + [ + keys: + [ + "a", + "b" + ] + values: + [ + 1, + 2 + ], + keys: + [ + "c" + ] + values: + [ + 3 + ] + ] + """ + cdef: + Field _key_field + Field _item_field + shared_ptr[CDataType] map_type + MapType out = MapType.__new__(MapType) + + if isinstance(key_type, Field): + if key_type.nullable: + raise TypeError('Map key field should be non-nullable') + _key_field = key_type + else: + _key_field = field('key', ensure_type(key_type, allow_none=False), + nullable=False) + + if isinstance(item_type, Field): + _item_field = item_type + else: + _item_field = field('value', ensure_type(item_type, 
allow_none=False)) + + map_type.reset(new CMapType(_key_field.sp_field, _item_field.sp_field, + keys_sorted)) + out.init(map_type) + return out + + +cpdef DictionaryType dictionary(index_type, value_type, bint ordered=False): + """ + Dictionary (categorical, or simply encoded) type. + + Parameters + ---------- + index_type : DataType + value_type : DataType + ordered : bool + + Returns + ------- + type : DictionaryType + + Examples + -------- + Create an instance of dictionary type: + + >>> import pyarrow as pa + >>> pa.dictionary(pa.int64(), pa.utf8()) + DictionaryType(dictionary) + + Use dictionary type to create an array: + + >>> pa.array(["a", "b", None, "d"], pa.dictionary(pa.int64(), pa.utf8())) + + ... + -- dictionary: + [ + "a", + "b", + "d" + ] + -- indices: + [ + 0, + 1, + null, + 2 + ] + """ + cdef: + DataType _index_type = ensure_type(index_type, allow_none=False) + DataType _value_type = ensure_type(value_type, allow_none=False) + DictionaryType out = DictionaryType.__new__(DictionaryType) + shared_ptr[CDataType] dict_type + + if _index_type.id not in { + Type_INT8, Type_INT16, Type_INT32, Type_INT64, + Type_UINT8, Type_UINT16, Type_UINT32, Type_UINT64, + }: + raise TypeError("The dictionary index type should be integer.") + + dict_type.reset(new CDictionaryType(_index_type.sp_type, + _value_type.sp_type, ordered == 1)) + out.init(dict_type) + return out + + +def struct(fields): + """ + Create StructType instance from fields. + + A struct is a nested type parameterized by an ordered sequence of types + (which can all be distinct), called its fields. + + Parameters + ---------- + fields : iterable of Fields or tuples, or mapping of strings to DataTypes + Each field must have a UTF8-encoded name, and these field names are + part of the type metadata. + + Examples + -------- + Create an instance of StructType from an iterable of tuples: + + >>> import pyarrow as pa + >>> fields = [ + ... ('f1', pa.int32()), + ... ('f2', pa.string()), + ... 
] + >>> struct_type = pa.struct(fields) + >>> struct_type + StructType(struct) + + Retrieve a field from a StructType: + + >>> struct_type[0] + pyarrow.Field + >>> struct_type['f1'] + pyarrow.Field + + Create an instance of StructType from an iterable of Fields: + + >>> fields = [ + ... pa.field('f1', pa.int32()), + ... pa.field('f2', pa.string(), nullable=False), + ... ] + >>> pa.struct(fields) + StructType(struct) + + Returns + ------- + type : DataType + """ + cdef: + Field py_field + vector[shared_ptr[CField]] c_fields + cdef shared_ptr[CDataType] struct_type + + if isinstance(fields, Mapping): + fields = fields.items() + + for item in fields: + if isinstance(item, tuple): + py_field = field(*item) + else: + py_field = item + c_fields.push_back(py_field.sp_field) + + struct_type.reset(new CStructType(c_fields)) + return pyarrow_wrap_data_type(struct_type) + + +cdef _extract_union_params(child_fields, type_codes, + vector[shared_ptr[CField]]* c_fields, + vector[int8_t]* c_type_codes): + cdef: + Field child_field + + for child_field in child_fields: + c_fields[0].push_back(child_field.sp_field) + + if type_codes is not None: + if len(type_codes) != (c_fields.size()): + raise ValueError("type_codes should have the same length " + "as fields") + for code in type_codes: + c_type_codes[0].push_back(code) + else: + c_type_codes[0] = range(c_fields.size()) + + +def sparse_union(child_fields, type_codes=None): + """ + Create SparseUnionType from child fields. + + A sparse union is a nested type where each logical value is taken from + a single child. A buffer of 8-bit type ids indicates which child + a given logical value is to be taken from. + + In a sparse union, each child array should have the same length as the + union array, regardless of the actual number of union values that + refer to it. + + Parameters + ---------- + child_fields : sequence of Field values + Each field must have a UTF8-encoded name, and these field names are + part of the type metadata. 
+ type_codes : list of integers, default None + + Returns + ------- + type : SparseUnionType + """ + cdef: + vector[shared_ptr[CField]] c_fields + vector[int8_t] c_type_codes + + _extract_union_params(child_fields, type_codes, + &c_fields, &c_type_codes) + + return pyarrow_wrap_data_type( + CMakeSparseUnionType(move(c_fields), move(c_type_codes))) + + +def dense_union(child_fields, type_codes=None): + """ + Create DenseUnionType from child fields. + + A dense union is a nested type where each logical value is taken from + a single child, at a specific offset. A buffer of 8-bit type ids + indicates which child a given logical value is to be taken from, + and a buffer of 32-bit offsets indicates at which physical position + in the given child array the logical value is to be taken from. + + Unlike a sparse union, a dense union allows encoding only the child array + values which are actually referred to by the union array. This is + counterbalanced by the additional footprint of the offsets buffer, and + the additional indirection cost when looking up values. + + Parameters + ---------- + child_fields : sequence of Field values + Each field must have a UTF8-encoded name, and these field names are + part of the type metadata. + type_codes : list of integers, default None + + Returns + ------- + type : DenseUnionType + """ + cdef: + vector[shared_ptr[CField]] c_fields + vector[int8_t] c_type_codes + + _extract_union_params(child_fields, type_codes, + &c_fields, &c_type_codes) + + return pyarrow_wrap_data_type( + CMakeDenseUnionType(move(c_fields), move(c_type_codes))) + + +def union(child_fields, mode, type_codes=None): + """ + Create UnionType from child fields. + + A union is a nested type where each logical value is taken from a + single child. A buffer of 8-bit type ids indicates which child + a given logical value is to be taken from. + + Unions come in two flavors: sparse and dense + (see also `pyarrow.sparse_union` and `pyarrow.dense_union`). 
+ + Parameters + ---------- + child_fields : sequence of Field values + Each field must have a UTF8-encoded name, and these field names are + part of the type metadata. + mode : str + Must be 'sparse' or 'dense' + type_codes : list of integers, default None + + Returns + ------- + type : UnionType + """ + if isinstance(mode, int): + if mode not in (_UnionMode_SPARSE, _UnionMode_DENSE): + raise ValueError("Invalid union mode {0!r}".format(mode)) + else: + if mode == 'sparse': + mode = _UnionMode_SPARSE + elif mode == 'dense': + mode = _UnionMode_DENSE + else: + raise ValueError("Invalid union mode {0!r}".format(mode)) + + if mode == _UnionMode_SPARSE: + return sparse_union(child_fields, type_codes) + else: + return dense_union(child_fields, type_codes) + + +def run_end_encoded(run_end_type, value_type): + """ + Create RunEndEncodedType from run-end and value types. + + Parameters + ---------- + run_end_type : pyarrow.DataType + The integer type of the run_ends array. Must be 'int16', 'int32', or 'int64'. + value_type : pyarrow.DataType + The type of the values array. + + Returns + ------- + type : RunEndEncodedType + """ + cdef: + DataType _run_end_type = ensure_type(run_end_type, allow_none=False) + DataType _value_type = ensure_type(value_type, allow_none=False) + shared_ptr[CDataType] ree_type + + if not _run_end_type.type.id() in [_Type_INT16, _Type_INT32, _Type_INT64]: + raise ValueError("The run_end_type should be 'int16', 'int32', or 'int64'") + ree_type = CMakeRunEndEncodedType(_run_end_type.sp_type, _value_type.sp_type) + return pyarrow_wrap_data_type(ree_type) + + +def fixed_shape_tensor(DataType value_type, shape, dim_names=None, permutation=None): + """ + Create instance of fixed shape tensor extension type with shape and optional + names of tensor dimensions and indices of the desired logical + ordering of dimensions. + + Parameters + ---------- + value_type : DataType + Data type of individual tensor elements. 
+ shape : tuple or list of integers + The physical shape of the contained tensors. + dim_names : tuple or list of strings, default None + Explicit names to tensor dimensions. + permutation : tuple or list integers, default None + Indices of the desired ordering of the original dimensions. + The indices contain a permutation of the values ``[0, 1, .., N-1]`` where + N is the number of dimensions. The permutation indicates which dimension + of the logical layout corresponds to which dimension of the physical tensor. + For more information on this parameter see + :ref:`fixed_shape_tensor_extension`. + + Examples + -------- + Create an instance of fixed shape tensor extension type: + + >>> import pyarrow as pa + >>> tensor_type = pa.fixed_shape_tensor(pa.int32(), [2, 2]) + >>> tensor_type + FixedShapeTensorType(extension) + + Inspect the data type: + + >>> tensor_type.value_type + DataType(int32) + >>> tensor_type.shape + [2, 2] + + Create a table with fixed shape tensor extension array: + + >>> arr = [[1, 2, 3, 4], [10, 20, 30, 40], [100, 200, 300, 400]] + >>> storage = pa.array(arr, pa.list_(pa.int32(), 4)) + >>> tensor = pa.ExtensionArray.from_storage(tensor_type, storage) + >>> pa.table([tensor], names=["tensor_array"]) + pyarrow.Table + tensor_array: extension + ---- + tensor_array: [[[1,2,3,4],[10,20,30,40],[100,200,300,400]]] + + Create an instance of fixed shape tensor extension type with names + of tensor dimensions: + + >>> tensor_type = pa.fixed_shape_tensor(pa.int8(), (2, 2, 3), + ... dim_names=['C', 'H', 'W']) + >>> tensor_type.dim_names + ['C', 'H', 'W'] + + Create an instance of fixed shape tensor extension type with + permutation: + + >>> tensor_type = pa.fixed_shape_tensor(pa.int8(), (2, 2, 3), + ... 
permutation=[0, 2, 1]) + >>> tensor_type.permutation + [0, 2, 1] + + Returns + ------- + type : FixedShapeTensorType + """ + + cdef: + vector[int64_t] c_shape + vector[int64_t] c_permutation + vector[c_string] c_dim_names + shared_ptr[CDataType] c_tensor_ext_type + + assert value_type is not None + assert shape is not None + + for i in shape: + c_shape.push_back(i) + + if permutation is not None: + for i in permutation: + c_permutation.push_back(i) + + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + cdef FixedShapeTensorType out = FixedShapeTensorType.__new__(FixedShapeTensorType) + + with nogil: + c_tensor_ext_type = GetResultValue(CFixedShapeTensorType.Make( + value_type.sp_type, c_shape, c_permutation, c_dim_names)) + + out.init(c_tensor_ext_type) + + return out + + +cdef dict _type_aliases = { + 'null': null, + 'bool': bool_, + 'boolean': bool_, + 'i1': int8, + 'int8': int8, + 'i2': int16, + 'int16': int16, + 'i4': int32, + 'int32': int32, + 'i8': int64, + 'int64': int64, + 'u1': uint8, + 'uint8': uint8, + 'u2': uint16, + 'uint16': uint16, + 'u4': uint32, + 'uint32': uint32, + 'u8': uint64, + 'uint64': uint64, + 'f2': float16, + 'halffloat': float16, + 'float16': float16, + 'f4': float32, + 'float': float32, + 'float32': float32, + 'f8': float64, + 'double': float64, + 'float64': float64, + 'string': string, + 'str': string, + 'utf8': string, + 'binary': binary, + 'large_string': large_string, + 'large_str': large_string, + 'large_utf8': large_string, + 'large_binary': large_binary, + 'binary_view': binary_view, + 'string_view': string_view, + 'date32': date32, + 'date64': date64, + 'date32[day]': date32, + 'date64[ms]': date64, + 'time32[s]': time32('s'), + 'time32[ms]': time32('ms'), + 'time64[us]': time64('us'), + 'time64[ns]': time64('ns'), + 'timestamp[s]': timestamp('s'), + 'timestamp[ms]': timestamp('ms'), + 'timestamp[us]': timestamp('us'), + 'timestamp[ns]': timestamp('ns'), + 'duration[s]': duration('s'), + 
'duration[ms]': duration('ms'), + 'duration[us]': duration('us'), + 'duration[ns]': duration('ns'), + 'month_day_nano_interval': month_day_nano_interval(), +} + + +def type_for_alias(name): + """ + Return DataType given a string alias if one exists. + + Parameters + ---------- + name : str + The alias of the DataType that should be retrieved. + + Returns + ------- + type : DataType + """ + name = name.lower() + try: + alias = _type_aliases[name] + except KeyError: + raise ValueError('No type alias for {0}'.format(name)) + + if isinstance(alias, DataType): + return alias + return alias() + + +cpdef DataType ensure_type(object ty, bint allow_none=False): + if allow_none and ty is None: + return None + elif isinstance(ty, DataType): + return ty + elif isinstance(ty, str): + return type_for_alias(ty) + else: + raise TypeError('DataType expected, got {!r}'.format(type(ty))) + + +def schema(fields, metadata=None): + """ + Construct pyarrow.Schema from collection of fields. + + Parameters + ---------- + fields : iterable of Fields or tuples, or mapping of strings to DataTypes + Can also pass an object that implements the Arrow PyCapsule Protocol + for schemas (has an ``__arrow_c_schema__`` method). + metadata : dict, default None + Keys and values must be coercible to bytes. + + Examples + -------- + Create a Schema from iterable of tuples: + + >>> import pyarrow as pa + >>> pa.schema([ + ... ('some_int', pa.int32()), + ... ('some_string', pa.string()), + ... pa.field('some_required_string', pa.string(), nullable=False) + ... ]) + some_int: int32 + some_string: string + some_required_string: string not null + + Create a Schema from iterable of Fields: + + >>> pa.schema([ + ... pa.field('some_int', pa.int32()), + ... pa.field('some_string', pa.string()) + ... 
]) + some_int: int32 + some_string: string + + Returns + ------- + schema : pyarrow.Schema + """ + cdef: + shared_ptr[const CKeyValueMetadata] c_meta + shared_ptr[CSchema] c_schema + Schema result + Field py_field + vector[shared_ptr[CField]] c_fields + + if isinstance(fields, Mapping): + fields = fields.items() + elif hasattr(fields, "__arrow_c_schema__"): + result = Schema._import_from_c_capsule(fields.__arrow_c_schema__()) + if metadata is not None: + result = result.with_metadata(metadata) + return result + + for item in fields: + if isinstance(item, tuple): + py_field = field(*item) + else: + py_field = item + if py_field is None: + raise TypeError("field or tuple expected, got None") + c_fields.push_back(py_field.sp_field) + + metadata = ensure_metadata(metadata, allow_none=True) + c_meta = pyarrow_unwrap_metadata(metadata) + + c_schema.reset(new CSchema(c_fields, c_meta)) + result = Schema.__new__(Schema) + result.init_schema(c_schema) + + return result + + +def from_numpy_dtype(object dtype): + """ + Convert NumPy dtype to pyarrow.DataType. + + Parameters + ---------- + dtype : the numpy dtype to convert + + + Examples + -------- + Create a pyarrow DataType from NumPy dtype: + + >>> import pyarrow as pa + >>> import numpy as np + >>> pa.from_numpy_dtype(np.dtype('float16')) + DataType(halffloat) + >>> pa.from_numpy_dtype('U') + DataType(string) + >>> pa.from_numpy_dtype(bool) + DataType(bool) + >>> pa.from_numpy_dtype(np.str_) + DataType(string) + """ + dtype = np.dtype(dtype) + return pyarrow_wrap_data_type(GetResultValue(NumPyDtypeToArrow(dtype))) + + +def is_boolean_value(object obj): + """ + Check if the object is a boolean. + + Parameters + ---------- + obj : object + The object to check + """ + return IsPyBool(obj) + + +def is_integer_value(object obj): + """ + Check if the object is an integer. 
+ + Parameters + ---------- + obj : object + The object to check + """ + return IsPyInt(obj) + + +def is_float_value(object obj): + """ + Check if the object is a float. + + Parameters + ---------- + obj : object + The object to check + """ + return IsPyFloat(obj) + + +cdef class _ExtensionRegistryNanny(_Weakrefable): + # Keep the registry alive until we have unregistered PyExtensionType + cdef: + shared_ptr[CExtensionTypeRegistry] registry + + def __cinit__(self): + self.registry = CExtensionTypeRegistry.GetGlobalRegistry() + + def release_registry(self): + self.registry.reset() + + +_registry_nanny = _ExtensionRegistryNanny() + + +def _register_py_extension_type(): + cdef: + DataType storage_type + shared_ptr[CExtensionType] cpy_ext_type + c_string c_extension_name = tobytes("arrow.py_extension_type") + + # Make a dummy C++ ExtensionType + storage_type = null() + check_status(CPyExtensionType.FromClass( + storage_type.sp_type, c_extension_name, PyExtensionType, + &cpy_ext_type)) + check_status( + RegisterPyExtensionType( cpy_ext_type)) + + +def _unregister_py_extension_types(): + # This needs to be done explicitly before the Python interpreter is + # finalized. If the C++ type is destroyed later in the process + # teardown stage, it will invoke CPython APIs such as Py_DECREF + # with a destroyed interpreter. 
+ unregister_extension_type("arrow.py_extension_type") + for ext_type in _python_extension_types_registry: + try: + unregister_extension_type(ext_type.extension_name) + except KeyError: + pass + _registry_nanny.release_registry() + + +_register_py_extension_type() +atexit.register(_unregister_py_extension_types) + + +# +# PyCapsule export utilities +# + +cdef void pycapsule_schema_deleter(object schema_capsule) noexcept: + cdef ArrowSchema* schema = PyCapsule_GetPointer( + schema_capsule, 'arrow_schema' + ) + if schema.release != NULL: + schema.release(schema) + + free(schema) + +cdef object alloc_c_schema(ArrowSchema** c_schema): + c_schema[0] = malloc(sizeof(ArrowSchema)) + # Ensure the capsule destructor doesn't call a random release pointer + c_schema[0].release = NULL + return PyCapsule_New(c_schema[0], 'arrow_schema', &pycapsule_schema_deleter) + + +cdef void pycapsule_array_deleter(object array_capsule) noexcept: + cdef: + ArrowArray* array + # Do not invoke the deleter on a used/moved capsule + array = cpython.PyCapsule_GetPointer( + array_capsule, 'arrow_array' + ) + if array.release != NULL: + array.release(array) + + free(array) + +cdef object alloc_c_array(ArrowArray** c_array): + c_array[0] = malloc(sizeof(ArrowArray)) + # Ensure the capsule destructor doesn't call a random release pointer + c_array[0].release = NULL + return PyCapsule_New(c_array[0], 'arrow_array', &pycapsule_array_deleter) + + +cdef void pycapsule_stream_deleter(object stream_capsule) noexcept: + cdef: + ArrowArrayStream* stream + # Do not invoke the deleter on a used/moved capsule + stream = PyCapsule_GetPointer( + stream_capsule, 'arrow_array_stream' + ) + if stream.release != NULL: + stream.release(stream) + + free(stream) + +cdef object alloc_c_stream(ArrowArrayStream** c_stream): + c_stream[0] = malloc(sizeof(ArrowArrayStream)) + # Ensure the capsule destructor doesn't call a random release pointer + c_stream[0].release = NULL + return PyCapsule_New(c_stream[0], 
'arrow_array_stream', &pycapsule_stream_deleter) + + +cdef void pycapsule_device_array_deleter(object array_capsule) noexcept: + cdef: + ArrowDeviceArray* device_array + # Do not invoke the deleter on a used/moved capsule + device_array = cpython.PyCapsule_GetPointer( + array_capsule, 'arrow_device_array' + ) + if device_array.array.release != NULL: + device_array.array.release(&device_array.array) + + free(device_array) + + +cdef object alloc_c_device_array(ArrowDeviceArray** c_array): + c_array[0] = malloc(sizeof(ArrowDeviceArray)) + # Ensure the capsule destructor doesn't call a random release pointer + c_array[0].array.release = NULL + return PyCapsule_New( + c_array[0], 'arrow_device_array', &pycapsule_device_array_deleter) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/types.py b/parrot/lib/python3.10/site-packages/pyarrow/types.py new file mode 100644 index 0000000000000000000000000000000000000000..66b1ec33953a98a4d4f7ff95a72e1c60e5549abe --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/types.py @@ -0,0 +1,314 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# Tools for dealing with Arrow type metadata in Python + + +from pyarrow.lib import (is_boolean_value, # noqa + is_integer_value, + is_float_value) + +import pyarrow.lib as lib +from pyarrow.util import doc + + +_SIGNED_INTEGER_TYPES = {lib.Type_INT8, lib.Type_INT16, lib.Type_INT32, + lib.Type_INT64} +_UNSIGNED_INTEGER_TYPES = {lib.Type_UINT8, lib.Type_UINT16, lib.Type_UINT32, + lib.Type_UINT64} +_INTEGER_TYPES = _SIGNED_INTEGER_TYPES | _UNSIGNED_INTEGER_TYPES +_FLOATING_TYPES = {lib.Type_HALF_FLOAT, lib.Type_FLOAT, lib.Type_DOUBLE} +_DECIMAL_TYPES = {lib.Type_DECIMAL128, lib.Type_DECIMAL256} +_DATE_TYPES = {lib.Type_DATE32, lib.Type_DATE64} +_TIME_TYPES = {lib.Type_TIME32, lib.Type_TIME64} +_INTERVAL_TYPES = {lib.Type_INTERVAL_MONTH_DAY_NANO} +_TEMPORAL_TYPES = ({lib.Type_TIMESTAMP, + lib.Type_DURATION} | _TIME_TYPES | _DATE_TYPES | + _INTERVAL_TYPES) +_UNION_TYPES = {lib.Type_SPARSE_UNION, lib.Type_DENSE_UNION} +_NESTED_TYPES = {lib.Type_LIST, lib.Type_FIXED_SIZE_LIST, lib.Type_LARGE_LIST, + lib.Type_LIST_VIEW, lib.Type_LARGE_LIST_VIEW, + lib.Type_STRUCT, lib.Type_MAP} | _UNION_TYPES + + +@doc(datatype="null") +def is_null(t): + """ + Return True if value is an instance of type: {datatype}. 
+ + Parameters + ---------- + t : DataType + """ + return t.id == lib.Type_NA + + +@doc(is_null, datatype="boolean") +def is_boolean(t): + return t.id == lib.Type_BOOL + + +@doc(is_null, datatype="any integer") +def is_integer(t): + return t.id in _INTEGER_TYPES + + +@doc(is_null, datatype="signed integer") +def is_signed_integer(t): + return t.id in _SIGNED_INTEGER_TYPES + + +@doc(is_null, datatype="unsigned integer") +def is_unsigned_integer(t): + return t.id in _UNSIGNED_INTEGER_TYPES + + +@doc(is_null, datatype="int8") +def is_int8(t): + return t.id == lib.Type_INT8 + + +@doc(is_null, datatype="int16") +def is_int16(t): + return t.id == lib.Type_INT16 + + +@doc(is_null, datatype="int32") +def is_int32(t): + return t.id == lib.Type_INT32 + + +@doc(is_null, datatype="int64") +def is_int64(t): + return t.id == lib.Type_INT64 + + +@doc(is_null, datatype="uint8") +def is_uint8(t): + return t.id == lib.Type_UINT8 + + +@doc(is_null, datatype="uint16") +def is_uint16(t): + return t.id == lib.Type_UINT16 + + +@doc(is_null, datatype="uint32") +def is_uint32(t): + return t.id == lib.Type_UINT32 + + +@doc(is_null, datatype="uint64") +def is_uint64(t): + return t.id == lib.Type_UINT64 + + +@doc(is_null, datatype="floating point numeric") +def is_floating(t): + return t.id in _FLOATING_TYPES + + +@doc(is_null, datatype="float16 (half-precision)") +def is_float16(t): + return t.id == lib.Type_HALF_FLOAT + + +@doc(is_null, datatype="float32 (single precision)") +def is_float32(t): + return t.id == lib.Type_FLOAT + + +@doc(is_null, datatype="float64 (double precision)") +def is_float64(t): + return t.id == lib.Type_DOUBLE + + +@doc(is_null, datatype="list") +def is_list(t): + return t.id == lib.Type_LIST + + +@doc(is_null, datatype="large list") +def is_large_list(t): + return t.id == lib.Type_LARGE_LIST + + +@doc(is_null, datatype="fixed size list") +def is_fixed_size_list(t): + return t.id == lib.Type_FIXED_SIZE_LIST + + +@doc(is_null, datatype="list view") +def 
is_list_view(t): + return t.id == lib.Type_LIST_VIEW + + +@doc(is_null, datatype="large list view") +def is_large_list_view(t): + return t.id == lib.Type_LARGE_LIST_VIEW + + +@doc(is_null, datatype="struct") +def is_struct(t): + return t.id == lib.Type_STRUCT + + +@doc(is_null, datatype="union") +def is_union(t): + return t.id in _UNION_TYPES + + +@doc(is_null, datatype="nested type") +def is_nested(t): + return t.id in _NESTED_TYPES + + +@doc(is_null, datatype="run-end encoded") +def is_run_end_encoded(t): + return t.id == lib.Type_RUN_END_ENCODED + + +@doc(is_null, datatype="date, time, timestamp or duration") +def is_temporal(t): + return t.id in _TEMPORAL_TYPES + + +@doc(is_null, datatype="timestamp") +def is_timestamp(t): + return t.id == lib.Type_TIMESTAMP + + +@doc(is_null, datatype="duration") +def is_duration(t): + return t.id == lib.Type_DURATION + + +@doc(is_null, datatype="time") +def is_time(t): + return t.id in _TIME_TYPES + + +@doc(is_null, datatype="time32") +def is_time32(t): + return t.id == lib.Type_TIME32 + + +@doc(is_null, datatype="time64") +def is_time64(t): + return t.id == lib.Type_TIME64 + + +@doc(is_null, datatype="variable-length binary") +def is_binary(t): + return t.id == lib.Type_BINARY + + +@doc(is_null, datatype="large variable-length binary") +def is_large_binary(t): + return t.id == lib.Type_LARGE_BINARY + + +@doc(method="is_string") +def is_unicode(t): + """ + Alias for {method}. 
+ + Parameters + ---------- + t : DataType + """ + return is_string(t) + + +@doc(is_null, datatype="string (utf8 unicode)") +def is_string(t): + return t.id == lib.Type_STRING + + +@doc(is_unicode, method="is_large_string") +def is_large_unicode(t): + return is_large_string(t) + + +@doc(is_null, datatype="large string (utf8 unicode)") +def is_large_string(t): + return t.id == lib.Type_LARGE_STRING + + +@doc(is_null, datatype="fixed size binary") +def is_fixed_size_binary(t): + return t.id == lib.Type_FIXED_SIZE_BINARY + + +@doc(is_null, datatype="variable-length binary view") +def is_binary_view(t): + return t.id == lib.Type_BINARY_VIEW + + +@doc(is_null, datatype="variable-length string (utf-8) view") +def is_string_view(t): + return t.id == lib.Type_STRING_VIEW + + +@doc(is_null, datatype="date") +def is_date(t): + return t.id in _DATE_TYPES + + +@doc(is_null, datatype="date32 (days)") +def is_date32(t): + return t.id == lib.Type_DATE32 + + +@doc(is_null, datatype="date64 (milliseconds)") +def is_date64(t): + return t.id == lib.Type_DATE64 + + +@doc(is_null, datatype="map") +def is_map(t): + return t.id == lib.Type_MAP + + +@doc(is_null, datatype="decimal") +def is_decimal(t): + return t.id in _DECIMAL_TYPES + + +@doc(is_null, datatype="decimal128") +def is_decimal128(t): + return t.id == lib.Type_DECIMAL128 + + +@doc(is_null, datatype="decimal256") +def is_decimal256(t): + return t.id == lib.Type_DECIMAL256 + + +@doc(is_null, datatype="dictionary-encoded") +def is_dictionary(t): + return t.id == lib.Type_DICTIONARY + + +@doc(is_null, datatype="interval") +def is_interval(t): + return t.id == lib.Type_INTERVAL_MONTH_DAY_NANO + + +@doc(is_null, datatype="primitive type") +def is_primitive(t): + return lib._is_primitive(t.id) diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/flatten.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/flatten.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..dc6e99d917afc16a5c1e29ba475ed24c7b758f17 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/flatten.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/nested_partial.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/nested_partial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53fcb8d8f9d921c28c32fc4565caa24e9e8e552d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__pycache__/nested_partial.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__init__.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..74586e418638d423e2c6ed4f852c0be053336b5f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__init__.py @@ -0,0 +1,16 @@ +from .hparam import categorical, hparam, log_uniform, loguniform, uniform +from .hyperparameters import HP, HyperParameters, Point +from .priors import LogUniformPrior, UniformPrior + +__all__ = [ + "categorical", + "hparam", + "log_uniform", + "loguniform", + "uniform", + "HP", + "HyperParameters", + "Point", + "LogUniformPrior", + "UniformPrior", +] diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8a100d18517f11a3cf4cf5626e87c0d9a505bb1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/hyperparameters.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/hyperparameters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7040c27d4d3b9b6cc23c00b2a3f529f7874eebe7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/hyperparameters.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/priors.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/priors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c62cbb7ed3ba7457c2b9dd036a3e81fe29a1ee68 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/priors.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff44ae9eaa7dc90fe1b92ceade9031dcef85a58e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/__pycache__/utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/hparam.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/hparam.py new file mode 100644 index 0000000000000000000000000000000000000000..d38126d2765e7c5caedafb202975e5cc17e4fcb3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/hparam.py @@ -0,0 +1,369 @@ +import dataclasses +from functools import wraps +from logging import getLogger +from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union, overload + +from 
simple_parsing.helpers.fields import choice as _choice +from simple_parsing.helpers.fields import field + +from .priors import CategoricalPrior, LogUniformPrior, NormalPrior, Prior, UniformPrior + +logger = getLogger(__name__) +T = TypeVar("T") + + +class ValueOutsidePriorException(Exception): + def __init__(self, value: Any, prior: Any, *args, **kwargs): + self.value = value + self.prior = prior + super().__init__(*args, **kwargs) + + +@overload +def uniform( + min: int, + max: int, + default: int = None, + discrete: bool = True, + strict: bool = False, + shape: Union[int, Tuple[int, ...]] = None, + **kwargs, +) -> int: + pass + + +@overload +def uniform( + min: float, + max: float, + default: float = None, + strict: bool = False, + shape: Union[int, Tuple[int, ...]] = None, + **kwargs, +) -> float: + pass + + +@overload +def uniform( + min: float, + max: float, + default: float = None, + discrete: bool = False, + strict: bool = False, + shape: Union[int, Tuple[int, ...]] = None, + **kwargs, +) -> float: + pass + + +def uniform( + min: Union[int, float], + max: Union[int, float], + discrete: bool = None, + default: Union[int, float, dataclasses._MISSING_TYPE] = dataclasses.MISSING, + strict: bool = False, + shape: Union[int, Tuple[int, ...]] = None, + **kwargs, +) -> Union[int, float]: + """Declares a Field with a Uniform prior. + + Parameters + ---------- + min : Union[int, float] + Minimum value. + + max : Union[int, float] + Maximum value. + + discrete : bool, optional + Whether this value is sampled discretely (an integer) or not. By default None, in + which case the types of the `min`, `max` and `default` arguments + will be used to infer whether the field should be discrete or not. + + default : Union[int, float], optional + The default value to use. Not setting a value here makes this field a required + argument of the dataclass. Setting a value can also be useful for the experiment + version control feature of Orion. By default None. 
+ + strict : bool, optional + Whether the bounds should be strictly enforced. When set to True, attempting to + create an object with this field, and passing a value outside the bounds will + raise a ValueError. + + Returns + ------- + Union[int, float] + A `dataclasses.Field` object, with the prior stored in its metadata. The return + type annotation is intentionally "wrong", so that the type checker doesn't raise + errors when declaring fields on a dataclass, since their type annotations + wouldn't match with the return type of this function. + """ + # TODO: what about uniform over a "choice"? + if "default_value" in kwargs: + assert default in { + None, + dataclasses.MISSING, + }, "can't pass both `default` and `default_value`" + default = kwargs.pop("default_value") + + if discrete is None: + # TODO: Set discrete = False by default, and then maybe set it to True later if + # the annotation on the field is int. + discrete = False + if min == 0 and max == 1: + discrete = False + elif (isinstance(min, int) and isinstance(max, int)) and ( + default in {None, dataclasses.MISSING} or isinstance(default, int) + ): + # If given something like uniform(0, 100) or uniform(5,10,default=7) then + # we can 'safely' assume that the discrete option should be used. + discrete = True + if shape and default not in {None, dataclasses.MISSING}: + assert isinstance(shape, int), "only support int shapes for now." + if isinstance(default, (int, float)): + default = tuple(default for _ in range(shape)) + + if discrete and default not in {None, dataclasses.MISSING}: + if shape is None: + default = round(default) + else: + assert isinstance(shape, int), "only support int shapes for now." 
+ default = tuple(round(default[i]) for i in range(shape)) + + # TODO: Make sure this doesn't by accident make some fields behave as positional + # fields + default_v = None if default is dataclasses.MISSING else default + + prior = UniformPrior(min=min, max=max, discrete=discrete, default=default_v, shape=shape) + + # if default is None: + # default = dataclasses.MISSING + # default = (min + max) / 2 + + return hparam(default=default, prior=prior, strict=strict, **kwargs) + + +@overload +def log_uniform( + min: int, + max: int, + discrete: bool = True, + shape: Union[int, Tuple[int, ...]] = None, + **kwargs, +) -> int: + pass + + +@overload +def log_uniform( + min: float, + max: float, + discrete: bool = False, + shape: Union[int, Tuple[int, ...]] = None, + **kwargs, +) -> float: + pass + + +def log_uniform( + min: Union[int, float], + max: Union[int, float], + discrete: bool = False, + default: Union[int, float, dataclasses._MISSING_TYPE] = dataclasses.MISSING, + shape: Union[int, Tuple[int, ...]] = None, + **kwargs, +) -> Union[int, float]: + if "default_value" in kwargs: + assert default in { + None, + dataclasses.MISSING, + }, "can't pass both `default` and `default_value`" + default = kwargs.pop("default_value") + + default_v = default + if default is dataclasses.MISSING: + default_v = None + prior = LogUniformPrior(min=min, max=max, discrete=discrete, default=default_v, shape=shape) + + # TODO: Do we really want to set the default value when not passed? 
+ # if default in {None, dataclasses.MISSING}: + # log_min = math.log(min, prior.base) + # log_max = math.log(max, prior.base) + # default = math.pow(prior.base, (log_min + log_max) / 2) + # if discrete or (isinstance(min, int) and isinstance(max, int)): + # default = round(default) + return hparam( + default=default, + prior=prior, + **kwargs, + ) + + +loguniform = log_uniform + + +@wraps(_choice) +def categorical( + *choices: T, + default: Union[T, dataclasses._MISSING_TYPE] = dataclasses.MISSING, + probabilities: Union[List[float], Dict[str, float]] = None, + strict: bool = False, + **kwargs: Any, +) -> T: + """Marks a field as being a categorical hyper-parameter. + + This wraps the `choice` function from `simple_parsing`, making it possible to choose + the value from the command-line. + + The probabilities for each value should be passed through this `probabilities` + argument. + + Returns: + T: the result of the usual `dataclasses.field()` function (a dataclass field). + """ + if "default_value" in kwargs: + assert default in { + None, + dataclasses.MISSING, + }, "can't pass both `default` and `default_value`" + default = kwargs.pop("default_value") + + metadata = kwargs.get("metadata", {}) + default_key = default + + options: List[Any] + if len(choices) == 1 and isinstance(choices[0], dict): + choice_dict = choices[0] + if probabilities and not isinstance(probabilities, dict): + raise RuntimeError( + "Need to pass a dict of probabilities when passing a dict of choices." + ) + # TODO: If we use keys here, then we have to add a step in __post_init__ of the + # dataclass holding this field, so that it gets the corresponding value from the + # dict. + # IDEA: Adding some kind of 'hook' to be used by simple-parsing? 
+ + def postprocess(value): + if value in choice_dict: + return choice_dict[value] + return value + + metadata["postprocessing"] = postprocess + if default not in {None, dataclasses.MISSING}: + assert default in choice_dict.values() + default_key = [k for k, v in choice_dict.items() if v == default][0] + options = list(choice_dict.keys()) + else: + options = list(choices) + + if isinstance(probabilities, dict): + if abs(sum(probabilities.values()) - 1) > 1e-5: + raise RuntimeError("Probabilities should sum to 1!") + probs = [] + for option in options: + probability = probabilities.get(option, 0.0) + probs.append(probability) + probabilities = probs + + default_v = default_key + if default is dataclasses.MISSING: + default_v = None + + prior = CategoricalPrior( + choices=options, + probabilities=probabilities, + default_value=default_v, + ) + metadata["prior"] = prior + + if strict: + + def postprocess(value): + if value not in prior: + raise ValueOutsidePriorException(value=value, prior=prior) + return value + + if "postprocessing" not in metadata: + metadata["postprocessing"] = postprocess + else: + # TODO: Compose both functions? + existing_fn = metadata["postprocessing"] + new_fn = postprocess + metadata["postprocessing"] = lambda v: new_fn(existing_fn(v)) + + kwargs["metadata"] = metadata + return _choice(*choices, default=default, **kwargs) + + +def hparam( + default: T, + *args, + prior: Union[Type[Prior[T]], Prior[T]] = None, + strict: bool = False, + **kwargs, +) -> T: + metadata = kwargs.get("metadata", {}) + min: Optional[float] = kwargs.get("min", kwargs.get("min")) + max: Optional[float] = kwargs.get("max", kwargs.get("max")) + + if prior is None: + assert min is not None and max is not None + # if min and max are passed but no Prior object, assume a Uniform prior. 
+ prior = UniformPrior(min=min, max=max) + metadata.update( + { + "min": min, + "max": max, + "prior": prior, + } + ) + + elif isinstance(prior, type) and issubclass(prior, (UniformPrior, LogUniformPrior)): + # use the prior as a constructor. + assert min is not None and max is not None + prior = prior(min=min, max=max) + + elif isinstance(prior, Prior): + metadata["prior"] = prior + if isinstance(prior, (UniformPrior, LogUniformPrior)): + metadata.update( + dict( + min=prior.min, + max=prior.max, + ) + ) + elif isinstance(prior, (NormalPrior)): + metadata.update( + dict( + mu=prior.mu, + sigma=prior.sigma, + ) + ) + + else: + # TODO: maybe support an arbitrary callable? + raise RuntimeError( + "hparam should receive either: \n" + "- `min` and `max` kwargs, \n" + "- `min` and `max` kwargs and a type of Prior to use, \n" + "- a `Prior` instance." + ) + + if strict: + assert "postprocessing" not in metadata + + def postprocess(value): + if value not in prior: + raise ValueOutsidePriorException(value=value, prior=prior) + return value + + metadata["postprocessing"] = postprocess + + kwargs["metadata"] = metadata + assert "default" not in kwargs + return field( + default=default, + *args, + **kwargs, + ) diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/hyperparameters.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/hyperparameters.py new file mode 100644 index 0000000000000000000000000000000000000000..2574449b98ed8fb420d21c69c30dd91fbb8b0bbf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/hyperparameters.py @@ -0,0 +1,303 @@ +from __future__ import annotations + +import dataclasses +import inspect +import math +import pickle +import random +import typing +from collections import OrderedDict +from dataclasses import Field, dataclass, fields +from functools import singledispatch, total_ordering +from logging import getLogger +from pathlib import Path +from typing import Any, 
ClassVar, NamedTuple, TypeVar + +from simple_parsing import utils +from simple_parsing.helpers.serialization.serializable import Serializable +from simple_parsing.utils import ( + compute_identity, + dict_union, + field_dict, + get_type_arguments, +) + +from .hparam import ValueOutsidePriorException +from .priors import Prior + +if typing.TYPE_CHECKING: + import numpy + +logger = getLogger(__name__) +T = TypeVar("T") +HP = TypeVar("HP", bound="HyperParameters") + + +@dataclass +class BoundInfo(Serializable): + """Object used to provide the bounds as required by `GPyOpt`.""" + + name: str + # One of 'continuous', 'discrete' or 'bandit' (unsupported). + type: str = "continuous" + domain: tuple[float, float] = (-math.inf, math.inf) + + +@dataclass +class HyperParameters(Serializable, decode_into_subclasses=True): # type: ignore + """Base class for dataclasses of HyperParameters.""" + + # Class variable holding the random number generator used to create the + # samples. + + rng: ClassVar[random.Random] = random.Random() + + def __post_init__(self): + for name, f in field_dict(self).items(): + f: Field + assert name == f.name + value = getattr(self, name) + # Apply any post-processing function, if applicable. + if "postprocessing" in f.metadata: + logger.debug(f"Post-processing of field {name}") + try: + new_value = f.metadata["postprocessing"](value) + except ValueOutsidePriorException as e: + raise ValueError( + f"Field '{name}' got value {repr(e.value)}, which is outside of the " + f"defined prior region: {e.prior}." + ) + setattr(self, name, new_value) + + @classmethod + def field_names(cls) -> list[str]: + return [f.name for f in fields(cls)] + + def id(self): + return compute_identity(**self.to_dict()) + + def seed(self, seed: int | None) -> None: + """TODO: Seed all priors with the given seed. (recursively if nested dataclasses + are present.) 
+ """ + raise NotImplementedError("TODO") + + @classmethod + def get_priors(cls) -> dict[str, Prior]: + """Returns a dictionary of the Priors for the hparam fields in this class.""" + priors_dict: dict[str, Prior] = {} + for field in fields(cls): + # If a HyperParameters class contains another HyperParameters class as a field + # we perform returned a flattened dict. + if inspect.isclass(field.type) and issubclass(field.type, HyperParameters): + priors_dict[field.name] = field.type.get_priors() + else: + prior: Prior | None = field.metadata.get("prior") + if prior: + priors_dict[field.name] = prior + return priors_dict + + @classmethod + def get_orion_space_dict(cls) -> dict[str, str]: + result: dict[str, str] = {} + for field in fields(cls): + # If a HyperParameters class contains another HyperParameters class as a field + # we perform returned a flattened dict. + if inspect.isclass(field.type) and issubclass(field.type, HyperParameters): + result[field.name] = field.type.get_orion_space_dict() + else: + prior: Prior | None = field.metadata.get("prior") + if prior: + result[field.name] = prior.get_orion_space_string() + return result + + def get_orion_space(self) -> dict[str, str]: + """NOTE: This might be more useful in some cases than the above classmethod + version, for example when a field is a different kind of dataclass than its + annotation. + """ + result: dict[str, str] = {} + for field in fields(self): + value = getattr(self, field.name) + # If a HyperParameters class contains another HyperParameters class as a field + # we perform returned a flattened dict. 
+ if isinstance(value, HyperParameters): + result[field.name] = value.get_orion_space() + else: + prior: Prior | None = field.metadata.get("prior") + if prior: + result[field.name] = prior.get_orion_space_string() + return result + + @classmethod + def space_id(cls): + return compute_identity(**cls.get_orion_space_dict()) + + @classmethod + def get_bounds(cls) -> list[BoundInfo]: + """Returns the bounds of the search domain for this type of HParam. + + Returns them as a list of `BoundInfo` objects, in the format expected by GPyOpt. + """ + bounds: list[BoundInfo] = [] + for f in fields(cls): + # TODO: handle a hparam which is categorical (i.e. choices) + min_v = f.metadata.get("min") + max_v = f.metadata.get("max") + if min_v is None or max_v is None: + continue + if f.type is float: + bound = BoundInfo(name=f.name, type="continuous", domain=(min_v, max_v)) + elif f.type is int: + bound = BoundInfo(name=f.name, type="discrete", domain=(min_v, max_v)) + else: + raise NotImplementedError(f"Unsupported type for field {f.name}: {f.type}") + bounds.append(bound) + return bounds + + @classmethod + def get_bounds_dicts(cls) -> list[dict[str, Any]]: + """Returns the bounds of the search space for this type of HParam, in the format expected + by the `GPyOpt` package.""" + return [b.to_dict() for b in cls.get_bounds()] + + @classmethod + def sample(cls): + kwargs: dict[str, Any] = {} + for field in dataclasses.fields(cls): + if inspect.isclass(field.type) and issubclass(field.type, HyperParameters): + # TODO: Should we allow adding a 'prior' in terms of a dataclass field? + kwargs[field.name] = field.type.sample() + + elif utils.is_union(field.type) and all( + inspect.isclass(v) and issubclass(v, HyperParameters) + for v in utils.get_type_arguments(field.type) + ): + chosen_class = random.choice(get_type_arguments(field.type)) + # BUG: Seems to be a bit of a bug here, when the numpy rng is set! 
+ value = chosen_class.sample() + kwargs[field.name] = value + else: + prior: Prior | None = field.metadata.get("prior") + if prior is not None: + try: + import numpy as np + + prior.np_rng = np.random + except ImportError: + prior.rng = cls.rng + value = prior.sample() + shape = getattr(prior, "shape", None) + if shape == () and hasattr(value, "item") and callable(value.item): + value = value.item() + kwargs[field.name] = value + return cls(**kwargs) + + def replace(self, **new_params): + new_hp_dict = dict_union(self.to_dict(), new_params, recurse=True) + new_hp = type(self).from_dict(new_hp_dict) + return new_hp + + # @classmethod + # @contextmanager + # def use_priors(cls, value: bool = True): + # temp = cls.sample_from_priors + # cls.sample_from_priors = value + # yield + # cls.sample_from_priors = temp + + def to_array(self, dtype: numpy.dtype | None = None) -> numpy.ndarray: + import numpy as np + + dtype = np.float32 if dtype is None else dtype + values: list[float] = [] + for k, v in self.to_dict(dict_factory=OrderedDict).items(): + try: + v = float(v) + except Exception: + logger.warning(f"Ignoring field {k} because we can't make a float out of it.") + else: + values.append(v) + return np.array(values, dtype=dtype) + + @classmethod + def from_array(cls: type[HP], array: numpy.ndarray) -> HP: + import numpy as np + + if len(array.shape) == 2 and array.shape[0] == 1: + array = array[0] + + keys = list(field_dict(cls)) + # idea: could use to_dict and to_array together to determine how many + # values to get for each field. For now we assume that each field is one + # variable. + # cls.sample().to_dict() + # assert len(keys) == len(array), "assuming that each field is dim 1 for now." + assert len(keys) == len(array), "assuming that each field is dim 1 for now." 
+ d = OrderedDict(zip(keys, array)) + logger.debug(f"Creating an instance of {cls} using args {d}") + d = OrderedDict((k, v.item() if isinstance(v, np.ndarray) else v) for k, v in d.items()) + return cls.from_dict(d) + + def clip_within_bounds(self: HP) -> HP: + d = self.to_dict() + for bound in self.get_bounds(): + min_v, max_v = bound.domain + d[bound.name] = min(max_v, max(min_v, d[bound.name])) + return self.from_dict(d) + + +@singledispatch +def save(obj: object, path: Path) -> None: + """Saves the object `obj` at path `path`. + + Uses pickle at the moment, regardless of the path name or object type. + TODO: Choose the serialization function depending on the path's extension. + """ + with open(path, "wb") as f: + pickle.dump(obj, f) + + +@save.register +def save_serializable(obj: Serializable, path: Path) -> None: + obj.save(path) + + +@total_ordering +class Point(NamedTuple): + hp: HyperParameters + perf: float + + def __eq__(self, other: object): + if not isinstance(other, tuple): + return NotImplemented + elif not len(other) == 2: + return NotImplemented + # NOTE: This doesn't do any kind + other_hp, other_perf = other + hps_equal = self.hp == other_hp + if not hps_equal and isinstance(other_hp, dict): + other_id = compute_identity(**other_hp) + # this is hairy, but need to check if the dicts would be equal. + if isinstance(self.hp, dict): + # This should ideally never be the case, we would hope that + # people are using HyperParameter objects in the Point tuples. + hp_id = compute_identity(**self.hp) + else: + hp_id = self.hp.id() + hps_equal = hp_id == other_id + return hps_equal and self.perf == other[1] + + def __gt__(self, other: tuple[object, ...]) -> bool: + # Even though the tuple has (hp, perf), compare based on the order + # (perf, hp). + # This means that sorting a list of Points will work as expected! 
+ if isinstance(other, (Point, tuple)): + hp, perf = other + if not isinstance(perf, float): + print(other) + exit() + return self.perf > perf + + # def __repr__(self): + # return super().__repr__() diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/hyperparameters_test.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/hyperparameters_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d675ac54b353804f29e310afee2ec6f26a56f8fd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/hyperparameters_test.py @@ -0,0 +1,269 @@ +from dataclasses import dataclass +from typing import Sequence, Union + +import pytest + +from .hparam import categorical, log_uniform, uniform +from .hyperparameters import HyperParameters + +numpy_installed = False +try: + import numpy as np + + numpy_installed = True +except ImportError: + pass + + +@dataclass +class A(HyperParameters): + learning_rate: float = uniform(0.0, 1.0) + + +@dataclass +class B(A): + momentum: float = uniform(0.0, 1.0) + + +@pytest.mark.skipif(not numpy_installed, reason="Test requires numpy.") +def test_to_array(): + b: B = B.sample() + array = b.to_array() + assert np.isclose(array[0], b.learning_rate) + assert np.isclose(array[1], b.momentum) + + +@pytest.mark.skipif(not numpy_installed, reason="Test requires numpy.") +def test_from_array(): + array = np.arange(2) + b: B = B.from_array(array) + assert b.learning_rate == 0.0 + assert b.momentum == 1.0 + + +@dataclass +class C(HyperParameters): + lr: float = uniform(0.0, 1.0) + momentum: float = uniform(0.0, 1.0) + + +def test_clip_within_bounds(): + """Test to make sure that the `clip_within_bounds` actually restricts the values of the + HyperParameters to be within the bounds.""" + # valid range for learning_rate is (0 - 1]. 
+ a = A(learning_rate=123) + assert a.learning_rate == 123 + a = a.clip_within_bounds() + assert a.learning_rate == 1.0 + + b = B(learning_rate=0.5, momentum=456) + assert b.clip_within_bounds() == B(learning_rate=0.5, momentum=1) + + # Test that the types are maintained. + @dataclass + class C(HyperParameters): + a: int = uniform(123, 456, discrete=True) + b: float = log_uniform(4.56, 123.456) + c: str = categorical("foo", "bar", "baz") + + with pytest.raises(TypeError): + _ = C() + + with pytest.raises(TypeError): + _ = C(a=123) + + with pytest.raises(TypeError): + _ = C(b=4.56) + + with pytest.raises(TypeError): + _ = C(c="bar") + + # TODO: IDEA: how about we actually do some post-processing to always clip stuff + # between bounds? + # Check that it doesn't change anything if the values are within the range. + assert C(a=1000, b=1.23, c="bar").clip_within_bounds() == C(456, 4.56, "bar") + assert C(a=-1.234, b=1000, c="foo").clip_within_bounds() == C(123, 123.456, "foo") + + +def test_strict_bounds(): + """When creating a class and using a hparam field with `strict=True`, the values will be + restricted to be within the given bounds.""" + + @dataclass + class C(HyperParameters): + a: int = uniform(0, 1, strict=True) + b: float = log_uniform(4.56, 123.456) + c: str = categorical("foo", "bar", "baz", default="foo", strict=True) + + # valid range for a is [0, 1). + with pytest.raises( + ValueError, + match="Field 'a' got value 123, which is outside of the defined prior", + ): + _ = C(a=123, b=10, c="foo") + + with pytest.raises( + ValueError, + match="Field 'c' got value 'yolo', which is outside of the defined prior", + ): + _ = C(a=0.5, b=0.1, c="yolo") + + # should NOT raise an error, since the field `b` isn't strict. 
+ _ = C(a=0.1, b=-1.26, c="bar") + + +def test_nesting(): + @dataclass + class Child(HyperParameters): + foo: int = uniform(0, 10, default=5) + + from simple_parsing import mutable_field + + @dataclass + class Parent(HyperParameters): + child_a: Child = mutable_field(Child, foo=3) + + parent = Parent.sample() + assert isinstance(parent, Parent) + assert isinstance(parent.child_a, Child) + + +def test_choice_field(): + @dataclass + class Child(HyperParameters): + hparam: float = categorical( + { + "a": 1.23, + "b": 4.56, + "c": 7.89, + }, + default=1.23, + ) + + bob = Child() + assert bob.hparam == 1.23 + + bob = Child.sample() + assert bob.hparam in {1.23, 4.56, 7.89} + assert Child.get_orion_space_dict() == { + "hparam": "choices(['a', 'b', 'c'], default_value='a')" + } + + +def test_choice_field_with_values_of_a_weird_type(): + @dataclass + class Bob(HyperParameters): + hparam_type: float = categorical( + { + "a": A, + "b": B, + "c": C, + }, + probabilities={ + "a": 0.1, + "b": 0.2, + "c": 0.7, + }, + default=B, + ) + + bob = Bob() + assert bob.hparam_type == B + + bob = Bob.sample() + assert bob.hparam_type in {A, B, C} + assert Bob.get_orion_space_dict() == { + "hparam_type": "choices({'a': 0.1, 'b': 0.2, 'c': 0.7}, default_value='b')" + } + + +@pytest.mark.xfail( + reason="TODO: it isn't trivial how to fix this, without having to rework the " + "from_dict from simple-parsing." 
+) +def test_replace_int_or_float_preserves_type(): + @dataclass + class A(HyperParameters): + # How much of training dataset to check (floats = percent, int = num_batches) + limit_train_batches: Union[int, float] = 1.0 + # How much of validation dataset to check (floats = percent, int = num_batches) + limit_val_batches: Union[int, float] = 1.0 + # How much of test dataset to check (floats = percent, int = num_batches) + limit_test_batches: Union[int, float] = 1.0 + + a = A() + assert isinstance(a.limit_test_batches, float) + b = a.replace(limit_train_batches=0.5) + assert isinstance(b.limit_test_batches, float) + + +try: + from orion.core.io.space_builder import SpaceBuilder + + orion_installed = True +except ImportError: + orion_installed = False + + +@dataclass +class Foo(HyperParameters): + x: Sequence[int] = uniform(0, 10, default=(5, 5), shape=2) + y: Sequence[int] = uniform(0, 10, default=(5, 5, 5), shape=3) + z: Sequence[int] = uniform(0, 10, default=2, shape=5) + + +def test_priors_with_shape(): + foo = Foo() + assert foo.x == (5, 5) + assert foo.y == (5, 5, 5) + assert foo.z == (2, 2, 2, 2, 2) + + foo = Foo.sample() + assert len(foo.x) == 2 + assert len(foo.y) == 3 + assert len(foo.z) == 5 + + +@pytest.mark.xfail(reason="Need to update this since spaces give Trials, not points.") +@pytest.mark.skipif(not numpy_installed, reason="Test requires numpy.") +@pytest.mark.skipif(not orion_installed, reason="Test requires Orion.") +def test_contains(): + # TODO: Add a convenience method for creating a Space object from Orion directly. 
+ foo = Foo(x=(2, 3), y=(1, 2, 3), z=(1, 2, 3, 4, 5)) + space_builder = SpaceBuilder() + space_config = Foo.get_orion_space_dict() + space = space_builder.build(space_config) + assert foo.to_array() in space + + +def test_field_types(): + @dataclass + class C(HyperParameters): + a: int = uniform(123, 456) + b: float = uniform(4.56, 123.456, default=123) + c: float = uniform(4.56, 123.456, default=10, discrete=True) + d: float = uniform(4.56, 123.456, default=10, discrete=False) + e: float = uniform(4.56, 123.456, default=10, discrete=False, shape=2) + f: float = uniform(10, 100, default=20, shape=2) + + cs = [C.sample() for _ in range(100)] + assert C.get_priors()["a"].discrete is True + assert all(isinstance(c.a, int) for c in cs) + + assert C.get_priors()["b"].discrete is False + assert all(isinstance(c.b, float) for c in cs) + + assert C.get_priors()["c"].discrete is True + assert all(isinstance(c.c, int) for c in cs) + + assert C.get_priors()["d"].discrete is False + assert all(isinstance(c.d, float) for c in cs) + + assert C.get_priors()["e"].discrete is False + assert all(all(isinstance(v, float) for v in c.e) for c in cs) + assert C.get_priors()["f"].discrete is True + + if numpy_installed: + assert all(c.f.dtype == int for c in cs) + else: + assert all(all(isinstance(v, int) for v in c.f) for c in cs) diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/utils.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..663360fc434dfccca58ab9eda20df8874098b6db --- /dev/null +++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/hparams/utils.py @@ -0,0 +1,23 @@ +import random + + +def set_seed(seed: int) -> None: + random.seed(seed) + try: + import numpy as np + + np.random.seed(seed) + except ImportError: + pass + + try: + import torch + except ImportError: + pass + else: + try: + torch.manual_seed(seed) + if 
torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + except AttributeError: + pass diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__init__.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a76671260e48fd0f9c6b89982cd77cb15080d7c1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__init__.py @@ -0,0 +1,27 @@ +from .decoding import * +from .encoding import * +from .serializable import ( + FrozenSerializable, + Serializable, + SerializableMixin, + dump, + dump_json, + dump_yaml, + dumps, + dumps_json, + dumps_yaml, + from_dict, + load, + load_json, + load_yaml, + save, + save_json, + save_yaml, + to_dict, +) + +try: + from .yaml_serialization import YamlSerializable +except ImportError: + pass +JsonSerializable = Serializable diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb4acae56b072f4c759f8f0df94b7bb77728c1a4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/decoding.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/decoding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..523be753853b627647e4297548a023e3b6ff57da Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/decoding.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/decoding.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/decoding.py new file mode 100644 index 0000000000000000000000000000000000000000..4e4a5feda85ad698e02cc93736e31ad5747ed582 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/decoding.py @@ -0,0 +1,528 @@ +"""Functions for decoding dataclass fields from "raw" values (e.g. from json).""" +from __future__ import annotations + +import inspect +import sys +import warnings +from collections import OrderedDict +from collections.abc import Mapping +from dataclasses import Field +from enum import Enum +from functools import partial +from logging import getLogger +from pathlib import Path +from typing import Any, Callable, TypeVar + +from simple_parsing.annotation_utils.get_field_annotations import ( + evaluate_string_annotation, +) +from simple_parsing.utils import ( + get_bound, + get_forward_arg, + get_type_arguments, + is_dataclass_type, + is_dict, + is_enum, + is_forward_ref, + is_list, + is_literal, + is_set, + is_tuple, + is_typevar, + is_union, + str2bool, +) + +logger = getLogger(__name__) + +T = TypeVar("T") +K = TypeVar("K") +V = TypeVar("V") + +# Dictionary mapping from types/type annotations to their decoding functions. +_decoding_fns: dict[type[T], Callable[[Any], T]] = { + # the 'primitive' types are decoded using the type fn as a constructor. 
+ t: t + for t in [str, bytes] +} + + +def register_decoding_fn( + some_type: type[T], function: Callable[[Any], T], overwrite: bool = False +) -> None: + """Register a decoding function for the type `some_type`.""" + _register(some_type, function, overwrite=overwrite) + + +def _register(t: type, func: Callable, overwrite: bool = False) -> None: + if t not in _decoding_fns or overwrite: + # logger.debug(f"Registering the type {t} with decoding function {func}") + _decoding_fns[t] = func + + +C = TypeVar("C", bound=Callable[[Any], Any]) + + +def decoding_fn_for_type(some_type: type) -> Callable[[C], C]: + """Registers a function to be used to convert a serialized value to the given type. + + The function should accept one argument (the serialized value) and return the decoded value. + """ + + def _wrapper(fn: C) -> C: + register_decoding_fn(some_type, fn, overwrite=True) + return fn + + return _wrapper + + +@decoding_fn_for_type(int) +def _decode_int(v: str) -> int: + int_v = int(v) + if isinstance(v, bool): + warnings.warn(UnsafeCastingWarning(raw_value=v, decoded_value=int_v)) + elif int_v != float(v): + warnings.warn(UnsafeCastingWarning(raw_value=v, decoded_value=int_v)) + return int_v + + +@decoding_fn_for_type(float) +def _decode_float(v: Any) -> float: + float_v = float(v) + if isinstance(v, bool): + warnings.warn(UnsafeCastingWarning(raw_value=v, decoded_value=float_v)) + return float_v + + +@decoding_fn_for_type(bool) +def _decode_bool(v: Any) -> bool: + if isinstance(v, str): + bool_v = str2bool(v) + else: + bool_v = bool(v) + if isinstance(v, (int, float)) and v not in (0, 1, 0.0, 1.0): + warnings.warn(UnsafeCastingWarning(raw_value=v, decoded_value=bool_v)) + return bool_v + + +def decode_field( + field: Field, + raw_value: Any, + containing_dataclass: type | None = None, + drop_extra_fields: bool | None = None, +) -> Any: + """Converts a "raw" value (e.g. from json file) to the type of the `field`. 
+ + When serializing a dataclass to json, all objects are converted to dicts. + The values which have a special type (not str, int, float, bool) are + converted to string or dict. Hence this function allows us to recover the + original type of pretty much any field which is of type `Serializable`, or + has a registered decoding function (either through `register_decoding_fn` or + through having `decoding_fn` passed directly to the `field` function. + + Args: + field (Field): `Field` object from a dataclass. + raw_value (Any): The `raw` value from deserializing the dataclass. + + Returns: + Any: The "raw" value converted to the right type. + """ + name = field.name + field_type = field.type + logger.debug(f"name = {name}, field_type = {field_type}") + + # If the user set a custom decoding function, we use it. + custom_decoding_fn = field.metadata.get("decoding_fn") + if custom_decoding_fn is not None: + return custom_decoding_fn(raw_value) + + if isinstance(field_type, str) and containing_dataclass: + field_type = evaluate_string_annotation(field_type, containing_dataclass) + + decoding_function = get_decoding_fn(field_type) + + _kwargs = dict(category=UnsafeCastingWarning) if sys.version_info >= (3, 11) else {} + + with warnings.catch_warnings(record=True, **_kwargs) as warning_messages: + if is_dataclass_type(field_type) and drop_extra_fields is not None: + # Pass the drop_extra_fields argument to the decoding function. 
+ decoded_value = decoding_function(raw_value, drop_extra_fields=drop_extra_fields) + else: + decoded_value = decoding_function(raw_value) + + for warning_message in warning_messages.copy(): + if not isinstance(warning_message.message, UnsafeCastingWarning): + warnings.warn_explicit( + message=warning_message.message, + category=warning_message.category, + filename=warning_message.filename, + lineno=warning_message.lineno, + # module=warning_message.module, + # registry=warning_message.registry, + # module_globals=warning_message.module_globals, + ) + warning_messages.remove(warning_message) + + if warning_messages: + warnings.warn( + RuntimeWarning( + f"Unsafe casting occurred when deserializing field '{name}' of type {field_type}: " + f"raw value: {raw_value!r}, decoded value: {decoded_value!r}." + ) + ) + return decoded_value + + +# NOTE: Disabling the caching here might help avoid some bugs, and it's unclear if this has that +# much of a performance impact. +def get_decoding_fn(type_annotation: type[T] | str) -> Callable[..., T]: + """Fetches/Creates a decoding function for the given type annotation. + + This decoding function can then be used to create an instance of the type + when deserializing dicts (which could have been obtained with JSON or YAML). + + This function inspects the type annotation and creates the right decoding + function recursively in a "dynamic-programming-ish" fashion. + NOTE: We cache the results in a `functools.lru_cache` decorator to avoid + wasteful calls to the function. This makes this process pretty efficient. + + Args: + t (Type[T]): + A type or type annotation. Can be arbitrarily nested. + For example: + - List[int] + - Dict[str, Foo] + - Tuple[int, str, Any], + - Dict[Tuple[int, int], List[str]] + - List[List[List[List[Tuple[int, str]]]]] + - etc. + + Returns: + Callable[[Any], T]: + A function that decodes a 'raw' value to an instance of type `t`. 
+ """ + from .serializable import from_dict + + logger.debug(f"Getting the decoding function for {type_annotation!r}") + + if isinstance(type_annotation, str): + # Check first if there are any matching registered decoding functions. + # TODO: Might be better to actually use the scope of the field, right? + matching_entries = { + key: decoding_fn + for key, decoding_fn in _decoding_fns.items() + if (inspect.isclass(key) and key.__name__ == type_annotation) + } + if len(matching_entries) == 1: + _, decoding_fn = matching_entries.popitem() + return decoding_fn + elif len(matching_entries) > 1: + # Multiple decoding functions match the type. Can't tell. + logger.warning( + RuntimeWarning( + f"More than one potential decoding functions were found for types that match " + f"the string annotation {type_annotation!r}. This will simply try each one " + f"and return the first one that works." + ) + ) + return try_functions(*(decoding_fn for _, decoding_fn in matching_entries.items())) + else: + # Try to evaluate the string annotation. + t = evaluate_string_annotation(type_annotation) + + elif is_forward_ref(type_annotation): + forward_arg: str = get_forward_arg(type_annotation) + # Recurse until we've resolved the forward reference. + return get_decoding_fn(forward_arg) + + else: + t = type_annotation + + logger.debug(f"{type_annotation!r} -> {t!r}") + + # T should now be a type or one of the objects from the typing module. + + if t in _decoding_fns: + # The type has a dedicated decoding function. 
+ return _decoding_fns[t] + + if is_dataclass_type(t): + return partial(from_dict, t) + + if t is Any: + logger.debug(f"Decoding an Any type: {t}") + return no_op + + if is_dict(t): + logger.debug(f"Decoding a Dict field: {t}") + args = get_type_arguments(t) + if len(args) != 2: + args = (Any, Any) + return decode_dict(*args) + + if is_set(t): + logger.debug(f"Decoding a Set field: {t}") + args = get_type_arguments(t) + if len(args) != 1: + args = (Any,) + return decode_set(args[0]) + + if is_tuple(t): + logger.debug(f"Decoding a Tuple field: {t}") + args = get_type_arguments(t) + return decode_tuple(*args) + + if is_list(t): + logger.debug(f"Decoding a List field: {t}") + args = get_type_arguments(t) + if not args: + # Using a `List` or `list` annotation, so we don't know what do decode the + # items into! + args = (Any,) + assert len(args) == 1 + return decode_list(args[0]) + + if is_union(t): + logger.debug(f"Decoding a Union field: {t}") + args = get_type_arguments(t) + return decode_union(*args) + + if is_enum(t): + logger.debug(f"Decoding an Enum field: {t}") + return decode_enum(t) + + if is_typevar(t): + bound = get_bound(t) + logger.debug(f"Decoding a typevar: {t}, bound type is {bound}.") + if bound is not None: + return get_decoding_fn(bound) + + if is_literal(t): + logger.debug(f"Decoding a Literal field: {t}") + possible_vals = get_type_arguments(t) + return decode_literal(*possible_vals) + + # Unknown type. + warnings.warn( + UserWarning( + f"Unable to find a decoding function for the annotation {t} (of type {type(t)}). " + f"Will try to use the type as a constructor. Consider registering a decoding function " + f"using `register_decoding_fn`, or posting an issue on GitHub. 
" + ) + ) + return try_constructor(t) + + +def decode_optional(t: type[T]) -> Callable[[Any | None], T | None]: + decode = get_decoding_fn(t) + + def _decode_optional(val: Any | None) -> T | None: + return val if val is None else decode(val) + + return _decode_optional + + +def try_functions(*funcs: Callable[[Any], T]) -> Callable[[Any], T | Any]: + """Tries to use the functions in succession, else returns the same value unchanged.""" + + def _try_functions(val: Any) -> T | Any: + e: Exception | None = None + for func in funcs: + try: + return func(val) + except Exception as ex: + e = ex + else: + logger.debug(f"Couldn't parse value {val}, returning it as-is. (exception: {e})") + return val + + return _try_functions + + +def decode_union(*types: type[T]) -> Callable[[Any], T | Any]: + types_list = list(types) + optional = type(None) in types_list + + # Partition the Union into None and non-None types. + while type(None) in types_list: + types_list.remove(type(None)) + + decoding_fns: list[Callable[[Any], T]] = [ + decode_optional(t) if optional else get_decoding_fn(t) for t in types_list + ] + + # TODO: We could be a bit smarter about the order in which we try the functions, but for now, + # we just try the functions in the same order as the annotation, and return the result from the + # first function that doesn't raise an exception. + + # Try using each of the non-None types, in succession. Worst case, return the value. + return try_functions(*decoding_fns) + + +def decode_list(t: type[T]) -> Callable[[list[Any]], list[T]]: + decode_item = get_decoding_fn(t) + + def _decode_list(val: list[Any]) -> list[T]: + return [decode_item(v) for v in val] + + return _decode_list + + +def decode_tuple(*tuple_item_types: type[T]) -> Callable[[list[T]], tuple[T, ...]]: + """Makes a parsing function for creating tuples. + + Can handle tuples with different item types, for instance: + - `Tuple[int, Foo, str, float, ...]`. 
+ + Returns: + Callable[[List[T]], Tuple[T, ...]]: A parsing function for creating tuples. + """ + # Get the decoding function for each item type + has_ellipsis = False + if Ellipsis in tuple_item_types: + # TODO: This isn't necessary, the ellipsis will always be at index 1. + ellipsis_index = tuple_item_types.index(Ellipsis) + decoding_fn_index = ellipsis_index - 1 + decoding_fn = get_decoding_fn(tuple_item_types[decoding_fn_index]) + has_ellipsis = True + else: + decoding_fns = [get_decoding_fn(t) for t in tuple_item_types] + # Note, if there are more values than types in the tuple type, then the + # last type is used. + + def _decode_tuple(val: tuple[Any, ...]) -> tuple[T, ...]: + if has_ellipsis: + return tuple(decoding_fn(v) for v in val) + else: + return tuple(decoding_fns[i](v) for i, v in enumerate(val)) + + return _decode_tuple + + +def decode_set(item_type: type[T]) -> Callable[[list[T]], set[T]]: + """Makes a parsing function for creating sets with items of type `item_type`. + + Args: + item_type (Type[T]): the type of the items in the set. + + Returns: + Callable[[List[T]], Set[T]]: [description] + """ + # Get the parse fn for a list of items of type `item_type`. + parse_list_fn = decode_list(item_type) + + def _decode_set(val: list[Any]) -> set[T]: + return set(parse_list_fn(val)) + + return _decode_set + + +def decode_dict(K_: type[K], V_: type[V]) -> Callable[[list[tuple[Any, Any]]], dict[K, V]]: + """Creates a decoding function for a dict type. Works with OrderedDict too. + + Args: + K_ (Type[K]): The type of the keys. + V_ (Type[V]): The type of the values. + + Returns: + Callable[[List[Tuple[Any, Any]]], Dict[K, V]]: A function that parses a + Dict[K_, V_]. 
+ """ + decode_k = get_decoding_fn(K_) + decode_v = get_decoding_fn(V_) + + def _decode_dict(val: dict[Any, Any] | list[tuple[Any, Any]]) -> dict[K, V]: + result: dict[K, V] = {} + if isinstance(val, list): + result = OrderedDict() + items = val + elif isinstance(val, OrderedDict): + # NOTE(ycho): Needed to propagate `OrderedDict` type + result = OrderedDict() + items = val.items() + else: + items = val.items() + for k, v in items: + k_ = decode_k(k) + v_ = decode_v(v) + result[k_] = v_ + return result + + return _decode_dict + + +def decode_enum(item_type: type[Enum]) -> Callable[[str], Enum]: + """Creates a decoding function for an enum type. + + Args: + item_type (Type[Enum]): the type of the items in the set. + + Returns: + Callable[[str], Enum]: A function that returns the enum member for the given name. + """ + + def _decode_enum(val: str) -> Enum: + return item_type[val] + + return _decode_enum + + +def decode_literal(*possible_vals: Any) -> Callable[[Any], Any]: + """Creates a decoding function for a Literal type. + + Args: + *possible_vals (Any): The permissible values for the Literal type. + + Returns: + Callable[[Any], Any]: A function that checks if a given value is one of the + permissible values for the Literal. If not, raises a TypeError. + """ + + def _decode_literal(val: Any) -> Any: + if val not in possible_vals: + raise TypeError(f"Expected one of {possible_vals} for Literal, got {val}") + + return val + + return _decode_literal + + +def no_op(v: T) -> T: + """Decoding function that gives back the value as-is. + + Args: + v ([Any]): Any value. + + Returns: + [type]: The value unchanged. + """ + return v + + +def try_constructor(t: type[T]) -> Callable[[Any], T | Any]: + """Tries to use the type as a constructor. If that fails, returns the value as-is. + + Args: + t (Type[T]): A type. + + Returns: + Callable[[Any], Union[T, Any]]: A decoding function that might return nothing. 
+ """ + + def constructor(val): + if isinstance(val, Mapping): + return t(**val) + else: + return t(val) + + return try_functions(constructor) + + +register_decoding_fn(Path, Path) + + +class UnsafeCastingWarning(RuntimeWarning): + def __init__(self, raw_value: Any, decoded_value: Any) -> None: + super().__init__() + self.raw_value = raw_value + self.decoded_value = decoded_value