ZTWHHH commited on
Commit
0756ea4
·
verified ·
1 Parent(s): 44984d9

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. parrot/lib/libtinfo.so.6 +3 -0
  3. parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/RECORD +119 -0
  4. parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/REQUESTED +0 -0
  5. parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/top_level.txt +1 -0
  6. parrot/lib/python3.10/site-packages/mdit_py_plugins/anchors/__init__.py +1 -0
  7. parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__init__.py +1 -0
  8. parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/__init__.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/index.cpython-310.pyc +0 -0
  10. parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/parse.cpython-310.pyc +0 -0
  11. parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/index.py +123 -0
  12. parrot/lib/python3.10/site-packages/mdit_py_plugins/deflist/__pycache__/index.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/LICENSE +22 -0
  14. parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/__pycache__/__init__.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/index.py +430 -0
  16. parrot/lib/python3.10/site-packages/mdit_py_plugins/front_matter/port.yaml +4 -0
  17. parrot/lib/python3.10/site-packages/pyarrow/__init__.pxd +42 -0
  18. parrot/lib/python3.10/site-packages/pyarrow/_acero.pxd +44 -0
  19. parrot/lib/python3.10/site-packages/pyarrow/_compute.pxd +70 -0
  20. parrot/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py +56 -0
  21. parrot/lib/python3.10/site-packages/pyarrow/_csv.pxd +55 -0
  22. parrot/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx +51 -0
  23. parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx +178 -0
  24. parrot/lib/python3.10/site-packages/pyarrow/_dlpack.pxi +46 -0
  25. parrot/lib/python3.10/site-packages/pyarrow/_feather.pyx +117 -0
  26. parrot/lib/python3.10/site-packages/pyarrow/_fs.pxd +91 -0
  27. parrot/lib/python3.10/site-packages/pyarrow/_gcsfs.pyx +212 -0
  28. parrot/lib/python3.10/site-packages/pyarrow/_generated_version.py +16 -0
  29. parrot/lib/python3.10/site-packages/pyarrow/_hdfs.pyx +160 -0
  30. parrot/lib/python3.10/site-packages/pyarrow/_json.pxd +36 -0
  31. parrot/lib/python3.10/site-packages/pyarrow/_json.pyx +310 -0
  32. parrot/lib/python3.10/site-packages/pyarrow/_orc.pxd +134 -0
  33. parrot/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd +56 -0
  34. parrot/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx +484 -0
  35. parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd +33 -0
  36. parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx +62 -0
  37. parrot/lib/python3.10/site-packages/pyarrow/_s3fs.pyx +479 -0
  38. parrot/lib/python3.10/site-packages/pyarrow/acero.py +403 -0
  39. parrot/lib/python3.10/site-packages/pyarrow/array.pxi +0 -0
  40. parrot/lib/python3.10/site-packages/pyarrow/benchmark.pxi +20 -0
  41. parrot/lib/python3.10/site-packages/pyarrow/compat.pxi +71 -0
  42. parrot/lib/python3.10/site-packages/pyarrow/compute.py +732 -0
  43. parrot/lib/python3.10/site-packages/pyarrow/csv.py +22 -0
  44. parrot/lib/python3.10/site-packages/pyarrow/cuda.py +25 -0
  45. parrot/lib/python3.10/site-packages/pyarrow/dataset.py +1035 -0
  46. parrot/lib/python3.10/site-packages/pyarrow/error.pxi +274 -0
  47. parrot/lib/python3.10/site-packages/pyarrow/feather.py +277 -0
  48. parrot/lib/python3.10/site-packages/pyarrow/fs.py +431 -0
  49. parrot/lib/python3.10/site-packages/pyarrow/gandiva.pyx +760 -0
  50. parrot/lib/python3.10/site-packages/pyarrow/io.pxi +2919 -0
.gitattributes CHANGED
@@ -102,3 +102,4 @@ parrot/lib/libncursesw.so.6.4 filter=lfs diff=lfs merge=lfs -text
102
  parrot/lib/libtinfow.so filter=lfs diff=lfs merge=lfs -text
103
  parrot/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text
104
  parrot/lib/python3.10/site-packages/google/protobuf/__pycache__/descriptor_pb2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
102
  parrot/lib/libtinfow.so filter=lfs diff=lfs merge=lfs -text
103
  parrot/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text
104
  parrot/lib/python3.10/site-packages/google/protobuf/__pycache__/descriptor_pb2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
105
+ parrot/lib/libtinfo.so.6 filter=lfs diff=lfs merge=lfs -text
parrot/lib/libtinfo.so.6 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ff9b333bc4b796b31c188c2dadd7840788cb963dbf4f34567deb3f326326b02
3
+ size 287080
parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/RECORD ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ../../../bin/pygrun,sha256=zdiJT5vo3EKhHl5thuM5QFtN0Dn5DetmpY_ZBW8J4XA,6133
2
+ antlr4/BufferedTokenStream.py,sha256=_BwmzOH1TO6yL2yC_ZaUzkghq8wzc0UPHfI3UpnZUwM,10780
3
+ antlr4/CommonTokenFactory.py,sha256=Tv16zg_pWD1Dv3IphsxFu8nwWdLeXYcqJ8CC5yHwjH8,2110
4
+ antlr4/CommonTokenStream.py,sha256=NNJHXwRg2_Zn46ZhJyDxZtvZzsPWhb6JjXa7BjM45eg,2770
5
+ antlr4/FileStream.py,sha256=-ZR_-jl_If9IIBYLINIwlQrlTSmu5k1VUKDc3ie7WR4,868
6
+ antlr4/InputStream.py,sha256=sggjE2jEGvSgQmxFvqeeuT3aOVgcH5tS7mMybW8wKS4,2334
7
+ antlr4/IntervalSet.py,sha256=Cd0WKhd_kYbiLYKkDNncgSM19GAuS7OaTOC4-5Yubs4,5965
8
+ antlr4/LL1Analyzer.py,sha256=oJBvO7_S8cAlb_D4qWNxd2IlK0qP4ka-oeoDxx16CZ4,7752
9
+ antlr4/Lexer.py,sha256=C72hqayfkympxb46AcSnhPD9kVZ0quWgboGxa6gcIcg,11542
10
+ antlr4/ListTokenSource.py,sha256=IffLMo7YQnD_CjKryrrgNWSk0q5QSYd7puZyyUk7vOk,5356
11
+ antlr4/Parser.py,sha256=F2Q25z0-__KHfa354KQhDu3ZOVzLFfag3s2ixJ4dl_o,22883
12
+ antlr4/ParserInterpreter.py,sha256=-QU9kn4x3WCQ-LSA99R231HoicTqakiHZ5KM72l-hIo,7206
13
+ antlr4/ParserRuleContext.py,sha256=wHAVdOxMAO5jkUqloTXVzn_xYnJhiHbvvuhZpth0ZF8,6762
14
+ antlr4/PredictionContext.py,sha256=cb4KI6EGpS7sRzJ8UvPEkxphINZuWhyiZ95752g3prI,22977
15
+ antlr4/Recognizer.py,sha256=vmKAtSjIgR9LQr5YzuK5OmPZWMJ3x69OuVZQ_FTzQHE,5383
16
+ antlr4/RuleContext.py,sha256=GiviRv2k_al1IBgdJOEEoD0ohJaVd-_h5T_CPG_Bsmg,8099
17
+ antlr4/StdinStream.py,sha256=MMSH4zN8T6i_nu-3_TlN-3E4nPM4b5KgK4GT6n_FUQA,303
18
+ antlr4/Token.py,sha256=OtWCab4Ut52X_nLLAA-8x4Zl6xaF6TEN-0033uaoaEo,5206
19
+ antlr4/TokenStreamRewriter.py,sha256=cuErQTrXwC_0kqVv3MsTWGZSm-E1Vy1yzA-3SOhKd_s,10324
20
+ antlr4/Utils.py,sha256=Oyg8CJCRL1TrF_QSB_LLlVdWOB4loVcKOgFNT-icO7c,931
21
+ antlr4/__init__.py,sha256=g8UGpflnlMWcAyLtihejzrgAP1Uo3b9GhwfI8QnZjtw,1125
22
+ antlr4/__pycache__/BufferedTokenStream.cpython-310.pyc,,
23
+ antlr4/__pycache__/CommonTokenFactory.cpython-310.pyc,,
24
+ antlr4/__pycache__/CommonTokenStream.cpython-310.pyc,,
25
+ antlr4/__pycache__/FileStream.cpython-310.pyc,,
26
+ antlr4/__pycache__/InputStream.cpython-310.pyc,,
27
+ antlr4/__pycache__/IntervalSet.cpython-310.pyc,,
28
+ antlr4/__pycache__/LL1Analyzer.cpython-310.pyc,,
29
+ antlr4/__pycache__/Lexer.cpython-310.pyc,,
30
+ antlr4/__pycache__/ListTokenSource.cpython-310.pyc,,
31
+ antlr4/__pycache__/Parser.cpython-310.pyc,,
32
+ antlr4/__pycache__/ParserInterpreter.cpython-310.pyc,,
33
+ antlr4/__pycache__/ParserRuleContext.cpython-310.pyc,,
34
+ antlr4/__pycache__/PredictionContext.cpython-310.pyc,,
35
+ antlr4/__pycache__/Recognizer.cpython-310.pyc,,
36
+ antlr4/__pycache__/RuleContext.cpython-310.pyc,,
37
+ antlr4/__pycache__/StdinStream.cpython-310.pyc,,
38
+ antlr4/__pycache__/Token.cpython-310.pyc,,
39
+ antlr4/__pycache__/TokenStreamRewriter.cpython-310.pyc,,
40
+ antlr4/__pycache__/Utils.cpython-310.pyc,,
41
+ antlr4/__pycache__/__init__.cpython-310.pyc,,
42
+ antlr4/atn/ATN.py,sha256=LYE8kT-D8FpUd5fpOtyOLqvXLFkUSa83TVFowhCWAiY,5789
43
+ antlr4/atn/ATNConfig.py,sha256=tNdIC6_GrxXllHBx3npAWyDh6KrohLZDV_XyPrydRMY,6565
44
+ antlr4/atn/ATNConfigSet.py,sha256=qRzVsBeMqk2txjG3DrGptwF6Vb2hHC5w3umkSL0GNJw,8312
45
+ antlr4/atn/ATNDeserializationOptions.py,sha256=lUV_bGW6mxj7t20esda5Yv-X9m-U_x1-0xaLifhXIPo,1010
46
+ antlr4/atn/ATNDeserializer.py,sha256=aYLDDtQ-wyo3gId6A-wD1E3QmpfrPZlXxj4_IDm-mUY,22252
47
+ antlr4/atn/ATNSimulator.py,sha256=mDc-G3GF3kSeqpfGDabUOLJ0WLVTqibxZlkvXQYmBRk,2298
48
+ antlr4/atn/ATNState.py,sha256=NbndISWUwFDF_vuBfbTiZZ8GPHoQa6UXdqbD-yjJE7c,7663
49
+ antlr4/atn/ATNType.py,sha256=xgv8AMVU7tc07U73_hRTm1AiZ7MvGhoaP5fTiOrrCGg,422
50
+ antlr4/atn/LexerATNSimulator.py,sha256=kYXRwUvHptSRU8T_K9pSrGlCk9YypWeHlAcjgry1VVo,25465
51
+ antlr4/atn/LexerAction.py,sha256=KUeJwKekBch0m1poSPskHIh-15dcKAG4lR7zlq98tzc,10014
52
+ antlr4/atn/LexerActionExecutor.py,sha256=7rlg17THcwLsuTmh7NsLrTbRH4DTrm8qIdW9_235CEc,6420
53
+ antlr4/atn/ParserATNSimulator.py,sha256=IKCzsDLcznROSVojU-daAygKr3svl0DmK5DhkUllASY,80365
54
+ antlr4/atn/PredictionMode.py,sha256=i8B7MULA7v-qbXeCY_xp6sgi21kHM6kybqIrG6rSrro,22486
55
+ antlr4/atn/SemanticContext.py,sha256=ds0TmM4qenb0LN-rl2Fp_N_xB959abN67I19EF6rs8o,10495
56
+ antlr4/atn/Transition.py,sha256=ZAsEFpa5I_n-zxD6U-DauM5_33jFK65x3PWu6-NW0RA,8762
57
+ antlr4/atn/__init__.py,sha256=gsnQdtTH8IUgCiVUpQfzhxx2pFRvksW76SjwIk3fYSk,28
58
+ antlr4/atn/__pycache__/ATN.cpython-310.pyc,,
59
+ antlr4/atn/__pycache__/ATNConfig.cpython-310.pyc,,
60
+ antlr4/atn/__pycache__/ATNConfigSet.cpython-310.pyc,,
61
+ antlr4/atn/__pycache__/ATNDeserializationOptions.cpython-310.pyc,,
62
+ antlr4/atn/__pycache__/ATNDeserializer.cpython-310.pyc,,
63
+ antlr4/atn/__pycache__/ATNSimulator.cpython-310.pyc,,
64
+ antlr4/atn/__pycache__/ATNState.cpython-310.pyc,,
65
+ antlr4/atn/__pycache__/ATNType.cpython-310.pyc,,
66
+ antlr4/atn/__pycache__/LexerATNSimulator.cpython-310.pyc,,
67
+ antlr4/atn/__pycache__/LexerAction.cpython-310.pyc,,
68
+ antlr4/atn/__pycache__/LexerActionExecutor.cpython-310.pyc,,
69
+ antlr4/atn/__pycache__/ParserATNSimulator.cpython-310.pyc,,
70
+ antlr4/atn/__pycache__/PredictionMode.cpython-310.pyc,,
71
+ antlr4/atn/__pycache__/SemanticContext.cpython-310.pyc,,
72
+ antlr4/atn/__pycache__/Transition.cpython-310.pyc,,
73
+ antlr4/atn/__pycache__/__init__.cpython-310.pyc,,
74
+ antlr4/dfa/DFA.py,sha256=weIh0uaRfakP12mFvHo7U0tqO3GONV3-nHFkc2xk-ZE,5388
75
+ antlr4/dfa/DFASerializer.py,sha256=1st_HO85yXLYy7gInTEnkztgA6am4CT-yReh-mazp9E,2518
76
+ antlr4/dfa/DFAState.py,sha256=R7JwKf0GtAEs9J_MD_Y0WKcuzdt0BVX1sow-uv9yFYc,5583
77
+ antlr4/dfa/__init__.py,sha256=gsnQdtTH8IUgCiVUpQfzhxx2pFRvksW76SjwIk3fYSk,28
78
+ antlr4/dfa/__pycache__/DFA.cpython-310.pyc,,
79
+ antlr4/dfa/__pycache__/DFASerializer.cpython-310.pyc,,
80
+ antlr4/dfa/__pycache__/DFAState.cpython-310.pyc,,
81
+ antlr4/dfa/__pycache__/__init__.cpython-310.pyc,,
82
+ antlr4/error/DiagnosticErrorListener.py,sha256=EwS2D_Ox6CmvCa16NPJ9ud4QYPHmlPXt6-Wdn1h5Kg8,4430
83
+ antlr4/error/ErrorListener.py,sha256=yP_MDguol4Cj0_pEPyNzeH3v4ZvUjW5iwDjhYTVAHbE,2722
84
+ antlr4/error/ErrorStrategy.py,sha256=0mhzFL57ZVnjKkGrtadta93Zm3NXdF-HW10DVD07VXs,30391
85
+ antlr4/error/Errors.py,sha256=hlKngclBfXdkDiAymhYsvh2OCXlvmHM2kTl_A1vgp-w,6759
86
+ antlr4/error/__init__.py,sha256=gsnQdtTH8IUgCiVUpQfzhxx2pFRvksW76SjwIk3fYSk,28
87
+ antlr4/error/__pycache__/DiagnosticErrorListener.cpython-310.pyc,,
88
+ antlr4/error/__pycache__/ErrorListener.cpython-310.pyc,,
89
+ antlr4/error/__pycache__/ErrorStrategy.cpython-310.pyc,,
90
+ antlr4/error/__pycache__/Errors.cpython-310.pyc,,
91
+ antlr4/error/__pycache__/__init__.cpython-310.pyc,,
92
+ antlr4/tree/Chunk.py,sha256=oCIZjolLq9xkxtVDROEDxfUGgndcEnsDW0eUmLM7Gpk,695
93
+ antlr4/tree/ParseTreeMatch.py,sha256=Dc6GVWSUqoIAFXUaUZqUwCUlZfTcgUbGLGzNf6QxQvE,4485
94
+ antlr4/tree/ParseTreePattern.py,sha256=ASBNaQORh3f7f8KnFeZJC2yWFFx4uQlxvC2Y55ifhY0,2825
95
+ antlr4/tree/ParseTreePatternMatcher.py,sha256=HtE9yi1Urr2QPLGLJDBvr0lxv6bjuj9CHl-4clahSe8,16388
96
+ antlr4/tree/RuleTagToken.py,sha256=n4zXcmrrfsGyl91pj5ZYcc_CeKMhPrvYkUdppgMBpbY,2022
97
+ antlr4/tree/TokenTagToken.py,sha256=S3o3DJhfzL5kpClxsKyI-Il-xvuuZQiBAIsLCKFjRHo,1576
98
+ antlr4/tree/Tree.py,sha256=ZI7U_5IxBLm_IrnfJOtb12BCPIWyzfeZtLnhHKVVZIw,5572
99
+ antlr4/tree/Trees.py,sha256=JtQ7cYWmKwI9TIBP6y9XIgjlNS4mYjv3ARwOfwWc5Vg,3968
100
+ antlr4/tree/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
101
+ antlr4/tree/__pycache__/Chunk.cpython-310.pyc,,
102
+ antlr4/tree/__pycache__/ParseTreeMatch.cpython-310.pyc,,
103
+ antlr4/tree/__pycache__/ParseTreePattern.cpython-310.pyc,,
104
+ antlr4/tree/__pycache__/ParseTreePatternMatcher.cpython-310.pyc,,
105
+ antlr4/tree/__pycache__/RuleTagToken.cpython-310.pyc,,
106
+ antlr4/tree/__pycache__/TokenTagToken.cpython-310.pyc,,
107
+ antlr4/tree/__pycache__/Tree.cpython-310.pyc,,
108
+ antlr4/tree/__pycache__/Trees.cpython-310.pyc,,
109
+ antlr4/tree/__pycache__/__init__.cpython-310.pyc,,
110
+ antlr4/xpath/XPath.py,sha256=O9s4-EDvLbAbYytP_bae9Z2khLl0iAtRzPAtVbuWUM4,13015
111
+ antlr4/xpath/__init__.py,sha256=gsnQdtTH8IUgCiVUpQfzhxx2pFRvksW76SjwIk3fYSk,28
112
+ antlr4/xpath/__pycache__/XPath.cpython-310.pyc,,
113
+ antlr4/xpath/__pycache__/__init__.cpython-310.pyc,,
114
+ antlr4_python3_runtime-4.9.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
115
+ antlr4_python3_runtime-4.9.3.dist-info/METADATA,sha256=huEAVSqtQkF-nXL3LBE0uElH1B5hGyPPX_41eQ03Cik,403
116
+ antlr4_python3_runtime-4.9.3.dist-info/RECORD,,
117
+ antlr4_python3_runtime-4.9.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
118
+ antlr4_python3_runtime-4.9.3.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
119
+ antlr4_python3_runtime-4.9.3.dist-info/top_level.txt,sha256=OsoZsh9bb30wgXb2zBUjdDwYg46MfV-RVZA6Pk8pcB0,7
parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/REQUESTED ADDED
File without changes
parrot/lib/python3.10/site-packages/antlr4_python3_runtime-4.9.3.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ antlr4
parrot/lib/python3.10/site-packages/mdit_py_plugins/anchors/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .index import anchors_plugin # noqa F401
parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .index import attrs_plugin # noqa: F401
parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (215 Bytes). View file
 
parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/index.cpython-310.pyc ADDED
Binary file (3.79 kB). View file
 
parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/__pycache__/parse.cpython-310.pyc ADDED
Binary file (7.05 kB). View file
 
parrot/lib/python3.10/site-packages/mdit_py_plugins/attrs/index.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+
3
+ from markdown_it import MarkdownIt
4
+ from markdown_it.rules_inline import StateInline
5
+ from markdown_it.token import Token
6
+
7
+ from .parse import ParseError, parse
8
+
9
+
10
+ def attrs_plugin(
11
+ md: MarkdownIt,
12
+ *,
13
+ after=("image", "code_inline", "link_close", "span_close"),
14
+ spans=False,
15
+ span_after="link",
16
+ ):
17
+ """Parse inline attributes that immediately follow certain inline elements::
18
+
19
+ ![alt](https://image.com){#id .a b=c}
20
+
21
+ This syntax is inspired by
22
+ `Djot spans
23
+ <https://htmlpreview.github.io/?https://github.com/jgm/djot/blob/master/doc/syntax.html#inline-attributes>`_.
24
+
25
+ Inside the curly braces, the following syntax is possible:
26
+
27
+ - `.foo` specifies foo as a class.
28
+ Multiple classes may be given in this way; they will be combined.
29
+ - `#foo` specifies foo as an identifier.
30
+ An element may have only one identifier;
31
+ if multiple identifiers are given, the last one is used.
32
+ - `key="value"` or `key=value` specifies a key-value attribute.
33
+ Quotes are not needed when the value consists entirely of
34
+ ASCII alphanumeric characters or `_` or `:` or `-`.
35
+ Backslash escapes may be used inside quoted values.
36
+ - `%` begins a comment, which ends with the next `%` or the end of the attribute (`}`).
37
+
38
+ Multiple attribute blocks are merged.
39
+
40
+ :param md: The MarkdownIt instance to modify.
41
+ :param after: The names of inline elements after which attributes may be specified.
42
+ This plugin does not support attributes after emphasis, strikethrough or text elements,
43
+ which all require post-parse processing.
44
+ :param spans: If True, also parse attributes after spans of text, encapsulated by `[]`.
45
+ Note Markdown link references take precedence over this syntax.
46
+ :param span_after: The name of an inline rule after which spans may be specified.
47
+ """
48
+
49
+ def _attr_rule(state: StateInline, silent: bool):
50
+ if state.pending or not state.tokens:
51
+ return False
52
+ token = state.tokens[-1]
53
+ if token.type not in after:
54
+ return False
55
+ try:
56
+ new_pos, attrs = parse(state.src[state.pos :])
57
+ except ParseError:
58
+ return False
59
+ token_index = _find_opening(state.tokens, len(state.tokens) - 1)
60
+ if token_index is None:
61
+ return False
62
+ state.pos += new_pos + 1
63
+ if not silent:
64
+ attr_token = state.tokens[token_index]
65
+ if "class" in attrs and "class" in token.attrs:
66
+ attrs["class"] = f"{attr_token.attrs['class']} {attrs['class']}"
67
+ attr_token.attrs.update(attrs)
68
+ return True
69
+
70
+ if spans:
71
+ md.inline.ruler.after(span_after, "span", _span_rule)
72
+ md.inline.ruler.push("attr", _attr_rule)
73
+
74
+
75
+ def _find_opening(tokens: List[Token], index: int) -> Optional[int]:
76
+ """Find the opening token index, if the token is closing."""
77
+ if tokens[index].nesting != -1:
78
+ return index
79
+ level = 0
80
+ while index >= 0:
81
+ level += tokens[index].nesting
82
+ if level == 0:
83
+ return index
84
+ index -= 1
85
+ return None
86
+
87
+
88
+ def _span_rule(state: StateInline, silent: bool):
89
+ if state.srcCharCode[state.pos] != 0x5B: # /* [ */
90
+ return False
91
+
92
+ maximum = state.posMax
93
+ labelStart = state.pos + 1
94
+ labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, False)
95
+
96
+ # parser failed to find ']', so it's not a valid span
97
+ if labelEnd < 0:
98
+ return False
99
+
100
+ pos = labelEnd + 1
101
+
102
+ # check not at end of inline
103
+ if pos >= maximum:
104
+ return False
105
+
106
+ try:
107
+ new_pos, attrs = parse(state.src[pos:])
108
+ except ParseError:
109
+ return False
110
+
111
+ pos += new_pos + 1
112
+
113
+ if not silent:
114
+ state.pos = labelStart
115
+ state.posMax = labelEnd
116
+ token = state.push("span_open", "span", 1)
117
+ token.attrs = attrs
118
+ state.md.inline.tokenize(state)
119
+ token = state.push("span_close", "span", -1)
120
+
121
+ state.pos = pos
122
+ state.posMax = maximum
123
+ return True
parrot/lib/python3.10/site-packages/mdit_py_plugins/deflist/__pycache__/index.cpython-310.pyc ADDED
Binary file (3.78 kB). View file
 
parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/LICENSE ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2014-2015 Vitaly Puzrin, Alex Kocharin.
2
+
3
+ Permission is hereby granted, free of charge, to any person
4
+ obtaining a copy of this software and associated documentation
5
+ files (the "Software"), to deal in the Software without
6
+ restriction, including without limitation the rights to use,
7
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
8
+ copies of the Software, and to permit persons to whom the
9
+ Software is furnished to do so, subject to the following
10
+ conditions:
11
+
12
+ The above copyright notice and this permission notice shall be
13
+ included in all copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
17
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
19
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22
+ OTHER DEALINGS IN THE SOFTWARE.
parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (221 Bytes). View file
 
parrot/lib/python3.10/site-packages/mdit_py_plugins/footnote/index.py ADDED
@@ -0,0 +1,430 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Process footnotes
2
+ #
3
+
4
+ from typing import List, Optional
5
+
6
+ from markdown_it import MarkdownIt
7
+ from markdown_it.common.utils import isSpace
8
+ from markdown_it.helpers import parseLinkLabel
9
+ from markdown_it.rules_block import StateBlock
10
+ from markdown_it.rules_inline import StateInline
11
+ from markdown_it.token import Token
12
+
13
+
14
+ def footnote_plugin(md: MarkdownIt):
15
+ """Plugin ported from
16
+ `markdown-it-footnote <https://github.com/markdown-it/markdown-it-footnote>`__.
17
+
18
+ It is based on the
19
+ `pandoc definition <http://johnmacfarlane.net/pandoc/README.html#footnotes>`__:
20
+
21
+ .. code-block:: md
22
+
23
+ Normal footnote:
24
+
25
+ Here is a footnote reference,[^1] and another.[^longnote]
26
+
27
+ [^1]: Here is the footnote.
28
+
29
+ [^longnote]: Here's one with multiple blocks.
30
+
31
+ Subsequent paragraphs are indented to show that they
32
+ belong to the previous footnote.
33
+
34
+ """
35
+ md.block.ruler.before(
36
+ "reference", "footnote_def", footnote_def, {"alt": ["paragraph", "reference"]}
37
+ )
38
+ md.inline.ruler.after("image", "footnote_inline", footnote_inline)
39
+ md.inline.ruler.after("footnote_inline", "footnote_ref", footnote_ref)
40
+ md.core.ruler.after("inline", "footnote_tail", footnote_tail)
41
+
42
+ md.add_render_rule("footnote_ref", render_footnote_ref)
43
+ md.add_render_rule("footnote_block_open", render_footnote_block_open)
44
+ md.add_render_rule("footnote_block_close", render_footnote_block_close)
45
+ md.add_render_rule("footnote_open", render_footnote_open)
46
+ md.add_render_rule("footnote_close", render_footnote_close)
47
+ md.add_render_rule("footnote_anchor", render_footnote_anchor)
48
+
49
+ # helpers (only used in other rules, no tokens are attached to those)
50
+ md.add_render_rule("footnote_caption", render_footnote_caption)
51
+ md.add_render_rule("footnote_anchor_name", render_footnote_anchor_name)
52
+
53
+
54
+ # ## RULES ##
55
+
56
+
57
+ def footnote_def(state: StateBlock, startLine: int, endLine: int, silent: bool):
58
+ """Process footnote block definition"""
59
+
60
+ start = state.bMarks[startLine] + state.tShift[startLine]
61
+ maximum = state.eMarks[startLine]
62
+
63
+ # line should be at least 5 chars - "[^x]:"
64
+ if start + 4 > maximum:
65
+ return False
66
+
67
+ if state.srcCharCode[start] != 0x5B: # /* [ */
68
+ return False
69
+ if state.srcCharCode[start + 1] != 0x5E: # /* ^ */
70
+ return False
71
+
72
+ pos = start + 2
73
+ while pos < maximum:
74
+ if state.srcCharCode[pos] == 0x20:
75
+ return False
76
+ if state.srcCharCode[pos] == 0x5D: # /* ] */
77
+ break
78
+ pos += 1
79
+
80
+ if pos == start + 2: # no empty footnote labels
81
+ return False
82
+ pos += 1
83
+ if pos >= maximum or state.srcCharCode[pos] != 0x3A: # /* : */
84
+ return False
85
+ if silent:
86
+ return True
87
+ pos += 1
88
+
89
+ label = state.src[start + 2 : pos - 2]
90
+ state.env.setdefault("footnotes", {}).setdefault("refs", {})[":" + label] = -1
91
+
92
+ open_token = Token("footnote_reference_open", "", 1)
93
+ open_token.meta = {"label": label}
94
+ open_token.level = state.level
95
+ state.level += 1
96
+ state.tokens.append(open_token)
97
+
98
+ oldBMark = state.bMarks[startLine]
99
+ oldTShift = state.tShift[startLine]
100
+ oldSCount = state.sCount[startLine]
101
+ oldParentType = state.parentType
102
+
103
+ posAfterColon = pos
104
+ initial = offset = (
105
+ state.sCount[startLine]
106
+ + pos
107
+ - (state.bMarks[startLine] + state.tShift[startLine])
108
+ )
109
+
110
+ while pos < maximum:
111
+ ch = state.srcCharCode[pos]
112
+
113
+ if isSpace(ch):
114
+ if ch == 0x09:
115
+ offset += 4 - offset % 4
116
+ else:
117
+ offset += 1
118
+
119
+ else:
120
+ break
121
+
122
+ pos += 1
123
+
124
+ state.tShift[startLine] = pos - posAfterColon
125
+ state.sCount[startLine] = offset - initial
126
+
127
+ state.bMarks[startLine] = posAfterColon
128
+ state.blkIndent += 4
129
+ state.parentType = "footnote"
130
+
131
+ if state.sCount[startLine] < state.blkIndent:
132
+ state.sCount[startLine] += state.blkIndent
133
+
134
+ state.md.block.tokenize(state, startLine, endLine, True)
135
+
136
+ state.parentType = oldParentType
137
+ state.blkIndent -= 4
138
+ state.tShift[startLine] = oldTShift
139
+ state.sCount[startLine] = oldSCount
140
+ state.bMarks[startLine] = oldBMark
141
+
142
+ open_token.map = [startLine, state.line]
143
+
144
+ token = Token("footnote_reference_close", "", -1)
145
+ state.level -= 1
146
+ token.level = state.level
147
+ state.tokens.append(token)
148
+
149
+ return True
150
+
151
+
152
+ def footnote_inline(state: StateInline, silent: bool):
153
+ """Process inline footnotes (^[...])"""
154
+
155
+ maximum = state.posMax
156
+ start = state.pos
157
+
158
+ if start + 2 >= maximum:
159
+ return False
160
+ if state.srcCharCode[start] != 0x5E: # /* ^ */
161
+ return False
162
+ if state.srcCharCode[start + 1] != 0x5B: # /* [ */
163
+ return False
164
+
165
+ labelStart = start + 2
166
+ labelEnd = parseLinkLabel(state, start + 1)
167
+
168
+ # parser failed to find ']', so it's not a valid note
169
+ if labelEnd < 0:
170
+ return False
171
+
172
+ # We found the end of the link, and know for a fact it's a valid link
173
+ # so all that's left to do is to call tokenizer.
174
+ #
175
+ if not silent:
176
+ refs = state.env.setdefault("footnotes", {}).setdefault("list", {})
177
+ footnoteId = len(refs)
178
+
179
+ tokens: List[Token] = []
180
+ state.md.inline.parse(
181
+ state.src[labelStart:labelEnd], state.md, state.env, tokens
182
+ )
183
+
184
+ token = state.push("footnote_ref", "", 0)
185
+ token.meta = {"id": footnoteId}
186
+
187
+ refs[footnoteId] = {"content": state.src[labelStart:labelEnd], "tokens": tokens}
188
+
189
+ state.pos = labelEnd + 1
190
+ state.posMax = maximum
191
+ return True
192
+
193
+
194
+ def footnote_ref(state: StateInline, silent: bool):
195
+ """Process footnote references ([^...])"""
196
+
197
+ maximum = state.posMax
198
+ start = state.pos
199
+
200
+ # should be at least 4 chars - "[^x]"
201
+ if start + 3 > maximum:
202
+ return False
203
+
204
+ if "footnotes" not in state.env or "refs" not in state.env["footnotes"]:
205
+ return False
206
+ if state.srcCharCode[start] != 0x5B: # /* [ */
207
+ return False
208
+ if state.srcCharCode[start + 1] != 0x5E: # /* ^ */
209
+ return False
210
+
211
+ pos = start + 2
212
+ while pos < maximum:
213
+ if state.srcCharCode[pos] == 0x20:
214
+ return False
215
+ if state.srcCharCode[pos] == 0x0A:
216
+ return False
217
+ if state.srcCharCode[pos] == 0x5D: # /* ] */
218
+ break
219
+ pos += 1
220
+
221
+ if pos == start + 2: # no empty footnote labels
222
+ return False
223
+ if pos >= maximum:
224
+ return False
225
+ pos += 1
226
+
227
+ label = state.src[start + 2 : pos - 1]
228
+ if (":" + label) not in state.env["footnotes"]["refs"]:
229
+ return False
230
+
231
+ if not silent:
232
+ if "list" not in state.env["footnotes"]:
233
+ state.env["footnotes"]["list"] = {}
234
+
235
+ if state.env["footnotes"]["refs"][":" + label] < 0:
236
+ footnoteId = len(state.env["footnotes"]["list"])
237
+ state.env["footnotes"]["list"][footnoteId] = {"label": label, "count": 0}
238
+ state.env["footnotes"]["refs"][":" + label] = footnoteId
239
+ else:
240
+ footnoteId = state.env["footnotes"]["refs"][":" + label]
241
+
242
+ footnoteSubId = state.env["footnotes"]["list"][footnoteId]["count"]
243
+ state.env["footnotes"]["list"][footnoteId]["count"] += 1
244
+
245
+ token = state.push("footnote_ref", "", 0)
246
+ token.meta = {"id": footnoteId, "subId": footnoteSubId, "label": label}
247
+
248
+ state.pos = pos
249
+ state.posMax = maximum
250
+ return True
251
+
252
+
253
+ def footnote_tail(state: StateBlock, *args, **kwargs):
254
+ """Post-processing step, to move footnote tokens to end of the token stream.
255
+
256
+ Also removes un-referenced tokens.
257
+ """
258
+
259
+ insideRef = False
260
+ refTokens = {}
261
+
262
+ if "footnotes" not in state.env:
263
+ return
264
+
265
+ current: List[Token] = []
266
+ tok_filter = []
267
+ for tok in state.tokens:
268
+
269
+ if tok.type == "footnote_reference_open":
270
+ insideRef = True
271
+ current = []
272
+ currentLabel = tok.meta["label"]
273
+ tok_filter.append(False)
274
+ continue
275
+
276
+ if tok.type == "footnote_reference_close":
277
+ insideRef = False
278
+ # prepend ':' to avoid conflict with Object.prototype members
279
+ refTokens[":" + currentLabel] = current
280
+ tok_filter.append(False)
281
+ continue
282
+
283
+ if insideRef:
284
+ current.append(tok)
285
+
286
+ tok_filter.append((not insideRef))
287
+
288
+ state.tokens = [t for t, f in zip(state.tokens, tok_filter) if f]
289
+
290
+ if "list" not in state.env.get("footnotes", {}):
291
+ return
292
+ foot_list = state.env["footnotes"]["list"]
293
+
294
+ token = Token("footnote_block_open", "", 1)
295
+ state.tokens.append(token)
296
+
297
+ for i, foot_note in foot_list.items():
298
+ token = Token("footnote_open", "", 1)
299
+ token.meta = {"id": i, "label": foot_note.get("label", None)}
300
+ # TODO propagate line positions of original foot note
301
+ # (but don't store in token.map, because this is used for scroll syncing)
302
+ state.tokens.append(token)
303
+
304
+ if "tokens" in foot_note:
305
+ tokens = []
306
+
307
+ token = Token("paragraph_open", "p", 1)
308
+ token.block = True
309
+ tokens.append(token)
310
+
311
+ token = Token("inline", "", 0)
312
+ token.children = foot_note["tokens"]
313
+ token.content = foot_note["content"]
314
+ tokens.append(token)
315
+
316
+ token = Token("paragraph_close", "p", -1)
317
+ token.block = True
318
+ tokens.append(token)
319
+
320
+ elif "label" in foot_note:
321
+ tokens = refTokens[":" + foot_note["label"]]
322
+
323
+ state.tokens.extend(tokens)
324
+ if state.tokens[len(state.tokens) - 1].type == "paragraph_close":
325
+ lastParagraph: Optional[Token] = state.tokens.pop()
326
+ else:
327
+ lastParagraph = None
328
+
329
+ t = (
330
+ foot_note["count"]
331
+ if (("count" in foot_note) and (foot_note["count"] > 0))
332
+ else 1
333
+ )
334
+ j = 0
335
+ while j < t:
336
+ token = Token("footnote_anchor", "", 0)
337
+ token.meta = {"id": i, "subId": j, "label": foot_note.get("label", None)}
338
+ state.tokens.append(token)
339
+ j += 1
340
+
341
+ if lastParagraph:
342
+ state.tokens.append(lastParagraph)
343
+
344
+ token = Token("footnote_close", "", -1)
345
+ state.tokens.append(token)
346
+
347
+ token = Token("footnote_block_close", "", -1)
348
+ state.tokens.append(token)
349
+
350
+
351
+ ########################################
352
+ # Renderer partials
353
+
354
+
355
+ def render_footnote_anchor_name(self, tokens, idx, options, env):
356
+ n = str(tokens[idx].meta["id"] + 1)
357
+ prefix = ""
358
+
359
+ doc_id = env.get("docId", None)
360
+ if isinstance(doc_id, str):
361
+ prefix = f"-{doc_id}-"
362
+
363
+ return prefix + n
364
+
365
+
366
+ def render_footnote_caption(self, tokens, idx, options, env):
367
+ n = str(tokens[idx].meta["id"] + 1)
368
+
369
+ if tokens[idx].meta.get("subId", -1) > 0:
370
+ n += ":" + str(tokens[idx].meta["subId"])
371
+
372
+ return "[" + n + "]"
373
+
374
+
375
+ def render_footnote_ref(self, tokens, idx, options, env):
376
+ ident = self.rules["footnote_anchor_name"](tokens, idx, options, env)
377
+ caption = self.rules["footnote_caption"](tokens, idx, options, env)
378
+ refid = ident
379
+
380
+ if tokens[idx].meta.get("subId", -1) > 0:
381
+ refid += ":" + str(tokens[idx].meta["subId"])
382
+
383
+ return (
384
+ '<sup class="footnote-ref"><a href="#fn'
385
+ + ident
386
+ + '" id="fnref'
387
+ + refid
388
+ + '">'
389
+ + caption
390
+ + "</a></sup>"
391
+ )
392
+
393
+
394
+ def render_footnote_block_open(self, tokens, idx, options, env):
395
+ return (
396
+ (
397
+ '<hr class="footnotes-sep" />\n'
398
+ if options.xhtmlOut
399
+ else '<hr class="footnotes-sep">\n'
400
+ )
401
+ + '<section class="footnotes">\n'
402
+ + '<ol class="footnotes-list">\n'
403
+ )
404
+
405
+
406
+ def render_footnote_block_close(self, tokens, idx, options, env):
407
+ return "</ol>\n</section>\n"
408
+
409
+
410
+ def render_footnote_open(self, tokens, idx, options, env):
411
+ ident = self.rules["footnote_anchor_name"](tokens, idx, options, env)
412
+
413
+ if tokens[idx].meta.get("subId", -1) > 0:
414
+ ident += ":" + tokens[idx].meta["subId"]
415
+
416
+ return '<li id="fn' + ident + '" class="footnote-item">'
417
+
418
+
419
+ def render_footnote_close(self, tokens, idx, options, env):
420
+ return "</li>\n"
421
+
422
+
423
+ def render_footnote_anchor(self, tokens, idx, options, env):
424
+ ident = self.rules["footnote_anchor_name"](tokens, idx, options, env)
425
+
426
+ if tokens[idx].meta["subId"] > 0:
427
+ ident += ":" + str(tokens[idx].meta["subId"])
428
+
429
+ # ↩ with escape code to prevent display as Apple Emoji on iOS
430
+ return ' <a href="#fnref' + ident + '" class="footnote-backref">\u21a9\uFE0E</a>'
parrot/lib/python3.10/site-packages/mdit_py_plugins/front_matter/port.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ - package: markdown-it-front-matter
2
+ commit: b404f5d8fd536e7e9ddb276267ae0b6f76e9cf9d
3
+ date: Feb 7, 2020
4
+ version: 0.2.1
parrot/lib/python3.10/site-packages/pyarrow/__init__.pxd ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from libcpp.memory cimport shared_ptr
19
+ from pyarrow.includes.libarrow cimport (CArray, CBuffer, CDataType,
20
+ CField, CRecordBatch, CSchema,
21
+ CTable, CTensor, CSparseCOOTensor,
22
+ CSparseCSRMatrix, CSparseCSCMatrix,
23
+ CSparseCSFTensor)
24
+
25
+ cdef extern from "arrow/python/pyarrow.h" namespace "arrow::py":
26
+ cdef int import_pyarrow() except -1
27
+ cdef object wrap_buffer(const shared_ptr[CBuffer]& buffer)
28
+ cdef object wrap_data_type(const shared_ptr[CDataType]& type)
29
+ cdef object wrap_field(const shared_ptr[CField]& field)
30
+ cdef object wrap_schema(const shared_ptr[CSchema]& schema)
31
+ cdef object wrap_array(const shared_ptr[CArray]& sp_array)
32
+ cdef object wrap_tensor(const shared_ptr[CTensor]& sp_tensor)
33
+ cdef object wrap_sparse_tensor_coo(
34
+ const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor)
35
+ cdef object wrap_sparse_tensor_csr(
36
+ const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor)
37
+ cdef object wrap_sparse_tensor_csc(
38
+ const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor)
39
+ cdef object wrap_sparse_tensor_csf(
40
+ const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor)
41
+ cdef object wrap_table(const shared_ptr[CTable]& ctable)
42
+ cdef object wrap_batch(const shared_ptr[CRecordBatch]& cbatch)
parrot/lib/python3.10/site-packages/pyarrow/_acero.pxd ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from pyarrow.lib cimport *
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libarrow cimport *
23
+ from pyarrow.includes.libarrow_acero cimport *
24
+
25
+
26
+ cdef class ExecNodeOptions(_Weakrefable):
27
+ cdef:
28
+ shared_ptr[CExecNodeOptions] wrapped
29
+
30
+ cdef void init(self, const shared_ptr[CExecNodeOptions]& sp)
31
+ cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil
32
+
33
+
34
+ cdef class Declaration(_Weakrefable):
35
+
36
+ cdef:
37
+ CDeclaration decl
38
+
39
+ cdef void init(self, const CDeclaration& c_decl)
40
+
41
+ @staticmethod
42
+ cdef wrap(const CDeclaration& c_decl)
43
+
44
+ cdef inline CDeclaration unwrap(self) nogil
parrot/lib/python3.10/site-packages/pyarrow/_compute.pxd ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from pyarrow.lib cimport *
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libarrow cimport *
23
+
24
+ cdef class UdfContext(_Weakrefable):
25
+ cdef:
26
+ CUdfContext c_context
27
+
28
+ cdef void init(self, const CUdfContext& c_context)
29
+
30
+
31
+ cdef class FunctionOptions(_Weakrefable):
32
+ cdef:
33
+ shared_ptr[CFunctionOptions] wrapped
34
+
35
+ cdef const CFunctionOptions* get_options(self) except NULL
36
+ cdef void init(self, const shared_ptr[CFunctionOptions]& sp)
37
+
38
+ cdef inline shared_ptr[CFunctionOptions] unwrap(self)
39
+
40
+
41
+ cdef class _SortOptions(FunctionOptions):
42
+ pass
43
+
44
+
45
+ cdef CExpression _bind(Expression filter, Schema schema) except *
46
+
47
+
48
+ cdef class Expression(_Weakrefable):
49
+
50
+ cdef:
51
+ CExpression expr
52
+
53
+ cdef void init(self, const CExpression& sp)
54
+
55
+ @staticmethod
56
+ cdef wrap(const CExpression& sp)
57
+
58
+ cdef inline CExpression unwrap(self)
59
+
60
+ @staticmethod
61
+ cdef Expression _expr_or_scalar(object expr)
62
+
63
+
64
+ cdef CExpression _true
65
+
66
+ cdef CFieldRef _ensure_field_ref(value) except *
67
+
68
+ cdef CSortOrder unwrap_sort_order(order) except *
69
+
70
+ cdef CNullPlacement unwrap_null_placement(null_placement) except *
parrot/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ """
19
+ Custom documentation additions for compute functions.
20
+ """
21
+
22
+ function_doc_additions = {}
23
+
24
+ function_doc_additions["filter"] = """
25
+ Examples
26
+ --------
27
+ >>> import pyarrow as pa
28
+ >>> arr = pa.array(["a", "b", "c", None, "e"])
29
+ >>> mask = pa.array([True, False, None, False, True])
30
+ >>> arr.filter(mask)
31
+ <pyarrow.lib.StringArray object at ...>
32
+ [
33
+ "a",
34
+ "e"
35
+ ]
36
+ >>> arr.filter(mask, null_selection_behavior='emit_null')
37
+ <pyarrow.lib.StringArray object at ...>
38
+ [
39
+ "a",
40
+ null,
41
+ "e"
42
+ ]
43
+ """
44
+
45
+ function_doc_additions["mode"] = """
46
+ Examples
47
+ --------
48
+ >>> import pyarrow as pa
49
+ >>> import pyarrow.compute as pc
50
+ >>> arr = pa.array([1, 1, 2, 2, 3, 2, 2, 2])
51
+ >>> modes = pc.mode(arr, 2)
52
+ >>> modes[0]
53
+ <pyarrow.StructScalar: [('mode', 2), ('count', 5)]>
54
+ >>> modes[1]
55
+ <pyarrow.StructScalar: [('mode', 1), ('count', 2)]>
56
+ """
parrot/lib/python3.10/site-packages/pyarrow/_csv.pxd ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from pyarrow.includes.libarrow cimport *
21
+ from pyarrow.lib cimport _Weakrefable
22
+
23
+
24
+ cdef class ConvertOptions(_Weakrefable):
25
+ cdef:
26
+ unique_ptr[CCSVConvertOptions] options
27
+
28
+ @staticmethod
29
+ cdef ConvertOptions wrap(CCSVConvertOptions options)
30
+
31
+
32
+ cdef class ParseOptions(_Weakrefable):
33
+ cdef:
34
+ unique_ptr[CCSVParseOptions] options
35
+ object _invalid_row_handler
36
+
37
+ @staticmethod
38
+ cdef ParseOptions wrap(CCSVParseOptions options)
39
+
40
+
41
+ cdef class ReadOptions(_Weakrefable):
42
+ cdef:
43
+ unique_ptr[CCSVReadOptions] options
44
+ public object encoding
45
+
46
+ @staticmethod
47
+ cdef ReadOptions wrap(CCSVReadOptions options)
48
+
49
+
50
+ cdef class WriteOptions(_Weakrefable):
51
+ cdef:
52
+ unique_ptr[CCSVWriteOptions] options
53
+
54
+ @staticmethod
55
+ cdef WriteOptions wrap(CCSVWriteOptions options)
parrot/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ """Dataset support for ORC file format."""
21
+
22
+ from pyarrow.lib cimport *
23
+ from pyarrow.includes.libarrow cimport *
24
+ from pyarrow.includes.libarrow_dataset cimport *
25
+
26
+ from pyarrow._dataset cimport FileFormat
27
+
28
+
29
+ cdef class OrcFileFormat(FileFormat):
30
+
31
+ def __init__(self):
32
+ self.init(shared_ptr[CFileFormat](new COrcFileFormat()))
33
+
34
+ def equals(self, OrcFileFormat other):
35
+ """
36
+ Parameters
37
+ ----------
38
+ other : pyarrow.dataset.OrcFileFormat
39
+
40
+ Returns
41
+ -------
42
+ True
43
+ """
44
+ return True
45
+
46
+ @property
47
+ def default_extname(self):
48
+ return "orc"
49
+
50
+ def __reduce__(self):
51
+ return OrcFileFormat, tuple()
parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ """Dataset support for Parquet encryption."""
21
+
22
+ from pyarrow.includes.libarrow_dataset_parquet cimport *
23
+ from pyarrow._parquet_encryption cimport *
24
+ from pyarrow._dataset_parquet cimport ParquetFragmentScanOptions, ParquetFileWriteOptions
25
+
26
+
27
+ cdef class ParquetEncryptionConfig(_Weakrefable):
28
+ """
29
+ Core configuration class encapsulating parameters for high-level encryption
30
+ within the Parquet framework.
31
+
32
+ The ParquetEncryptionConfig class serves as a bridge for passing encryption-related
33
+ parameters to the appropriate components within the Parquet library. It maintains references
34
+ to objects that define the encryption strategy, Key Management Service (KMS) configuration,
35
+ and specific encryption configurations for Parquet data.
36
+
37
+ Parameters
38
+ ----------
39
+ crypto_factory : pyarrow.parquet.encryption.CryptoFactory
40
+ Shared pointer to a `CryptoFactory` object. The `CryptoFactory` is responsible for
41
+ creating cryptographic components, such as encryptors and decryptors.
42
+ kms_connection_config : pyarrow.parquet.encryption.KmsConnectionConfig
43
+ Shared pointer to a `KmsConnectionConfig` object. This object holds the configuration
44
+ parameters necessary for connecting to a Key Management Service (KMS).
45
+ encryption_config : pyarrow.parquet.encryption.EncryptionConfiguration
46
+ Shared pointer to an `EncryptionConfiguration` object. This object defines specific
47
+ encryption settings for Parquet data, including the keys assigned to different columns.
48
+
49
+ Raises
50
+ ------
51
+ ValueError
52
+ Raised if `encryption_config` is None.
53
+ """
54
+ cdef:
55
+ shared_ptr[CParquetEncryptionConfig] c_config
56
+
57
+ # Avoid mistakenly creating attributes
58
+ __slots__ = ()
59
+
60
+ def __cinit__(self, CryptoFactory crypto_factory, KmsConnectionConfig kms_connection_config,
61
+ EncryptionConfiguration encryption_config):
62
+
63
+ cdef shared_ptr[CEncryptionConfiguration] c_encryption_config
64
+
65
+ if crypto_factory is None:
66
+ raise ValueError("crypto_factory cannot be None")
67
+
68
+ if kms_connection_config is None:
69
+ raise ValueError("kms_connection_config cannot be None")
70
+
71
+ if encryption_config is None:
72
+ raise ValueError("encryption_config cannot be None")
73
+
74
+ self.c_config.reset(new CParquetEncryptionConfig())
75
+
76
+ c_encryption_config = pyarrow_unwrap_encryptionconfig(
77
+ encryption_config)
78
+
79
+ self.c_config.get().crypto_factory = pyarrow_unwrap_cryptofactory(crypto_factory)
80
+ self.c_config.get().kms_connection_config = pyarrow_unwrap_kmsconnectionconfig(
81
+ kms_connection_config)
82
+ self.c_config.get().encryption_config = c_encryption_config
83
+
84
+ @staticmethod
85
+ cdef wrap(shared_ptr[CParquetEncryptionConfig] c_config):
86
+ cdef ParquetEncryptionConfig python_config = ParquetEncryptionConfig.__new__(ParquetEncryptionConfig)
87
+ python_config.c_config = c_config
88
+ return python_config
89
+
90
+ cdef shared_ptr[CParquetEncryptionConfig] unwrap(self):
91
+ return self.c_config
92
+
93
+
94
+ cdef class ParquetDecryptionConfig(_Weakrefable):
95
+ """
96
+ Core configuration class encapsulating parameters for high-level decryption
97
+ within the Parquet framework.
98
+
99
+ ParquetDecryptionConfig is designed to pass decryption-related parameters to
100
+ the appropriate decryption components within the Parquet library. It holds references to
101
+ objects that define the decryption strategy, Key Management Service (KMS) configuration,
102
+ and specific decryption configurations for reading encrypted Parquet data.
103
+
104
+ Parameters
105
+ ----------
106
+ crypto_factory : pyarrow.parquet.encryption.CryptoFactory
107
+ Shared pointer to a `CryptoFactory` object, pivotal in creating cryptographic
108
+ components for the decryption process.
109
+ kms_connection_config : pyarrow.parquet.encryption.KmsConnectionConfig
110
+ Shared pointer to a `KmsConnectionConfig` object, containing parameters necessary
111
+ for connecting to a Key Management Service (KMS) during decryption.
112
+ decryption_config : pyarrow.parquet.encryption.DecryptionConfiguration
113
+ Shared pointer to a `DecryptionConfiguration` object, specifying decryption settings
114
+ for reading encrypted Parquet data.
115
+
116
+ Raises
117
+ ------
118
+ ValueError
119
+ Raised if `decryption_config` is None.
120
+ """
121
+
122
+ cdef:
123
+ shared_ptr[CParquetDecryptionConfig] c_config
124
+
125
+ # Avoid mistakingly creating attributes
126
+ __slots__ = ()
127
+
128
+ def __cinit__(self, CryptoFactory crypto_factory, KmsConnectionConfig kms_connection_config,
129
+ DecryptionConfiguration decryption_config):
130
+
131
+ cdef shared_ptr[CDecryptionConfiguration] c_decryption_config
132
+
133
+ if decryption_config is None:
134
+ raise ValueError(
135
+ "decryption_config cannot be None")
136
+
137
+ self.c_config.reset(new CParquetDecryptionConfig())
138
+
139
+ c_decryption_config = pyarrow_unwrap_decryptionconfig(
140
+ decryption_config)
141
+
142
+ self.c_config.get().crypto_factory = pyarrow_unwrap_cryptofactory(crypto_factory)
143
+ self.c_config.get().kms_connection_config = pyarrow_unwrap_kmsconnectionconfig(
144
+ kms_connection_config)
145
+ self.c_config.get().decryption_config = c_decryption_config
146
+
147
+ @staticmethod
148
+ cdef wrap(shared_ptr[CParquetDecryptionConfig] c_config):
149
+ cdef ParquetDecryptionConfig python_config = ParquetDecryptionConfig.__new__(ParquetDecryptionConfig)
150
+ python_config.c_config = c_config
151
+ return python_config
152
+
153
+ cdef shared_ptr[CParquetDecryptionConfig] unwrap(self):
154
+ return self.c_config
155
+
156
+
157
+ def set_encryption_config(
158
+ ParquetFileWriteOptions opts not None,
159
+ ParquetEncryptionConfig config not None
160
+ ):
161
+ cdef shared_ptr[CParquetEncryptionConfig] c_config = config.unwrap()
162
+ opts.parquet_options.parquet_encryption_config = c_config
163
+
164
+
165
+ def set_decryption_properties(
166
+ ParquetFragmentScanOptions opts not None,
167
+ FileDecryptionProperties config not None
168
+ ):
169
+ cdef CReaderProperties* reader_props = opts.reader_properties()
170
+ reader_props.file_decryption_properties(config.unwrap())
171
+
172
+
173
+ def set_decryption_config(
174
+ ParquetFragmentScanOptions opts not None,
175
+ ParquetDecryptionConfig config not None
176
+ ):
177
+ cdef shared_ptr[CParquetDecryptionConfig] c_config = config.unwrap()
178
+ opts.parquet_options.parquet_decryption_config = c_config
parrot/lib/python3.10/site-packages/pyarrow/_dlpack.pxi ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ cimport cpython
19
+ from cpython.pycapsule cimport PyCapsule_New
20
+
21
+
22
+ cdef void dlpack_pycapsule_deleter(object dltensor) noexcept:
23
+ cdef DLManagedTensor* dlm_tensor
24
+ cdef PyObject* err_type
25
+ cdef PyObject* err_value
26
+ cdef PyObject* err_traceback
27
+
28
+ # Do nothing if the capsule has been consumed
29
+ if cpython.PyCapsule_IsValid(dltensor, "used_dltensor"):
30
+ return
31
+
32
+ # An exception may be in-flight, we must save it in case
33
+ # we create another one
34
+ cpython.PyErr_Fetch(&err_type, &err_value, &err_traceback)
35
+
36
+ dlm_tensor = <DLManagedTensor*>cpython.PyCapsule_GetPointer(dltensor, 'dltensor')
37
+ if dlm_tensor == NULL:
38
+ cpython.PyErr_WriteUnraisable(dltensor)
39
+ # The deleter can be NULL if there is no way for the caller
40
+ # to provide a reasonable destructor
41
+ elif dlm_tensor.deleter:
42
+ dlm_tensor.deleter(dlm_tensor)
43
+ assert (not cpython.PyErr_Occurred())
44
+
45
+ # Set the error indicator from err_type, err_value, err_traceback
46
+ cpython.PyErr_Restore(err_type, err_value, err_traceback)
parrot/lib/python3.10/site-packages/pyarrow/_feather.pyx ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # ---------------------------------------------------------------------
19
+ # Implement Feather file format
20
+
21
+ # cython: profile=False
22
+ # distutils: language = c++
23
+ # cython: language_level=3
24
+
25
+ from cython.operator cimport dereference as deref
26
+ from pyarrow.includes.common cimport *
27
+ from pyarrow.includes.libarrow cimport *
28
+ from pyarrow.includes.libarrow_feather cimport *
29
+ from pyarrow.lib cimport (check_status, Table, _Weakrefable,
30
+ get_writer, get_reader, pyarrow_wrap_table)
31
+ from pyarrow.lib import tobytes
32
+
33
+
34
+ class FeatherError(Exception):
35
+ pass
36
+
37
+
38
+ def write_feather(Table table, object dest, compression=None,
39
+ compression_level=None, chunksize=None, version=2):
40
+ cdef shared_ptr[COutputStream] sink
41
+ get_writer(dest, &sink)
42
+
43
+ cdef CFeatherProperties properties
44
+ if version == 2:
45
+ properties.version = kFeatherV2Version
46
+ else:
47
+ properties.version = kFeatherV1Version
48
+
49
+ if compression == 'zstd':
50
+ properties.compression = CCompressionType_ZSTD
51
+ elif compression == 'lz4':
52
+ properties.compression = CCompressionType_LZ4_FRAME
53
+ else:
54
+ properties.compression = CCompressionType_UNCOMPRESSED
55
+
56
+ if chunksize is not None:
57
+ properties.chunksize = chunksize
58
+
59
+ if compression_level is not None:
60
+ properties.compression_level = compression_level
61
+
62
+ with nogil:
63
+ check_status(WriteFeather(deref(table.table), sink.get(),
64
+ properties))
65
+
66
+
67
+ cdef class FeatherReader(_Weakrefable):
68
+ cdef:
69
+ shared_ptr[CFeatherReader] reader
70
+
71
+ def __cinit__(self, source, c_bool use_memory_map, c_bool use_threads):
72
+ cdef:
73
+ shared_ptr[CRandomAccessFile] reader
74
+ CIpcReadOptions options = CIpcReadOptions.Defaults()
75
+ options.use_threads = use_threads
76
+
77
+ get_reader(source, use_memory_map, &reader)
78
+ with nogil:
79
+ self.reader = GetResultValue(CFeatherReader.Open(reader, options))
80
+
81
+ @property
82
+ def version(self):
83
+ return self.reader.get().version()
84
+
85
+ def read(self):
86
+ cdef shared_ptr[CTable] sp_table
87
+ with nogil:
88
+ check_status(self.reader.get()
89
+ .Read(&sp_table))
90
+
91
+ return pyarrow_wrap_table(sp_table)
92
+
93
+ def read_indices(self, indices):
94
+ cdef:
95
+ shared_ptr[CTable] sp_table
96
+ vector[int] c_indices
97
+
98
+ for index in indices:
99
+ c_indices.push_back(index)
100
+ with nogil:
101
+ check_status(self.reader.get()
102
+ .Read(c_indices, &sp_table))
103
+
104
+ return pyarrow_wrap_table(sp_table)
105
+
106
+ def read_names(self, names):
107
+ cdef:
108
+ shared_ptr[CTable] sp_table
109
+ vector[c_string] c_names
110
+
111
+ for name in names:
112
+ c_names.push_back(tobytes(name))
113
+ with nogil:
114
+ check_status(self.reader.get()
115
+ .Read(c_names, &sp_table))
116
+
117
+ return pyarrow_wrap_table(sp_table)
parrot/lib/python3.10/site-packages/pyarrow/_fs.pxd ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from pyarrow.includes.common cimport *
21
+ from pyarrow.includes.libarrow_fs cimport *
22
+ from pyarrow.lib import _detect_compression, frombytes, tobytes
23
+ from pyarrow.lib cimport *
24
+
25
+
26
+ cpdef enum FileType:
27
+ NotFound = <int8_t> CFileType_NotFound
28
+ Unknown = <int8_t> CFileType_Unknown
29
+ File = <int8_t> CFileType_File
30
+ Directory = <int8_t> CFileType_Directory
31
+
32
+
33
+ cdef class FileInfo(_Weakrefable):
34
+ cdef:
35
+ CFileInfo info
36
+
37
+ @staticmethod
38
+ cdef wrap(CFileInfo info)
39
+
40
+ cdef inline CFileInfo unwrap(self) nogil
41
+
42
+ @staticmethod
43
+ cdef CFileInfo unwrap_safe(obj)
44
+
45
+
46
+ cdef class FileSelector(_Weakrefable):
47
+ cdef:
48
+ CFileSelector selector
49
+
50
+ @staticmethod
51
+ cdef FileSelector wrap(CFileSelector selector)
52
+
53
+ cdef inline CFileSelector unwrap(self) nogil
54
+
55
+
56
+ cdef class FileSystem(_Weakrefable):
57
+ cdef:
58
+ shared_ptr[CFileSystem] wrapped
59
+ CFileSystem* fs
60
+
61
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
62
+
63
+ @staticmethod
64
+ cdef wrap(const shared_ptr[CFileSystem]& sp)
65
+
66
+ cdef inline shared_ptr[CFileSystem] unwrap(self) nogil
67
+
68
+
69
+ cdef class LocalFileSystem(FileSystem):
70
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
71
+
72
+
73
+ cdef class SubTreeFileSystem(FileSystem):
74
+ cdef:
75
+ CSubTreeFileSystem* subtreefs
76
+
77
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
78
+
79
+
80
+ cdef class _MockFileSystem(FileSystem):
81
+ cdef:
82
+ CMockFileSystem* mockfs
83
+
84
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
85
+
86
+
87
+ cdef class PyFileSystem(FileSystem):
88
+ cdef:
89
+ CPyFileSystem* pyfs
90
+
91
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
parrot/lib/python3.10/site-packages/pyarrow/_gcsfs.pyx ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from cython cimport binding
21
+
22
+ from pyarrow.lib cimport (pyarrow_wrap_metadata,
23
+ pyarrow_unwrap_metadata)
24
+ from pyarrow.lib import frombytes, tobytes, ensure_metadata
25
+ from pyarrow.includes.common cimport *
26
+ from pyarrow.includes.libarrow cimport *
27
+ from pyarrow.includes.libarrow_fs cimport *
28
+ from pyarrow._fs cimport FileSystem, TimePoint_to_ns, PyDateTime_to_TimePoint
29
+
30
+ from datetime import datetime, timedelta, timezone
31
+
32
+
33
+ cdef class GcsFileSystem(FileSystem):
34
+ """
35
+ Google Cloud Storage (GCS) backed FileSystem implementation
36
+
37
+ By default uses the process described in https://google.aip.dev/auth/4110
38
+ to resolve credentials. If not running on Google Cloud Platform (GCP),
39
+ this generally requires the environment variable
40
+ GOOGLE_APPLICATION_CREDENTIALS to point to a JSON file
41
+ containing credentials.
42
+
43
+ Note: GCS buckets are special and the operations available on them may be
44
+ limited or more expensive than expected compared to local file systems.
45
+
46
+ Note: When pickling a GcsFileSystem that uses default credentials, resolution
47
+ credentials are not stored in the serialized data. Therefore, when unpickling
48
+ it is assumed that the necessary credentials are in place for the target
49
+ process.
50
+
51
+ Parameters
52
+ ----------
53
+ anonymous : boolean, default False
54
+ Whether to connect anonymously.
55
+ If true, will not attempt to look up credentials using standard GCP
56
+ configuration methods.
57
+ access_token : str, default None
58
+ GCP access token. If provided, temporary credentials will be fetched by
59
+ assuming this role; also, a `credential_token_expiration` must be
60
+ specified as well.
61
+ target_service_account : str, default None
62
+ An optional service account to try to impersonate when accessing GCS. This
63
+ requires the specified credential user or service account to have the necessary
64
+ permissions.
65
+ credential_token_expiration : datetime, default None
66
+ Expiration for credential generated with an access token. Must be specified
67
+ if `access_token` is specified.
68
+ default_bucket_location : str, default 'US'
69
+ GCP region to create buckets in.
70
+ scheme : str, default 'https'
71
+ GCS connection transport scheme.
72
+ endpoint_override : str, default None
73
+ Override endpoint with a connect string such as "localhost:9000"
74
+ default_metadata : mapping or pyarrow.KeyValueMetadata, default None
75
+ Default metadata for `open_output_stream`. This will be ignored if
76
+ non-empty metadata is passed to `open_output_stream`.
77
+ retry_time_limit : timedelta, default None
78
+ Set the maximum amount of time the GCS client will attempt to retry
79
+ transient errors. Subsecond granularity is ignored.
80
+ project_id : str, default None
81
+ The GCP project identifier to use for creating buckets.
82
+ If not set, the library uses the GOOGLE_CLOUD_PROJECT environment
83
+ variable. Most I/O operations do not need a project id, only applications
84
+ that create new buckets need a project id.
85
+ """
86
+
87
+ cdef:
88
+ CGcsFileSystem* gcsfs
89
+
90
+ def __init__(self, *, bint anonymous=False, access_token=None,
91
+ target_service_account=None, credential_token_expiration=None,
92
+ default_bucket_location='US',
93
+ scheme=None,
94
+ endpoint_override=None,
95
+ default_metadata=None,
96
+ retry_time_limit=None,
97
+ project_id=None):
98
+ cdef:
99
+ CGcsOptions options
100
+ shared_ptr[CGcsFileSystem] wrapped
101
+ double time_limit_seconds
102
+
103
+ # Intentional use of truthiness because empty strings aren't valid and
104
+ # for reconstruction from pickling will give empty strings.
105
+ if anonymous and (target_service_account or access_token):
106
+ raise ValueError(
107
+ 'anonymous option is not compatible with target_service_account and '
108
+ 'access_token'
109
+ )
110
+ elif bool(access_token) != bool(credential_token_expiration):
111
+ raise ValueError(
112
+ 'access_token and credential_token_expiration must be '
113
+ 'specified together'
114
+ )
115
+
116
+ elif anonymous:
117
+ options = CGcsOptions.Anonymous()
118
+ elif access_token:
119
+ if not isinstance(credential_token_expiration, datetime):
120
+ raise ValueError(
121
+ "credential_token_expiration must be a datetime")
122
+ options = CGcsOptions.FromAccessToken(
123
+ tobytes(access_token),
124
+ PyDateTime_to_TimePoint(<PyDateTime_DateTime*>credential_token_expiration))
125
+ else:
126
+ options = CGcsOptions.Defaults()
127
+
128
+ # Target service account requires base credentials so
129
+ # it is not part of the if/else chain above which only
130
+ # handles base credentials.
131
+ if target_service_account:
132
+ options = CGcsOptions.FromImpersonatedServiceAccount(
133
+ options.credentials, tobytes(target_service_account))
134
+
135
+ options.default_bucket_location = tobytes(default_bucket_location)
136
+
137
+ if scheme is not None:
138
+ options.scheme = tobytes(scheme)
139
+ if endpoint_override is not None:
140
+ options.endpoint_override = tobytes(endpoint_override)
141
+ if default_metadata is not None:
142
+ options.default_metadata = pyarrow_unwrap_metadata(
143
+ ensure_metadata(default_metadata))
144
+ if retry_time_limit is not None:
145
+ time_limit_seconds = retry_time_limit.total_seconds()
146
+ options.retry_limit_seconds = time_limit_seconds
147
+ if project_id is not None:
148
+ options.project_id = <c_string>tobytes(project_id)
149
+
150
+ with nogil:
151
+ wrapped = GetResultValue(CGcsFileSystem.Make(options))
152
+
153
+ self.init(<shared_ptr[CFileSystem]> wrapped)
154
+
155
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
156
+ FileSystem.init(self, wrapped)
157
+ self.gcsfs = <CGcsFileSystem*> wrapped.get()
158
+
159
+ def _expiration_datetime_from_options(self):
160
+ expiration_ns = TimePoint_to_ns(
161
+ self.gcsfs.options().credentials.expiration())
162
+ if expiration_ns == 0:
163
+ return None
164
+ return datetime.fromtimestamp(expiration_ns / 1.0e9, timezone.utc)
165
+
166
+ @staticmethod
167
+ @binding(True) # Required for cython < 3
168
+ def _reconstruct(kwargs):
169
+ # __reduce__ doesn't allow passing named arguments directly to the
170
+ # reconstructor, hence this wrapper.
171
+ return GcsFileSystem(**kwargs)
172
+
173
+ def __reduce__(self):
174
+ cdef CGcsOptions opts = self.gcsfs.options()
175
+ service_account = frombytes(opts.credentials.target_service_account())
176
+ expiration_dt = self._expiration_datetime_from_options()
177
+ retry_time_limit = None
178
+ if opts.retry_limit_seconds.has_value():
179
+ retry_time_limit = timedelta(
180
+ seconds=opts.retry_limit_seconds.value())
181
+ project_id = None
182
+ if opts.project_id.has_value():
183
+ project_id = frombytes(opts.project_id.value())
184
+ return (
185
+ GcsFileSystem._reconstruct, (dict(
186
+ access_token=frombytes(opts.credentials.access_token()),
187
+ anonymous=opts.credentials.anonymous(),
188
+ credential_token_expiration=expiration_dt,
189
+ target_service_account=service_account,
190
+ scheme=frombytes(opts.scheme),
191
+ endpoint_override=frombytes(opts.endpoint_override),
192
+ default_bucket_location=frombytes(
193
+ opts.default_bucket_location),
194
+ default_metadata=pyarrow_wrap_metadata(opts.default_metadata),
195
+ retry_time_limit=retry_time_limit,
196
+ project_id=project_id
197
+ ),))
198
+
199
+ @property
200
+ def default_bucket_location(self):
201
+ """
202
+ The GCP location this filesystem will write to.
203
+ """
204
+ return frombytes(self.gcsfs.options().default_bucket_location)
205
+
206
+ @property
207
+ def project_id(self):
208
+ """
209
+ The GCP project id this filesystem will use.
210
+ """
211
+ if self.gcsfs.options().project_id.has_value():
212
+ return frombytes(self.gcsfs.options().project_id.value())
parrot/lib/python3.10/site-packages/pyarrow/_generated_version.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # file generated by setuptools_scm
2
+ # don't change, don't track in version control
3
+ TYPE_CHECKING = False
4
+ if TYPE_CHECKING:
5
+ from typing import Tuple, Union
6
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
7
+ else:
8
+ VERSION_TUPLE = object
9
+
10
+ version: str
11
+ __version__: str
12
+ __version_tuple__: VERSION_TUPLE
13
+ version_tuple: VERSION_TUPLE
14
+
15
+ __version__ = version = '17.0.0'
16
+ __version_tuple__ = version_tuple = (17, 0, 0)
parrot/lib/python3.10/site-packages/pyarrow/_hdfs.pyx ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from cython cimport binding
21
+
22
+ from pyarrow.includes.common cimport *
23
+ from pyarrow.includes.libarrow cimport *
24
+ from pyarrow.includes.libarrow_fs cimport *
25
+ from pyarrow._fs cimport FileSystem
26
+
27
+ from pyarrow.lib import frombytes, tobytes
28
+ from pyarrow.util import _stringify_path
29
+
30
+
31
+ cdef class HadoopFileSystem(FileSystem):
32
+ """
33
+ HDFS backed FileSystem implementation
34
+
35
+ Parameters
36
+ ----------
37
+ host : str
38
+ HDFS host to connect to. Set to "default" for fs.defaultFS from
39
+ core-site.xml.
40
+ port : int, default 8020
41
+ HDFS port to connect to. Set to 0 for default or logical (HA) nodes.
42
+ user : str, default None
43
+ Username when connecting to HDFS; None implies login user.
44
+ replication : int, default 3
45
+ Number of copies each block will have.
46
+ buffer_size : int, default 0
47
+ If 0, no buffering will happen otherwise the size of the temporary read
48
+ and write buffer.
49
+ default_block_size : int, default None
50
+ None means the default configuration for HDFS, a typical block size is
51
+ 128 MB.
52
+ kerb_ticket : string or path, default None
53
+ If not None, the path to the Kerberos ticket cache.
54
+ extra_conf : dict, default None
55
+ Extra key/value pairs for configuration; will override any
56
+ hdfs-site.xml properties.
57
+
58
+ Examples
59
+ --------
60
+ >>> from pyarrow import fs
61
+ >>> hdfs = fs.HadoopFileSystem(host, port, user=user, kerb_ticket=ticket_cache_path) # doctest: +SKIP
62
+
63
+ For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
64
+ """
65
+
66
+ cdef:
67
+ CHadoopFileSystem* hdfs
68
+
69
+ def __init__(self, str host, int port=8020, *, str user=None,
70
+ int replication=3, int buffer_size=0,
71
+ default_block_size=None, kerb_ticket=None,
72
+ extra_conf=None):
73
+ cdef:
74
+ CHdfsOptions options
75
+ shared_ptr[CHadoopFileSystem] wrapped
76
+
77
+ if not host.startswith(('hdfs://', 'viewfs://')) and host != "default":
78
+ # TODO(kszucs): do more sanitization
79
+ host = 'hdfs://{}'.format(host)
80
+
81
+ options.ConfigureEndPoint(tobytes(host), int(port))
82
+ options.ConfigureReplication(replication)
83
+ options.ConfigureBufferSize(buffer_size)
84
+
85
+ if user is not None:
86
+ options.ConfigureUser(tobytes(user))
87
+ if default_block_size is not None:
88
+ options.ConfigureBlockSize(default_block_size)
89
+ if kerb_ticket is not None:
90
+ options.ConfigureKerberosTicketCachePath(
91
+ tobytes(_stringify_path(kerb_ticket)))
92
+ if extra_conf is not None:
93
+ for k, v in extra_conf.items():
94
+ options.ConfigureExtraConf(tobytes(k), tobytes(v))
95
+
96
+ with nogil:
97
+ wrapped = GetResultValue(CHadoopFileSystem.Make(options))
98
+ self.init(<shared_ptr[CFileSystem]> wrapped)
99
+
100
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
101
+ FileSystem.init(self, wrapped)
102
+ self.hdfs = <CHadoopFileSystem*> wrapped.get()
103
+
104
+ @staticmethod
105
+ def from_uri(uri):
106
+ """
107
+ Instantiate HadoopFileSystem object from an URI string.
108
+
109
+ The following two calls are equivalent
110
+
111
+ * ``HadoopFileSystem.from_uri('hdfs://localhost:8020/?user=test\
112
+ &replication=1')``
113
+ * ``HadoopFileSystem('localhost', port=8020, user='test', \
114
+ replication=1)``
115
+
116
+ Parameters
117
+ ----------
118
+ uri : str
119
+ A string URI describing the connection to HDFS.
120
+ In order to change the user, replication, buffer_size or
121
+ default_block_size pass the values as query parts.
122
+
123
+ Returns
124
+ -------
125
+ HadoopFileSystem
126
+ """
127
+ cdef:
128
+ HadoopFileSystem self = HadoopFileSystem.__new__(HadoopFileSystem)
129
+ shared_ptr[CHadoopFileSystem] wrapped
130
+ CHdfsOptions options
131
+
132
+ options = GetResultValue(CHdfsOptions.FromUriString(tobytes(uri)))
133
+ with nogil:
134
+ wrapped = GetResultValue(CHadoopFileSystem.Make(options))
135
+
136
+ self.init(<shared_ptr[CFileSystem]> wrapped)
137
+ return self
138
+
139
+ @staticmethod
140
+ @binding(True) # Required for cython < 3
141
+ def _reconstruct(kwargs):
142
+ # __reduce__ doesn't allow passing named arguments directly to the
143
+ # reconstructor, hence this wrapper.
144
+ return HadoopFileSystem(**kwargs)
145
+
146
+ def __reduce__(self):
147
+ cdef CHdfsOptions opts = self.hdfs.options()
148
+ return (
149
+ HadoopFileSystem._reconstruct, (dict(
150
+ host=frombytes(opts.connection_config.host),
151
+ port=opts.connection_config.port,
152
+ user=frombytes(opts.connection_config.user),
153
+ replication=opts.replication,
154
+ buffer_size=opts.buffer_size,
155
+ default_block_size=opts.default_block_size,
156
+ kerb_ticket=frombytes(opts.connection_config.kerb_ticket),
157
+ extra_conf={frombytes(k): frombytes(v)
158
+ for k, v in opts.connection_config.extra_conf},
159
+ ),)
160
+ )
parrot/lib/python3.10/site-packages/pyarrow/_json.pxd ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from pyarrow.includes.libarrow cimport *
21
+ from pyarrow.lib cimport _Weakrefable
22
+
23
+
24
+ cdef class ParseOptions(_Weakrefable):
25
+ cdef:
26
+ CJSONParseOptions options
27
+
28
+ @staticmethod
29
+ cdef ParseOptions wrap(CJSONParseOptions options)
30
+
31
+ cdef class ReadOptions(_Weakrefable):
32
+ cdef:
33
+ CJSONReadOptions options
34
+
35
+ @staticmethod
36
+ cdef ReadOptions wrap(CJSONReadOptions options)
parrot/lib/python3.10/site-packages/pyarrow/_json.pyx ADDED
@@ -0,0 +1,310 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False
19
+ # distutils: language = c++
20
+ # cython: language_level = 3
21
+
22
+ from pyarrow.includes.common cimport *
23
+ from pyarrow.includes.libarrow cimport *
24
+ from pyarrow.lib cimport (_Weakrefable, MemoryPool,
25
+ maybe_unbox_memory_pool,
26
+ get_input_stream, pyarrow_wrap_table,
27
+ pyarrow_wrap_schema, pyarrow_unwrap_schema)
28
+
29
+
30
+ cdef class ReadOptions(_Weakrefable):
31
+ """
32
+ Options for reading JSON files.
33
+
34
+ Parameters
35
+ ----------
36
+ use_threads : bool, optional (default True)
37
+ Whether to use multiple threads to accelerate reading
38
+ block_size : int, optional
39
+ How much bytes to process at a time from the input stream.
40
+ This will determine multi-threading granularity as well as
41
+ the size of individual chunks in the Table.
42
+ """
43
+
44
+ # Avoid mistakingly creating attributes
45
+ __slots__ = ()
46
+
47
+ def __init__(self, use_threads=None, block_size=None):
48
+ self.options = CJSONReadOptions.Defaults()
49
+ if use_threads is not None:
50
+ self.use_threads = use_threads
51
+ if block_size is not None:
52
+ self.block_size = block_size
53
+
54
+ @property
55
+ def use_threads(self):
56
+ """
57
+ Whether to use multiple threads to accelerate reading.
58
+ """
59
+ return self.options.use_threads
60
+
61
+ @use_threads.setter
62
+ def use_threads(self, value):
63
+ self.options.use_threads = value
64
+
65
+ @property
66
+ def block_size(self):
67
+ """
68
+ How much bytes to process at a time from the input stream.
69
+
70
+ This will determine multi-threading granularity as well as the size of
71
+ individual chunks in the Table.
72
+ """
73
+ return self.options.block_size
74
+
75
+ @block_size.setter
76
+ def block_size(self, value):
77
+ self.options.block_size = value
78
+
79
+ def __reduce__(self):
80
+ return ReadOptions, (
81
+ self.use_threads,
82
+ self.block_size
83
+ )
84
+
85
+ def equals(self, ReadOptions other):
86
+ """
87
+ Parameters
88
+ ----------
89
+ other : pyarrow.json.ReadOptions
90
+
91
+ Returns
92
+ -------
93
+ bool
94
+ """
95
+ return (
96
+ self.use_threads == other.use_threads and
97
+ self.block_size == other.block_size
98
+ )
99
+
100
+ def __eq__(self, other):
101
+ try:
102
+ return self.equals(other)
103
+ except TypeError:
104
+ return False
105
+
106
+ @staticmethod
107
+ cdef ReadOptions wrap(CJSONReadOptions options):
108
+ out = ReadOptions()
109
+ out.options = options # shallow copy
110
+ return out
111
+
112
+
113
+ cdef class ParseOptions(_Weakrefable):
114
+ """
115
+ Options for parsing JSON files.
116
+
117
+ Parameters
118
+ ----------
119
+ explicit_schema : Schema, optional (default None)
120
+ Optional explicit schema (no type inference, ignores other fields).
121
+ newlines_in_values : bool, optional (default False)
122
+ Whether objects may be printed across multiple lines (for example
123
+ pretty printed). If false, input must end with an empty line.
124
+ unexpected_field_behavior : str, default "infer"
125
+ How JSON fields outside of explicit_schema (if given) are treated.
126
+
127
+ Possible behaviors:
128
+
129
+ - "ignore": unexpected JSON fields are ignored
130
+ - "error": error out on unexpected JSON fields
131
+ - "infer": unexpected JSON fields are type-inferred and included in
132
+ the output
133
+ """
134
+
135
+ __slots__ = ()
136
+
137
+ def __init__(self, explicit_schema=None, newlines_in_values=None,
138
+ unexpected_field_behavior=None):
139
+ self.options = CJSONParseOptions.Defaults()
140
+ if explicit_schema is not None:
141
+ self.explicit_schema = explicit_schema
142
+ if newlines_in_values is not None:
143
+ self.newlines_in_values = newlines_in_values
144
+ if unexpected_field_behavior is not None:
145
+ self.unexpected_field_behavior = unexpected_field_behavior
146
+
147
+ def __reduce__(self):
148
+ return ParseOptions, (
149
+ self.explicit_schema,
150
+ self.newlines_in_values,
151
+ self.unexpected_field_behavior
152
+ )
153
+
154
+ @property
155
+ def explicit_schema(self):
156
+ """
157
+ Optional explicit schema (no type inference, ignores other fields)
158
+ """
159
+ if self.options.explicit_schema.get() == NULL:
160
+ return None
161
+ else:
162
+ return pyarrow_wrap_schema(self.options.explicit_schema)
163
+
164
+ @explicit_schema.setter
165
+ def explicit_schema(self, value):
166
+ self.options.explicit_schema = pyarrow_unwrap_schema(value)
167
+
168
+ @property
169
+ def newlines_in_values(self):
170
+ """
171
+ Whether newline characters are allowed in JSON values.
172
+ Setting this to True reduces the performance of multi-threaded
173
+ JSON reading.
174
+ """
175
+ return self.options.newlines_in_values
176
+
177
+ @newlines_in_values.setter
178
+ def newlines_in_values(self, value):
179
+ self.options.newlines_in_values = value
180
+
181
+ @property
182
+ def unexpected_field_behavior(self):
183
+ """
184
+ How JSON fields outside of explicit_schema (if given) are treated.
185
+
186
+ Possible behaviors:
187
+
188
+ - "ignore": unexpected JSON fields are ignored
189
+ - "error": error out on unexpected JSON fields
190
+ - "infer": unexpected JSON fields are type-inferred and included in
191
+ the output
192
+
193
+ Set to "infer" by default.
194
+ """
195
+ v = self.options.unexpected_field_behavior
196
+ if v == CUnexpectedFieldBehavior_Ignore:
197
+ return "ignore"
198
+ elif v == CUnexpectedFieldBehavior_Error:
199
+ return "error"
200
+ elif v == CUnexpectedFieldBehavior_InferType:
201
+ return "infer"
202
+ else:
203
+ raise ValueError('Unexpected value for unexpected_field_behavior')
204
+
205
+ @unexpected_field_behavior.setter
206
+ def unexpected_field_behavior(self, value):
207
+ cdef CUnexpectedFieldBehavior v
208
+
209
+ if value == "ignore":
210
+ v = CUnexpectedFieldBehavior_Ignore
211
+ elif value == "error":
212
+ v = CUnexpectedFieldBehavior_Error
213
+ elif value == "infer":
214
+ v = CUnexpectedFieldBehavior_InferType
215
+ else:
216
+ raise ValueError(
217
+ "Unexpected value `{}` for `unexpected_field_behavior`, pass "
218
+ "either `ignore`, `error` or `infer`.".format(value)
219
+ )
220
+
221
+ self.options.unexpected_field_behavior = v
222
+
223
+ def equals(self, ParseOptions other):
224
+ """
225
+ Parameters
226
+ ----------
227
+ other : pyarrow.json.ParseOptions
228
+
229
+ Returns
230
+ -------
231
+ bool
232
+ """
233
+ return (
234
+ self.explicit_schema == other.explicit_schema and
235
+ self.newlines_in_values == other.newlines_in_values and
236
+ self.unexpected_field_behavior == other.unexpected_field_behavior
237
+ )
238
+
239
+ def __eq__(self, other):
240
+ try:
241
+ return self.equals(other)
242
+ except TypeError:
243
+ return False
244
+
245
+ @staticmethod
246
+ cdef ParseOptions wrap(CJSONParseOptions options):
247
+ out = ParseOptions()
248
+ out.options = options # shallow copy
249
+ return out
250
+
251
+
252
+ cdef _get_reader(input_file, shared_ptr[CInputStream]* out):
253
+ use_memory_map = False
254
+ get_input_stream(input_file, use_memory_map, out)
255
+
256
+ cdef _get_read_options(ReadOptions read_options, CJSONReadOptions* out):
257
+ if read_options is None:
258
+ out[0] = CJSONReadOptions.Defaults()
259
+ else:
260
+ out[0] = read_options.options
261
+
262
+ cdef _get_parse_options(ParseOptions parse_options, CJSONParseOptions* out):
263
+ if parse_options is None:
264
+ out[0] = CJSONParseOptions.Defaults()
265
+ else:
266
+ out[0] = parse_options.options
267
+
268
+
269
+ def read_json(input_file, read_options=None, parse_options=None,
270
+ MemoryPool memory_pool=None):
271
+ """
272
+ Read a Table from a stream of JSON data.
273
+
274
+ Parameters
275
+ ----------
276
+ input_file : str, path or file-like object
277
+ The location of JSON data. Currently only the line-delimited JSON
278
+ format is supported.
279
+ read_options : pyarrow.json.ReadOptions, optional
280
+ Options for the JSON reader (see ReadOptions constructor for defaults).
281
+ parse_options : pyarrow.json.ParseOptions, optional
282
+ Options for the JSON parser
283
+ (see ParseOptions constructor for defaults).
284
+ memory_pool : MemoryPool, optional
285
+ Pool to allocate Table memory from.
286
+
287
+ Returns
288
+ -------
289
+ :class:`pyarrow.Table`
290
+ Contents of the JSON file as a in-memory table.
291
+ """
292
+ cdef:
293
+ shared_ptr[CInputStream] stream
294
+ CJSONReadOptions c_read_options
295
+ CJSONParseOptions c_parse_options
296
+ shared_ptr[CJSONReader] reader
297
+ shared_ptr[CTable] table
298
+
299
+ _get_reader(input_file, &stream)
300
+ _get_read_options(read_options, &c_read_options)
301
+ _get_parse_options(parse_options, &c_parse_options)
302
+
303
+ reader = GetResultValue(
304
+ CJSONReader.Make(maybe_unbox_memory_pool(memory_pool),
305
+ stream, c_read_options, c_parse_options))
306
+
307
+ with nogil:
308
+ table = GetResultValue(reader.get().Read())
309
+
310
+ return pyarrow_wrap_table(table)
parrot/lib/python3.10/site-packages/pyarrow/_orc.pxd ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # distutils: language = c++
19
+ # cython: language_level = 3
20
+
21
+ from libcpp cimport bool as c_bool
22
+ from libc.string cimport const_char
23
+ from libcpp.vector cimport vector as std_vector
24
+ from pyarrow.includes.common cimport *
25
+ from pyarrow.includes.libarrow cimport (CArray, CSchema, CStatus,
26
+ CResult, CTable, CMemoryPool,
27
+ CKeyValueMetadata,
28
+ CRecordBatch,
29
+ CTable, CCompressionType,
30
+ CRandomAccessFile, COutputStream,
31
+ TimeUnit)
32
+
33
+ cdef extern from "arrow/adapters/orc/options.h" \
34
+ namespace "arrow::adapters::orc" nogil:
35
+ cdef enum CompressionStrategy \
36
+ " arrow::adapters::orc::CompressionStrategy":
37
+ _CompressionStrategy_SPEED \
38
+ " arrow::adapters::orc::CompressionStrategy::kSpeed"
39
+ _CompressionStrategy_COMPRESSION \
40
+ " arrow::adapters::orc::CompressionStrategy::kCompression"
41
+
42
+ cdef enum WriterId" arrow::adapters::orc::WriterId":
43
+ _WriterId_ORC_JAVA_WRITER" arrow::adapters::orc::WriterId::kOrcJava"
44
+ _WriterId_ORC_CPP_WRITER" arrow::adapters::orc::WriterId::kOrcCpp"
45
+ _WriterId_PRESTO_WRITER" arrow::adapters::orc::WriterId::kPresto"
46
+ _WriterId_SCRITCHLEY_GO \
47
+ " arrow::adapters::orc::WriterId::kScritchleyGo"
48
+ _WriterId_TRINO_WRITER" arrow::adapters::orc::WriterId::kTrino"
49
+ _WriterId_UNKNOWN_WRITER" arrow::adapters::orc::WriterId::kUnknown"
50
+
51
+ cdef enum WriterVersion" arrow::adapters::orc::WriterVersion":
52
+ _WriterVersion_ORIGINAL \
53
+ " arrow::adapters::orc::WriterVersion::kOriginal"
54
+ _WriterVersion_HIVE_8732 \
55
+ " arrow::adapters::orc::WriterVersion::kHive8732"
56
+ _WriterVersion_HIVE_4243 \
57
+ " arrow::adapters::orc::WriterVersion::kHive4243"
58
+ _WriterVersion_HIVE_12055 \
59
+ " arrow::adapters::orc::WriterVersion::kHive12055"
60
+ _WriterVersion_HIVE_13083 \
61
+ " arrow::adapters::orc::WriterVersion::kHive13083"
62
+ _WriterVersion_ORC_101" arrow::adapters::orc::WriterVersion::kOrc101"
63
+ _WriterVersion_ORC_135" arrow::adapters::orc::WriterVersion::kOrc135"
64
+ _WriterVersion_ORC_517" arrow::adapters::orc::WriterVersion::kOrc517"
65
+ _WriterVersion_ORC_203" arrow::adapters::orc::WriterVersion::kOrc203"
66
+ _WriterVersion_ORC_14" arrow::adapters::orc::WriterVersion::kOrc14"
67
+ _WriterVersion_MAX" arrow::adapters::orc::WriterVersion::kMax"
68
+
69
+ cdef cppclass FileVersion" arrow::adapters::orc::FileVersion":
70
+ FileVersion(uint32_t major_version, uint32_t minor_version)
71
+ uint32_t major_version()
72
+ uint32_t minor_version()
73
+ c_string ToString()
74
+
75
+ cdef struct WriteOptions" arrow::adapters::orc::WriteOptions":
76
+ int64_t batch_size
77
+ FileVersion file_version
78
+ int64_t stripe_size
79
+ CCompressionType compression
80
+ int64_t compression_block_size
81
+ CompressionStrategy compression_strategy
82
+ int64_t row_index_stride
83
+ double padding_tolerance
84
+ double dictionary_key_size_threshold
85
+ std_vector[int64_t] bloom_filter_columns
86
+ double bloom_filter_fpp
87
+
88
+
89
+ cdef extern from "arrow/adapters/orc/adapter.h" \
90
+ namespace "arrow::adapters::orc" nogil:
91
+
92
+ cdef cppclass ORCFileReader:
93
+ @staticmethod
94
+ CResult[unique_ptr[ORCFileReader]] Open(
95
+ const shared_ptr[CRandomAccessFile]& file,
96
+ CMemoryPool* pool)
97
+
98
+ CResult[shared_ptr[const CKeyValueMetadata]] ReadMetadata()
99
+
100
+ CResult[shared_ptr[CSchema]] ReadSchema()
101
+
102
+ CResult[shared_ptr[CRecordBatch]] ReadStripe(int64_t stripe)
103
+ CResult[shared_ptr[CRecordBatch]] ReadStripe(
104
+ int64_t stripe, std_vector[c_string])
105
+
106
+ CResult[shared_ptr[CTable]] Read()
107
+ CResult[shared_ptr[CTable]] Read(std_vector[c_string])
108
+
109
+ int64_t NumberOfStripes()
110
+ int64_t NumberOfRows()
111
+ FileVersion GetFileVersion()
112
+ c_string GetSoftwareVersion()
113
+ CResult[CCompressionType] GetCompression()
114
+ int64_t GetCompressionSize()
115
+ int64_t GetRowIndexStride()
116
+ WriterId GetWriterId()
117
+ int32_t GetWriterIdValue()
118
+ WriterVersion GetWriterVersion()
119
+ int64_t GetNumberOfStripeStatistics()
120
+ int64_t GetContentLength()
121
+ int64_t GetStripeStatisticsLength()
122
+ int64_t GetFileFooterLength()
123
+ int64_t GetFilePostscriptLength()
124
+ int64_t GetFileLength()
125
+ c_string GetSerializedFileTail()
126
+
127
+ cdef cppclass ORCFileWriter:
128
+ @staticmethod
129
+ CResult[unique_ptr[ORCFileWriter]] Open(
130
+ COutputStream* output_stream, const WriteOptions& writer_options)
131
+
132
+ CStatus Write(const CTable& table)
133
+
134
+ CStatus Close()
parrot/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # distutils: language = c++
19
+ # cython: language_level = 3
20
+
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libparquet_encryption cimport *
23
+ from pyarrow._parquet cimport (ParquetCipher,
24
+ CFileEncryptionProperties,
25
+ CFileDecryptionProperties,
26
+ FileEncryptionProperties,
27
+ FileDecryptionProperties,
28
+ ParquetCipher_AES_GCM_V1,
29
+ ParquetCipher_AES_GCM_CTR_V1)
30
+ from pyarrow.lib cimport _Weakrefable
31
+
32
+ cdef class CryptoFactory(_Weakrefable):
33
+ cdef shared_ptr[CPyCryptoFactory] factory
34
+ cdef init(self, callable_client_factory)
35
+ cdef inline shared_ptr[CPyCryptoFactory] unwrap(self)
36
+
37
+ cdef class EncryptionConfiguration(_Weakrefable):
38
+ cdef shared_ptr[CEncryptionConfiguration] configuration
39
+ cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil
40
+
41
+ cdef class DecryptionConfiguration(_Weakrefable):
42
+ cdef shared_ptr[CDecryptionConfiguration] configuration
43
+ cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil
44
+
45
+ cdef class KmsConnectionConfig(_Weakrefable):
46
+ cdef shared_ptr[CKmsConnectionConfig] configuration
47
+ cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil
48
+
49
+ @staticmethod
50
+ cdef wrap(const CKmsConnectionConfig& config)
51
+
52
+
53
+ cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except *
54
+ cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except *
55
+ cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except *
56
+ cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except *
parrot/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx ADDED
@@ -0,0 +1,484 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False
19
+ # distutils: language = c++
20
+
21
+ from datetime import timedelta
22
+
23
+ from cython.operator cimport dereference as deref
24
+ from libcpp.memory cimport shared_ptr
25
+ from pyarrow.includes.common cimport *
26
+ from pyarrow.includes.libarrow cimport *
27
+ from pyarrow.lib cimport _Weakrefable
28
+ from pyarrow.lib import tobytes, frombytes
29
+
30
+
31
+ cdef ParquetCipher cipher_from_name(name):
32
+ name = name.upper()
33
+ if name == 'AES_GCM_V1':
34
+ return ParquetCipher_AES_GCM_V1
35
+ elif name == 'AES_GCM_CTR_V1':
36
+ return ParquetCipher_AES_GCM_CTR_V1
37
+ else:
38
+ raise ValueError(f'Invalid cipher name: {name!r}')
39
+
40
+
41
+ cdef cipher_to_name(ParquetCipher cipher):
42
+ if ParquetCipher_AES_GCM_V1 == cipher:
43
+ return 'AES_GCM_V1'
44
+ elif ParquetCipher_AES_GCM_CTR_V1 == cipher:
45
+ return 'AES_GCM_CTR_V1'
46
+ else:
47
+ raise ValueError('Invalid cipher value: {0}'.format(cipher))
48
+
49
+ cdef class EncryptionConfiguration(_Weakrefable):
50
+ """Configuration of the encryption, such as which columns to encrypt"""
51
+ # Avoid mistakingly creating attributes
52
+ __slots__ = ()
53
+
54
+ def __init__(self, footer_key, *, column_keys=None,
55
+ encryption_algorithm=None,
56
+ plaintext_footer=None, double_wrapping=None,
57
+ cache_lifetime=None, internal_key_material=None,
58
+ data_key_length_bits=None):
59
+ self.configuration.reset(
60
+ new CEncryptionConfiguration(tobytes(footer_key)))
61
+ if column_keys is not None:
62
+ self.column_keys = column_keys
63
+ if encryption_algorithm is not None:
64
+ self.encryption_algorithm = encryption_algorithm
65
+ if plaintext_footer is not None:
66
+ self.plaintext_footer = plaintext_footer
67
+ if double_wrapping is not None:
68
+ self.double_wrapping = double_wrapping
69
+ if cache_lifetime is not None:
70
+ self.cache_lifetime = cache_lifetime
71
+ if internal_key_material is not None:
72
+ self.internal_key_material = internal_key_material
73
+ if data_key_length_bits is not None:
74
+ self.data_key_length_bits = data_key_length_bits
75
+
76
+ @property
77
+ def footer_key(self):
78
+ """ID of the master key for footer encryption/signing"""
79
+ return frombytes(self.configuration.get().footer_key)
80
+
81
+ @property
82
+ def column_keys(self):
83
+ """
84
+ List of columns to encrypt, with master key IDs.
85
+ """
86
+ column_keys_str = frombytes(self.configuration.get().column_keys)
87
+ # Convert from "masterKeyID:colName,colName;masterKeyID:colName..."
88
+ # (see HIVE-21848) to dictionary of master key ID to column name lists
89
+ column_keys_to_key_list_str = dict(subString.replace(" ", "").split(
90
+ ":") for subString in column_keys_str.split(";"))
91
+ column_keys_dict = {k: v.split(
92
+ ",") for k, v in column_keys_to_key_list_str.items()}
93
+ return column_keys_dict
94
+
95
+ @column_keys.setter
96
+ def column_keys(self, dict value):
97
+ if value is not None:
98
+ # convert a dictionary such as
99
+ # '{"key1": ["col1 ", "col2"], "key2": ["col3 ", "col4"]}''
100
+ # to the string defined by the spec
101
+ # 'key1: col1 , col2; key2: col3 , col4'
102
+ column_keys = "; ".join(
103
+ ["{}: {}".format(k, ", ".join(v)) for k, v in value.items()])
104
+ self.configuration.get().column_keys = tobytes(column_keys)
105
+
106
+ @property
107
+ def encryption_algorithm(self):
108
+ """Parquet encryption algorithm.
109
+ Can be "AES_GCM_V1" (default), or "AES_GCM_CTR_V1"."""
110
+ return cipher_to_name(self.configuration.get().encryption_algorithm)
111
+
112
+ @encryption_algorithm.setter
113
+ def encryption_algorithm(self, value):
114
+ cipher = cipher_from_name(value)
115
+ self.configuration.get().encryption_algorithm = cipher
116
+
117
+ @property
118
+ def plaintext_footer(self):
119
+ """Write files with plaintext footer."""
120
+ return self.configuration.get().plaintext_footer
121
+
122
+ @plaintext_footer.setter
123
+ def plaintext_footer(self, value):
124
+ self.configuration.get().plaintext_footer = value
125
+
126
+ @property
127
+ def double_wrapping(self):
128
+ """Use double wrapping - where data encryption keys (DEKs) are
129
+ encrypted with key encryption keys (KEKs), which in turn are
130
+ encrypted with master keys.
131
+ If set to false, use single wrapping - where DEKs are
132
+ encrypted directly with master keys."""
133
+ return self.configuration.get().double_wrapping
134
+
135
+ @double_wrapping.setter
136
+ def double_wrapping(self, value):
137
+ self.configuration.get().double_wrapping = value
138
+
139
+ @property
140
+ def cache_lifetime(self):
141
+ """Lifetime of cached entities (key encryption keys,
142
+ local wrapping keys, KMS client objects)."""
143
+ return timedelta(
144
+ seconds=self.configuration.get().cache_lifetime_seconds)
145
+
146
+ @cache_lifetime.setter
147
+ def cache_lifetime(self, value):
148
+ if not isinstance(value, timedelta):
149
+ raise TypeError("cache_lifetime should be a timedelta")
150
+ self.configuration.get().cache_lifetime_seconds = value.total_seconds()
151
+
152
+ @property
153
+ def internal_key_material(self):
154
+ """Store key material inside Parquet file footers; this mode doesn’t
155
+ produce additional files. If set to false, key material is stored in
156
+ separate files in the same folder, which enables key rotation for
157
+ immutable Parquet files."""
158
+ return self.configuration.get().internal_key_material
159
+
160
+ @internal_key_material.setter
161
+ def internal_key_material(self, value):
162
+ self.configuration.get().internal_key_material = value
163
+
164
+ @property
165
+ def data_key_length_bits(self):
166
+ """Length of data encryption keys (DEKs), randomly generated by parquet key
167
+ management tools. Can be 128, 192 or 256 bits."""
168
+ return self.configuration.get().data_key_length_bits
169
+
170
+ @data_key_length_bits.setter
171
+ def data_key_length_bits(self, value):
172
+ self.configuration.get().data_key_length_bits = value
173
+
174
+ cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil:
175
+ return self.configuration
176
+
177
+
178
+ cdef class DecryptionConfiguration(_Weakrefable):
179
+ """Configuration of the decryption, such as cache timeout."""
180
+ # Avoid mistakingly creating attributes
181
+ __slots__ = ()
182
+
183
+ def __init__(self, *, cache_lifetime=None):
184
+ self.configuration.reset(new CDecryptionConfiguration())
185
+
186
+ @property
187
+ def cache_lifetime(self):
188
+ """Lifetime of cached entities (key encryption keys,
189
+ local wrapping keys, KMS client objects)."""
190
+ return timedelta(
191
+ seconds=self.configuration.get().cache_lifetime_seconds)
192
+
193
+ @cache_lifetime.setter
194
+ def cache_lifetime(self, value):
195
+ self.configuration.get().cache_lifetime_seconds = value.total_seconds()
196
+
197
+ cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil:
198
+ return self.configuration
199
+
200
+
201
+ cdef class KmsConnectionConfig(_Weakrefable):
202
+ """Configuration of the connection to the Key Management Service (KMS)"""
203
+ # Avoid mistakingly creating attributes
204
+ __slots__ = ()
205
+
206
+ def __init__(self, *, kms_instance_id=None, kms_instance_url=None,
207
+ key_access_token=None, custom_kms_conf=None):
208
+ self.configuration.reset(new CKmsConnectionConfig())
209
+ if kms_instance_id is not None:
210
+ self.kms_instance_id = kms_instance_id
211
+ if kms_instance_url is not None:
212
+ self.kms_instance_url = kms_instance_url
213
+ if key_access_token is None:
214
+ self.key_access_token = b'DEFAULT'
215
+ else:
216
+ self.key_access_token = key_access_token
217
+ if custom_kms_conf is not None:
218
+ self.custom_kms_conf = custom_kms_conf
219
+
220
+ @property
221
+ def kms_instance_id(self):
222
+ """ID of the KMS instance that will be used for encryption
223
+ (if multiple KMS instances are available)."""
224
+ return frombytes(self.configuration.get().kms_instance_id)
225
+
226
+ @kms_instance_id.setter
227
+ def kms_instance_id(self, value):
228
+ self.configuration.get().kms_instance_id = tobytes(value)
229
+
230
+ @property
231
+ def kms_instance_url(self):
232
+ """URL of the KMS instance."""
233
+ return frombytes(self.configuration.get().kms_instance_url)
234
+
235
+ @kms_instance_url.setter
236
+ def kms_instance_url(self, value):
237
+ self.configuration.get().kms_instance_url = tobytes(value)
238
+
239
+ @property
240
+ def key_access_token(self):
241
+ """Authorization token that will be passed to KMS."""
242
+ return frombytes(self.configuration.get()
243
+ .refreshable_key_access_token.get().value())
244
+
245
+ @key_access_token.setter
246
+ def key_access_token(self, value):
247
+ self.refresh_key_access_token(value)
248
+
249
+ @property
250
+ def custom_kms_conf(self):
251
+ """A dictionary with KMS-type-specific configuration"""
252
+ custom_kms_conf = {
253
+ frombytes(k): frombytes(v)
254
+ for k, v in self.configuration.get().custom_kms_conf
255
+ }
256
+ return custom_kms_conf
257
+
258
+ @custom_kms_conf.setter
259
+ def custom_kms_conf(self, dict value):
260
+ if value is not None:
261
+ for k, v in value.items():
262
+ if isinstance(k, str) and isinstance(v, str):
263
+ self.configuration.get().custom_kms_conf[tobytes(k)] = \
264
+ tobytes(v)
265
+ else:
266
+ raise TypeError("Expected custom_kms_conf to be " +
267
+ "a dictionary of strings")
268
+
269
+ def refresh_key_access_token(self, value):
270
+ cdef:
271
+ shared_ptr[CKeyAccessToken] c_key_access_token = \
272
+ self.configuration.get().refreshable_key_access_token
273
+
274
+ c_key_access_token.get().Refresh(tobytes(value))
275
+
276
+ cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil:
277
+ return self.configuration
278
+
279
+ @staticmethod
280
+ cdef wrap(const CKmsConnectionConfig& config):
281
+ result = KmsConnectionConfig()
282
+ result.configuration = make_shared[CKmsConnectionConfig](move(config))
283
+ return result
284
+
285
+
286
+ # Callback definitions for CPyKmsClientVtable
287
+ cdef void _cb_wrap_key(
288
+ handler, const c_string& key_bytes,
289
+ const c_string& master_key_identifier, c_string* out) except *:
290
+ mkid_str = frombytes(master_key_identifier)
291
+ wrapped_key = handler.wrap_key(key_bytes, mkid_str)
292
+ out[0] = tobytes(wrapped_key)
293
+
294
+
295
+ cdef void _cb_unwrap_key(
296
+ handler, const c_string& wrapped_key,
297
+ const c_string& master_key_identifier, c_string* out) except *:
298
+ mkid_str = frombytes(master_key_identifier)
299
+ wk_str = frombytes(wrapped_key)
300
+ key = handler.unwrap_key(wk_str, mkid_str)
301
+ out[0] = tobytes(key)
302
+
303
+
304
+ cdef class KmsClient(_Weakrefable):
305
+ """The abstract base class for KmsClient implementations."""
306
+ cdef:
307
+ shared_ptr[CKmsClient] client
308
+
309
+ def __init__(self):
310
+ self.init()
311
+
312
+ cdef init(self):
313
+ cdef:
314
+ CPyKmsClientVtable vtable = CPyKmsClientVtable()
315
+
316
+ vtable.wrap_key = _cb_wrap_key
317
+ vtable.unwrap_key = _cb_unwrap_key
318
+
319
+ self.client.reset(new CPyKmsClient(self, vtable))
320
+
321
+ def wrap_key(self, key_bytes, master_key_identifier):
322
+ """Wrap a key - encrypt it with the master key."""
323
+ raise NotImplementedError()
324
+
325
+ def unwrap_key(self, wrapped_key, master_key_identifier):
326
+ """Unwrap a key - decrypt it with the master key."""
327
+ raise NotImplementedError()
328
+
329
+ cdef inline shared_ptr[CKmsClient] unwrap(self) nogil:
330
+ return self.client
331
+
332
+
333
+ # Callback definition for CPyKmsClientFactoryVtable
334
+ cdef void _cb_create_kms_client(
335
+ handler,
336
+ const CKmsConnectionConfig& kms_connection_config,
337
+ shared_ptr[CKmsClient]* out) except *:
338
+ connection_config = KmsConnectionConfig.wrap(kms_connection_config)
339
+
340
+ result = handler(connection_config)
341
+ if not isinstance(result, KmsClient):
342
+ raise TypeError(
343
+ "callable must return KmsClient instances, but got {}".format(
344
+ type(result)))
345
+
346
+ out[0] = (<KmsClient> result).unwrap()
347
+
348
+
349
+ cdef class CryptoFactory(_Weakrefable):
350
+ """ A factory that produces the low-level FileEncryptionProperties and
351
+ FileDecryptionProperties objects, from the high-level parameters."""
352
+ # Avoid mistakingly creating attributes
353
+ __slots__ = ()
354
+
355
+ def __init__(self, kms_client_factory):
356
+ """Create CryptoFactory.
357
+
358
+ Parameters
359
+ ----------
360
+ kms_client_factory : a callable that accepts KmsConnectionConfig
361
+ and returns a KmsClient
362
+ """
363
+ self.factory.reset(new CPyCryptoFactory())
364
+
365
+ if callable(kms_client_factory):
366
+ self.init(kms_client_factory)
367
+ else:
368
+ raise TypeError("Parameter kms_client_factory must be a callable")
369
+
370
+ cdef init(self, callable_client_factory):
371
+ cdef:
372
+ CPyKmsClientFactoryVtable vtable
373
+ shared_ptr[CPyKmsClientFactory] kms_client_factory
374
+
375
+ vtable.create_kms_client = _cb_create_kms_client
376
+ kms_client_factory.reset(
377
+ new CPyKmsClientFactory(callable_client_factory, vtable))
378
+ # A KmsClientFactory object must be registered
379
+ # via this method before calling any of
380
+ # file_encryption_properties()/file_decryption_properties() methods.
381
+ self.factory.get().RegisterKmsClientFactory(
382
+ static_pointer_cast[CKmsClientFactory, CPyKmsClientFactory](
383
+ kms_client_factory))
384
+
385
+ def file_encryption_properties(self,
386
+ KmsConnectionConfig kms_connection_config,
387
+ EncryptionConfiguration encryption_config):
388
+ """Create file encryption properties.
389
+
390
+ Parameters
391
+ ----------
392
+ kms_connection_config : KmsConnectionConfig
393
+ Configuration of connection to KMS
394
+
395
+ encryption_config : EncryptionConfiguration
396
+ Configuration of the encryption, such as which columns to encrypt
397
+
398
+ Returns
399
+ -------
400
+ file_encryption_properties : FileEncryptionProperties
401
+ File encryption properties.
402
+ """
403
+ cdef:
404
+ CResult[shared_ptr[CFileEncryptionProperties]] \
405
+ file_encryption_properties_result
406
+ with nogil:
407
+ file_encryption_properties_result = \
408
+ self.factory.get().SafeGetFileEncryptionProperties(
409
+ deref(kms_connection_config.unwrap().get()),
410
+ deref(encryption_config.unwrap().get()))
411
+ file_encryption_properties = GetResultValue(
412
+ file_encryption_properties_result)
413
+ return FileEncryptionProperties.wrap(file_encryption_properties)
414
+
415
+ def file_decryption_properties(
416
+ self,
417
+ KmsConnectionConfig kms_connection_config,
418
+ DecryptionConfiguration decryption_config=None):
419
+ """Create file decryption properties.
420
+
421
+ Parameters
422
+ ----------
423
+ kms_connection_config : KmsConnectionConfig
424
+ Configuration of connection to KMS
425
+
426
+ decryption_config : DecryptionConfiguration, default None
427
+ Configuration of the decryption, such as cache timeout.
428
+ Can be None.
429
+
430
+ Returns
431
+ -------
432
+ file_decryption_properties : FileDecryptionProperties
433
+ File decryption properties.
434
+ """
435
+ cdef:
436
+ CDecryptionConfiguration c_decryption_config
437
+ CResult[shared_ptr[CFileDecryptionProperties]] \
438
+ c_file_decryption_properties
439
+ if decryption_config is None:
440
+ c_decryption_config = CDecryptionConfiguration()
441
+ else:
442
+ c_decryption_config = deref(decryption_config.unwrap().get())
443
+ with nogil:
444
+ c_file_decryption_properties = \
445
+ self.factory.get().SafeGetFileDecryptionProperties(
446
+ deref(kms_connection_config.unwrap().get()),
447
+ c_decryption_config)
448
+ file_decryption_properties = GetResultValue(
449
+ c_file_decryption_properties)
450
+ return FileDecryptionProperties.wrap(file_decryption_properties)
451
+
452
+ def remove_cache_entries_for_token(self, access_token):
453
+ self.factory.get().RemoveCacheEntriesForToken(tobytes(access_token))
454
+
455
+ def remove_cache_entries_for_all_tokens(self):
456
+ self.factory.get().RemoveCacheEntriesForAllTokens()
457
+
458
+ cdef inline shared_ptr[CPyCryptoFactory] unwrap(self):
459
+ return self.factory
460
+
461
+
462
+ cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except *:
463
+ if isinstance(crypto_factory, CryptoFactory):
464
+ pycf = (<CryptoFactory> crypto_factory).unwrap()
465
+ return static_pointer_cast[CCryptoFactory, CPyCryptoFactory](pycf)
466
+ raise TypeError("Expected CryptoFactory, got %s" % type(crypto_factory))
467
+
468
+
469
+ cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except *:
470
+ if isinstance(kmsconnectionconfig, KmsConnectionConfig):
471
+ return (<KmsConnectionConfig> kmsconnectionconfig).unwrap()
472
+ raise TypeError("Expected KmsConnectionConfig, got %s" % type(kmsconnectionconfig))
473
+
474
+
475
+ cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except *:
476
+ if isinstance(encryptionconfig, EncryptionConfiguration):
477
+ return (<EncryptionConfiguration> encryptionconfig).unwrap()
478
+ raise TypeError("Expected EncryptionConfiguration, got %s" % type(encryptionconfig))
479
+
480
+
481
+ cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except *:
482
+ if isinstance(decryptionconfig, DecryptionConfiguration):
483
+ return (<DecryptionConfiguration> decryptionconfig).unwrap()
484
+ raise TypeError("Expected DecryptionConfiguration, got %s" % type(decryptionconfig))
parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # distutils: language = c++
19
+ # cython: language_level = 3
20
+
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libarrow cimport CStatus
23
+
24
+
25
+ ctypedef CStatus cb_test_func()
26
+
27
+ cdef extern from "arrow/python/python_test.h" namespace "arrow::py::testing" nogil:
28
+
29
+ cdef cppclass CTestCase "arrow::py::testing::TestCase":
30
+ c_string name
31
+ cb_test_func func
32
+
33
+ vector[CTestCase] GetCppTestCases()
parrot/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False, binding=True
19
+ # distutils: language = c++
20
+
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libarrow cimport *
23
+ from pyarrow.lib cimport check_status
24
+
25
+ from pyarrow.lib import frombytes
26
+
27
+
28
+ cdef class CppTestCase:
29
+ """
30
+ A simple wrapper for a C++ test case.
31
+ """
32
+ cdef:
33
+ CTestCase c_case
34
+
35
+ @staticmethod
36
+ cdef wrap(CTestCase c_case):
37
+ cdef:
38
+ CppTestCase obj
39
+ obj = CppTestCase.__new__(CppTestCase)
40
+ obj.c_case = c_case
41
+ return obj
42
+
43
+ @property
44
+ def name(self):
45
+ return frombytes(self.c_case.name)
46
+
47
+ def __repr__(self):
48
+ return f"<{self.__class__.__name__} {self.name!r}>"
49
+
50
+ def __call__(self):
51
+ check_status(self.c_case.func())
52
+
53
+
54
+ def get_cpp_tests():
55
+ """
56
+ Get a list of C++ test cases.
57
+ """
58
+ cases = []
59
+ c_cases = GetCppTestCases()
60
+ for c_case in c_cases:
61
+ cases.append(CppTestCase.wrap(c_case))
62
+ return cases
parrot/lib/python3.10/site-packages/pyarrow/_s3fs.pyx ADDED
@@ -0,0 +1,479 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from cython cimport binding
21
+
22
+ from pyarrow.lib cimport (check_status, pyarrow_wrap_metadata,
23
+ pyarrow_unwrap_metadata)
24
+ from pyarrow.lib import frombytes, tobytes, KeyValueMetadata
25
+ from pyarrow.includes.common cimport *
26
+ from pyarrow.includes.libarrow cimport *
27
+ from pyarrow.includes.libarrow_fs cimport *
28
+ from pyarrow._fs cimport FileSystem
29
+
30
+
31
+ cpdef enum S3LogLevel:
32
+ Off = <int8_t> CS3LogLevel_Off
33
+ Fatal = <int8_t> CS3LogLevel_Fatal
34
+ Error = <int8_t> CS3LogLevel_Error
35
+ Warn = <int8_t> CS3LogLevel_Warn
36
+ Info = <int8_t> CS3LogLevel_Info
37
+ Debug = <int8_t> CS3LogLevel_Debug
38
+ Trace = <int8_t> CS3LogLevel_Trace
39
+
40
+
41
+ def initialize_s3(S3LogLevel log_level=S3LogLevel.Fatal, int num_event_loop_threads=1):
42
+ """
43
+ Initialize S3 support
44
+
45
+ Parameters
46
+ ----------
47
+ log_level : S3LogLevel
48
+ level of logging
49
+ num_event_loop_threads : int, default 1
50
+ how many threads to use for the AWS SDK's I/O event loop
51
+
52
+ Examples
53
+ --------
54
+ >>> fs.initialize_s3(fs.S3LogLevel.Error) # doctest: +SKIP
55
+ """
56
+ cdef CS3GlobalOptions options
57
+ options.log_level = <CS3LogLevel> log_level
58
+ options.num_event_loop_threads = num_event_loop_threads
59
+ check_status(CInitializeS3(options))
60
+
61
+
62
+ def ensure_s3_initialized():
63
+ """
64
+ Initialize S3 (with default options) if not already initialized
65
+ """
66
+ check_status(CEnsureS3Initialized())
67
+
68
+
69
+ def finalize_s3():
70
+ check_status(CFinalizeS3())
71
+
72
+
73
+ def ensure_s3_finalized():
74
+ """
75
+ Finalize S3 if already initialized
76
+ """
77
+ check_status(CEnsureS3Finalized())
78
+
79
+
80
+ def resolve_s3_region(bucket):
81
+ """
82
+ Resolve the S3 region of a bucket.
83
+
84
+ Parameters
85
+ ----------
86
+ bucket : str
87
+ A S3 bucket name
88
+
89
+ Returns
90
+ -------
91
+ region : str
92
+ A S3 region name
93
+
94
+ Examples
95
+ --------
96
+ >>> fs.resolve_s3_region('voltrondata-labs-datasets')
97
+ 'us-east-2'
98
+ """
99
+ cdef:
100
+ c_string c_bucket
101
+ c_string c_region
102
+
103
+ ensure_s3_initialized()
104
+
105
+ c_bucket = tobytes(bucket)
106
+ with nogil:
107
+ c_region = GetResultValue(ResolveS3BucketRegion(c_bucket))
108
+
109
+ return frombytes(c_region)
110
+
111
+
112
+ class S3RetryStrategy:
113
+ """
114
+ Base class for AWS retry strategies for use with S3.
115
+
116
+ Parameters
117
+ ----------
118
+ max_attempts : int, default 3
119
+ The maximum number of retry attempts to attempt before failing.
120
+ """
121
+
122
+ def __init__(self, max_attempts=3):
123
+ self.max_attempts = max_attempts
124
+
125
+
126
+ class AwsStandardS3RetryStrategy(S3RetryStrategy):
127
+ """
128
+ Represents an AWS Standard retry strategy for use with S3.
129
+
130
+ Parameters
131
+ ----------
132
+ max_attempts : int, default 3
133
+ The maximum number of retry attempts to attempt before failing.
134
+ """
135
+ pass
136
+
137
+
138
+ class AwsDefaultS3RetryStrategy(S3RetryStrategy):
139
+ """
140
+ Represents an AWS Default retry strategy for use with S3.
141
+
142
+ Parameters
143
+ ----------
144
+ max_attempts : int, default 3
145
+ The maximum number of retry attempts to attempt before failing.
146
+ """
147
+ pass
148
+
149
+
150
+ cdef class S3FileSystem(FileSystem):
151
+ """
152
+ S3-backed FileSystem implementation
153
+
154
+ AWS access_key and secret_key can be provided explicitly.
155
+
156
+ If role_arn is provided instead of access_key and secret_key, temporary
157
+ credentials will be fetched by issuing a request to STS to assume the
158
+ specified role.
159
+
160
+ If neither access_key nor secret_key are provided, and role_arn is also not
161
+ provided, then attempts to establish the credentials automatically.
162
+ S3FileSystem will try the following methods, in order:
163
+
164
+ * ``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, and ``AWS_SESSION_TOKEN`` environment variables
165
+ * configuration files such as ``~/.aws/credentials`` and ``~/.aws/config``
166
+ * for nodes on Amazon EC2, the EC2 Instance Metadata Service
167
+
168
+ Note: S3 buckets are special and the operations available on them may be
169
+ limited or more expensive than desired.
170
+
171
+ When S3FileSystem creates new buckets (assuming allow_bucket_creation is
172
+ True), it does not pass any non-default settings. In AWS S3, the bucket and
173
+ all objects will be not publicly visible, and will have no bucket policies
174
+ and no resource tags. To have more control over how buckets are created,
175
+ use a different API to create them.
176
+
177
+ Parameters
178
+ ----------
179
+ access_key : str, default None
180
+ AWS Access Key ID. Pass None to use the standard AWS environment
181
+ variables and/or configuration file.
182
+ secret_key : str, default None
183
+ AWS Secret Access key. Pass None to use the standard AWS environment
184
+ variables and/or configuration file.
185
+ session_token : str, default None
186
+ AWS Session Token. An optional session token, required if access_key
187
+ and secret_key are temporary credentials from STS.
188
+ anonymous : bool, default False
189
+ Whether to connect anonymously if access_key and secret_key are None.
190
+ If true, will not attempt to look up credentials using standard AWS
191
+ configuration methods.
192
+ role_arn : str, default None
193
+ AWS Role ARN. If provided instead of access_key and secret_key,
194
+ temporary credentials will be fetched by assuming this role.
195
+ session_name : str, default None
196
+ An optional identifier for the assumed role session.
197
+ external_id : str, default None
198
+ An optional unique identifier that might be required when you assume
199
+ a role in another account.
200
+ load_frequency : int, default 900
201
+ The frequency (in seconds) with which temporary credentials from an
202
+ assumed role session will be refreshed.
203
+ region : str, default None
204
+ AWS region to connect to. If not set, the AWS SDK will attempt to
205
+ determine the region using heuristics such as environment variables,
206
+ configuration profile, EC2 metadata, or default to 'us-east-1' when SDK
207
+ version <1.8. One can also use :func:`pyarrow.fs.resolve_s3_region` to
208
+ automatically resolve the region from a bucket name.
209
+ request_timeout : double, default None
210
+ Socket read timeouts on Windows and macOS, in seconds.
211
+ If omitted, the AWS SDK default value is used (typically 3 seconds).
212
+ This option is ignored on non-Windows, non-macOS systems.
213
+ connect_timeout : double, default None
214
+ Socket connection timeout, in seconds.
215
+ If omitted, the AWS SDK default value is used (typically 1 second).
216
+ scheme : str, default 'https'
217
+ S3 connection transport scheme.
218
+ endpoint_override : str, default None
219
+ Override region with a connect string such as "localhost:9000"
220
+ background_writes : bool, default True
221
+ Whether file writes will be issued in the background, without
222
+ blocking.
223
+ default_metadata : mapping or pyarrow.KeyValueMetadata, default None
224
+ Default metadata for open_output_stream. This will be ignored if
225
+ non-empty metadata is passed to open_output_stream.
226
+ proxy_options : dict or str, default None
227
+ If a proxy is used, provide the options here. Supported options are:
228
+ 'scheme' (str: 'http' or 'https'; required), 'host' (str; required),
229
+ 'port' (int; required), 'username' (str; optional),
230
+ 'password' (str; optional).
231
+ A proxy URI (str) can also be provided, in which case these options
232
+ will be derived from the provided URI.
233
+ The following are equivalent::
234
+
235
+ S3FileSystem(proxy_options='http://username:password@localhost:8020')
236
+ S3FileSystem(proxy_options={'scheme': 'http', 'host': 'localhost',
237
+ 'port': 8020, 'username': 'username',
238
+ 'password': 'password'})
239
+ allow_bucket_creation : bool, default False
240
+ Whether to allow directory creation at the bucket-level. This option may also be
241
+ passed in a URI query parameter.
242
+ allow_bucket_deletion : bool, default False
243
+ Whether to allow directory deletion at the bucket-level. This option may also be
244
+ passed in a URI query parameter.
245
+ check_directory_existence_before_creation : bool, default false
246
+ Whether to check the directory existence before creating it.
247
+ If false, when creating a directory the code will not check if it already
248
+ exists or not. It's an optimization to try directory creation and catch the error,
249
+ rather than issue two dependent I/O calls.
250
+ If true, when creating a directory the code will only create the directory when necessary
251
+ at the cost of extra I/O calls. This can be used for key/value cloud storage which has
252
a hard rate limit to the number of object mutation operations or scenarios such as
253
+ the directories already exist and you do not have creation access.
254
+ retry_strategy : S3RetryStrategy, default AwsStandardS3RetryStrategy(max_attempts=3)
255
+ The retry strategy to use with S3; fail after max_attempts. Available
256
+ strategies are AwsStandardS3RetryStrategy, AwsDefaultS3RetryStrategy.
257
+ force_virtual_addressing : bool, default False
258
+ Whether to use virtual addressing of buckets.
259
+ If true, then virtual addressing is always enabled.
260
+ If false, then virtual addressing is only enabled if `endpoint_override` is empty.
261
+ This can be used for non-AWS backends that only support virtual hosted-style access.
262
+
263
+ Examples
264
+ --------
265
+ >>> from pyarrow import fs
266
+ >>> s3 = fs.S3FileSystem(region='us-west-2')
267
+ >>> s3.get_file_info(fs.FileSelector(
268
+ ... 'power-analysis-ready-datastore/power_901_constants.zarr/FROCEAN', recursive=True
269
+ ... ))
270
+ [<FileInfo for 'power-analysis-ready-datastore/power_901_constants.zarr/FROCEAN/.zarray...
271
+
272
+ For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
273
+ """
274
+
275
    cdef:
        # Borrowed raw pointer into the wrapped C++ filesystem; set in
        # init() and kept alive by the shared_ptr held by the FileSystem
        # base class.
        CS3FileSystem* s3fs
277
+
278
    def __init__(self, *, access_key=None, secret_key=None, session_token=None,
                 bint anonymous=False, region=None, request_timeout=None,
                 connect_timeout=None, scheme=None, endpoint_override=None,
                 bint background_writes=True, default_metadata=None,
                 role_arn=None, session_name=None, external_id=None,
                 load_frequency=900, proxy_options=None,
                 allow_bucket_creation=False, allow_bucket_deletion=False,
                 check_directory_existence_before_creation=False,
                 retry_strategy: S3RetryStrategy = AwsStandardS3RetryStrategy(
                     max_attempts=3),
                 force_virtual_addressing=False):
        # NOTE(review): the default retry_strategy instance is created once
        # at definition time and shared across calls; it is only read
        # (max_attempts), so the shared default is harmless here.
        cdef:
            optional[CS3Options] options
            shared_ptr[CS3FileSystem] wrapped

        # Need to do this before initializing `options` as the S3Options
        # constructor has a debug check against use after S3 finalization.
        ensure_s3_initialized()

        # Validate the mutually dependent credential arguments up front:
        # explicit keys must come in pairs, and a session token requires
        # both of them.
        if access_key is not None and secret_key is None:
            raise ValueError(
                'In order to initialize with explicit credentials both '
                'access_key and secret_key must be provided, '
                '`secret_key` is not set.'
            )
        elif access_key is None and secret_key is not None:
            raise ValueError(
                'In order to initialize with explicit credentials both '
                'access_key and secret_key must be provided, '
                '`access_key` is not set.'
            )

        elif session_token is not None and (access_key is None or
                                            secret_key is None):
            raise ValueError(
                'In order to initialize a session with temporary credentials, '
                'both secret_key and access_key must be provided in addition '
                'to session_token.'
            )

        elif (access_key is not None or secret_key is not None):
            # Explicit credentials are incompatible with both anonymous
            # access and role assumption.
            if anonymous:
                raise ValueError(
                    'Cannot pass anonymous=True together with access_key '
                    'and secret_key.')

            if role_arn:
                raise ValueError(
                    'Cannot provide role_arn with access_key and secret_key')

            if session_token is None:
                session_token = ""

            options = CS3Options.FromAccessKey(
                tobytes(access_key),
                tobytes(secret_key),
                tobytes(session_token)
            )
        elif anonymous:
            if role_arn:
                raise ValueError(
                    'Cannot provide role_arn with anonymous=True')

            options = CS3Options.Anonymous()
        elif role_arn:
            # Temporary credentials via STS AssumeRole, refreshed every
            # `load_frequency` seconds.
            if session_name is None:
                session_name = ''
            if external_id is None:
                external_id = ''

            options = CS3Options.FromAssumeRole(
                tobytes(role_arn),
                tobytes(session_name),
                tobytes(external_id),
                load_frequency
            )
        else:
            # No explicit credentials: defer to the AWS SDK's standard
            # resolution chain (env vars, config files, EC2 metadata).
            options = CS3Options.Defaults()

        # Apply the remaining optional settings only when provided, so the
        # AWS SDK defaults stay in effect otherwise.
        if region is not None:
            options.value().region = tobytes(region)
        if request_timeout is not None:
            options.value().request_timeout = request_timeout
        if connect_timeout is not None:
            options.value().connect_timeout = connect_timeout
        if scheme is not None:
            options.value().scheme = tobytes(scheme)
        if endpoint_override is not None:
            options.value().endpoint_override = tobytes(endpoint_override)
        if background_writes is not None:
            options.value().background_writes = background_writes
        if default_metadata is not None:
            if not isinstance(default_metadata, KeyValueMetadata):
                default_metadata = KeyValueMetadata(default_metadata)
            options.value().default_metadata = pyarrow_unwrap_metadata(
                default_metadata)

        if proxy_options is not None:
            # Accept either a mapping of individual fields or a single
            # proxy URI string.
            if isinstance(proxy_options, dict):
                options.value().proxy_options.scheme = tobytes(
                    proxy_options["scheme"])
                options.value().proxy_options.host = tobytes(
                    proxy_options["host"])
                options.value().proxy_options.port = proxy_options["port"]
                proxy_username = proxy_options.get("username", None)
                if proxy_username:
                    options.value().proxy_options.username = tobytes(
                        proxy_username)
                proxy_password = proxy_options.get("password", None)
                if proxy_password:
                    options.value().proxy_options.password = tobytes(
                        proxy_password)
            elif isinstance(proxy_options, str):
                options.value().proxy_options = GetResultValue(
                    CS3ProxyOptions.FromUriString(tobytes(proxy_options)))
            else:
                raise TypeError(
                    "'proxy_options': expected 'dict' or 'str', "
                    f"got {type(proxy_options)} instead.")

        options.value().allow_bucket_creation = allow_bucket_creation
        options.value().allow_bucket_deletion = allow_bucket_deletion
        options.value().check_directory_existence_before_creation = check_directory_existence_before_creation
        options.value().force_virtual_addressing = force_virtual_addressing

        # Translate the Python-level strategy marker into the matching
        # C++ retry strategy; anything else (including a bare
        # S3RetryStrategy) is rejected.
        if isinstance(retry_strategy, AwsStandardS3RetryStrategy):
            options.value().retry_strategy = CS3RetryStrategy.GetAwsStandardRetryStrategy(
                retry_strategy.max_attempts)
        elif isinstance(retry_strategy, AwsDefaultS3RetryStrategy):
            options.value().retry_strategy = CS3RetryStrategy.GetAwsDefaultRetryStrategy(
                retry_strategy.max_attempts)
        else:
            raise ValueError(f'Invalid retry_strategy {retry_strategy!r}')

        # Construction may block; release the GIL while the C++ side builds
        # the filesystem.
        with nogil:
            wrapped = GetResultValue(CS3FileSystem.Make(options.value()))

        self.init(<shared_ptr[CFileSystem]> wrapped)
416
+
417
    cdef init(self, const shared_ptr[CFileSystem]& wrapped):
        # Chain to the base-class initialization, then cache a typed
        # pointer to the concrete S3 filesystem for option/region access.
        FileSystem.init(self, wrapped)
        self.s3fs = <CS3FileSystem*> wrapped.get()
420
+
421
    @staticmethod
    @binding(True)  # Required for cython < 3
    def _reconstruct(kwargs):
        # __reduce__ doesn't allow passing named arguments directly to the
        # reconstructor, hence this wrapper.
        return S3FileSystem(**kwargs)
427
+
428
    def __reduce__(self):
        # Pickle support: capture the resolved C++ options and rebuild the
        # filesystem through _reconstruct with equivalent keyword arguments.
        cdef CS3Options opts = self.s3fs.options()

        # if creds were explicitly provided, then use them
        # else obtain them as they were last time.
        if opts.credentials_kind == CS3CredentialsKind_Explicit:
            access_key = frombytes(opts.GetAccessKey())
            secret_key = frombytes(opts.GetSecretKey())
            session_token = frombytes(opts.GetSessionToken())
        else:
            access_key = None
            secret_key = None
            session_token = None

        return (
            S3FileSystem._reconstruct, (dict(
                access_key=access_key,
                secret_key=secret_key,
                session_token=session_token,
                anonymous=(opts.credentials_kind ==
                           CS3CredentialsKind_Anonymous),
                region=frombytes(opts.region),
                scheme=frombytes(opts.scheme),
                connect_timeout=opts.connect_timeout,
                request_timeout=opts.request_timeout,
                endpoint_override=frombytes(opts.endpoint_override),
                role_arn=frombytes(opts.role_arn),
                session_name=frombytes(opts.session_name),
                external_id=frombytes(opts.external_id),
                load_frequency=opts.load_frequency,
                background_writes=opts.background_writes,
                allow_bucket_creation=opts.allow_bucket_creation,
                allow_bucket_deletion=opts.allow_bucket_deletion,
                check_directory_existence_before_creation=opts.check_directory_existence_before_creation,
                default_metadata=pyarrow_wrap_metadata(opts.default_metadata),
                proxy_options={'scheme': frombytes(opts.proxy_options.scheme),
                               'host': frombytes(opts.proxy_options.host),
                               'port': opts.proxy_options.port,
                               'username': frombytes(
                                   opts.proxy_options.username),
                               'password': frombytes(
                                   opts.proxy_options.password)},
                force_virtual_addressing=opts.force_virtual_addressing,
            ),)
        )
473
+
474
    @property
    def region(self):
        """
        The AWS region this filesystem connects to.
        """
        # Decoded from the region the C++ options resolved at construction.
        return frombytes(self.s3fs.region())
parrot/lib/python3.10/site-packages/pyarrow/acero.py ADDED
@@ -0,0 +1,403 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # ---------------------------------------------------------------------
19
+ # Implement Internal ExecPlan bindings
20
+
21
+ # cython: profile=False
22
+ # distutils: language = c++
23
+ # cython: language_level = 3
24
+
25
+ from pyarrow.lib import Table, RecordBatch
26
+ from pyarrow.compute import Expression, field
27
+
28
+ try:
29
+ from pyarrow._acero import ( # noqa
30
+ Declaration,
31
+ ExecNodeOptions,
32
+ TableSourceNodeOptions,
33
+ FilterNodeOptions,
34
+ ProjectNodeOptions,
35
+ AggregateNodeOptions,
36
+ OrderByNodeOptions,
37
+ HashJoinNodeOptions,
38
+ AsofJoinNodeOptions,
39
+ )
40
+ except ImportError as exc:
41
+ raise ImportError(
42
+ f"The pyarrow installation is not built with support for 'acero' ({str(exc)})"
43
+ ) from None
44
+
45
+
46
try:
    import pyarrow.dataset as ds
    from pyarrow._dataset import ScanNodeOptions
except ImportError:
    # Dataset support is an optional pyarrow component. Provide a stub
    # namespace with never-instantiated placeholder classes so that the
    # `isinstance(x, ds.Dataset)` checks below simply evaluate to False
    # instead of raising NameError when the component is missing.
    class DatasetModuleStub:
        class Dataset:
            pass

        class InMemoryDataset:
            pass
    ds = DatasetModuleStub
57
+
58
+
59
def _dataset_to_decl(dataset, use_threads=True):
    """Build a Declaration pipeline that scans *dataset*.

    The scan is followed by a projection dropping the special dataset
    columns ("__fragment_index", "__batch_index", "__last_in_fragment",
    "__filename") and, when the dataset carries a filter, by a filter
    node that enforces it exactly.
    """
    scan = Declaration("scan", ScanNodeOptions(dataset, use_threads=use_threads))

    # Project down to the schema columns only, shedding the special
    # augmented columns emitted by the scan node.
    keep = [field(name) for name in dataset.schema.names]
    stages = [scan, Declaration("project", ProjectNodeOptions(keep))]

    # Filters applied in CScanNodeOptions are "best effort" for the scan
    # node itself, so an explicit Filter node is needed to apply them
    # for real.
    predicate = dataset._scan_options.get("filter")
    if predicate is not None:
        stages.append(Declaration("filter", FilterNodeOptions(predicate)))

    return Declaration.from_sequence(stages)
78
+
79
+
80
def _perform_join(join_type, left_operand, left_keys,
                  right_operand, right_keys,
                  left_suffix=None, right_suffix=None,
                  use_threads=True, coalesce_keys=False,
                  output_type=Table):
    """
    Perform join of two tables or datasets.

    The result will be an output table with the result of the join operation

    Parameters
    ----------
    join_type : str
        One of supported join types.
    left_operand : Table or Dataset
        The left operand for the join operation.
    left_keys : str or list[str]
        The left key (or keys) on which the join operation should be performed.
    right_operand : Table or Dataset
        The right operand for the join operation.
    right_keys : str or list[str]
        The right key (or keys) on which the join operation should be performed.
    left_suffix : str, default None
        Which suffix to add to left column names. This prevents confusion
        when the columns in left and right operands have colliding names.
    right_suffix : str, default None
        Which suffix to add to the right column names. This prevents confusion
        when the columns in left and right operands have colliding names.
    use_threads : bool, default True
        Whether to use multithreading or not.
    coalesce_keys : bool, default False
        If the duplicated keys should be omitted from one of the sides
        in the join result.
    output_type: Table or InMemoryDataset
        The output type for the exec plan result.

    Returns
    -------
    result_table : Table or InMemoryDataset
    """
    if not isinstance(left_operand, (Table, ds.Dataset)):
        raise TypeError(f"Expected Table or Dataset, got {type(left_operand)}")
    if not isinstance(right_operand, (Table, ds.Dataset)):
        raise TypeError(f"Expected Table or Dataset, got {type(right_operand)}")

    # Prepare left and right tables Keys to send them to the C++ function
    # (each dict maps a key name to its position in the user-provided key
    # list, used below for the coalescing projection).
    left_keys_order = {}
    if not isinstance(left_keys, (tuple, list)):
        left_keys = [left_keys]
    for idx, key in enumerate(left_keys):
        left_keys_order[key] = idx

    right_keys_order = {}
    if not isinstance(right_keys, (list, tuple)):
        right_keys = [right_keys]
    for idx, key in enumerate(right_keys):
        right_keys_order[key] = idx

    # By default expose all columns on both left and right table
    left_columns = left_operand.schema.names
    right_columns = right_operand.schema.names

    # Pick the join type: semi/anti joins expose only one side;
    # inner/left-outer drop the right keys; right-outer drops the left keys.
    if join_type == "left semi" or join_type == "left anti":
        right_columns = []
    elif join_type == "right semi" or join_type == "right anti":
        left_columns = []
    elif join_type == "inner" or join_type == "left outer":
        right_columns = [
            col for col in right_columns if col not in right_keys_order
        ]
    elif join_type == "right outer":
        left_columns = [
            col for col in left_columns if col not in left_keys_order
        ]

    # Turn the columns to vectors of FieldRefs
    # and set aside indices of keys.
    left_column_keys_indices = {}
    for idx, colname in enumerate(left_columns):
        if colname in left_keys:
            left_column_keys_indices[colname] = idx
    right_column_keys_indices = {}
    for idx, colname in enumerate(right_columns):
        if colname in right_keys:
            right_column_keys_indices[colname] = idx

    # Add the join node to the execplan
    if isinstance(left_operand, ds.Dataset):
        left_source = _dataset_to_decl(left_operand, use_threads=use_threads)
    else:
        left_source = Declaration("table_source", TableSourceNodeOptions(left_operand))
    if isinstance(right_operand, ds.Dataset):
        right_source = _dataset_to_decl(right_operand, use_threads=use_threads)
    else:
        right_source = Declaration(
            "table_source", TableSourceNodeOptions(right_operand)
        )

    # When coalescing, pass the (possibly pruned) output column lists so
    # the join node emits everything needed by the projection below.
    if coalesce_keys:
        join_opts = HashJoinNodeOptions(
            join_type, left_keys, right_keys, left_columns, right_columns,
            output_suffix_for_left=left_suffix or "",
            output_suffix_for_right=right_suffix or "",
        )
    else:
        join_opts = HashJoinNodeOptions(
            join_type, left_keys, right_keys,
            output_suffix_for_left=left_suffix or "",
            output_suffix_for_right=right_suffix or "",
        )
    decl = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source]
    )

    if coalesce_keys and join_type == "full outer":
        # In case of full outer joins, the join operation will output all columns
        # so that we can coalesce the keys and exclude duplicates in a subsequent
        # projection.
        left_columns_set = set(left_columns)
        right_columns_set = set(right_columns)
        # Where the right table columns start.
        right_operand_index = len(left_columns)
        projected_col_names = []
        projections = []
        for idx, col in enumerate(left_columns + right_columns):
            if idx < len(left_columns) and col in left_column_keys_indices:
                # Include keys only once and coalesce left+right table keys.
                projected_col_names.append(col)
                # Get the index of the right key that is being paired
                # with this left key. We do so by retrieving the name
                # of the right key that is in the same position in the provided keys
                # and then looking up the index for that name in the right table.
                right_key_index = right_column_keys_indices[
                    right_keys[left_keys_order[col]]]
                projections.append(
                    Expression._call("coalesce", [
                        Expression._field(idx), Expression._field(
                            right_operand_index+right_key_index)
                    ])
                )
            elif idx >= right_operand_index and col in right_column_keys_indices:
                # Do not include right table keys. As they would lead to duplicated keys
                continue
            else:
                # For all the other columns include them as they are.
                # Just recompute the suffixes that the join produced as the projection
                # would lose them otherwise.
                if (
                    left_suffix and idx < right_operand_index
                    and col in right_columns_set
                ):
                    col += left_suffix
                if (
                    right_suffix and idx >= right_operand_index
                    and col in left_columns_set
                ):
                    col += right_suffix
                projected_col_names.append(col)
                projections.append(
                    Expression._field(idx)
                )
        projection = Declaration(
            "project", ProjectNodeOptions(projections, projected_col_names)
        )
        decl = Declaration.from_sequence([decl, projection])

    result_table = decl.to_table(use_threads=use_threads)

    if output_type == Table:
        return result_table
    elif output_type == ds.InMemoryDataset:
        return ds.InMemoryDataset(result_table)
    else:
        raise TypeError("Unsupported output type")
255
+
256
+
257
def _perform_join_asof(left_operand, left_on, left_by,
                       right_operand, right_on, right_by,
                       tolerance, use_threads=True,
                       output_type=Table):
    """
    Perform an as-of join of two tables or datasets.

    The result is an output table where each left row is matched against
    the right side on the "on" key (within ``tolerance``), with the "by"
    keys matched exactly.

    Parameters
    ----------
    left_operand : Table or Dataset
        Left side of the join.
    left_on : str
        Left "on" key used for the inexact match.
    left_by : str or list[str]
        Left "by" key(s) that must match exactly.
    right_operand : Table or Dataset
        Right side of the join.
    right_on : str or list[str]
        Right "on" key used for the inexact match.
    right_by : str or list[str]
        Right "by" key(s) that must match exactly.
    tolerance : int
        The tolerance to use for the asof join, interpreted in the same
        units as the "on" key.
    use_threads : bool, default True
        Whether to use multithreading or not.
    output_type : Table or InMemoryDataset
        The output type for the exec plan result.

    Returns
    -------
    result_table : Table or InMemoryDataset
    """
    if not isinstance(left_operand, (Table, ds.Dataset)):
        raise TypeError(f"Expected Table or Dataset, got {type(left_operand)}")
    if not isinstance(right_operand, (Table, ds.Dataset)):
        raise TypeError(f"Expected Table or Dataset, got {type(right_operand)}")

    # Normalize single "by" keys to lists.
    if not isinstance(left_by, (tuple, list)):
        left_by = [left_by]
    if not isinstance(right_by, (tuple, list)):
        right_by = [right_by]

    # AsofJoin does not return on or by columns for right_operand, so
    # exclude them when checking for name collisions.
    excluded = [right_on] + right_by
    right_columns = [
        col for col in right_operand.schema.names if col not in excluded
    ]
    columns_collisions = set(left_operand.schema.names) & set(right_columns)
    if columns_collisions:
        raise ValueError(
            "Columns {} present in both tables. AsofJoin does not support "
            "column collisions.".format(columns_collisions),
        )

    def _source(operand):
        # Datasets become a scan pipeline; tables are fed in directly.
        if isinstance(operand, ds.Dataset):
            return _dataset_to_decl(operand, use_threads=use_threads)
        return Declaration("table_source", TableSourceNodeOptions(operand))

    decl = Declaration(
        "asofjoin",
        options=AsofJoinNodeOptions(
            left_on, left_by, right_on, right_by, tolerance
        ),
        inputs=[_source(left_operand), _source(right_operand)],
    )

    result_table = decl.to_table(use_threads=use_threads)

    if output_type == Table:
        return result_table
    if output_type == ds.InMemoryDataset:
        return ds.InMemoryDataset(result_table)
    raise TypeError("Unsupported output type")
341
+
342
+
343
def _filter_table(table, expression):
    """Filter rows of a table based on the provided expression.

    The result contains only the rows matching *expression*.

    Parameters
    ----------
    table : Table or RecordBatch
        Table that should be filtered.
    expression : Expression
        The expression on which rows should be filtered.

    Returns
    -------
    Table or RecordBatch
        Same container kind as the input.
    """
    # Record batches are run through the plan as a one-batch table and
    # converted back afterwards.
    batch_input = isinstance(table, RecordBatch)
    if batch_input:
        table = Table.from_batches([table])

    plan = Declaration.from_sequence([
        Declaration("table_source", options=TableSourceNodeOptions(table)),
        Declaration("filter", options=FilterNodeOptions(expression)),
    ])
    filtered = plan.to_table(use_threads=True)

    if batch_input:
        return filtered.combine_chunks().to_batches()[0]
    return filtered
373
+
374
+
375
def _sort_source(table_or_dataset, sort_keys, output_type=Table, **kwargs):
    """Sort a table or dataset by *sort_keys*.

    Extra keyword arguments are forwarded to OrderByNodeOptions.
    Returns a Table or an InMemoryDataset depending on *output_type*.
    """
    if isinstance(table_or_dataset, ds.Dataset):
        source = _dataset_to_decl(table_or_dataset, use_threads=True)
    else:
        source = Declaration(
            "table_source", TableSourceNodeOptions(table_or_dataset)
        )

    plan = Declaration.from_sequence(
        [source, Declaration("order_by", OrderByNodeOptions(sort_keys, **kwargs))]
    )
    sorted_table = plan.to_table(use_threads=True)

    if output_type == Table:
        return sorted_table
    if output_type == ds.InMemoryDataset:
        return ds.InMemoryDataset(sorted_table)
    raise TypeError("Unsupported output type")
395
+
396
+
397
def _group_by(table, aggregates, keys, use_threads=True):
    """Aggregate *table* with *aggregates*, grouped by *keys*."""
    source = Declaration("table_source", TableSourceNodeOptions(table))
    aggregate = Declaration(
        "aggregate", AggregateNodeOptions(aggregates, keys=keys)
    )
    return Declaration.from_sequence([source, aggregate]).to_table(
        use_threads=use_threads)
parrot/lib/python3.10/site-packages/pyarrow/array.pxi ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/pyarrow/benchmark.pxi ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
def benchmark_PandasObjectIsNull(list obj):
    # Thin benchmarking hook: forwards `obj` to the C++ helper
    # Benchmark_PandasObjectIsNull; return value is discarded.
    Benchmark_PandasObjectIsNull(obj)
parrot/lib/python3.10/site-packages/pyarrow/compat.pxi ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
def encode_file_path(path):
    """Return *path* encoded as bytes suitable for Arrow C++.

    str inputs are UTF-8 encoded (POSIX systems handle UTF-8 directly;
    on Windows the Arrow C++ libraries convert UTF-8 file names to
    UTF-16LE as required). bytes inputs are passed through unchanged.
    """
    if not isinstance(path, str):
        return path
    return path.encode('utf-8')
30
+
31
+
32
# Starting with Python 3.7, dicts are guaranteed to be insertion-ordered.
# NOTE(review): presumably kept as a compatibility alias for code that
# imported `ordered_dict` when it pointed at an ordered mapping type.
ordered_dict = dict
34
+
35
+
36
# Prefer cloudpickle when available: it can serialize objects the stdlib
# pickle rejects (e.g. lambdas and locally defined functions). Fall back
# to the standard library otherwise.
try:
    import cloudpickle as pickle
except ImportError:
    import pickle
40
+
41
+
42
def tobytes(o):
    """
    Encode a unicode or bytes string to bytes.

    Parameters
    ----------
    o : str or bytes
        Input string; bytes input is returned unchanged.
    """
    return o.encode('utf8') if isinstance(o, str) else o
55
+
56
+
57
def frombytes(o, *, safe=False):
    """
    Decode the given bytestring to unicode.

    Parameters
    ----------
    o : bytes-like
        Input object.
    safe : bool, default False
        If true, substitute undecodable bytes with the Unicode replacement
        character instead of raising (decodes with errors='replace');
        if false, invalid UTF-8 raises UnicodeDecodeError.
    """
    if safe:
        return o.decode('utf8', errors='replace')
    else:
        return o.decode('utf8')
parrot/lib/python3.10/site-packages/pyarrow/compute.py ADDED
@@ -0,0 +1,732 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from pyarrow._compute import ( # noqa
19
+ Function,
20
+ FunctionOptions,
21
+ FunctionRegistry,
22
+ HashAggregateFunction,
23
+ HashAggregateKernel,
24
+ Kernel,
25
+ ScalarAggregateFunction,
26
+ ScalarAggregateKernel,
27
+ ScalarFunction,
28
+ ScalarKernel,
29
+ VectorFunction,
30
+ VectorKernel,
31
+ # Option classes
32
+ ArraySortOptions,
33
+ AssumeTimezoneOptions,
34
+ CastOptions,
35
+ CountOptions,
36
+ CumulativeOptions,
37
+ CumulativeSumOptions,
38
+ DayOfWeekOptions,
39
+ DictionaryEncodeOptions,
40
+ RunEndEncodeOptions,
41
+ ElementWiseAggregateOptions,
42
+ ExtractRegexOptions,
43
+ FilterOptions,
44
+ IndexOptions,
45
+ JoinOptions,
46
+ ListSliceOptions,
47
+ ListFlattenOptions,
48
+ MakeStructOptions,
49
+ MapLookupOptions,
50
+ MatchSubstringOptions,
51
+ ModeOptions,
52
+ NullOptions,
53
+ PadOptions,
54
+ PairwiseOptions,
55
+ PartitionNthOptions,
56
+ QuantileOptions,
57
+ RandomOptions,
58
+ RankOptions,
59
+ ReplaceSliceOptions,
60
+ ReplaceSubstringOptions,
61
+ RoundBinaryOptions,
62
+ RoundOptions,
63
+ RoundTemporalOptions,
64
+ RoundToMultipleOptions,
65
+ ScalarAggregateOptions,
66
+ SelectKOptions,
67
+ SetLookupOptions,
68
+ SliceOptions,
69
+ SortOptions,
70
+ SplitOptions,
71
+ SplitPatternOptions,
72
+ StrftimeOptions,
73
+ StrptimeOptions,
74
+ StructFieldOptions,
75
+ TakeOptions,
76
+ TDigestOptions,
77
+ TrimOptions,
78
+ Utf8NormalizeOptions,
79
+ VarianceOptions,
80
+ WeekOptions,
81
+ # Functions
82
+ call_function,
83
+ function_registry,
84
+ get_function,
85
+ list_functions,
86
+ # Udf
87
+ call_tabular_function,
88
+ register_scalar_function,
89
+ register_tabular_function,
90
+ register_aggregate_function,
91
+ register_vector_function,
92
+ UdfContext,
93
+ # Expressions
94
+ Expression,
95
+ )
96
+
97
+ from collections import namedtuple
98
+ import inspect
99
+ from textwrap import dedent
100
+ import warnings
101
+
102
+ import pyarrow as pa
103
+ from pyarrow import _compute_docstrings
104
+ from pyarrow.vendored import docscrape
105
+
106
+
107
+ def _get_arg_names(func):
108
+ return func._doc.arg_names
109
+
110
+
111
+ _OptionsClassDoc = namedtuple('_OptionsClassDoc', ('params',))
112
+
113
+
114
+ def _scrape_options_class_doc(options_class):
115
+ if not options_class.__doc__:
116
+ return None
117
+ doc = docscrape.NumpyDocString(options_class.__doc__)
118
+ return _OptionsClassDoc(doc['Parameters'])
119
+
120
+
121
def _decorate_compute_function(wrapper, exposed_name, func, options_class):
    """Attach metadata and a generated numpydoc docstring to a wrapper.

    Parameters
    ----------
    wrapper : callable
        Wrapper produced by ``_make_generic_wrapper``; mutated in place.
    exposed_name : str
        Name under which the wrapper is exposed at module level.
    func : Function
        Underlying compute function (supplies ``_doc``, name, arity, kind).
    options_class : type or None
        FunctionOptions subclass associated with the function, if any.

    Returns
    -------
    callable
        The same ``wrapper`` object.
    """
    cpp_doc = func._doc

    # Metadata consumed by other pyarrow tooling.
    wrapper.__arrow_compute_function__ = dict(
        name=func.name,
        arity=func.arity,
        options_class=cpp_doc.options_class,
        options_required=cpp_doc.options_required)
    wrapper.__name__ = exposed_name
    wrapper.__qualname__ = exposed_name

    pieces = []

    # 1. One-line summary (synthesized when the C++ doc has none).
    summary = cpp_doc.summary
    if not summary:
        arg_str = "arguments" if func.arity > 1 else "argument"
        summary = ("Call compute function {!r} with the given {}"
                   .format(func.name, arg_str))
    pieces.append(f"{summary}.\n\n")

    # 2. Multi-line description, when present.
    if cpp_doc.description:
        pieces.append(f"{cpp_doc.description}\n\n")

    doc_addition = _compute_docstrings.function_doc_additions.get(func.name)

    # 3. Parameters section.
    pieces.append(dedent("""\
        Parameters
        ----------
        """))

    # 3a. The compute function's own arguments.
    for arg_name in _get_arg_names(func):
        if func.kind in ('vector', 'scalar_aggregate'):
            arg_type = 'Array-like'
        else:
            arg_type = 'Array-like or scalar-like'
        pieces.append(f"{arg_name} : {arg_type}\n")
        pieces.append("    Argument to compute function.\n")

    # 3b. Option values: scraped from the options class docstring when
    # available, otherwise reconstructed from its constructor signature.
    if options_class is not None:
        options_class_doc = _scrape_options_class_doc(options_class)
        if options_class_doc:
            for p in options_class_doc.params:
                pieces.append(f"{p.name} : {p.type}\n")
                for s in p.desc:
                    pieces.append(f"    {s}\n")
        else:
            warnings.warn(f"Options class {options_class.__name__} "
                          f"does not have a docstring", RuntimeWarning)
            options_sig = inspect.signature(options_class)
            for p in options_sig.parameters.values():
                pieces.append(dedent("""\
                {0} : optional
                    Parameter for {1} constructor. Either `options`
                    or `{0}` can be passed, but not both at the same time.
                """.format(p.name, options_class.__name__)))
        pieces.append(dedent(f"""\
            options : pyarrow.compute.{options_class.__name__}, optional
                Alternative way of passing options.
            """))

    pieces.append(dedent("""\
        memory_pool : pyarrow.MemoryPool, optional
            If not passed, will allocate memory from the default memory pool.
        """))

    # 4. Custom additions (e.g. examples) registered per function name.
    if doc_addition is not None:
        pieces.append("\n{}\n".format(dedent(doc_addition).strip("\n")))

    wrapper.__doc__ = "".join(pieces)
    return wrapper
202
+
203
+
204
+ def _get_options_class(func):
205
+ class_name = func._doc.options_class
206
+ if not class_name:
207
+ return None
208
+ try:
209
+ return globals()[class_name]
210
+ except KeyError:
211
+ warnings.warn("Python binding for {} not exposed"
212
+ .format(class_name), RuntimeWarning)
213
+ return None
214
+
215
+
216
+ def _handle_options(name, options_class, options, args, kwargs):
217
+ if args or kwargs:
218
+ if options is not None:
219
+ raise TypeError(
220
+ "Function {!r} called with both an 'options' argument "
221
+ "and additional arguments"
222
+ .format(name))
223
+ return options_class(*args, **kwargs)
224
+
225
+ if options is not None:
226
+ if isinstance(options, dict):
227
+ return options_class(**options)
228
+ elif isinstance(options, options_class):
229
+ return options
230
+ raise TypeError(
231
+ "Function {!r} expected a {} parameter, got {}"
232
+ .format(name, options_class, type(options)))
233
+
234
+ return None
235
+
236
+
237
def _make_generic_wrapper(func_name, func, options_class, arity):
    """Create the Python-callable wrapper around a compute function.

    Two variants are generated: a simple one for functions without an
    options class, and one that additionally converts trailing
    positional arguments and keyword arguments into an options
    instance.  When the first argument is an Expression, the call is
    deferred as an unbound expression instead of executed.  An arity
    of Ellipsis means the function is variadic.
    """
    if options_class is None:
        def wrapper(*args, memory_pool=None):
            if arity is not Ellipsis and len(args) != arity:
                raise TypeError(
                    f"{func_name} takes {arity} positional argument(s), "
                    f"but {len(args)} were given"
                )
            if args and isinstance(args[0], Expression):
                return Expression._call(func_name, list(args))
            return func.call(args, None, memory_pool)
    else:
        def wrapper(*args, memory_pool=None, options=None, **kwargs):
            if arity is not Ellipsis:
                if len(args) < arity:
                    raise TypeError(
                        f"{func_name} takes {arity} positional argument(s), "
                        f"but {len(args)} were given"
                    )
                # Positional arguments beyond the function's arity are
                # treated as options-constructor arguments.
                option_args = args[arity:]
                args = args[:arity]
            else:
                option_args = ()
            options = _handle_options(func_name, options_class, options,
                                      option_args, kwargs)
            if args and isinstance(args[0], Expression):
                return Expression._call(func_name, list(args), options)
            return func.call(args, options, memory_pool)
    return wrapper
266
+
267
+
268
+ def _make_signature(arg_names, var_arg_names, options_class):
269
+ from inspect import Parameter
270
+ params = []
271
+ for name in arg_names:
272
+ params.append(Parameter(name, Parameter.POSITIONAL_ONLY))
273
+ for name in var_arg_names:
274
+ params.append(Parameter(name, Parameter.VAR_POSITIONAL))
275
+ if options_class is not None:
276
+ options_sig = inspect.signature(options_class)
277
+ for p in options_sig.parameters.values():
278
+ assert p.kind in (Parameter.POSITIONAL_OR_KEYWORD,
279
+ Parameter.KEYWORD_ONLY)
280
+ if var_arg_names:
281
+ # Cannot have a positional argument after a *args
282
+ p = p.replace(kind=Parameter.KEYWORD_ONLY)
283
+ params.append(p)
284
+ params.append(Parameter("options", Parameter.KEYWORD_ONLY,
285
+ default=None))
286
+ params.append(Parameter("memory_pool", Parameter.KEYWORD_ONLY,
287
+ default=None))
288
+ return inspect.Signature(params)
289
+
290
+
291
def _wrap_function(name, func):
    """Build a fully-decorated Python wrapper for one compute function."""
    options_class = _get_options_class(func)
    arg_names = _get_arg_names(func)
    # A trailing "*name" argument marks the function as variadic.
    if arg_names and arg_names[-1].startswith('*'):
        var_arg_names = [arg_names.pop().lstrip('*')]
    else:
        var_arg_names = []

    wrapper = _make_generic_wrapper(
        name, func, options_class, arity=func.arity)
    wrapper.__signature__ = _make_signature(arg_names, var_arg_names,
                                            options_class)
    return _decorate_compute_function(wrapper, name, func, options_class)
305
+
306
+
307
def _make_global_functions():
    """
    Make global functions wrapping each compute function.

    Note that some of the automatically-generated wrappers may be
    overridden by custom versions defined later in this module.
    """
    g = globals()
    reg = function_registry()

    # Avoid clashes with Python keywords
    rewrites = {'and': 'and_',
                'or': 'or_'}

    for cpp_name in reg.list_functions():
        name = rewrites.get(cpp_name, cpp_name)
        func = reg.get_function(cpp_name)
        if func.kind == "hash_aggregate":
            # Hash aggregate functions are not callable, so do not
            # expose them at module level.
            continue
        if func.kind == "scalar_aggregate" and func.arity == 0:
            # Nullary scalar aggregate functions are not directly
            # callable either.
            continue
        assert name not in g, name
        g[cpp_name] = g[name] = _wrap_function(name, func)


_make_global_functions()
337
+
338
+
339
def cast(arr, target_type=None, safe=None, options=None, memory_pool=None):
    """
    Cast array values to another data type. Can also be invoked as an array
    instance method.

    Parameters
    ----------
    arr : Array-like
    target_type : DataType or str
        Type to cast to
    safe : bool, default True
        Check for overflows or other unsafe conversions
    options : CastOptions, default None
        Additional checks pass by CastOptions
    memory_pool : MemoryPool, optional
        memory pool to use for allocations during function execution.

    Examples
    --------
    >>> from datetime import datetime
    >>> import pyarrow as pa
    >>> arr = pa.array([datetime(2010, 1, 1), datetime(2015, 1, 1)])
    >>> arr.type
    TimestampType(timestamp[us])

    You can use ``pyarrow.DataType`` objects to specify the target type:

    >>> cast(arr, pa.timestamp('ms'))
    <pyarrow.lib.TimestampArray object at ...>
    [
      2010-01-01 00:00:00.000,
      2015-01-01 00:00:00.000
    ]

    >>> cast(arr, pa.timestamp('ms')).type
    TimestampType(timestamp[ms])

    Alternatively, it is also supported to use the string aliases for these
    types:

    >>> arr.cast('timestamp[ms]')
    <pyarrow.lib.TimestampArray object at ...>
    [
      2010-01-01 00:00:00.000,
      2015-01-01 00:00:00.000
    ]
    >>> arr.cast('timestamp[ms]').type
    TimestampType(timestamp[ms])

    Returns
    -------
    casted : Array
        The cast result as a new Array
    """
    # 'options' is mutually exclusive with 'target_type' / 'safe'.
    if options is not None:
        if (safe is not None) or (target_type is not None):
            raise ValueError("Must either pass values for 'target_type' and 'safe'"
                             " or pass a value for 'options'")
    else:
        target_type = pa.types.lib.ensure_type(target_type)
        # Only an explicit safe=False disables the overflow checks.
        factory = CastOptions.unsafe if safe is False else CastOptions.safe
        options = factory(target_type)
    return call_function("cast", [arr], options, memory_pool)
406
+
407
+
408
def index(data, value, start=None, end=None, *, memory_pool=None):
    """
    Find the index of the first occurrence of a given value.

    Parameters
    ----------
    data : Array-like
    value : Scalar-like object
        The value to search for.
    start : int, optional
        Index to start searching from (inclusive).
    end : int, optional
        Index to stop searching at (exclusive).
    memory_pool : MemoryPool, optional
        If not passed, will allocate memory from the default memory pool.

    Returns
    -------
    index : int
        the index, or -1 if not found
    """
    # Restrict the search window by slicing the input.
    if start is not None:
        data = (data.slice(start, end - start) if end is not None
                else data.slice(start))
    elif end is not None:
        data = data.slice(0, end)

    # Coerce the needle to a scalar of the data's type.
    if not isinstance(value, pa.Scalar):
        value = pa.scalar(value, type=data.type)
    elif data.type != value.type:
        value = pa.scalar(value.as_py(), type=data.type)

    result = call_function('index', [data],
                           IndexOptions(value=value), memory_pool)
    # Translate the slice-relative index back to the caller's frame.
    if start is not None and result.as_py() >= 0:
        result = pa.scalar(result.as_py() + start, type=pa.int64())
    return result
444
+
445
+
446
def take(data, indices, *, boundscheck=True, memory_pool=None):
    """
    Select values (or records) from array- or table-like data given integer
    selection indices.

    The result will be of the same type(s) as the input, with elements taken
    from the input array (or record batch / table fields) at the given
    indices. If an index is null then the corresponding value in the output
    will be null.

    Parameters
    ----------
    data : Array, ChunkedArray, RecordBatch, or Table
    indices : Array, ChunkedArray
        Must be of integer type
    boundscheck : boolean, default True
        Whether to boundscheck the indices. If False and there is an out of
        bounds index, will likely cause the process to crash.
    memory_pool : MemoryPool, optional
        If not passed, will allocate memory from the default memory pool.

    Returns
    -------
    result : depends on inputs
        Selected values for the given indices

    Examples
    --------
    >>> import pyarrow as pa
    >>> arr = pa.array(["a", "b", "c", None, "e", "f"])
    >>> indices = pa.array([0, None, 4, 3])
    >>> arr.take(indices)
    <pyarrow.lib.StringArray object at ...>
    [
      "a",
      null,
      "e",
      null
    ]
    """
    # Thin wrapper delegating to the registered "take" compute function.
    return call_function('take', [data, indices],
                         TakeOptions(boundscheck=boundscheck), memory_pool)
488
+
489
+
490
def fill_null(values, fill_value):
    """Replace each null element in values with a corresponding
    element from fill_value.

    If fill_value is scalar-like, then every null element in values
    will be replaced with fill_value. If fill_value is array-like,
    then the i-th element in values will be replaced with the i-th
    element in fill_value.

    The fill_value's type must be the same as that of values, or it
    must be able to be implicitly casted to the array's type.

    This is an alias for :func:`coalesce`.

    Parameters
    ----------
    values : Array, ChunkedArray, or Scalar-like object
        Each null element is replaced with the corresponding value
        from fill_value.
    fill_value : Array, ChunkedArray, or Scalar-like object
        If not same type as values, will attempt to cast.

    Returns
    -------
    result : depends on inputs
        Values with all null elements replaced

    Examples
    --------
    >>> import pyarrow as pa
    >>> arr = pa.array([1, 2, None, 3], type=pa.int8())
    >>> fill_value = pa.scalar(5, type=pa.int8())
    >>> arr.fill_null(fill_value)
    <pyarrow.lib.Int8Array object at ...>
    [
      1,
      2,
      5,
      3
    ]
    >>> arr = pa.array([1, 2, None, 4, None])
    >>> arr.fill_null(pa.array([10, 20, 30, 40, 50]))
    <pyarrow.lib.Int64Array object at ...>
    [
      1,
      2,
      30,
      4,
      50
    ]
    """
    # Coerce a plain Python fill value to a scalar of the values' type.
    if not isinstance(fill_value, (pa.Array, pa.ChunkedArray, pa.Scalar)):
        fill_value = pa.scalar(fill_value, type=values.type)
    elif values.type != fill_value.type:
        fill_value = pa.scalar(fill_value.as_py(), type=values.type)

    return call_function("coalesce", [values, fill_value])
547
+
548
+
549
def top_k_unstable(values, k, sort_keys=None, *, memory_pool=None):
    """
    Select the indices of the top-k ordered elements from array- or table-like
    data.

    This is a specialization for :func:`select_k_unstable`. Output is not
    guaranteed to be stable.

    Parameters
    ----------
    values : Array, ChunkedArray, RecordBatch, or Table
        Data to sort and get top indices from.
    k : int
        The number of `k` elements to keep.
    sort_keys : List-like
        Column key names to order by when input is table-like data.
        The caller's list is not modified.
    memory_pool : MemoryPool, optional
        If not passed, will allocate memory from the default memory pool.

    Returns
    -------
    result : Array
        Indices of the top-k ordered elements

    Examples
    --------
    >>> import pyarrow as pa
    >>> import pyarrow.compute as pc
    >>> arr = pa.array(["a", "b", "c", None, "e", "f"])
    >>> pc.top_k_unstable(arr, k=3)
    <pyarrow.lib.UInt64Array object at ...>
    [
      5,
      4,
      2
    ]
    """
    if sort_keys is None:
        sort_keys = []
    # Work on a fresh list so a caller-provided sort_keys list is never
    # mutated (the previous implementation appended to it in place).
    if isinstance(values, (pa.Array, pa.ChunkedArray)):
        sort_keys = [*sort_keys, ("dummy", "descending")]
    else:
        sort_keys = [(key_name, "descending") for key_name in sort_keys]
    options = SelectKOptions(k, sort_keys)
    return call_function("select_k_unstable", [values], options, memory_pool)
594
+
595
+
596
def bottom_k_unstable(values, k, sort_keys=None, *, memory_pool=None):
    """
    Select the indices of the bottom-k ordered elements from
    array- or table-like data.

    This is a specialization for :func:`select_k_unstable`. Output is not
    guaranteed to be stable.

    Parameters
    ----------
    values : Array, ChunkedArray, RecordBatch, or Table
        Data to sort and get bottom indices from.
    k : int
        The number of `k` elements to keep.
    sort_keys : List-like
        Column key names to order by when input is table-like data.
        The caller's list is not modified.
    memory_pool : MemoryPool, optional
        If not passed, will allocate memory from the default memory pool.

    Returns
    -------
    result : Array of indices
        Indices of the bottom-k ordered elements

    Examples
    --------
    >>> import pyarrow as pa
    >>> import pyarrow.compute as pc
    >>> arr = pa.array(["a", "b", "c", None, "e", "f"])
    >>> pc.bottom_k_unstable(arr, k=3)
    <pyarrow.lib.UInt64Array object at ...>
    [
      0,
      1,
      2
    ]
    """
    if sort_keys is None:
        sort_keys = []
    # Work on a fresh list so a caller-provided sort_keys list is never
    # mutated (the previous implementation appended to it in place).
    if isinstance(values, (pa.Array, pa.ChunkedArray)):
        sort_keys = [*sort_keys, ("dummy", "ascending")]
    else:
        sort_keys = [(key_name, "ascending") for key_name in sort_keys]
    options = SelectKOptions(k, sort_keys)
    return call_function("select_k_unstable", [values], options, memory_pool)
641
+
642
+
643
def random(n, *, initializer='system', options=None, memory_pool=None):
    """
    Generate numbers in the range [0, 1).

    Generated values are uniformly-distributed, double-precision
    in range [0, 1). Algorithm and seed can be changed via RandomOptions.

    Parameters
    ----------
    n : int
        Number of values to generate, must be greater than or equal to 0
    initializer : int or str
        How to initialize the underlying random generator.
        If an integer is given, it is used as a seed.
        If "system" is given, the random generator is initialized with
        a system-specific source of (hopefully true) randomness.
        Other values are invalid.
        Ignored when an explicit ``options`` is given.
    options : pyarrow.compute.RandomOptions, optional
        Alternative way of passing options.
    memory_pool : pyarrow.MemoryPool, optional
        If not passed, will allocate memory from the default memory pool.
    """
    # Previously a caller-supplied 'options' was silently discarded and
    # rebuilt from 'initializer'; honor it as documented instead.
    if options is None:
        options = RandomOptions(initializer=initializer)
    elif isinstance(options, dict):
        options = RandomOptions(**options)
    return call_function("random", [], options, memory_pool, length=n)
667
+
668
+
669
def field(*name_or_index):
    """Reference a column of the dataset.

    Stores only the field's name. Type and other information is known only when
    the expression is bound to a dataset having an explicit scheme.

    Nested references are allowed by passing multiple names or a tuple of
    names. For example ``('foo', 'bar')`` references the field named "bar"
    inside the field named "foo".

    Parameters
    ----------
    *name_or_index : string, multiple strings, tuple or int
        The name or index of the (possibly nested) field the expression
        references to.

    Returns
    -------
    field_expr : Expression
        Reference to the given field

    Examples
    --------
    >>> import pyarrow.compute as pc
    >>> pc.field("a")
    <pyarrow.compute.Expression a>
    >>> pc.field(1)
    <pyarrow.compute.Expression FieldPath(1)>
    >>> pc.field(("a", "b"))
    <pyarrow.compute.Expression FieldRef.Nested(FieldRef.Name(a) ...
    >>> pc.field("a", "b")
    <pyarrow.compute.Expression FieldRef.Nested(FieldRef.Name(a) ...
    """
    # Multiple strings (or none) supplied outside a tuple: treat them as
    # a nested reference directly.
    if len(name_or_index) != 1:
        return Expression._nested_field(name_or_index)

    ref = name_or_index[0]
    if isinstance(ref, (str, int)):
        return Expression._field(ref)
    if isinstance(ref, tuple):
        return Expression._nested_field(ref)
    raise TypeError(
        "field reference should be str, multiple str, tuple or "
        f"integer, got {type(ref)}"
    )
716
+
717
+
718
def scalar(value):
    """Expression representing a scalar value.

    Parameters
    ----------
    value : bool, int, float or string
        Python value of the scalar. Note that only a subset of types are
        currently supported.

    Returns
    -------
    scalar_expr : Expression
        An Expression representing the scalar value
    """
    # Thin convenience wrapper around the Expression constructor.
    return Expression._scalar(value)
parrot/lib/python3.10/site-packages/pyarrow/csv.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ from pyarrow._csv import ( # noqa
20
+ ReadOptions, ParseOptions, ConvertOptions, ISO8601,
21
+ open_csv, read_csv, CSVStreamingReader, write_csv,
22
+ WriteOptions, CSVWriter, InvalidRow)
parrot/lib/python3.10/site-packages/pyarrow/cuda.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # flake8: noqa
19
+
20
+
21
+ from pyarrow._cuda import (Context, IpcMemHandle, CudaBuffer,
22
+ HostBuffer, BufferReader, BufferWriter,
23
+ new_host_buffer,
24
+ serialize_record_batch, read_message,
25
+ read_record_batch)
parrot/lib/python3.10/site-packages/pyarrow/dataset.py ADDED
@@ -0,0 +1,1035 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ """Dataset is currently unstable. APIs subject to change without notice."""
19
+
20
+ import pyarrow as pa
21
+ from pyarrow.util import _is_iterable, _stringify_path, _is_path_like
22
+
23
+ try:
24
+ from pyarrow._dataset import ( # noqa
25
+ CsvFileFormat,
26
+ CsvFragmentScanOptions,
27
+ JsonFileFormat,
28
+ JsonFragmentScanOptions,
29
+ Dataset,
30
+ DatasetFactory,
31
+ DirectoryPartitioning,
32
+ FeatherFileFormat,
33
+ FilenamePartitioning,
34
+ FileFormat,
35
+ FileFragment,
36
+ FileSystemDataset,
37
+ FileSystemDatasetFactory,
38
+ FileSystemFactoryOptions,
39
+ FileWriteOptions,
40
+ Fragment,
41
+ FragmentScanOptions,
42
+ HivePartitioning,
43
+ IpcFileFormat,
44
+ IpcFileWriteOptions,
45
+ InMemoryDataset,
46
+ Partitioning,
47
+ PartitioningFactory,
48
+ Scanner,
49
+ TaggedRecordBatch,
50
+ UnionDataset,
51
+ UnionDatasetFactory,
52
+ WrittenFile,
53
+ get_partition_keys,
54
+ get_partition_keys as _get_partition_keys, # keep for backwards compatibility
55
+ _filesystemdataset_write,
56
+ )
57
+ except ImportError as exc:
58
+ raise ImportError(
59
+ f"The pyarrow installation is not built with support for 'dataset' ({str(exc)})"
60
+ ) from None
61
+
62
+ # keep Expression functionality exposed here for backwards compatibility
63
+ from pyarrow.compute import Expression, scalar, field # noqa
64
+
65
+
66
+ _orc_available = False
67
+ _orc_msg = (
68
+ "The pyarrow installation is not built with support for the ORC file "
69
+ "format."
70
+ )
71
+
72
+ try:
73
+ from pyarrow._dataset_orc import OrcFileFormat
74
+ _orc_available = True
75
+ except ImportError:
76
+ pass
77
+
78
+ _parquet_available = False
79
+ _parquet_msg = (
80
+ "The pyarrow installation is not built with support for the Parquet file "
81
+ "format."
82
+ )
83
+
84
+ try:
85
+ from pyarrow._dataset_parquet import ( # noqa
86
+ ParquetDatasetFactory,
87
+ ParquetFactoryOptions,
88
+ ParquetFileFormat,
89
+ ParquetFileFragment,
90
+ ParquetFileWriteOptions,
91
+ ParquetFragmentScanOptions,
92
+ ParquetReadOptions,
93
+ RowGroupInfo,
94
+ )
95
+ _parquet_available = True
96
+ except ImportError:
97
+ pass
98
+
99
+
100
+ try:
101
+ from pyarrow._dataset_parquet_encryption import ( # noqa
102
+ ParquetDecryptionConfig,
103
+ ParquetEncryptionConfig,
104
+ )
105
+ except ImportError:
106
+ pass
107
+
108
+
109
def __getattr__(name):
    """Module-level fallback attribute lookup (PEP 562).

    Raises a helpful ImportError when an optional format class is
    requested but the corresponding pyarrow build component is missing;
    otherwise raises the standard AttributeError.
    """
    # Availability flags are consulted lazily so this function is safe to
    # call even before the optional imports above have run.
    if name == "OrcFileFormat":
        if not _orc_available:
            raise ImportError(_orc_msg)
    elif name == "ParquetFileFormat":
        if not _parquet_available:
            raise ImportError(_parquet_msg)
    raise AttributeError(
        "module 'pyarrow.dataset' has no attribute '{0}'".format(name)
    )
119
+
120
+
121
def _directory_like_partitioning(cls, schema, field_names, dictionaries,
                                 flavor_desc):
    """Build a directory-style Partitioning (or PartitioningFactory).

    Shared implementation for the default (DirectoryPartitioning) and
    "filename" (FilenamePartitioning) flavors, which only differ in the
    partitioning class used; ``flavor_desc`` is interpolated into the
    error message when neither a schema nor field names are given.
    """
    if schema is not None:
        if field_names is not None:
            raise ValueError(
                "Cannot specify both 'schema' and 'field_names'")
        if dictionaries == 'infer':
            return cls.discover(schema=schema)
        return cls(schema, dictionaries)
    elif field_names is not None:
        if isinstance(field_names, list):
            return cls.discover(field_names)
        else:
            raise ValueError(
                "Expected list of field names, got {}".format(
                    type(field_names)))
    else:
        raise ValueError(
            "For the {} flavor, need to specify "
            "a Schema or a list of field names".format(flavor_desc))


def partitioning(schema=None, field_names=None, flavor=None,
                 dictionaries=None):
    """
    Specify a partitioning scheme.

    The supported schemes include:

    - "DirectoryPartitioning": this scheme expects one segment in the file
      path for each field in the specified schema (all fields are required
      to be present). For example given schema<year:int16, month:int8> the
      path "/2009/11" would be parsed to ("year"_ == 2009 and "month"_ == 11).
    - "HivePartitioning": a scheme for "/$key=$value/" nested directories
      as found in Apache Hive. Partition keys are represented in the form
      $key=$value in directory names. Field order is ignored, as are
      missing or unrecognized field names.
    - "FilenamePartitioning": this scheme expects the partitions will have
      filenames containing the field values separated by "_", e.g.
      "2009_11_part-0.parquet" for schema<year:int16, month:int8>.

    Parameters
    ----------
    schema : pyarrow.Schema, default None
        The schema that describes the partitions present in the file path.
        If not specified, and `field_names` and/or `flavor` are specified,
        the schema will be inferred from the file path (and a
        PartitioningFactory is returned).
    field_names : list of str, default None
        A list of strings (field names). If specified, the schema's types
        are inferred from the file paths (only valid for
        DirectoryPartitioning).
    flavor : str, default None
        The default is DirectoryPartitioning. Specify ``flavor="hive"`` for
        a HivePartitioning, and ``flavor="filename"`` for a
        FilenamePartitioning.
    dictionaries : dict[str, Array]
        If the type of any field of `schema` is a dictionary type, the
        corresponding entry of `dictionaries` must be an array containing
        every value which may be taken by the corresponding column or an
        error will be raised in parsing. Alternatively, pass `infer` to
        have Arrow discover the dictionary values, in which case a
        PartitioningFactory is returned.

    Returns
    -------
    Partitioning or PartitioningFactory
        The partitioning scheme

    Examples
    --------
    Specify the Schema for paths like "/2009/June":

    >>> import pyarrow as pa
    >>> import pyarrow.dataset as ds
    >>> part = ds.partitioning(pa.schema([("year", pa.int16()),
    ...                                   ("month", pa.string())]))

    or let the types be inferred by only specifying the field names:

    >>> part = ds.partitioning(field_names=["year", "month"])

    Create a Hive scheme for a path like "/year=2009/month=11":

    >>> part = ds.partitioning(
    ...     pa.schema([("year", pa.int16()), ("month", pa.int8())]),
    ...     flavor="hive")

    A Hive scheme can also be discovered from the directory structure (and
    types will be inferred):

    >>> part = ds.partitioning(flavor="hive")
    """
    if flavor is None:
        # default flavor: directory partitioning
        return _directory_like_partitioning(
            DirectoryPartitioning, schema, field_names, dictionaries,
            "default directory")
    if flavor == "filename":
        return _directory_like_partitioning(
            FilenamePartitioning, schema, field_names, dictionaries,
            "filename")
    elif flavor == 'hive':
        if field_names is not None:
            raise ValueError("Cannot specify 'field_names' for flavor 'hive'")
        elif schema is not None:
            if isinstance(schema, pa.Schema):
                if dictionaries == 'infer':
                    return HivePartitioning.discover(schema=schema)
                return HivePartitioning(schema, dictionaries)
            else:
                raise ValueError(
                    "Expected Schema for 'schema', got {}".format(
                        type(schema)))
        else:
            return HivePartitioning.discover()
    else:
        raise ValueError("Unsupported flavor")
278
+
279
+
280
def _ensure_partitioning(scheme):
    """
    Validate input and return a Partitioning(Factory).

    It passes None through if no partitioning scheme is defined.
    """
    # None and already-constructed partitionings pass through untouched
    if scheme is None:
        return scheme
    if isinstance(scheme, str):
        # a flavor string such as "hive"
        return partitioning(flavor=scheme)
    if isinstance(scheme, list):
        # a list of field names implies directory partitioning discovery
        return partitioning(field_names=scheme)
    if isinstance(scheme, (Partitioning, PartitioningFactory)):
        return scheme
    raise ValueError("Expected Partitioning or PartitioningFactory, got {}"
                     .format(type(scheme)))
298
+
299
+
300
def _ensure_format(obj):
    """Coerce a format specification into a FileFormat instance.

    Accepts either a FileFormat instance (returned unchanged) or one of
    the short strings "parquet", "ipc"/"arrow", "feather", "csv", "orc",
    "json". Raises ValueError for unavailable optional formats
    (parquet/orc built out) or unknown strings.
    """
    if isinstance(obj, FileFormat):
        return obj
    if obj == "parquet":
        if not _parquet_available:
            raise ValueError(_parquet_msg)
        return ParquetFileFormat()
    if obj in {"ipc", "arrow"}:
        return IpcFileFormat()
    if obj == "feather":
        return FeatherFileFormat()
    if obj == "csv":
        return CsvFileFormat()
    if obj == "orc":
        if not _orc_available:
            raise ValueError(_orc_msg)
        return OrcFileFormat()
    if obj == "json":
        return JsonFileFormat()
    raise ValueError("format '{}' is not supported".format(obj))
321
+
322
+
323
def _ensure_multiple_sources(paths, filesystem=None):
    """
    Treat a list of paths as files belonging to a single file system

    If the file system is local then also validates that all paths
    are referencing existing *files* otherwise any non-file paths will be
    silently skipped (for example on a remote filesystem).

    Parameters
    ----------
    paths : list of path-like
        Note that URIs are not allowed.
    filesystem : FileSystem or str, optional
        If an URI is passed, then its path component will act as a prefix
        for the file paths.

    Returns
    -------
    (FileSystem, list of str)
        File system object and a list of normalized paths.

    Raises
    ------
    TypeError
        If the passed filesystem has wrong type.
    IOError
        If the file system is local and a referenced path is not available
        or not a file.
    """
    from pyarrow.fs import (
        LocalFileSystem, SubTreeFileSystem, _MockFileSystem, FileType,
        _ensure_filesystem
    )

    # fall back to the local file system as the default; otherwise accept
    # FileSystem instances and URI strings alike
    if filesystem is None:
        filesystem = LocalFileSystem()
    else:
        filesystem = _ensure_filesystem(filesystem)

    is_local = isinstance(filesystem, (LocalFileSystem, _MockFileSystem)) or (
        isinstance(filesystem, SubTreeFileSystem) and
        isinstance(filesystem.base_fs, LocalFileSystem)
    )

    # allow normalizing irregular paths such as Windows local paths
    normalized = [filesystem.normalize_path(_stringify_path(p))
                  for p in paths]

    # Only local filesystems eagerly verify that every path points to an
    # existing regular file (remote checks would incur serial IO per file).
    # Possible improvement: group the file_infos by type and raise for
    # multiple paths per error category.
    if is_local:
        for info in filesystem.get_file_info(normalized):
            if info.type == FileType.File:
                continue
            if info.type == FileType.NotFound:
                raise FileNotFoundError(info.path)
            if info.type == FileType.Directory:
                raise IsADirectoryError(
                    'Path {} points to a directory, but only file paths are '
                    'supported. To construct a nested or union dataset pass '
                    'a list of dataset objects instead.'.format(info.path)
                )
            raise IOError(
                'Path {} exists but its type is unknown (could be a '
                'special file such as a Unix socket or character device, '
                'or Windows NUL / CON / ...)'.format(info.path)
            )

    return filesystem, normalized
397
+
398
+
399
def _ensure_single_source(path, filesystem=None):
    """
    Treat path as either a recursively traversable directory or a single
    file.

    Parameters
    ----------
    path : path-like
    filesystem : FileSystem or str, optional
        If an URI is passed, then its path component will act as a prefix
        for the file paths.

    Returns
    -------
    (FileSystem, list of str or fs.Selector)
        File system object and either a single item list pointing to a
        file or an fs.Selector object pointing to a directory.

    Raises
    ------
    TypeError
        If the passed filesystem has wrong type.
    FileNotFoundError
        If the referenced file or directory doesn't exist.
    """
    from pyarrow.fs import FileType, FileSelector, _resolve_filesystem_and_path

    # at this point we already checked that `path` is a path-like
    filesystem, path = _resolve_filesystem_and_path(path, filesystem)

    # ensure that the path is normalized before passing to dataset discovery
    path = filesystem.normalize_path(path)

    # inspect the path and dispatch on its type: directories become a
    # recursive selector, files a single-element list
    info = filesystem.get_file_info(path)
    if info.type == FileType.Directory:
        return filesystem, FileSelector(path, recursive=True)
    if info.type == FileType.File:
        return filesystem, [path]
    raise FileNotFoundError(path)
444
+
445
+
446
def _filesystem_dataset(source, schema=None, filesystem=None,
                        partitioning=None, format=None,
                        partition_base_dir=None, exclude_invalid_files=None,
                        selector_ignore_prefixes=None):
    """
    Create a FileSystemDataset which can be used to build a Dataset.

    Parameters are documented in the dataset function.

    Returns
    -------
    FileSystemDataset
    """
    from pyarrow.fs import LocalFileSystem, _ensure_filesystem, FileInfo

    format = _ensure_format(format or 'parquet')
    partitioning = _ensure_partitioning(partitioning)

    if not isinstance(source, (list, tuple)):
        # a single path: file or recursively-discovered directory
        fs, paths_or_selector = _ensure_single_source(source, filesystem)
    elif source and isinstance(source[0], FileInfo):
        # a non-empty list of FileInfo objects is passed through as-is;
        # only the filesystem needs resolving (default: local)
        if filesystem is None:
            fs = LocalFileSystem()
        else:
            fs = _ensure_filesystem(filesystem)
        paths_or_selector = source
    else:
        # a list of paths on a single (possibly inferred) filesystem
        fs, paths_or_selector = _ensure_multiple_sources(source, filesystem)

    options = FileSystemFactoryOptions(
        partitioning=partitioning,
        partition_base_dir=partition_base_dir,
        exclude_invalid_files=exclude_invalid_files,
        selector_ignore_prefixes=selector_ignore_prefixes
    )
    factory = FileSystemDatasetFactory(fs, paths_or_selector, format, options)
    return factory.finish(schema)
487
+
488
+
489
def _in_memory_dataset(source, schema=None, **kwargs):
    """Wrap in-memory Arrow data in an InMemoryDataset.

    Filesystem-specific keyword arguments are rejected: they have no
    meaning for in-memory sources.
    """
    for value in kwargs.values():
        if value is not None:
            raise ValueError(
                "For in-memory datasets, you cannot pass any additional "
                "arguments")
    return InMemoryDataset(source, schema)
494
+
495
+
496
def _union_dataset(children, schema=None, **kwargs):
    """Combine a list of Dataset objects into a single UnionDataset.

    Filesystem-specific keyword arguments are rejected, children with
    pending scan options (filters/projections) are refused, and every
    child is re-cast to the unified (or explicitly given) schema.
    """
    extra = [v for v in kwargs.values() if v is not None]
    if extra:
        raise ValueError(
            "When passing a list of Datasets, you cannot pass any additional "
            "arguments"
        )

    if schema is None:
        # unify the children datasets' schemas
        schema = pa.unify_schemas([child.schema for child in children])

    for child in children:
        if getattr(child, "_scan_options", None):
            raise ValueError(
                "Creating an UnionDataset from filtered or projected Datasets "
                "is currently not supported. Union the unfiltered datasets "
                "and apply the filter to the resulting union."
            )

    # create datasets with the requested schema
    recast_children = [child.replace_schema(schema) for child in children]
    return UnionDataset(schema, recast_children)
519
+
520
+
521
def parquet_dataset(metadata_path, schema=None, filesystem=None, format=None,
                    partitioning=None, partition_base_dir=None):
    """
    Create a FileSystemDataset from a `_metadata` file created via
    `pyarrow.parquet.write_metadata`.

    Parameters
    ----------
    metadata_path : path,
        Path pointing to a single file parquet metadata file
    schema : Schema, optional
        Optionally provide the Schema for the Dataset, in which case it
        will not be inferred from the source.
    filesystem : FileSystem or URI string, default None
        If a single path is given as source and filesystem is None, then
        the filesystem will be inferred from the path.
        If an URI string is passed, then a filesystem object is
        constructed using the URI's optional path component as a directory
        prefix. Note that the URIs on Windows must follow 'file:///C:...'
        or 'file:/C:...' patterns.
    format : ParquetFileFormat
        An instance of a ParquetFileFormat if special options needs to be
        passed.
    partitioning : Partitioning, PartitioningFactory, str, list of str
        The partitioning scheme specified with the ``partitioning()``
        function. A flavor string can be used as shortcut, and with a list
        of field names a DirectoryPartitioning will be inferred.
    partition_base_dir : str, optional
        For the purposes of applying the partitioning, paths will be
        stripped of the partition_base_dir. Files not matching the
        partition_base_dir prefix will be skipped for partitioning
        discovery. The ignored files will still be part of the Dataset,
        but will not have partition information.

    Returns
    -------
    FileSystemDataset
        The dataset corresponding to the given metadata
    """
    from pyarrow.fs import LocalFileSystem, _ensure_filesystem

    # only the Parquet format can carry a `_metadata` sidecar file
    if format is None:
        format = ParquetFileFormat()
    elif not isinstance(format, ParquetFileFormat):
        raise ValueError("format argument must be a ParquetFileFormat")

    filesystem = (LocalFileSystem() if filesystem is None
                  else _ensure_filesystem(filesystem))

    normalized_path = filesystem.normalize_path(
        _stringify_path(metadata_path))
    factory = ParquetDatasetFactory(
        normalized_path, filesystem, format,
        options=ParquetFactoryOptions(
            partition_base_dir=partition_base_dir,
            partitioning=_ensure_partitioning(partitioning)
        ))
    return factory.finish(schema)
582
+
583
+
584
def dataset(source, schema=None, format=None, filesystem=None,
            partitioning=None, partition_base_dir=None,
            exclude_invalid_files=None, ignore_prefixes=None):
    """
    Open a dataset.

    Datasets provides functionality to efficiently work with tabular,
    potentially larger than memory and multi-file dataset.

    - A unified interface for different sources, like Parquet and Feather
    - Discovery of sources (crawling directories, handle directory-based
      partitioned datasets, basic schema normalization)
    - Optimized reading with predicate pushdown (filtering rows), projection
      (selecting columns), parallel reading or fine-grained managing of
      tasks.

    Note that this is the high-level API, to have more control over the
    dataset construction use the low-level API classes (FileSystemDataset,
    FilesystemDatasetFactory, etc.)

    Parameters
    ----------
    source : path, list of paths, dataset, list of datasets, (list of) \
RecordBatch or Table, iterable of RecordBatch, RecordBatchReader, or URI
        Path pointing to a single file:
            Open a FileSystemDataset from a single file.
        Path pointing to a directory:
            The directory gets discovered recursively according to a
            partitioning scheme if given.
        List of file paths:
            Create a FileSystemDataset from explicitly given files. The
            files must be located on the same filesystem given by the
            filesystem parameter.
            Note that in contrary of construction from a single file,
            passing URIs as paths is not allowed.
        List of datasets:
            A nested UnionDataset gets constructed, it allows arbitrary
            composition of other datasets.
            Note that additional keyword arguments are not allowed.
        (List of) batches or tables, iterable of batches, or \
RecordBatchReader:
            Create an InMemoryDataset. If an iterable or empty list is
            given, a schema must also be given. If an iterable or
            RecordBatchReader is given, the resulting dataset can only be
            scanned once; further attempts will raise an error.
    schema : Schema, optional
        Optionally provide the Schema for the Dataset, in which case it
        will not be inferred from the source.
    format : FileFormat or str
        Currently "parquet", "ipc"/"arrow"/"feather", "csv", "json", and
        "orc" are supported. For Feather, only version 2 files are
        supported.
    filesystem : FileSystem or URI string, default None
        If a single path is given as source and filesystem is None, then
        the filesystem will be inferred from the path.
        If an URI string is passed, then a filesystem object is
        constructed using the URI's optional path component as a directory
        prefix. See the examples below.
        Note that the URIs on Windows must follow 'file:///C:...' or
        'file:/C:...' patterns.
    partitioning : Partitioning, PartitioningFactory, str, list of str
        The partitioning scheme specified with the ``partitioning()``
        function. A flavor string can be used as shortcut, and with a list
        of field names a DirectoryPartitioning will be inferred.
    partition_base_dir : str, optional
        For the purposes of applying the partitioning, paths will be
        stripped of the partition_base_dir. Files not matching the
        partition_base_dir prefix will be skipped for partitioning
        discovery. The ignored files will still be part of the Dataset,
        but will not have partition information.
    exclude_invalid_files : bool, optional (default True)
        If True, invalid files will be excluded (file format specific
        check). This will incur IO for each files in a serial and single
        threaded fashion. Disabling this feature will skip the IO, but
        unsupported files may be present in the Dataset (resulting in an
        error at scan time).
    ignore_prefixes : list, optional
        Files matching any of these prefixes will be ignored by the
        discovery process. This is matched to the basename of a path.
        By default this is ['.', '_'].
        Note that discovery happens only if a directory is passed as
        source.

    Returns
    -------
    dataset : Dataset
        Either a FileSystemDataset or a UnionDataset depending on the
        source parameter.

    Examples
    --------
    Opening a single file:

    >>> import pyarrow.dataset as ds
    >>> dataset = ds.dataset("file.parquet", format="parquet")  # doctest: +SKIP

    Opening a dataset for a single directory:

    >>> dataset = ds.dataset("partitioned_dataset",
    ...                      format="parquet")  # doctest: +SKIP

    For a single directory from a S3 bucket:

    >>> ds.dataset("s3://mybucket/nyc-taxi/",
    ...            format="parquet")  # doctest: +SKIP

    With filesystem provided:

    >>> paths = [
    ...     'part0/data.parquet',
    ...     'part1/data.parquet',
    ...     'part3/data.parquet',
    ... ]
    >>> ds.dataset(paths, filesystem='file:///directory/prefix',
    ...            format='parquet')  # doctest: +SKIP

    Which is equivalent with:

    >>> fs = SubTreeFileSystem("/directory/prefix",
    ...                        LocalFileSystem())  # doctest: +SKIP
    >>> ds.dataset(paths, filesystem=fs, format='parquet')  # doctest: +SKIP

    Construction of a nested dataset:

    >>> ds.dataset([
    ...     dataset("s3://old-taxi-data", format="parquet"),
    ...     dataset("local/path/to/data", format="ipc")
    ... ])  # doctest: +SKIP
    """
    from pyarrow.fs import FileInfo

    # collect the keyword arguments for later reuse by the specialized
    # constructors
    kwargs = dict(
        schema=schema,
        filesystem=filesystem,
        partitioning=partitioning,
        format=format,
        partition_base_dir=partition_base_dir,
        exclude_invalid_files=exclude_invalid_files,
        selector_ignore_prefixes=ignore_prefixes
    )

    if _is_path_like(source):
        return _filesystem_dataset(source, **kwargs)

    if isinstance(source, (tuple, list)):
        # dispatch on the (homogeneous) element type of the list
        if all(_is_path_like(elem) or isinstance(elem, FileInfo)
               for elem in source):
            return _filesystem_dataset(source, **kwargs)
        if all(isinstance(elem, Dataset) for elem in source):
            return _union_dataset(source, **kwargs)
        if all(isinstance(elem, (pa.RecordBatch, pa.Table))
               for elem in source):
            return _in_memory_dataset(source, **kwargs)
        unique_types = set(type(elem).__name__ for elem in source)
        type_names = ', '.join('{}'.format(t) for t in unique_types)
        raise TypeError(
            'Expected a list of path-like or dataset objects, or a list '
            'of batches or tables. The given list contains the following '
            'types: {}'.format(type_names)
        )

    if isinstance(source, (pa.RecordBatch, pa.Table)):
        return _in_memory_dataset(source, **kwargs)

    raise TypeError(
        'Expected a path-like, list of path-likes or a list of Datasets '
        'instead of the given type: {}'.format(type(source).__name__)
    )
818
+
819
+
820
def _ensure_write_partitioning(part, schema, flavor):
    """Normalize the ``partitioning`` argument of ``write_dataset``.

    Accepts a Partitioning instance (flavor must then be absent), a list
    of field names (resolved against ``schema``), or None (empty-schema
    partitioning). PartitioningFactory instances are rejected because
    writing requires a concrete schema.
    """
    if isinstance(part, PartitioningFactory):
        raise ValueError("A PartitioningFactory cannot be used. "
                         "Did you call the partitioning function "
                         "without supplying a schema?")

    if isinstance(part, Partitioning):
        if flavor:
            raise ValueError(
                "Providing a partitioning_flavor with "
                "a Partitioning object is not supported"
            )
        return part

    if isinstance(part, (tuple, list)):
        # Name of fields were provided instead of a partitioning object.
        # Create a partitioning factory with those field names.
        part = partitioning(
            schema=pa.schema([schema.field(f) for f in part]),
            flavor=flavor
        )
    elif part is None:
        part = partitioning(pa.schema([]), flavor=flavor)

    if not isinstance(part, Partitioning):
        raise ValueError(
            "partitioning must be a Partitioning object or "
            "a list of column names"
        )

    return part
848
+
849
+
850
+ def write_dataset(data, base_dir, *, basename_template=None, format=None,
851
+ partitioning=None, partitioning_flavor=None, schema=None,
852
+ filesystem=None, file_options=None, use_threads=True,
853
+ max_partitions=None, max_open_files=None,
854
+ max_rows_per_file=None, min_rows_per_group=None,
855
+ max_rows_per_group=None, file_visitor=None,
856
+ existing_data_behavior='error', create_dir=True):
857
+ """
858
+ Write a dataset to a given format and partitioning.
859
+
860
+ Parameters
861
+ ----------
862
+ data : Dataset, Table/RecordBatch, RecordBatchReader, list of \
863
+ Table/RecordBatch, or iterable of RecordBatch
864
+ The data to write. This can be a Dataset instance or
865
+ in-memory Arrow data. If an iterable is given, the schema must
866
+ also be given.
867
+ base_dir : str
868
+ The root directory where to write the dataset.
869
+ basename_template : str, optional
870
+ A template string used to generate basenames of written data files.
871
+ The token '{i}' will be replaced with an automatically incremented
872
+ integer. If not specified, it defaults to
873
+ "part-{i}." + format.default_extname
874
+ format : FileFormat or str
875
+ The format in which to write the dataset. Currently supported:
876
+ "parquet", "ipc"/"arrow"/"feather", and "csv". If a FileSystemDataset
877
+ is being written and `format` is not specified, it defaults to the
878
+ same format as the specified FileSystemDataset. When writing a
879
+ Table or RecordBatch, this keyword is required.
880
+ partitioning : Partitioning or list[str], optional
881
+ The partitioning scheme specified with the ``partitioning()``
882
+ function or a list of field names. When providing a list of
883
+ field names, you can use ``partitioning_flavor`` to drive which
884
+ partitioning type should be used.
885
+ partitioning_flavor : str, optional
886
+ One of the partitioning flavors supported by
887
+ ``pyarrow.dataset.partitioning``. If omitted will use the
888
+ default of ``partitioning()`` which is directory partitioning.
889
+ schema : Schema, optional
890
+ filesystem : FileSystem, optional
891
+ file_options : pyarrow.dataset.FileWriteOptions, optional
892
+ FileFormat specific write options, created using the
893
+ ``FileFormat.make_write_options()`` function.
894
+ use_threads : bool, default True
895
+ Write files in parallel. If enabled, then maximum parallelism will be
896
+ used determined by the number of available CPU cores.
897
+ max_partitions : int, default 1024
898
+ Maximum number of partitions any batch may be written into.
899
+ max_open_files : int, default 1024
900
+ If greater than 0 then this will limit the maximum number of
901
+ files that can be left open. If an attempt is made to open
902
+ too many files then the least recently used file will be closed.
903
+ If this setting is set too low you may end up fragmenting your
904
+ data into many small files.
905
+ max_rows_per_file : int, default 0
906
+ Maximum number of rows per file. If greater than 0 then this will
907
+ limit how many rows are placed in any single file. Otherwise there
908
+ will be no limit and one file will be created in each output
909
+ directory unless files need to be closed to respect max_open_files
910
+ min_rows_per_group : int, default 0
911
+ Minimum number of rows per group. When the value is greater than 0,
912
+ the dataset writer will batch incoming data and only write the row
913
+ groups to the disk when sufficient rows have accumulated.
914
+ max_rows_per_group : int, default 1024 * 1024
915
+ Maximum number of rows per group. If the value is greater than 0,
916
+ then the dataset writer may split up large incoming batches into
917
+ multiple row groups. If this value is set, then min_rows_per_group
918
+ should also be set. Otherwise it could end up with very small row
919
+ groups.
920
+ file_visitor : function
921
+ If set, this function will be called with a WrittenFile instance
922
+ for each file created during the call. This object will have both
923
+ a path attribute and a metadata attribute.
924
+
925
+ The path attribute will be a string containing the path to
926
+ the created file.
927
+
928
+ The metadata attribute will be the parquet metadata of the file.
929
+ This metadata will have the file path attribute set and can be used
930
+ to build a _metadata file. The metadata attribute will be None if
931
+ the format is not parquet.
932
+
933
+ Example visitor which simple collects the filenames created::
934
+
935
+ visited_paths = []
936
+
937
+ def file_visitor(written_file):
938
+ visited_paths.append(written_file.path)
939
+ existing_data_behavior : 'error' | 'overwrite_or_ignore' | \
940
+ 'delete_matching'
941
+ Controls how the dataset will handle data that already exists in
942
+ the destination. The default behavior ('error') is to raise an error
943
+ if any data exists in the destination.
944
+
945
+ 'overwrite_or_ignore' will ignore any existing data and will
946
+ overwrite files with the same name as an output file. Other
947
+ existing files will be ignored. This behavior, in combination
948
+ with a unique basename_template for each write, will allow for
949
+ an append workflow.
950
+
951
+ 'delete_matching' is useful when you are writing a partitioned
952
+ dataset. The first time each partition directory is encountered
953
+ the entire directory will be deleted. This allows you to overwrite
954
+ old partitions completely.
955
+ create_dir : bool, default True
956
+ If False, directories will not be created. This can be useful for
957
+ filesystems that do not require directories.
958
+ """
959
+ from pyarrow.fs import _resolve_filesystem_and_path
960
+
961
+ if isinstance(data, (list, tuple)):
962
+ schema = schema or data[0].schema
963
+ data = InMemoryDataset(data, schema=schema)
964
+ elif isinstance(data, (pa.RecordBatch, pa.Table)):
965
+ schema = schema or data.schema
966
+ data = InMemoryDataset(data, schema=schema)
967
+ elif isinstance(data, pa.ipc.RecordBatchReader) or _is_iterable(data):
968
+ data = Scanner.from_batches(data, schema=schema)
969
+ schema = None
970
+ elif not isinstance(data, (Dataset, Scanner)):
971
+ raise ValueError(
972
+ "Only Dataset, Scanner, Table/RecordBatch, RecordBatchReader, "
973
+ "a list of Tables/RecordBatches, or iterable of batches are "
974
+ "supported."
975
+ )
976
+
977
+ if format is None and isinstance(data, FileSystemDataset):
978
+ format = data.format
979
+ else:
980
+ format = _ensure_format(format)
981
+
982
+ if file_options is None:
983
+ file_options = format.make_write_options()
984
+
985
+ if format != file_options.format:
986
+ raise TypeError("Supplied FileWriteOptions have format {}, "
987
+ "which doesn't match supplied FileFormat {}".format(
988
+ format, file_options))
989
+
990
+ if basename_template is None:
991
+ basename_template = "part-{i}." + format.default_extname
992
+
993
+ if max_partitions is None:
994
+ max_partitions = 1024
995
+
996
+ if max_open_files is None:
997
+ max_open_files = 1024
998
+
999
+ if max_rows_per_file is None:
1000
+ max_rows_per_file = 0
1001
+
1002
+ if max_rows_per_group is None:
1003
+ max_rows_per_group = 1 << 20
1004
+
1005
+ if min_rows_per_group is None:
1006
+ min_rows_per_group = 0
1007
+
1008
+ # at this point data is a Scanner or a Dataset, anything else
1009
+ # was converted to one of those two. So we can grab the schema
1010
+ # to build the partitioning object from Dataset.
1011
+ if isinstance(data, Scanner):
1012
+ partitioning_schema = data.projected_schema
1013
+ else:
1014
+ partitioning_schema = data.schema
1015
+ partitioning = _ensure_write_partitioning(partitioning,
1016
+ schema=partitioning_schema,
1017
+ flavor=partitioning_flavor)
1018
+
1019
+ filesystem, base_dir = _resolve_filesystem_and_path(base_dir, filesystem)
1020
+
1021
+ if isinstance(data, Dataset):
1022
+ scanner = data.scanner(use_threads=use_threads)
1023
+ else:
1024
+ # scanner was passed directly by the user, in which case a schema
1025
+ # cannot be passed
1026
+ if schema is not None:
1027
+ raise ValueError("Cannot specify a schema when writing a Scanner")
1028
+ scanner = data
1029
+
1030
+ _filesystemdataset_write(
1031
+ scanner, base_dir, basename_template, filesystem, partitioning,
1032
+ file_options, max_partitions, file_visitor, existing_data_behavior,
1033
+ max_open_files, max_rows_per_file,
1034
+ min_rows_per_group, max_rows_per_group, create_dir
1035
+ )
parrot/lib/python3.10/site-packages/pyarrow/error.pxi ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from cpython.exc cimport PyErr_CheckSignals, PyErr_SetInterrupt
19
+
20
+ from pyarrow.includes.libarrow cimport CStatus
21
+ from pyarrow.includes.libarrow_python cimport IsPyError, RestorePyError
22
+ from pyarrow.includes.common cimport c_string
23
+
24
+ from contextlib import contextmanager
25
+ import os
26
+ import signal
27
+ import threading
28
+
29
+ from pyarrow.lib import is_threading_enabled
30
+ from pyarrow.util import _break_traceback_cycle_from_frame
31
+
32
+
33
+ class ArrowException(Exception):
34
+ pass
35
+
36
+
37
+ class ArrowInvalid(ValueError, ArrowException):
38
+ pass
39
+
40
+
41
+ class ArrowMemoryError(MemoryError, ArrowException):
42
+ pass
43
+
44
+
45
+ class ArrowKeyError(KeyError, ArrowException):
46
+ def __str__(self):
47
+ # Override KeyError.__str__, as it uses the repr() of the key
48
+ return ArrowException.__str__(self)
49
+
50
+
51
+ class ArrowTypeError(TypeError, ArrowException):
52
+ pass
53
+
54
+
55
+ class ArrowNotImplementedError(NotImplementedError, ArrowException):
56
+ pass
57
+
58
+
59
+ class ArrowCapacityError(ArrowException):
60
+ pass
61
+
62
+
63
+ class ArrowIndexError(IndexError, ArrowException):
64
+ pass
65
+
66
+
67
+ class ArrowSerializationError(ArrowException):
68
+ pass
69
+
70
+
71
+ class ArrowCancelled(ArrowException):
72
+ def __init__(self, message, signum=None):
73
+ super().__init__(message)
74
+ self.signum = signum
75
+
76
+
77
+ # Compatibility alias
78
+ ArrowIOError = IOError
79
+
80
+
81
+ # check_status() and convert_status() could be written directly in C++
82
+ # if we didn't define Arrow-specific subclasses (ArrowInvalid etc.)
83
+ cdef int check_status(const CStatus& status) except -1 nogil:
84
+ if status.ok():
85
+ return 0
86
+
87
+ with gil:
88
+ if IsPyError(status):
89
+ RestorePyError(status)
90
+ return -1
91
+
92
+ raise convert_status(status)
93
+
94
+
95
+ cdef object convert_status(const CStatus& status):
96
+ if IsPyError(status):
97
+ try:
98
+ RestorePyError(status)
99
+ except BaseException as e:
100
+ return e
101
+
102
+ # We don't use Status::ToString() as it would redundantly include
103
+ # the C++ class name.
104
+ message = frombytes(status.message(), safe=True)
105
+ detail = status.detail()
106
+ if detail != nullptr:
107
+ message += ". Detail: " + frombytes(detail.get().ToString(),
108
+ safe=True)
109
+
110
+ if status.IsInvalid():
111
+ return ArrowInvalid(message)
112
+ elif status.IsIOError():
113
+ # Note: OSError constructor is
114
+ # OSError(message)
115
+ # or
116
+ # OSError(errno, message, filename=None)
117
+ # or (on Windows)
118
+ # OSError(errno, message, filename, winerror)
119
+ errno = ErrnoFromStatus(status)
120
+ winerror = WinErrorFromStatus(status)
121
+ if winerror != 0:
122
+ return IOError(errno, message, None, winerror)
123
+ elif errno != 0:
124
+ return IOError(errno, message)
125
+ else:
126
+ return IOError(message)
127
+ elif status.IsOutOfMemory():
128
+ return ArrowMemoryError(message)
129
+ elif status.IsKeyError():
130
+ return ArrowKeyError(message)
131
+ elif status.IsNotImplemented():
132
+ return ArrowNotImplementedError(message)
133
+ elif status.IsTypeError():
134
+ return ArrowTypeError(message)
135
+ elif status.IsCapacityError():
136
+ return ArrowCapacityError(message)
137
+ elif status.IsIndexError():
138
+ return ArrowIndexError(message)
139
+ elif status.IsSerializationError():
140
+ return ArrowSerializationError(message)
141
+ elif status.IsCancelled():
142
+ signum = SignalFromStatus(status)
143
+ if signum > 0:
144
+ return ArrowCancelled(message, signum)
145
+ else:
146
+ return ArrowCancelled(message)
147
+ else:
148
+ message = frombytes(status.ToString(), safe=True)
149
+ return ArrowException(message)
150
+
151
+
152
+ # These are API functions for C++ PyArrow
153
+ cdef api int pyarrow_internal_check_status(const CStatus& status) \
154
+ except -1 nogil:
155
+ return check_status(status)
156
+
157
+ cdef api object pyarrow_internal_convert_status(const CStatus& status):
158
+ return convert_status(status)
159
+
160
+
161
+ cdef class StopToken:
162
+ cdef void init(self, CStopToken stop_token):
163
+ self.stop_token = move(stop_token)
164
+
165
+
166
+ cdef c_bool signal_handlers_enabled = True
167
+
168
+
169
+ def enable_signal_handlers(c_bool enable):
170
+ """
171
+ Enable or disable interruption of long-running operations.
172
+
173
+ By default, certain long running operations will detect user
174
+ interruptions, such as by pressing Ctrl-C. This detection relies
175
+ on setting a signal handler for the duration of the long-running
176
+ operation, and may therefore interfere with other frameworks or
177
+ libraries (such as an event loop).
178
+
179
+ Parameters
180
+ ----------
181
+ enable : bool
182
+ Whether to enable user interruption by setting a temporary
183
+ signal handler.
184
+ """
185
+ global signal_handlers_enabled
186
+ signal_handlers_enabled = enable
187
+
188
+
189
+ # For internal use
190
+
191
+ # Whether we need a workaround for https://bugs.python.org/issue42248
192
+ have_signal_refcycle = (sys.version_info < (3, 8, 10) or
193
+ (3, 9) <= sys.version_info < (3, 9, 5) or
194
+ sys.version_info[:2] == (3, 10))
195
+
196
+ cdef class SignalStopHandler:
197
+ cdef:
198
+ StopToken _stop_token
199
+ vector[int] _signals
200
+ c_bool _enabled
201
+
202
+ def __cinit__(self):
203
+ self._enabled = False
204
+
205
+ self._init_signals()
206
+ if have_signal_refcycle:
207
+ _break_traceback_cycle_from_frame(sys._getframe(0))
208
+
209
+ self._stop_token = StopToken()
210
+
211
+ if not self._signals.empty():
212
+ maybe_source = SetSignalStopSource()
213
+ if not maybe_source.ok():
214
+ # See ARROW-11841 / ARROW-17173: in complex interaction
215
+ # scenarios (such as R calling into Python), SetSignalStopSource()
216
+ # may have already activated a signal-receiving StopSource.
217
+ # Just warn instead of erroring out.
218
+ maybe_source.status().Warn()
219
+ else:
220
+ self._stop_token.init(deref(maybe_source).token())
221
+ # signals don't work on Emscripten without threads.
222
+ # and possibly other single-thread environments.
223
+ self._enabled = is_threading_enabled()
224
+
225
+ def _init_signals(self):
226
+ if (signal_handlers_enabled and
227
+ threading.current_thread() is threading.main_thread()):
228
+ self._signals = [
229
+ sig for sig in (signal.SIGINT, signal.SIGTERM)
230
+ if signal.getsignal(sig) not in (signal.SIG_DFL,
231
+ signal.SIG_IGN, None)]
232
+
233
+ def __enter__(self):
234
+ if self._enabled:
235
+ check_status(RegisterCancellingSignalHandler(self._signals))
236
+ return self
237
+
238
+ def __exit__(self, exc_type, exc_value, exc_tb):
239
+ if self._enabled:
240
+ UnregisterCancellingSignalHandler()
241
+ if exc_value is None:
242
+ # Make sure we didn't lose a signal
243
+ try:
244
+ check_status(self._stop_token.stop_token.Poll())
245
+ except ArrowCancelled as e:
246
+ exc_value = e
247
+ if isinstance(exc_value, ArrowCancelled):
248
+ if exc_value.signum:
249
+ # Re-emit the exact same signal. We restored the Python signal
250
+ # handler above, so it should receive it.
251
+ if os.name == 'nt':
252
+ SendSignal(exc_value.signum)
253
+ else:
254
+ SendSignalToThread(exc_value.signum,
255
+ threading.main_thread().ident)
256
+ else:
257
+ # Simulate Python receiving a SIGINT
258
+ # (see https://bugs.python.org/issue43356 for why we can't
259
+ # simulate the exact signal number)
260
+ PyErr_SetInterrupt()
261
+ # Maximize chances of the Python signal handler being executed now.
262
+ # Otherwise a potential KeyboardInterrupt might be missed by an
263
+ # immediately enclosing try/except block.
264
+ PyErr_CheckSignals()
265
+ # ArrowCancelled will be re-raised if PyErr_CheckSignals()
266
+ # returned successfully.
267
+
268
+ def __dealloc__(self):
269
+ if self._enabled:
270
+ ResetSignalStopSource()
271
+
272
+ @property
273
+ def stop_token(self):
274
+ return self._stop_token
parrot/lib/python3.10/site-packages/pyarrow/feather.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ import os
20
+
21
+ from pyarrow.pandas_compat import _pandas_api # noqa
22
+ from pyarrow.lib import (Codec, Table, # noqa
23
+ concat_tables, schema)
24
+ import pyarrow.lib as ext
25
+ from pyarrow import _feather
26
+ from pyarrow._feather import FeatherError # noqa: F401
27
+
28
+
29
+ class FeatherDataset:
30
+ """
31
+ Encapsulates details of reading a list of Feather files.
32
+
33
+ Parameters
34
+ ----------
35
+ path_or_paths : List[str]
36
+ A list of file names
37
+ validate_schema : bool, default True
38
+ Check that individual file schemas are all the same / compatible
39
+ """
40
+
41
+ def __init__(self, path_or_paths, validate_schema=True):
42
+ self.paths = path_or_paths
43
+ self.validate_schema = validate_schema
44
+
45
+ def read_table(self, columns=None):
46
+ """
47
+ Read multiple feather files as a single pyarrow.Table
48
+
49
+ Parameters
50
+ ----------
51
+ columns : List[str]
52
+ Names of columns to read from the file
53
+
54
+ Returns
55
+ -------
56
+ pyarrow.Table
57
+ Content of the file as a table (of columns)
58
+ """
59
+ _fil = read_table(self.paths[0], columns=columns)
60
+ self._tables = [_fil]
61
+ self.schema = _fil.schema
62
+
63
+ for path in self.paths[1:]:
64
+ table = read_table(path, columns=columns)
65
+ if self.validate_schema:
66
+ self.validate_schemas(path, table)
67
+ self._tables.append(table)
68
+ return concat_tables(self._tables)
69
+
70
+ def validate_schemas(self, piece, table):
71
+ if not self.schema.equals(table.schema):
72
+ raise ValueError('Schema in {!s} was different. \n'
73
+ '{!s}\n\nvs\n\n{!s}'
74
+ .format(piece, self.schema,
75
+ table.schema))
76
+
77
+ def read_pandas(self, columns=None, use_threads=True):
78
+ """
79
+ Read multiple Parquet files as a single pandas DataFrame
80
+
81
+ Parameters
82
+ ----------
83
+ columns : List[str]
84
+ Names of columns to read from the file
85
+ use_threads : bool, default True
86
+ Use multiple threads when converting to pandas
87
+
88
+ Returns
89
+ -------
90
+ pandas.DataFrame
91
+ Content of the file as a pandas DataFrame (of columns)
92
+ """
93
+ return self.read_table(columns=columns).to_pandas(
94
+ use_threads=use_threads)
95
+
96
+
97
+ def check_chunked_overflow(name, col):
98
+ if col.num_chunks == 1:
99
+ return
100
+
101
+ if col.type in (ext.binary(), ext.string()):
102
+ raise ValueError("Column '{}' exceeds 2GB maximum capacity of "
103
+ "a Feather binary column. This restriction may be "
104
+ "lifted in the future".format(name))
105
+ else:
106
+ # TODO(wesm): Not sure when else this might be reached
107
+ raise ValueError("Column '{}' of type {} was chunked on conversion "
108
+ "to Arrow and cannot be currently written to "
109
+ "Feather format".format(name, str(col.type)))
110
+
111
+
112
+ _FEATHER_SUPPORTED_CODECS = {'lz4', 'zstd', 'uncompressed'}
113
+
114
+
115
+ def write_feather(df, dest, compression=None, compression_level=None,
116
+ chunksize=None, version=2):
117
+ """
118
+ Write a pandas.DataFrame to Feather format.
119
+
120
+ Parameters
121
+ ----------
122
+ df : pandas.DataFrame or pyarrow.Table
123
+ Data to write out as Feather format.
124
+ dest : str
125
+ Local destination path.
126
+ compression : string, default None
127
+ Can be one of {"zstd", "lz4", "uncompressed"}. The default of None uses
128
+ LZ4 for V2 files if it is available, otherwise uncompressed.
129
+ compression_level : int, default None
130
+ Use a compression level particular to the chosen compressor. If None
131
+ use the default compression level
132
+ chunksize : int, default None
133
+ For V2 files, the internal maximum size of Arrow RecordBatch chunks
134
+ when writing the Arrow IPC file format. None means use the default,
135
+ which is currently 64K
136
+ version : int, default 2
137
+ Feather file version. Version 2 is the current. Version 1 is the more
138
+ limited legacy format
139
+ """
140
+ if _pandas_api.have_pandas:
141
+ if (_pandas_api.has_sparse and
142
+ isinstance(df, _pandas_api.pd.SparseDataFrame)):
143
+ df = df.to_dense()
144
+
145
+ if _pandas_api.is_data_frame(df):
146
+ # Feather v1 creates a new column in the resultant Table to
147
+ # store index information if index type is not RangeIndex
148
+
149
+ if version == 1:
150
+ preserve_index = False
151
+ elif version == 2:
152
+ preserve_index = None
153
+ else:
154
+ raise ValueError("Version value should either be 1 or 2")
155
+
156
+ table = Table.from_pandas(df, preserve_index=preserve_index)
157
+
158
+ if version == 1:
159
+ # Version 1 does not chunking
160
+ for i, name in enumerate(table.schema.names):
161
+ col = table[i]
162
+ check_chunked_overflow(name, col)
163
+ else:
164
+ table = df
165
+
166
+ if version == 1:
167
+ if len(table.column_names) > len(set(table.column_names)):
168
+ raise ValueError("cannot serialize duplicate column names")
169
+
170
+ if compression is not None:
171
+ raise ValueError("Feather V1 files do not support compression "
172
+ "option")
173
+
174
+ if chunksize is not None:
175
+ raise ValueError("Feather V1 files do not support chunksize "
176
+ "option")
177
+ else:
178
+ if compression is None and Codec.is_available('lz4_frame'):
179
+ compression = 'lz4'
180
+ elif (compression is not None and
181
+ compression not in _FEATHER_SUPPORTED_CODECS):
182
+ raise ValueError('compression="{}" not supported, must be '
183
+ 'one of {}'.format(compression,
184
+ _FEATHER_SUPPORTED_CODECS))
185
+
186
+ try:
187
+ _feather.write_feather(table, dest, compression=compression,
188
+ compression_level=compression_level,
189
+ chunksize=chunksize, version=version)
190
+ except Exception:
191
+ if isinstance(dest, str):
192
+ try:
193
+ os.remove(dest)
194
+ except os.error:
195
+ pass
196
+ raise
197
+
198
+
199
+ def read_feather(source, columns=None, use_threads=True,
200
+ memory_map=False, **kwargs):
201
+ """
202
+ Read a pandas.DataFrame from Feather format. To read as pyarrow.Table use
203
+ feather.read_table.
204
+
205
+ Parameters
206
+ ----------
207
+ source : str file path, or file-like object
208
+ You can use MemoryMappedFile as source, for explicitly use memory map.
209
+ columns : sequence, optional
210
+ Only read a specific set of columns. If not provided, all columns are
211
+ read.
212
+ use_threads : bool, default True
213
+ Whether to parallelize reading using multiple threads. If false the
214
+ restriction is used in the conversion to Pandas as well as in the
215
+ reading from Feather format.
216
+ memory_map : boolean, default False
217
+ Use memory mapping when opening file on disk, when source is a str.
218
+ **kwargs
219
+ Additional keyword arguments passed on to `pyarrow.Table.to_pandas`.
220
+
221
+ Returns
222
+ -------
223
+ df : pandas.DataFrame
224
+ The contents of the Feather file as a pandas.DataFrame
225
+ """
226
+ return (read_table(
227
+ source, columns=columns, memory_map=memory_map,
228
+ use_threads=use_threads).to_pandas(use_threads=use_threads, **kwargs))
229
+
230
+
231
+ def read_table(source, columns=None, memory_map=False, use_threads=True):
232
+ """
233
+ Read a pyarrow.Table from Feather format
234
+
235
+ Parameters
236
+ ----------
237
+ source : str file path, or file-like object
238
+ You can use MemoryMappedFile as source, for explicitly use memory map.
239
+ columns : sequence, optional
240
+ Only read a specific set of columns. If not provided, all columns are
241
+ read.
242
+ memory_map : boolean, default False
243
+ Use memory mapping when opening file on disk, when source is a str
244
+ use_threads : bool, default True
245
+ Whether to parallelize reading using multiple threads.
246
+
247
+ Returns
248
+ -------
249
+ table : pyarrow.Table
250
+ The contents of the Feather file as a pyarrow.Table
251
+ """
252
+ reader = _feather.FeatherReader(
253
+ source, use_memory_map=memory_map, use_threads=use_threads)
254
+
255
+ if columns is None:
256
+ return reader.read()
257
+
258
+ column_types = [type(column) for column in columns]
259
+ if all(map(lambda t: t == int, column_types)):
260
+ table = reader.read_indices(columns)
261
+ elif all(map(lambda t: t == str, column_types)):
262
+ table = reader.read_names(columns)
263
+ else:
264
+ column_type_names = [t.__name__ for t in column_types]
265
+ raise TypeError("Columns must be indices or names. "
266
+ "Got columns {} of types {}"
267
+ .format(columns, column_type_names))
268
+
269
+ # Feather v1 already respects the column selection
270
+ if reader.version < 3:
271
+ return table
272
+ # Feather v2 reads with sorted / deduplicated selection
273
+ elif sorted(set(columns)) == columns:
274
+ return table
275
+ else:
276
+ # follow exact order / selection of names
277
+ return table.select(columns)
parrot/lib/python3.10/site-packages/pyarrow/fs.py ADDED
@@ -0,0 +1,431 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ """
19
+ FileSystem abstraction to interact with various local and remote filesystems.
20
+ """
21
+
22
+ from pyarrow.util import _is_path_like, _stringify_path
23
+
24
+ from pyarrow._fs import ( # noqa
25
+ FileSelector,
26
+ FileType,
27
+ FileInfo,
28
+ FileSystem,
29
+ LocalFileSystem,
30
+ SubTreeFileSystem,
31
+ _MockFileSystem,
32
+ FileSystemHandler,
33
+ PyFileSystem,
34
+ _copy_files,
35
+ _copy_files_selector,
36
+ )
37
+
38
+ # For backward compatibility.
39
+ FileStats = FileInfo
40
+
41
+ _not_imported = []
42
+ try:
43
+ from pyarrow._azurefs import AzureFileSystem # noqa
44
+ except ImportError:
45
+ _not_imported.append("AzureFileSystem")
46
+
47
+ try:
48
+ from pyarrow._hdfs import HadoopFileSystem # noqa
49
+ except ImportError:
50
+ _not_imported.append("HadoopFileSystem")
51
+
52
+ try:
53
+ from pyarrow._gcsfs import GcsFileSystem # noqa
54
+ except ImportError:
55
+ _not_imported.append("GcsFileSystem")
56
+
57
+ try:
58
+ from pyarrow._s3fs import ( # noqa
59
+ AwsDefaultS3RetryStrategy, AwsStandardS3RetryStrategy,
60
+ S3FileSystem, S3LogLevel, S3RetryStrategy, ensure_s3_initialized,
61
+ finalize_s3, ensure_s3_finalized, initialize_s3, resolve_s3_region)
62
+ except ImportError:
63
+ _not_imported.append("S3FileSystem")
64
+ else:
65
+ # GH-38364: we don't initialize S3 eagerly as that could lead
66
+ # to crashes at shutdown even when S3 isn't used.
67
+ # Instead, S3 is initialized lazily using `ensure_s3_initialized`
68
+ # in assorted places.
69
+ import atexit
70
+ atexit.register(ensure_s3_finalized)
71
+
72
+
73
+ def __getattr__(name):
74
+ if name in _not_imported:
75
+ raise ImportError(
76
+ "The pyarrow installation is not built with support for "
77
+ "'{0}'".format(name)
78
+ )
79
+
80
+ raise AttributeError(
81
+ "module 'pyarrow.fs' has no attribute '{0}'".format(name)
82
+ )
83
+
84
+
85
+ def _filesystem_from_str(uri):
86
+ # instantiate the file system from an uri, if the uri has a path
87
+ # component then it will be treated as a path prefix
88
+ filesystem, prefix = FileSystem.from_uri(uri)
89
+ prefix = filesystem.normalize_path(prefix)
90
+ if prefix:
91
+ # validate that the prefix is pointing to a directory
92
+ prefix_info = filesystem.get_file_info([prefix])[0]
93
+ if prefix_info.type != FileType.Directory:
94
+ raise ValueError(
95
+ "The path component of the filesystem URI must point to a "
96
+ "directory but it has a type: `{}`. The path component "
97
+ "is `{}` and the given filesystem URI is `{}`".format(
98
+ prefix_info.type.name, prefix_info.path, uri
99
+ )
100
+ )
101
+ filesystem = SubTreeFileSystem(prefix, filesystem)
102
+ return filesystem
103
+
104
+
105
+ def _ensure_filesystem(filesystem, *, use_mmap=False):
106
+ if isinstance(filesystem, FileSystem):
107
+ return filesystem
108
+ elif isinstance(filesystem, str):
109
+ if use_mmap:
110
+ raise ValueError(
111
+ "Specifying to use memory mapping not supported for "
112
+ "filesystem specified as an URI string"
113
+ )
114
+ return _filesystem_from_str(filesystem)
115
+
116
+ # handle fsspec-compatible filesystems
117
+ try:
118
+ import fsspec
119
+ except ImportError:
120
+ pass
121
+ else:
122
+ if isinstance(filesystem, fsspec.AbstractFileSystem):
123
+ if type(filesystem).__name__ == 'LocalFileSystem':
124
+ # In case its a simple LocalFileSystem, use native arrow one
125
+ return LocalFileSystem(use_mmap=use_mmap)
126
+ return PyFileSystem(FSSpecHandler(filesystem))
127
+
128
+ raise TypeError(
129
+ "Unrecognized filesystem: {}. `filesystem` argument must be a "
130
+ "FileSystem instance or a valid file system URI'".format(
131
+ type(filesystem))
132
+ )
133
+
134
+
135
def _resolve_filesystem_and_path(path, filesystem=None, *, memory_map=False):
    """
    Resolve a (filesystem, path) pair from *path*, which may be a URI,
    a plain filesystem path, or a file-like object.
    """
    # File-like objects carry no path: there is nothing for a filesystem
    # to open, so pass both through unchanged.
    if not _is_path_like(path):
        if filesystem is not None:
            raise ValueError(
                "'filesystem' passed but the specified path is file-like, so"
                " there is nothing to open with 'filesystem'."
            )
        return filesystem, path

    # An explicitly provided filesystem takes precedence over URI inference.
    if filesystem is not None:
        filesystem = _ensure_filesystem(filesystem, use_mmap=memory_map)
        if isinstance(filesystem, LocalFileSystem):
            path = _stringify_path(path)
        elif not isinstance(path, str):
            raise TypeError(
                "Expected string path; path-like objects are only allowed "
                "with a local filesystem"
            )
        return filesystem, filesystem.normalize_path(path)

    path = _stringify_path(path)

    # No filesystem given: first probe the local filesystem and, when the
    # path does not exist locally, fall back to parsing it as a URI.
    local_fs = LocalFileSystem(use_mmap=memory_map)
    try:
        info = local_fs.get_file_info(path)
    except ValueError:
        # ValueError means the path is likely a URI rather than a local path
        found_locally = False
    else:
        found_locally = info.type != FileType.NotFound

    if found_locally:
        return local_fs, local_fs.normalize_path(path)

    filesystem = local_fs
    try:
        filesystem, path = FileSystem.from_uri(path)
    except ValueError as exc:
        # Neither a URI nor an existing local path: assume a local path was
        # meant and let a nicer file-not-found error surface later instead
        # of a confusing scheme-parsing error here.
        message = str(exc)
        if "empty scheme" not in message \
                and "Cannot parse URI" not in message:
            raise

    return filesystem, path
191
+
192
+
193
def copy_files(source, destination,
               source_filesystem=None, destination_filesystem=None,
               *, chunk_size=1024*1024, use_threads=True):
    """
    Copy files between FileSystems.

    This functions allows you to recursively copy directories of files from
    one file system to another, such as from S3 to your local machine.

    Parameters
    ----------
    source : string
        Source file path or URI to a single file or directory.
        If a directory, files will be copied recursively from this path.
    destination : string
        Destination file path or URI. If `source` is a file, `destination`
        is also interpreted as the destination file (not directory).
        Directories will be created as necessary.
    source_filesystem : FileSystem, optional
        Source filesystem, needs to be specified if `source` is not a URI,
        otherwise inferred.
    destination_filesystem : FileSystem, optional
        Destination filesystem, needs to be specified if `destination` is not
        a URI, otherwise inferred.
    chunk_size : int, default 1MB
        The maximum size of block to read before flushing to the
        destination file. A larger chunk_size will use more memory while
        copying but may help accommodate high latency FileSystems.
    use_threads : bool, default True
        Whether to use multiple threads to accelerate copying.

    Examples
    --------
    Inspect an S3 bucket's files:

    >>> s3, path = fs.FileSystem.from_uri(
    ...     "s3://registry.opendata.aws/roda/ndjson/")
    >>> selector = fs.FileSelector(path)
    >>> s3.get_file_info(selector)
    [<FileInfo for 'registry.opendata.aws/roda/ndjson/index.ndjson':...]

    Copy one file from S3 bucket to a local directory:

    >>> fs.copy_files("s3://registry.opendata.aws/roda/ndjson/index.ndjson",
    ...               "file:///{}/index_copy.ndjson".format(local_path))

    >>> fs.LocalFileSystem().get_file_info(str(local_path)+
    ...                                    '/index_copy.ndjson')
    <FileInfo for '.../index_copy.ndjson': type=FileType.File, size=...>

    Copy file using a FileSystem object:

    >>> fs.copy_files("registry.opendata.aws/roda/ndjson/index.ndjson",
    ...               "file:///{}/index_copy.ndjson".format(local_path),
    ...               source_filesystem=fs.S3FileSystem())
    """
    # Resolve both endpoints to concrete (filesystem, path) pairs first.
    src_fs, src_path = _resolve_filesystem_and_path(
        source, source_filesystem)
    dest_fs, dest_path = _resolve_filesystem_and_path(
        destination, destination_filesystem)

    # A directory source is copied recursively; a file source is copied 1:1.
    if src_fs.get_file_info(src_path).type == FileType.Directory:
        selector = FileSelector(src_path, recursive=True)
        _copy_files_selector(src_fs, selector,
                             dest_fs, dest_path,
                             chunk_size, use_threads)
    else:
        _copy_files(src_fs, src_path,
                    dest_fs, dest_path,
                    chunk_size, use_threads)
266
+
267
+
268
class FSSpecHandler(FileSystemHandler):
    """
    Handler for fsspec-based Python filesystems.

    https://filesystem-spec.readthedocs.io/en/latest/index.html

    Parameters
    ----------
    fs : FSSpec-compliant filesystem instance

    Examples
    --------
    >>> PyFileSystem(FSSpecHandler(fsspec_fs)) # doctest: +SKIP
    """

    def __init__(self, fs):
        self.fs = fs

    def __eq__(self, other):
        if isinstance(other, FSSpecHandler):
            return self.fs == other.fs
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, FSSpecHandler):
            return self.fs != other.fs
        return NotImplemented

    def get_type_name(self):
        # fsspec filesystems may advertise several protocol aliases;
        # use the first one as the canonical name.
        protocol = self.fs.protocol
        if isinstance(protocol, list):
            protocol = protocol[0]
        return "fsspec+{0}".format(protocol)

    def normalize_path(self, path):
        return path

    @staticmethod
    def _create_file_info(path, info):
        """Translate an fsspec ``info`` dict into a pyarrow FileInfo."""
        size = info["size"]
        if info["type"] == "file":
            ftype = FileType.File
        elif info["type"] == "directory":
            ftype = FileType.Directory
            # some fsspec filesystems include a file size for directories
            size = None
        else:
            ftype = FileType.Unknown
        return FileInfo(path, ftype, size=size, mtime=info.get("mtime", None))

    def get_file_info(self, paths):
        """Return a FileInfo for each path, NotFound for missing ones."""
        infos = []
        for path in paths:
            try:
                info = self.fs.info(path)
            except FileNotFoundError:
                infos.append(FileInfo(path, FileType.NotFound))
            else:
                infos.append(self._create_file_info(path, info))
        return infos

    def get_file_info_selector(self, selector):
        """List entries under ``selector.base_dir`` as FileInfo objects."""
        if not self.fs.isdir(selector.base_dir):
            if self.fs.exists(selector.base_dir):
                raise NotADirectoryError(selector.base_dir)
            else:
                if selector.allow_not_found:
                    return []
                else:
                    raise FileNotFoundError(selector.base_dir)

        if selector.recursive:
            maxdepth = None
        else:
            maxdepth = 1

        infos = []
        selected_files = self.fs.find(
            selector.base_dir, maxdepth=maxdepth, withdirs=True, detail=True
        )
        for path, info in selected_files.items():
            _path = path.strip("/")
            base_dir = selector.base_dir.strip("/")
            # Need to exclude base directory from selected files if present
            # (fsspec filesystems, see GH-37555)
            if _path != base_dir:
                infos.append(self._create_file_info(path, info))

        return infos

    def create_dir(self, path, recursive):
        # mkdir also raises FileNotFoundError when base directory is not found
        try:
            self.fs.mkdir(path, create_parents=recursive)
        except FileExistsError:
            pass

    def delete_dir(self, path):
        self.fs.rm(path, recursive=True)

    def _delete_dir_contents(self, path, missing_dir_ok=False):
        # NOTE: ``missing_dir_ok`` now defaults to False so that
        # delete_root_dir_contents() can call with a single argument;
        # previously that call raised TypeError (missing required argument).
        try:
            subpaths = self.fs.listdir(path, detail=False)
        except FileNotFoundError:
            if missing_dir_ok:
                return
            raise
        for subpath in subpaths:
            if self.fs.isdir(subpath):
                self.fs.rm(subpath, recursive=True)
            elif self.fs.isfile(subpath):
                self.fs.rm(subpath)

    def delete_dir_contents(self, path, missing_dir_ok):
        if path.strip("/") == "":
            # Use a format string: passing extra positional args to
            # ValueError would produce a confusing tuple-style message.
            raise ValueError(
                "delete_dir_contents called on path '{}'".format(path))
        self._delete_dir_contents(path, missing_dir_ok)

    def delete_root_dir_contents(self):
        self._delete_dir_contents("/")

    def delete_file(self, path):
        # fs.rm correctly raises IsADirectoryError when `path` is a directory
        # instead of a file and `recursive` is not set to True
        if not self.fs.exists(path):
            raise FileNotFoundError(path)
        self.fs.rm(path)

    def move(self, src, dest):
        self.fs.mv(src, dest, recursive=True)

    def copy_file(self, src, dest):
        # fs.copy correctly raises IsADirectoryError when `src` is a directory
        # instead of a file
        self.fs.copy(src, dest)

    # TODO can we read/pass metadata (e.g. Content-Type) in the methods below?

    def open_input_stream(self, path):
        from pyarrow import PythonFile

        if not self.fs.isfile(path):
            raise FileNotFoundError(path)

        return PythonFile(self.fs.open(path, mode="rb"), mode="r")

    def open_input_file(self, path):
        from pyarrow import PythonFile

        if not self.fs.isfile(path):
            raise FileNotFoundError(path)

        return PythonFile(self.fs.open(path, mode="rb"), mode="r")

    def open_output_stream(self, path, metadata):
        from pyarrow import PythonFile

        return PythonFile(self.fs.open(path, mode="wb"), mode="w")

    def open_append_stream(self, path, metadata):
        from pyarrow import PythonFile

        return PythonFile(self.fs.open(path, mode="ab"), mode="w")
parrot/lib/python3.10/site-packages/pyarrow/gandiva.pyx ADDED
@@ -0,0 +1,760 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False
19
+ # distutils: language = c++
20
+ # cython: language_level = 3
21
+
22
+ from libcpp.memory cimport shared_ptr
23
+ from libcpp.string cimport string as c_string
24
+ from libcpp.vector cimport vector as c_vector
25
+ from libcpp.unordered_set cimport unordered_set as c_unordered_set
26
+ from libc.stdint cimport int64_t, int32_t
27
+
28
+ from pyarrow.includes.libarrow cimport *
29
+ from pyarrow.lib cimport (DataType, Field, MemoryPool, RecordBatch,
30
+ Schema, check_status, pyarrow_wrap_array,
31
+ pyarrow_wrap_data_type, ensure_type, _Weakrefable,
32
+ pyarrow_wrap_field)
33
+
34
+ from pyarrow.includes.libgandiva cimport (
35
+ CCondition, CGandivaExpression,
36
+ CNode, CProjector, CFilter,
37
+ CSelectionVector,
38
+ _ensure_selection_mode,
39
+ CConfiguration,
40
+ CConfigurationBuilder,
41
+ TreeExprBuilder_MakeExpression,
42
+ TreeExprBuilder_MakeFunction,
43
+ TreeExprBuilder_MakeBoolLiteral,
44
+ TreeExprBuilder_MakeUInt8Literal,
45
+ TreeExprBuilder_MakeUInt16Literal,
46
+ TreeExprBuilder_MakeUInt32Literal,
47
+ TreeExprBuilder_MakeUInt64Literal,
48
+ TreeExprBuilder_MakeInt8Literal,
49
+ TreeExprBuilder_MakeInt16Literal,
50
+ TreeExprBuilder_MakeInt32Literal,
51
+ TreeExprBuilder_MakeInt64Literal,
52
+ TreeExprBuilder_MakeFloatLiteral,
53
+ TreeExprBuilder_MakeDoubleLiteral,
54
+ TreeExprBuilder_MakeStringLiteral,
55
+ TreeExprBuilder_MakeBinaryLiteral,
56
+ TreeExprBuilder_MakeField,
57
+ TreeExprBuilder_MakeIf,
58
+ TreeExprBuilder_MakeAnd,
59
+ TreeExprBuilder_MakeOr,
60
+ TreeExprBuilder_MakeCondition,
61
+ TreeExprBuilder_MakeInExpressionInt32,
62
+ TreeExprBuilder_MakeInExpressionInt64,
63
+ TreeExprBuilder_MakeInExpressionTime32,
64
+ TreeExprBuilder_MakeInExpressionTime64,
65
+ TreeExprBuilder_MakeInExpressionDate32,
66
+ TreeExprBuilder_MakeInExpressionDate64,
67
+ TreeExprBuilder_MakeInExpressionTimeStamp,
68
+ TreeExprBuilder_MakeInExpressionString,
69
+ SelectionVector_MakeInt16,
70
+ SelectionVector_MakeInt32,
71
+ SelectionVector_MakeInt64,
72
+ Projector_Make,
73
+ Filter_Make,
74
+ CFunctionSignature,
75
+ GetRegisteredFunctionSignatures)
76
+
77
+
78
cdef class Node(_Weakrefable):
    """A node of a Gandiva expression tree (literal, field ref, function, ...)."""
    cdef:
        shared_ptr[CNode] node

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, use the "
                        "TreeExprBuilder API directly"
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CNode] node):
        # Internal factory: wrap an existing C++ node without calling __init__.
        cdef Node self = Node.__new__(Node)
        self.node = node
        return self

    def __str__(self):
        return self.node.get().ToString().decode()

    def __repr__(self):
        type_format = object.__repr__(self)
        return '{0}\n{1}'.format(type_format, str(self))

    def return_type(self):
        """Return the Arrow DataType this node evaluates to."""
        return pyarrow_wrap_data_type(self.node.get().return_type())
102
+
103
+
104
cdef class Expression(_Weakrefable):
    """A Gandiva expression: a root Node plus the result Field it produces."""
    cdef:
        shared_ptr[CGandivaExpression] expression

    cdef void init(self, shared_ptr[CGandivaExpression] expression):
        # Internal initializer used by TreeExprBuilder.make_expression.
        self.expression = expression

    def __str__(self):
        return self.expression.get().ToString().decode()

    def __repr__(self):
        type_format = object.__repr__(self)
        return '{0}\n{1}'.format(type_format, str(self))

    def root(self):
        """Return the root Node of this expression."""
        return Node.create(self.expression.get().root())

    def result(self):
        """Return the output Field of this expression."""
        return pyarrow_wrap_field(self.expression.get().result())
123
+
124
+
125
cdef class Condition(_Weakrefable):
    """A boolean-valued Gandiva expression usable as a filter condition."""
    cdef:
        shared_ptr[CCondition] condition

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, use the "
                        "TreeExprBuilder API instead"
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CCondition] condition):
        # Internal factory: wrap an existing C++ condition without __init__.
        cdef Condition self = Condition.__new__(Condition)
        self.condition = condition
        return self

    def __str__(self):
        return self.condition.get().ToString().decode()

    def __repr__(self):
        type_format = object.__repr__(self)
        return '{0}\n{1}'.format(type_format, str(self))

    def root(self):
        """Return the root Node of the condition's expression tree."""
        return Node.create(self.condition.get().root())

    def result(self):
        """Return the output Field of the condition."""
        return pyarrow_wrap_field(self.condition.get().result())
152
+
153
+
154
cdef class SelectionVector(_Weakrefable):
    """A vector of row indices selected by a Filter evaluation."""
    cdef:
        shared_ptr[CSelectionVector] selection_vector

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly."
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CSelectionVector] selection_vector):
        # Internal factory: wrap an existing C++ selection vector.
        cdef SelectionVector self = SelectionVector.__new__(SelectionVector)
        self.selection_vector = selection_vector
        return self

    def to_array(self):
        """Return the selected row indices as a pyarrow Array."""
        cdef shared_ptr[CArray] result = self.selection_vector.get().ToArray()
        return pyarrow_wrap_array(result)
171
+
172
+
173
cdef class Projector(_Weakrefable):
    """Compiled Gandiva projector; evaluates expressions over record batches."""
    cdef:
        shared_ptr[CProjector] projector
        # Memory pool used for the output arrays of evaluate().
        MemoryPool pool

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, use "
                        "make_projector instead"
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CProjector] projector, MemoryPool pool):
        # Internal factory used by make_projector.
        cdef Projector self = Projector.__new__(Projector)
        self.projector = projector
        self.pool = pool
        return self

    @property
    def llvm_ir(self):
        # The LLVM IR generated for the compiled expressions, as text.
        return self.projector.get().DumpIR().decode()

    def evaluate(self, RecordBatch batch, SelectionVector selection=None):
        """
        Evaluate the specified record batch and return the arrays at the
        filtered positions.

        Parameters
        ----------
        batch : pyarrow.RecordBatch
        selection : pyarrow.gandiva.SelectionVector

        Returns
        -------
        list[pyarrow.Array]
        """
        cdef vector[shared_ptr[CArray]] results
        # With a selection vector, only the selected rows are evaluated.
        if selection is None:
            check_status(self.projector.get().Evaluate(
                batch.sp_batch.get()[0], self.pool.pool, &results))
        else:
            check_status(
                self.projector.get().Evaluate(
                    batch.sp_batch.get()[0], selection.selection_vector.get(),
                    self.pool.pool, &results))
        cdef shared_ptr[CArray] result
        arrays = []
        for result in results:
            arrays.append(pyarrow_wrap_array(result))
        return arrays
222
+
223
+
224
cdef class Filter(_Weakrefable):
    """Compiled Gandiva filter; evaluates a Condition over record batches."""
    cdef:
        shared_ptr[CFilter] filter

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, use "
                        "make_filter instead"
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CFilter] filter):
        # Internal factory used by make_filter.
        cdef Filter self = Filter.__new__(Filter)
        self.filter = filter
        return self

    @property
    def llvm_ir(self):
        # The LLVM IR generated for the compiled condition, as text.
        return self.filter.get().DumpIR().decode()

    def evaluate(self, RecordBatch batch, MemoryPool pool, dtype='int32'):
        """
        Evaluate the specified record batch and return a selection vector.

        Parameters
        ----------
        batch : pyarrow.RecordBatch
        pool : MemoryPool
        dtype : DataType or str, default int32
            Index width of the selection vector; must be one of
            int16, int32 or int64.

        Returns
        -------
        pyarrow.gandiva.SelectionVector
        """
        cdef:
            DataType type = ensure_type(dtype)
            shared_ptr[CSelectionVector] selection

        # Allocate a selection vector sized for the batch; the index width
        # is chosen by the caller via `dtype`.
        if type.id == _Type_INT16:
            check_status(SelectionVector_MakeInt16(
                batch.num_rows, pool.pool, &selection))
        elif type.id == _Type_INT32:
            check_status(SelectionVector_MakeInt32(
                batch.num_rows, pool.pool, &selection))
        elif type.id == _Type_INT64:
            check_status(SelectionVector_MakeInt64(
                batch.num_rows, pool.pool, &selection))
        else:
            raise ValueError("'dtype' of the selection vector should be "
                             "one of 'int16', 'int32' and 'int64'.")

        check_status(self.filter.get().Evaluate(
            batch.sp_batch.get()[0], selection))
        return SelectionVector.create(selection)
277
+
278
+
279
cdef class TreeExprBuilder(_Weakrefable):
    """Builder API for constructing Gandiva expression trees (Nodes)."""

    def make_literal(self, value, dtype):
        """
        Create a node on a literal.

        Parameters
        ----------
        value : a literal value
        dtype : DataType

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef:
            DataType type = ensure_type(dtype)
            shared_ptr[CNode] r

        # Dispatch to the typed C++ literal factory matching `dtype`.
        if type.id == _Type_BOOL:
            r = TreeExprBuilder_MakeBoolLiteral(value)
        elif type.id == _Type_UINT8:
            r = TreeExprBuilder_MakeUInt8Literal(value)
        elif type.id == _Type_UINT16:
            r = TreeExprBuilder_MakeUInt16Literal(value)
        elif type.id == _Type_UINT32:
            r = TreeExprBuilder_MakeUInt32Literal(value)
        elif type.id == _Type_UINT64:
            r = TreeExprBuilder_MakeUInt64Literal(value)
        elif type.id == _Type_INT8:
            r = TreeExprBuilder_MakeInt8Literal(value)
        elif type.id == _Type_INT16:
            r = TreeExprBuilder_MakeInt16Literal(value)
        elif type.id == _Type_INT32:
            r = TreeExprBuilder_MakeInt32Literal(value)
        elif type.id == _Type_INT64:
            r = TreeExprBuilder_MakeInt64Literal(value)
        elif type.id == _Type_FLOAT:
            r = TreeExprBuilder_MakeFloatLiteral(value)
        elif type.id == _Type_DOUBLE:
            r = TreeExprBuilder_MakeDoubleLiteral(value)
        elif type.id == _Type_STRING:
            # String literals are passed to C++ as UTF-8 bytes.
            r = TreeExprBuilder_MakeStringLiteral(value.encode('UTF-8'))
        elif type.id == _Type_BINARY:
            r = TreeExprBuilder_MakeBinaryLiteral(value)
        else:
            raise TypeError("Didn't recognize dtype " + str(dtype))

        return Node.create(r)

    def make_expression(self, Node root_node not None,
                        Field return_field not None):
        """
        Create an expression with the specified root_node,
        and the result written to result_field.

        Parameters
        ----------
        root_node : pyarrow.gandiva.Node
        return_field : pyarrow.Field

        Returns
        -------
        pyarrow.gandiva.Expression
        """
        cdef shared_ptr[CGandivaExpression] r = TreeExprBuilder_MakeExpression(
            root_node.node, return_field.sp_field)
        cdef Expression expression = Expression()
        expression.init(r)
        return expression

    def make_function(self, name, children, DataType return_type):
        """
        Create a node with a function.

        Parameters
        ----------
        name : str
        children : pyarrow.gandiva.NodeVector
        return_type : DataType

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef c_vector[shared_ptr[CNode]] c_children
        cdef Node child
        for child in children:
            if child is None:
                raise TypeError("Child nodes must not be None")
            c_children.push_back(child.node)
        cdef shared_ptr[CNode] r = TreeExprBuilder_MakeFunction(
            name.encode(), c_children, return_type.sp_type)
        return Node.create(r)

    def make_field(self, Field field not None):
        """
        Create a node with an Arrow field.

        Parameters
        ----------
        field : pyarrow.Field

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef shared_ptr[CNode] r = TreeExprBuilder_MakeField(field.sp_field)
        return Node.create(r)

    def make_if(self, Node condition not None, Node this_node not None,
                Node else_node not None, DataType return_type not None):
        """
        Create a node with an if-else expression.

        Parameters
        ----------
        condition : pyarrow.gandiva.Node
        this_node : pyarrow.gandiva.Node
        else_node : pyarrow.gandiva.Node
        return_type : DataType

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef shared_ptr[CNode] r = TreeExprBuilder_MakeIf(
            condition.node, this_node.node, else_node.node,
            return_type.sp_type)
        return Node.create(r)

    def make_and(self, children):
        """
        Create a Node with a boolean AND expression.

        Parameters
        ----------
        children : list[pyarrow.gandiva.Node]

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef c_vector[shared_ptr[CNode]] c_children
        cdef Node child
        for child in children:
            if child is None:
                raise TypeError("Child nodes must not be None")
            c_children.push_back(child.node)
        cdef shared_ptr[CNode] r = TreeExprBuilder_MakeAnd(c_children)
        return Node.create(r)

    def make_or(self, children):
        """
        Create a Node with a boolean OR expression.

        Parameters
        ----------
        children : list[pyarrow.gandiva.Node]

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef c_vector[shared_ptr[CNode]] c_children
        cdef Node child
        for child in children:
            if child is None:
                raise TypeError("Child nodes must not be None")
            c_children.push_back(child.node)
        cdef shared_ptr[CNode] r = TreeExprBuilder_MakeOr(c_children)
        return Node.create(r)

    # Each _make_in_expression_<type> helper copies the Python values into
    # a C++ unordered_set of the matching physical type before calling the
    # typed Gandiva factory; make_in_expression dispatches to them.

    def _make_in_expression_int32(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int32_t] c_values
        cdef int32_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionInt32(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_int64(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int64_t] c_values
        cdef int64_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionInt64(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_time32(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int32_t] c_values
        cdef int32_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionTime32(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_time64(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int64_t] c_values
        cdef int64_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionTime64(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_date32(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int32_t] c_values
        cdef int32_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionDate32(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_date64(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int64_t] c_values
        cdef int64_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionDate64(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_timestamp(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int64_t] c_values
        cdef int64_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionTimeStamp(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_binary(self, Node node not None, values):
        # Binary values are used as-is (already bytes).
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[c_string] c_values
        cdef c_string v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionString(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_string(self, Node node not None, values):
        # String values are encoded to UTF-8 bytes first.
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[c_string] c_values
        cdef c_string _v
        for v in values:
            _v = v.encode('UTF-8')
            c_values.insert(_v)
        r = TreeExprBuilder_MakeInExpressionString(node.node, c_values)
        return Node.create(r)

    def make_in_expression(self, Node node not None, values, dtype):
        """
        Create a Node with an IN expression.

        Parameters
        ----------
        node : pyarrow.gandiva.Node
        values : iterable
        dtype : DataType

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef DataType type = ensure_type(dtype)

        if type.id == _Type_INT32:
            return self._make_in_expression_int32(node, values)
        elif type.id == _Type_INT64:
            return self._make_in_expression_int64(node, values)
        elif type.id == _Type_TIME32:
            return self._make_in_expression_time32(node, values)
        elif type.id == _Type_TIME64:
            return self._make_in_expression_time64(node, values)
        elif type.id == _Type_TIMESTAMP:
            return self._make_in_expression_timestamp(node, values)
        elif type.id == _Type_DATE32:
            return self._make_in_expression_date32(node, values)
        elif type.id == _Type_DATE64:
            return self._make_in_expression_date64(node, values)
        elif type.id == _Type_BINARY:
            return self._make_in_expression_binary(node, values)
        elif type.id == _Type_STRING:
            return self._make_in_expression_string(node, values)
        else:
            raise TypeError("Data type " + str(dtype) + " not supported.")

    def make_condition(self, Node condition not None):
        """
        Create a condition with the specified node.

        Parameters
        ----------
        condition : pyarrow.gandiva.Node
            Boolean-valued root node of the condition.

        Returns
        -------
        pyarrow.gandiva.Condition
        """
        cdef shared_ptr[CCondition] r = TreeExprBuilder_MakeCondition(
            condition.node)
        return Condition.create(r)
586
+
587
cdef class Configuration(_Weakrefable):
    """Compilation options for Gandiva projectors and filters."""
    cdef:
        shared_ptr[CConfiguration] configuration

    def __cinit__(self, bint optimize=True, bint dump_ir=False):
        """
        Initialize the configuration with specified options.

        Parameters
        ----------
        optimize : bool, default True
            Whether to enable optimizations.
        dump_ir : bool, default False
            Whether to dump LLVM IR.
        """
        # Build a default configuration, then apply the requested flags.
        self.configuration = CConfigurationBuilder().build()
        self.configuration.get().set_optimize(optimize)
        self.configuration.get().set_dump_ir(dump_ir)

    @staticmethod
    cdef create(shared_ptr[CConfiguration] configuration):
        """
        Create a Configuration instance from an existing CConfiguration pointer.

        Parameters
        ----------
        configuration : shared_ptr[CConfiguration]
            Existing CConfiguration pointer.

        Returns
        -------
        Configuration instance
        """
        cdef Configuration self = Configuration.__new__(Configuration)
        self.configuration = configuration
        return self
623
+
624
+
625
cpdef make_projector(Schema schema, children, MemoryPool pool,
                     str selection_mode="NONE",
                     Configuration configuration=None):
    """
    Construct a projection using expressions.

    A projector is built for a specific schema and vector of expressions.
    Once the projector is built, it can be used to evaluate many row batches.

    Parameters
    ----------
    schema : pyarrow.Schema
        Schema for the record batches, and the expressions.
    children : list[pyarrow.gandiva.Expression]
        List of projectable expression objects.
    pool : pyarrow.MemoryPool
        Memory pool used to allocate output arrays.
    selection_mode : str, default "NONE"
        Possible values are NONE, UINT16, UINT32, UINT64.
    configuration : pyarrow.gandiva.Configuration, default None
        Configuration for the projector.

    Returns
    -------
    Projector instance
    """
    cdef:
        Expression child
        c_vector[shared_ptr[CGandivaExpression]] c_children
        shared_ptr[CProjector] result

    # Fall back to the default compilation options when none are given.
    if configuration is None:
        configuration = Configuration()

    for child in children:
        if child is None:
            raise TypeError("Expressions must not be None")
        c_children.push_back(child.expression)

    check_status(
        Projector_Make(schema.sp_schema, c_children,
                       _ensure_selection_mode(selection_mode),
                       configuration.configuration,
                       &result))
    return Projector.create(result, pool)
670
+
671
+
672
cpdef make_filter(Schema schema, Condition condition,
                  Configuration configuration=None):
    """
    Construct a filter based on a condition.

    A filter is built for a specific schema and condition. Once the filter is
    built, it can be used to evaluate many row batches.

    Parameters
    ----------
    schema : pyarrow.Schema
        Schema for the record batches, and the condition.
    condition : pyarrow.gandiva.Condition
        Filter condition.
    configuration : pyarrow.gandiva.Configuration, default None
        Configuration for the filter.

    Returns
    -------
    Filter instance
    """
    cdef shared_ptr[CFilter] result
    # `condition` is typed but not declared `not None`, so check explicitly.
    if condition is None:
        raise TypeError("Condition must not be None")

    # Fall back to the default compilation options when none are given.
    if configuration is None:
        configuration = Configuration()

    check_status(
        Filter_Make(schema.sp_schema, condition.condition, configuration.configuration, &result))
    return Filter.create(result)
703
+
704
+
705
cdef class FunctionSignature(_Weakrefable):
    """
    Signature of a Gandiva function including name, parameter types
    and return type.
    """

    cdef:
        shared_ptr[CFunctionSignature] signature

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly."
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CFunctionSignature] signature):
        # Internal factory: wrap an existing C++ signature without __init__.
        cdef FunctionSignature self = FunctionSignature.__new__(
            FunctionSignature)
        self.signature = signature
        return self

    def return_type(self):
        """Return the function's return DataType."""
        return pyarrow_wrap_data_type(self.signature.get().ret_type())

    def param_types(self):
        """Return the function's parameter DataTypes as a list."""
        result = []
        cdef vector[shared_ptr[CDataType]] types = \
            self.signature.get().param_types()
        for t in types:
            result.append(pyarrow_wrap_data_type(t))
        return result

    def name(self):
        """Return the function's base name."""
        return self.signature.get().base_name().decode()

    def __repr__(self):
        signature = self.signature.get().ToString().decode()
        return "FunctionSignature(" + signature + ")"
742
+
743
+
744
def get_registered_function_signatures():
    """
    Return the signatures of the functions in Gandiva's ExpressionRegistry.

    Returns
    -------
    registry: a list of registered function signatures
    """
    results = []

    cdef vector[shared_ptr[CFunctionSignature]] signatures = \
        GetRegisteredFunctionSignatures()

    for signature in signatures:
        results.append(FunctionSignature.create(signature))

    return results
parrot/lib/python3.10/site-packages/pyarrow/io.pxi ADDED
@@ -0,0 +1,2919 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # Cython wrappers for IO interfaces defined in arrow::io and messaging in
19
+ # arrow::ipc
20
+
21
+ from libc.stdlib cimport malloc, free
22
+
23
+ import codecs
24
+ import pickle
25
+ import re
26
+ import sys
27
+ import threading
28
+ import time
29
+ import warnings
30
+ from io import BufferedIOBase, IOBase, TextIOBase, UnsupportedOperation
31
+ from queue import Queue, Empty as QueueEmpty
32
+
33
+ from pyarrow.lib cimport check_status, HaveLibHdfs
34
+ from pyarrow.util import _is_path_like, _stringify_path
35
+
36
+
37
+ # 64K
38
+ DEFAULT_BUFFER_SIZE = 2 ** 16
39
+
40
+
41
+ cdef extern from "Python.h":
42
+ # To let us get a PyObject* and avoid Cython auto-ref-counting
43
+ PyObject* PyBytes_FromStringAndSizeNative" PyBytes_FromStringAndSize"(
44
+ char *v, Py_ssize_t len) except NULL
45
+
46
+ # Workaround https://github.com/cython/cython/issues/4707
47
+ bytearray PyByteArray_FromStringAndSize(char *string, Py_ssize_t len)
48
+
49
+
50
def have_libhdfs():
    """
    Return true if HDFS (HadoopFileSystem) library is set up correctly.
    """
    try:
        with nogil:
            check_status(HaveLibHdfs())
        return True
    except Exception:
        # Any failure to locate/initialize libhdfs is reported as False
        # rather than propagated to the caller.
        return False
60
+
61
+
62
def io_thread_count():
    """
    Return the number of threads to use for I/O operations.

    Many operations, such as scanning a dataset, will implicitly make
    use of this pool. The number of threads is set to a fixed value at
    startup. It can be modified at runtime by calling
    :func:`set_io_thread_count()`.

    See Also
    --------
    set_io_thread_count : Modify the size of this pool.
    cpu_count : The analogous function for the CPU thread pool.
    """
    # Delegates to the C++ I/O thread pool capacity query.
    return GetIOThreadPoolCapacity()
77
+
78
+
79
def set_io_thread_count(int count):
    """
    Set the number of threads to use for I/O operations.

    Many operations, such as scanning a dataset, will implicitly make
    use of this pool.

    Parameters
    ----------
    count : int
        The max number of threads that may be used for I/O.
        Must be positive.

    See Also
    --------
    io_thread_count : Get the size of this pool.
    set_cpu_count : The analogous function for the CPU thread pool.

    Raises
    ------
    ValueError
        If *count* is not strictly positive.
    """
    # Validate on the Python side before handing off to C++.
    if count < 1:
        raise ValueError("IO thread count must be strictly positive")
    check_status(SetIOThreadPoolCapacity(count))
100
+
101
+
102
+ cdef class NativeFile(_Weakrefable):
103
+ """
104
+ The base class for all Arrow streams.
105
+
106
+ Streams are either readable, writable, or both.
107
+ They optionally support seeking.
108
+
109
+ While this class exposes methods to read or write data from Python, the
110
+ primary intent of using a Arrow stream is to pass it to other Arrow
111
+ facilities that will make use of it, such as Arrow IPC routines.
112
+
113
+ Be aware that there are subtle differences with regular Python files,
114
+ e.g. destroying a writable Arrow stream without closing it explicitly
115
+ will not flush any pending data.
116
+ """
117
+
118
+ # Default chunk size for chunked reads.
119
+ # Use a large enough value for networked filesystems.
120
+ _default_chunk_size = 256 * 1024
121
+
122
+ def __cinit__(self):
123
+ self.own_file = False
124
+ self.is_readable = False
125
+ self.is_writable = False
126
+ self.is_seekable = False
127
+ self._is_appending = False
128
+
129
+ def __dealloc__(self):
130
+ if self.own_file:
131
+ self.close()
132
+
133
+ def __enter__(self):
134
+ return self
135
+
136
+ def __exit__(self, exc_type, exc_value, tb):
137
+ self.close()
138
+
139
+ def __repr__(self):
140
+ name = f"pyarrow.{self.__class__.__name__}"
141
+ return (f"<{name} "
142
+ f"closed={self.closed} "
143
+ f"own_file={self.own_file} "
144
+ f"is_seekable={self.is_seekable} "
145
+ f"is_writable={self.is_writable} "
146
+ f"is_readable={self.is_readable}>")
147
+
148
    @property
    def mode(self):
        """
        The file mode. Currently instances of NativeFile may support:

        * rb: binary read
        * wb: binary write
        * rb+: binary read and write
        * ab: binary append
        """
        # Emulate built-in file modes
        if self.is_readable and self.is_writable:
            return 'rb+'
        elif self.is_readable:
            return 'rb'
        elif self.is_writable and self._is_appending:
            # Append must be checked before plain write mode, since an
            # appending stream is also writable.
            return 'ab'
        elif self.is_writable:
            return 'wb'
        else:
            raise ValueError('File object is malformed, has no mode')
169
+
170
+ def readable(self):
171
+ self._assert_open()
172
+ return self.is_readable
173
+
174
+ def writable(self):
175
+ self._assert_open()
176
+ return self.is_writable
177
+
178
+ def seekable(self):
179
+ self._assert_open()
180
+ return self.is_seekable
181
+
182
+ def isatty(self):
183
+ self._assert_open()
184
+ return False
185
+
186
+ def fileno(self):
187
+ """
188
+ NOT IMPLEMENTED
189
+ """
190
+ raise UnsupportedOperation()
191
+
192
+ @property
193
+ def closed(self):
194
+ if self.is_readable:
195
+ return self.input_stream.get().closed()
196
+ elif self.is_writable:
197
+ return self.output_stream.get().closed()
198
+ else:
199
+ return True
200
+
201
+ def close(self):
202
+ if not self.closed:
203
+ with nogil:
204
+ if self.is_readable:
205
+ check_status(self.input_stream.get().Close())
206
+ else:
207
+ check_status(self.output_stream.get().Close())
208
+
209
+ cdef set_random_access_file(self, shared_ptr[CRandomAccessFile] handle):
210
+ self.input_stream = <shared_ptr[CInputStream]> handle
211
+ self.random_access = handle
212
+ self.is_seekable = True
213
+
214
+ cdef set_input_stream(self, shared_ptr[CInputStream] handle):
215
+ self.input_stream = handle
216
+ self.random_access.reset()
217
+ self.is_seekable = False
218
+
219
+ cdef set_output_stream(self, shared_ptr[COutputStream] handle):
220
+ self.output_stream = handle
221
+
222
+ cdef shared_ptr[CRandomAccessFile] get_random_access_file(self) except *:
223
+ self._assert_readable()
224
+ self._assert_seekable()
225
+ return self.random_access
226
+
227
+ cdef shared_ptr[CInputStream] get_input_stream(self) except *:
228
+ self._assert_readable()
229
+ return self.input_stream
230
+
231
+ cdef shared_ptr[COutputStream] get_output_stream(self) except *:
232
+ self._assert_writable()
233
+ return self.output_stream
234
+
235
+ def _assert_open(self):
236
+ if self.closed:
237
+ raise ValueError("I/O operation on closed file")
238
+
239
+ def _assert_readable(self):
240
+ self._assert_open()
241
+ if not self.is_readable:
242
+ # XXX UnsupportedOperation
243
+ raise IOError("only valid on readable files")
244
+
245
+ def _assert_writable(self):
246
+ self._assert_open()
247
+ if not self.is_writable:
248
+ raise IOError("only valid on writable files")
249
+
250
+ def _assert_seekable(self):
251
+ self._assert_open()
252
+ if not self.is_seekable:
253
+ raise IOError("only valid on seekable files")
254
+
255
+ def size(self):
256
+ """
257
+ Return file size
258
+ """
259
+ cdef int64_t size
260
+
261
+ handle = self.get_random_access_file()
262
+ with nogil:
263
+ size = GetResultValue(handle.get().GetSize())
264
+
265
+ return size
266
+
267
+ def metadata(self):
268
+ """
269
+ Return file metadata
270
+ """
271
+ cdef:
272
+ shared_ptr[const CKeyValueMetadata] c_metadata
273
+
274
+ handle = self.get_input_stream()
275
+ with nogil:
276
+ c_metadata = GetResultValue(handle.get().ReadMetadata())
277
+
278
+ metadata = {}
279
+ if c_metadata.get() != nullptr:
280
+ for i in range(c_metadata.get().size()):
281
+ metadata[frombytes(c_metadata.get().key(i))] = \
282
+ c_metadata.get().value(i)
283
+ return metadata
284
+
285
+ def tell(self):
286
+ """
287
+ Return current stream position
288
+ """
289
+ cdef int64_t position
290
+
291
+ if self.is_readable:
292
+ rd_handle = self.get_random_access_file()
293
+ with nogil:
294
+ position = GetResultValue(rd_handle.get().Tell())
295
+ else:
296
+ wr_handle = self.get_output_stream()
297
+ with nogil:
298
+ position = GetResultValue(wr_handle.get().Tell())
299
+
300
+ return position
301
+
302
    def seek(self, int64_t position, int whence=0):
        """
        Change current file stream position

        Parameters
        ----------
        position : int
            Byte offset, interpreted relative to value of whence argument
        whence : int, default 0
            Point of reference for seek offset

        Notes
        -----
        Values of whence:
        * 0 -- start of stream (the default); offset should be zero or positive
        * 1 -- current stream position; offset may be negative
        * 2 -- end of stream; offset is usually negative

        Returns
        -------
        int
            The new absolute stream position.
        """
        cdef int64_t offset
        handle = self.get_random_access_file()

        with nogil:
            if whence == 0:
                # Absolute offset from the start of the stream.
                offset = position
            elif whence == 1:
                # Relative to current position: query Tell() first.
                offset = GetResultValue(handle.get().Tell())
                offset = offset + position
            elif whence == 2:
                # Relative to end of file: query GetSize() first.
                offset = GetResultValue(handle.get().GetSize())
                offset = offset + position
            else:
                with gil:
                    # Reacquire the GIL to raise a Python exception.
                    raise ValueError("Invalid value of whence: {0}"
                                     .format(whence))
            check_status(handle.get().Seek(offset))

        return self.tell()
344
+
345
+ def flush(self):
346
+ """
347
+ Flush the stream, if applicable.
348
+
349
+ An error is raised if stream is not writable.
350
+ """
351
+ self._assert_open()
352
+ # For IOBase compatibility, flush() on an input stream is a no-op
353
+ if self.is_writable:
354
+ handle = self.get_output_stream()
355
+ with nogil:
356
+ check_status(handle.get().Flush())
357
+
358
+ def write(self, data):
359
+ """
360
+ Write data to the file.
361
+
362
+ Parameters
363
+ ----------
364
+ data : bytes-like object or exporter of buffer protocol
365
+
366
+ Returns
367
+ -------
368
+ int
369
+ nbytes: number of bytes written
370
+ """
371
+ self._assert_writable()
372
+ handle = self.get_output_stream()
373
+
374
+ cdef shared_ptr[CBuffer] buf = as_c_buffer(data)
375
+
376
+ with nogil:
377
+ check_status(handle.get().WriteBuffer(buf))
378
+ return buf.get().size()
379
+
380
    def read(self, nbytes=None):
        """
        Read and return up to n bytes.

        If *nbytes* is None, then the entire remaining file contents are read.

        Parameters
        ----------
        nbytes : int, default None

        Returns
        -------
        data : bytes
        """
        cdef:
            int64_t c_nbytes
            int64_t bytes_read = 0
            PyObject* obj

        if nbytes is None:
            if not self.is_seekable:
                # Cannot get file size => read chunkwise
                bs = self._default_chunk_size
                chunks = []
                while True:
                    chunk = self.read(bs)
                    if not chunk:
                        break
                    chunks.append(chunk)
                return b"".join(chunks)

            # Seekable: remaining length is (file size - current position).
            c_nbytes = self.size() - self.tell()
        else:
            c_nbytes = nbytes

        handle = self.get_input_stream()

        # Allocate empty write space
        obj = PyBytes_FromStringAndSizeNative(NULL, c_nbytes)

        # Read directly into the bytes object's internal buffer,
        # avoiding an extra copy.
        cdef uint8_t* buf = <uint8_t*> cp.PyBytes_AS_STRING(<object> obj)
        with nogil:
            bytes_read = GetResultValue(handle.get().Read(c_nbytes, buf))

        if bytes_read < c_nbytes:
            # Short read (e.g. EOF): shrink the bytes object in place.
            cp._PyBytes_Resize(&obj, <Py_ssize_t> bytes_read)

        return PyObject_to_object(obj)
428
+
429
+ def get_stream(self, file_offset, nbytes):
430
+ """
431
+ Return an input stream that reads a file segment independent of the
432
+ state of the file.
433
+
434
+ Allows reading portions of a random access file as an input stream
435
+ without interfering with each other.
436
+
437
+ Parameters
438
+ ----------
439
+ file_offset : int
440
+ nbytes : int
441
+
442
+ Returns
443
+ -------
444
+ stream : NativeFile
445
+ """
446
+ cdef:
447
+ shared_ptr[CInputStream] data
448
+ int64_t c_file_offset
449
+ int64_t c_nbytes
450
+
451
+ c_file_offset = file_offset
452
+ c_nbytes = nbytes
453
+
454
+ handle = self.get_random_access_file()
455
+
456
+ data = GetResultValue(
457
+ CRandomAccessFile.GetStream(handle, c_file_offset, c_nbytes))
458
+
459
+ stream = NativeFile()
460
+ stream.set_input_stream(data)
461
+ stream.is_readable = True
462
+
463
+ return stream
464
+
465
+ def read_at(self, nbytes, offset):
466
+ """
467
+ Read indicated number of bytes at offset from the file
468
+
469
+ Parameters
470
+ ----------
471
+ nbytes : int
472
+ offset : int
473
+
474
+ Returns
475
+ -------
476
+ data : bytes
477
+ """
478
+ cdef:
479
+ int64_t c_nbytes
480
+ int64_t c_offset
481
+ int64_t bytes_read = 0
482
+ PyObject* obj
483
+
484
+ c_nbytes = nbytes
485
+
486
+ c_offset = offset
487
+
488
+ handle = self.get_random_access_file()
489
+
490
+ # Allocate empty write space
491
+ obj = PyBytes_FromStringAndSizeNative(NULL, c_nbytes)
492
+
493
+ cdef uint8_t* buf = <uint8_t*> cp.PyBytes_AS_STRING(<object> obj)
494
+ with nogil:
495
+ bytes_read = GetResultValue(handle.get().
496
+ ReadAt(c_offset, c_nbytes, buf))
497
+
498
+ if bytes_read < c_nbytes:
499
+ cp._PyBytes_Resize(&obj, <Py_ssize_t> bytes_read)
500
+
501
+ return PyObject_to_object(obj)
502
+
503
+ def read1(self, nbytes=None):
504
+ """Read and return up to n bytes.
505
+
506
+ Unlike read(), if *nbytes* is None then a chunk is read, not the
507
+ entire file.
508
+
509
+ Parameters
510
+ ----------
511
+ nbytes : int, default None
512
+ The maximum number of bytes to read.
513
+
514
+ Returns
515
+ -------
516
+ data : bytes
517
+ """
518
+ if nbytes is None:
519
+ # The expectation when passing `nbytes=None` is not to read the
520
+ # entire file but to issue a single underlying read call up to
521
+ # a reasonable size (the use case being to read a bufferable
522
+ # amount of bytes, such as with io.TextIOWrapper).
523
+ nbytes = self._default_chunk_size
524
+ return self.read(nbytes)
525
+
526
+ def readall(self):
527
+ return self.read()
528
+
529
+ def readinto(self, b):
530
+ """
531
+ Read into the supplied buffer
532
+
533
+ Parameters
534
+ ----------
535
+ b : buffer-like object
536
+ A writable buffer object (such as a bytearray).
537
+
538
+ Returns
539
+ -------
540
+ written : int
541
+ number of bytes written
542
+ """
543
+
544
+ cdef:
545
+ int64_t bytes_read
546
+ uint8_t* buf
547
+ Buffer py_buf
548
+ int64_t buf_len
549
+
550
+ handle = self.get_input_stream()
551
+
552
+ py_buf = py_buffer(b)
553
+ buf_len = py_buf.size
554
+ buf = py_buf.buffer.get().mutable_data()
555
+
556
+ with nogil:
557
+ bytes_read = GetResultValue(handle.get().Read(buf_len, buf))
558
+
559
+ return bytes_read
560
+
561
+ def readline(self, size=None):
562
+ """NOT IMPLEMENTED. Read and return a line of bytes from the file.
563
+
564
+ If size is specified, read at most size bytes.
565
+
566
+ Line terminator is always b"\\n".
567
+
568
+ Parameters
569
+ ----------
570
+ size : int
571
+ maximum number of bytes read
572
+ """
573
+ raise UnsupportedOperation()
574
+
575
+ def readlines(self, hint=None):
576
+ """NOT IMPLEMENTED. Read lines of the file
577
+
578
+ Parameters
579
+ ----------
580
+ hint : int
581
+ maximum number of bytes read until we stop
582
+ """
583
+ raise UnsupportedOperation()
584
+
585
+ def __iter__(self):
586
+ self._assert_readable()
587
+ return self
588
+
589
+ def __next__(self):
590
+ line = self.readline()
591
+ if not line:
592
+ raise StopIteration
593
+ return line
594
+
595
+ def read_buffer(self, nbytes=None):
596
+ """
597
+ Read from buffer.
598
+
599
+ Parameters
600
+ ----------
601
+ nbytes : int, optional
602
+ maximum number of bytes read
603
+ """
604
+ cdef:
605
+ int64_t c_nbytes
606
+ int64_t bytes_read = 0
607
+ shared_ptr[CBuffer] output
608
+
609
+ handle = self.get_input_stream()
610
+
611
+ if nbytes is None:
612
+ if not self.is_seekable:
613
+ # Cannot get file size => read chunkwise
614
+ return py_buffer(self.read())
615
+ c_nbytes = self.size() - self.tell()
616
+ else:
617
+ c_nbytes = nbytes
618
+
619
+ with nogil:
620
+ output = GetResultValue(handle.get().ReadBuffer(c_nbytes))
621
+
622
+ return pyarrow_wrap_buffer(output)
623
+
624
+ def truncate(self):
625
+ """
626
+ NOT IMPLEMENTED
627
+ """
628
+ raise UnsupportedOperation()
629
+
630
+ def writelines(self, lines):
631
+ """
632
+ Write lines to the file.
633
+
634
+ Parameters
635
+ ----------
636
+ lines : iterable
637
+ Iterable of bytes-like objects or exporters of buffer protocol
638
+ """
639
+ self._assert_writable()
640
+
641
+ for line in lines:
642
+ self.write(line)
643
+
644
+ def download(self, stream_or_path, buffer_size=None):
645
+ """
646
+ Read this file completely to a local path or destination stream.
647
+
648
+ This method first seeks to the beginning of the file.
649
+
650
+ Parameters
651
+ ----------
652
+ stream_or_path : str or file-like object
653
+ If a string, a local file path to write to; otherwise,
654
+ should be a writable stream.
655
+ buffer_size : int, optional
656
+ The buffer size to use for data transfers.
657
+ """
658
+ cdef:
659
+ int64_t bytes_read = 0
660
+ uint8_t* buf
661
+
662
+ if not is_threading_enabled():
663
+ return self._download_nothreads(stream_or_path, buffer_size)
664
+
665
+ handle = self.get_input_stream()
666
+
667
+ buffer_size = buffer_size or DEFAULT_BUFFER_SIZE
668
+
669
+ write_queue = Queue(50)
670
+
671
+ if not hasattr(stream_or_path, 'read'):
672
+ stream = open(stream_or_path, 'wb')
673
+
674
+ def cleanup():
675
+ stream.close()
676
+ else:
677
+ stream = stream_or_path
678
+
679
+ def cleanup():
680
+ pass
681
+
682
+ done = False
683
+ exc_info = None
684
+
685
+ def bg_write():
686
+ try:
687
+ while not done or write_queue.qsize() > 0:
688
+ try:
689
+ buf = write_queue.get(timeout=0.01)
690
+ except QueueEmpty:
691
+ continue
692
+ stream.write(buf)
693
+ except Exception as e:
694
+ exc_info = sys.exc_info()
695
+ finally:
696
+ cleanup()
697
+
698
+ self.seek(0)
699
+
700
+ writer_thread = threading.Thread(target=bg_write)
701
+
702
+ # This isn't ideal -- PyBytes_FromStringAndSize copies the data from
703
+ # the passed buffer, so it's hard for us to avoid doubling the memory
704
+ buf = <uint8_t*> malloc(buffer_size)
705
+ if buf == NULL:
706
+ raise MemoryError("Failed to allocate {0} bytes"
707
+ .format(buffer_size))
708
+
709
+ writer_thread.start()
710
+
711
+ cdef int64_t total_bytes = 0
712
+ cdef int32_t c_buffer_size = buffer_size
713
+
714
+ try:
715
+ while True:
716
+ with nogil:
717
+ bytes_read = GetResultValue(
718
+ handle.get().Read(c_buffer_size, buf))
719
+
720
+ total_bytes += bytes_read
721
+
722
+ # EOF
723
+ if bytes_read == 0:
724
+ break
725
+
726
+ pybuf = cp.PyBytes_FromStringAndSize(<const char*>buf,
727
+ bytes_read)
728
+
729
+ if writer_thread.is_alive():
730
+ while write_queue.full():
731
+ time.sleep(0.01)
732
+ else:
733
+ break
734
+
735
+ write_queue.put_nowait(pybuf)
736
+ finally:
737
+ free(buf)
738
+ done = True
739
+
740
+ writer_thread.join()
741
+ if exc_info is not None:
742
+ raise exc_info[0], exc_info[1], exc_info[2]
743
+
744
    def _download_nothreads(self, stream_or_path, buffer_size=None):
        """
        Internal method to do a download without separate threads, queues etc.
        Called by download above if is_threading_enabled() == False
        """
        cdef:
            int64_t bytes_read = 0
            uint8_t* buf

        handle = self.get_input_stream()

        buffer_size = buffer_size or DEFAULT_BUFFER_SIZE

        if not hasattr(stream_or_path, 'read'):
            # Destination given as a path: we own the stream and close it
            # when done.
            stream = open(stream_or_path, 'wb')

            def cleanup():
                stream.close()
        else:
            # Caller-provided stream: caller keeps ownership, no cleanup.
            stream = stream_or_path

            def cleanup():
                pass

        self.seek(0)

        # This isn't ideal -- PyBytes_FromStringAndSize copies the data from
        # the passed buffer, so it's hard for us to avoid doubling the memory
        buf = <uint8_t*> malloc(buffer_size)
        if buf == NULL:
            raise MemoryError("Failed to allocate {0} bytes"
                              .format(buffer_size))

        cdef int64_t total_bytes = 0
        cdef int32_t c_buffer_size = buffer_size

        try:
            while True:
                with nogil:
                    bytes_read = GetResultValue(
                        handle.get().Read(c_buffer_size, buf))

                total_bytes += bytes_read

                # EOF
                if bytes_read == 0:
                    break

                pybuf = cp.PyBytes_FromStringAndSize(<const char*>buf,
                                                     bytes_read)

                # no background thread - write on main thread
                stream.write(pybuf)
        finally:
            free(buf)
            cleanup()
800
+
801
+ def upload(self, stream, buffer_size=None):
802
+ """
803
+ Write from a source stream to this file.
804
+
805
+ Parameters
806
+ ----------
807
+ stream : file-like object
808
+ Source stream to pipe to this file.
809
+ buffer_size : int, optional
810
+ The buffer size to use for data transfers.
811
+ """
812
+ if not is_threading_enabled():
813
+ return self._upload_nothreads(stream, buffer_size)
814
+
815
+ write_queue = Queue(50)
816
+ self._assert_writable()
817
+
818
+ buffer_size = buffer_size or DEFAULT_BUFFER_SIZE
819
+
820
+ done = False
821
+ exc_info = None
822
+
823
+ def bg_write():
824
+ try:
825
+ while not done or write_queue.qsize() > 0:
826
+ try:
827
+ buf = write_queue.get(timeout=0.01)
828
+ except QueueEmpty:
829
+ continue
830
+
831
+ self.write(buf)
832
+
833
+ except Exception as e:
834
+ exc_info = sys.exc_info()
835
+
836
+ writer_thread = threading.Thread(target=bg_write)
837
+ writer_thread.start()
838
+
839
+ try:
840
+ while True:
841
+ buf = stream.read(buffer_size)
842
+ if not buf:
843
+ break
844
+
845
+ if writer_thread.is_alive():
846
+ while write_queue.full():
847
+ time.sleep(0.01)
848
+ else:
849
+ break
850
+
851
+ write_queue.put_nowait(buf)
852
+ finally:
853
+ done = True
854
+
855
+ writer_thread.join()
856
+ if exc_info is not None:
857
+ raise exc_info[0], exc_info[1], exc_info[2]
858
+
859
    def _upload_nothreads(self, stream, buffer_size=None):
        """
        Internal method to do an upload without separate threads, queues etc.
        Called by upload above if is_threading_enabled() == False
        """
        self._assert_writable()

        buffer_size = buffer_size or DEFAULT_BUFFER_SIZE

        while True:
            buf = stream.read(buffer_size)
            if not buf:
                # EOF on the source stream.
                break

            # no threading - just write
            self.write(buf)
875
+
876
+
877
+ BufferedIOBase.register(NativeFile)
878
+
879
+ # ----------------------------------------------------------------------
880
+ # Python file-like objects
881
+
882
+
883
+ cdef class PythonFile(NativeFile):
884
+ """
885
+ A stream backed by a Python file object.
886
+
887
+ This class allows using Python file objects with arbitrary Arrow
888
+ functions, including functions written in another language than Python.
889
+
890
+ As a downside, there is a non-zero redirection cost in translating
891
+ Arrow stream calls to Python method calls. Furthermore, Python's
892
+ Global Interpreter Lock may limit parallelism in some situations.
893
+
894
+ Examples
895
+ --------
896
+ >>> import io
897
+ >>> import pyarrow as pa
898
+ >>> pa.PythonFile(io.BytesIO())
899
+ <pyarrow.PythonFile closed=False own_file=False is_seekable=False is_writable=True is_readable=False>
900
+
901
+ Create a stream for writing:
902
+
903
+ >>> buf = io.BytesIO()
904
+ >>> f = pa.PythonFile(buf, mode = 'w')
905
+ >>> f.writable()
906
+ True
907
+ >>> f.write(b'PythonFile')
908
+ 10
909
+ >>> buf.getvalue()
910
+ b'PythonFile'
911
+ >>> f.close()
912
+ >>> f
913
+ <pyarrow.PythonFile closed=True own_file=False is_seekable=False is_writable=True is_readable=False>
914
+
915
+ Create a stream for reading:
916
+
917
+ >>> buf = io.BytesIO(b'PythonFile')
918
+ >>> f = pa.PythonFile(buf, mode = 'r')
919
+ >>> f.mode
920
+ 'rb'
921
+ >>> f.read()
922
+ b'PythonFile'
923
+ >>> f
924
+ <pyarrow.PythonFile closed=False own_file=False is_seekable=True is_writable=False is_readable=True>
925
+ >>> f.close()
926
+ >>> f
927
+ <pyarrow.PythonFile closed=True own_file=False is_seekable=True is_writable=False is_readable=True>
928
+ """
929
+ cdef:
930
+ object handle
931
+
932
    def __cinit__(self, handle, mode=None):
        # Wrap a Python file-like object as an Arrow stream.
        # `mode` may be given explicitly ('r'/'w' variants) or is
        # inferred from the handle.
        self.handle = handle

        if mode is None:
            try:
                inferred_mode = handle.mode
            except AttributeError:
                # Not all file-like objects have a mode attribute
                # (e.g. BytesIO)
                try:
                    inferred_mode = 'w' if handle.writable() else 'r'
                except AttributeError:
                    raise ValueError("could not infer open mode for file-like "
                                     "object %r, please pass it explicitly"
                                     % (handle,))
        else:
            inferred_mode = mode

        # Only the leading character matters; 'rb', 'r+', 'wb', ... are
        # all accepted.
        if inferred_mode.startswith('w'):
            kind = 'w'
        elif inferred_mode.startswith('r'):
            kind = 'r'
        else:
            raise ValueError('Invalid file mode: {0}'.format(mode))

        # If mode was given, check it matches the given file
        if mode is not None:
            if isinstance(handle, IOBase):
                # Python 3 IO object
                if kind == 'r':
                    if not handle.readable():
                        raise TypeError("readable file expected")
                else:
                    if not handle.writable():
                        raise TypeError("writable file expected")
            # (other duck-typed file-like objects are possible)

        # If possible, check the file is a binary file
        if isinstance(handle, TextIOBase):
            raise TypeError("binary file expected, got text file")

        if kind == 'r':
            # Readable handles are exposed as random-access files
            # (seekable from the Arrow side).
            self.set_random_access_file(
                shared_ptr[CRandomAccessFile](new PyReadableFile(handle)))
            self.is_readable = True
        else:
            self.set_output_stream(
                shared_ptr[COutputStream](new PyOutputStream(handle)))
            self.is_writable = True
981
+
982
def truncate(self, pos=None):
    """
    Truncate the wrapped file to at most the given size.

    Parameters
    ----------
    pos : int, optional
    """
    # Simply forward to the underlying Python file object.
    wrapped = self.handle
    wrapped.truncate(pos)
989
+
990
def readline(self, size=None):
    """
    Read and return a single line of bytes from the file.

    If size is specified, at most that many bytes are read.

    Parameters
    ----------
    size : int
        Maximum number of bytes read
    """
    # Forward the call to the underlying Python file object.
    wrapped = self.handle
    return wrapped.readline(size)
1002
+
1003
def readlines(self, hint=None):
    """
    Read and return the lines of the file as a list.

    Parameters
    ----------
    hint : int
        Maximum number of bytes read until we stop
    """
    # Forward the call to the underlying Python file object.
    wrapped = self.handle
    return wrapped.readlines(hint)
1013
+
1014
+
1015
cdef class MemoryMappedFile(NativeFile):
    """
    A stream that represents a memory-mapped file.

    Supports 'r', 'r+', 'w' modes.

    Examples
    --------
    Create a new file with memory map:

    >>> import pyarrow as pa
    >>> mmap = pa.create_memory_map('example_mmap.dat', 10)
    >>> mmap
    <pyarrow.MemoryMappedFile closed=False own_file=False is_seekable=True is_writable=True is_readable=True>
    >>> mmap.close()

    Open an existing file with memory map:

    >>> with pa.memory_map('example_mmap.dat') as mmap:
    ...     mmap
    ...
    <pyarrow.MemoryMappedFile closed=False own_file=False is_seekable=True is_writable=False is_readable=True>
    """
    cdef:
        # Owning handle on the underlying Arrow C++ memory-mapped file.
        shared_ptr[CMemoryMappedFile] handle
        # Path the map was created from / opened at (kept for reference).
        object path

    @staticmethod
    def create(path, size):
        """
        Create a MemoryMappedFile

        Parameters
        ----------
        path : str
            Where to create the file.
        size : int
            Size of the memory mapped file.
        """
        cdef:
            shared_ptr[CMemoryMappedFile] handle
            c_string c_path = encode_file_path(path)
            int64_t c_size = size

        with nogil:
            handle = GetResultValue(CMemoryMappedFile.Create(c_path, c_size))

        # A freshly created map is opened read-write: expose both the
        # output-stream and random-access interfaces.
        cdef MemoryMappedFile result = MemoryMappedFile()
        result.path = path
        result.is_readable = True
        result.is_writable = True
        result.set_output_stream(<shared_ptr[COutputStream]> handle)
        result.set_random_access_file(<shared_ptr[CRandomAccessFile]> handle)
        result.handle = handle

        return result

    def _open(self, path, mode='r'):
        # Open an existing file as a memory map; called by memory_map().
        self.path = path

        cdef:
            FileMode c_mode
            shared_ptr[CMemoryMappedFile] handle
            c_string c_path = encode_file_path(path)

        # Translate the Python mode string into the Arrow FileMode enum
        # and record readability/writability flags accordingly.
        if mode in ('r', 'rb'):
            c_mode = FileMode_READ
            self.is_readable = True
        elif mode in ('w', 'wb'):
            c_mode = FileMode_WRITE
            self.is_writable = True
        elif mode in ('r+', 'r+b', 'rb+'):
            c_mode = FileMode_READWRITE
            self.is_readable = True
            self.is_writable = True
        else:
            raise ValueError('Invalid file mode: {0}'.format(mode))

        with nogil:
            handle = GetResultValue(CMemoryMappedFile.Open(c_path, c_mode))

        self.set_output_stream(<shared_ptr[COutputStream]> handle)
        self.set_random_access_file(<shared_ptr[CRandomAccessFile]> handle)
        self.handle = handle

    def resize(self, new_size):
        """
        Resize the map and underlying file.

        Parameters
        ----------
        new_size : int
            New size in bytes.
        """
        check_status(self.handle.get().Resize(new_size))

    def fileno(self):
        """
        Return the OS-level file descriptor backing the memory map.
        """
        self._assert_open()
        return self.handle.get().file_descriptor()
1113
+
1114
+
1115
def memory_map(path, mode='r'):
    """
    Open memory map at file path. Size of the memory map cannot change.

    Parameters
    ----------
    path : str
    mode : {'r', 'r+', 'w'}, default 'r'
        Whether the file is opened for reading ('r'), writing ('w')
        or both ('r+').

    Returns
    -------
    mmap : MemoryMappedFile

    Examples
    --------
    Reading from a memory map without any memory allocation or copying:

    >>> import pyarrow as pa
    >>> with pa.output_stream('example_mmap.txt') as stream:
    ...     stream.write(b'Constructing a buffer referencing the mapped memory')
    ...
    51
    >>> with pa.memory_map('example_mmap.txt') as mmap:
    ...     mmap.read_at(6,45)
    ...
    b'memory'
    """
    # Reject directories up-front with a clear error message.
    _check_is_file(path)

    cdef MemoryMappedFile mmap = MemoryMappedFile()
    mmap._open(path, mode)
    return mmap
1149
+
1150
+
1151
cdef _check_is_file(path):
    # Raise a clearer error than the OS-level one when a directory path is
    # passed where a regular file is expected.
    if os.path.isdir(path):
        raise IOError("Expected file path, but {0} is a directory"
                      .format(path))
1155
+
1156
+
1157
def create_memory_map(path, size):
    """
    Create a file of the given size and memory-map it.

    Parameters
    ----------
    path : str
        The file path to create, on the local filesystem.
    size : int
        The file size to create.

    Returns
    -------
    mmap : MemoryMappedFile

    Examples
    --------
    Create a file with a memory map:

    >>> import pyarrow as pa
    >>> with pa.create_memory_map('example_mmap_create.dat', 27) as mmap:
    ...     mmap.write(b'Create a memory-mapped file')
    ...     mmap.read_at(10, 9)
    ...
    27
    b'memory-map'
    """
    # All the actual work happens in the MemoryMappedFile factory method.
    mapped = MemoryMappedFile.create(path, size)
    return mapped
1185
+
1186
+
1187
cdef class OSFile(NativeFile):
    """
    A stream backed by a regular file descriptor.

    Examples
    --------
    Create a new file to write to:

    >>> import pyarrow as pa
    >>> with pa.OSFile('example_osfile.arrow', mode='w') as f:
    ...     f.writable()
    ...     f.write(b'OSFile')
    ...     f.seekable()
    ...
    True
    6
    False

    Open the file to read:

    >>> with pa.OSFile('example_osfile.arrow', mode='r') as f:
    ...     f.mode
    ...     f.read()
    ...
    'rb'
    b'OSFile'

    Open the file to append:

    >>> with pa.OSFile('example_osfile.arrow', mode='ab') as f:
    ...     f.mode
    ...     f.write(b' is super!')
    ...
    'ab'
    10
    >>> with pa.OSFile('example_osfile.arrow') as f:
    ...     f.read()
    ...
    b'OSFile is super!'

    Inspect created OSFile:

    >>> pa.OSFile('example_osfile.arrow')
    <pyarrow.OSFile closed=False own_file=False is_seekable=True is_writable=False is_readable=True>
    """
    cdef:
        # File path the stream was opened on (kept for reference).
        object path

    def __cinit__(self, path, mode='r', MemoryPool memory_pool=None):
        _check_is_file(path)
        self.path = path

        cdef:
            FileMode c_mode
            shared_ptr[Readable] handle
            c_string c_path = encode_file_path(path)

        # Dispatch on the requested mode: read, write (truncate) or append.
        if mode in ('r', 'rb'):
            self._open_readable(c_path, maybe_unbox_memory_pool(memory_pool))
        elif mode in ('w', 'wb'):
            self._open_writable(c_path)
        elif mode in ('a', 'ab'):
            self._open_writable(c_path, append=True)
        else:
            raise ValueError('Invalid file mode: {0}'.format(mode))

    cdef _open_readable(self, c_string path, CMemoryPool* pool):
        # Open the path as an Arrow ReadableFile (random-access input).
        cdef shared_ptr[ReadableFile] handle

        with nogil:
            handle = GetResultValue(ReadableFile.Open(path, pool))

        self.is_readable = True
        self.set_random_access_file(<shared_ptr[CRandomAccessFile]> handle)

    cdef _open_writable(self, c_string path, c_bool append=False):
        # Open the path as an Arrow FileOutputStream, optionally appending.
        with nogil:
            self.output_stream = GetResultValue(
                FileOutputStream.OpenWithAppend(path, append)
            )
        self.is_writable = True
        self._is_appending = append

    def fileno(self):
        """
        Return the OS-level file descriptor of the open file.
        """
        self._assert_open()
        return self.handle.file_descriptor()
1273
+
1274
+
1275
cdef class FixedSizeBufferWriter(NativeFile):
    """
    A stream writing to a Arrow buffer.

    Examples
    --------
    Create a stream to write to ``pyarrow.Buffer``:

    >>> import pyarrow as pa
    >>> buf = pa.allocate_buffer(5)
    >>> with pa.output_stream(buf) as stream:
    ...     stream.write(b'abcde')
    ...     stream
    ...
    5
    <pyarrow.FixedSizeBufferWriter closed=False own_file=False is_seekable=False is_writable=True is_readable=False>

    Inspect the buffer:

    >>> buf.to_pybytes()
    b'abcde'
    >>> buf
    <pyarrow.Buffer address=... size=5 is_cpu=True is_mutable=True>
    """

    def __cinit__(self, Buffer buffer):
        # Wrap the (mutable, fixed-size) buffer in a C++ writer stream.
        self.output_stream.reset(new CFixedSizeBufferWriter(buffer.buffer))
        self.is_writable = True

    def set_memcopy_threads(self, int num_threads):
        """
        Parameters
        ----------
        num_threads : int
        """
        cdef CFixedSizeBufferWriter* writer = \
            <CFixedSizeBufferWriter*> self.output_stream.get()
        writer.set_memcopy_threads(num_threads)

    def set_memcopy_blocksize(self, int64_t blocksize):
        """
        Parameters
        ----------
        blocksize : int64
        """
        cdef CFixedSizeBufferWriter* writer = \
            <CFixedSizeBufferWriter*> self.output_stream.get()
        writer.set_memcopy_blocksize(blocksize)

    def set_memcopy_threshold(self, int64_t threshold):
        """
        Parameters
        ----------
        threshold : int64
        """
        cdef CFixedSizeBufferWriter* writer = \
            <CFixedSizeBufferWriter*> self.output_stream.get()
        writer.set_memcopy_threshold(threshold)
1333
+
1334
+
1335
+ # ----------------------------------------------------------------------
1336
+ # Arrow buffers
1337
+
1338
+
1339
cdef class Buffer(_Weakrefable):
    """
    The base class for all Arrow buffers.

    A buffer represents a contiguous memory area. Many buffers will own
    their memory, though not all of them do.
    """

    def __cinit__(self):
        pass

    def __init__(self):
        # Buffers are always created by wrapping an existing C++ buffer;
        # direct construction from Python is disallowed.
        raise TypeError("Do not call Buffer's constructor directly, use "
                        "`pyarrow.py_buffer` function instead.")

    cdef void init(self, const shared_ptr[CBuffer]& buffer):
        # Attach the C++ buffer and pre-fill the 1-D shape/strides used by
        # the Python buffer protocol (__getbuffer__).
        self.buffer = buffer
        self.shape[0] = self.size
        self.strides[0] = <Py_ssize_t>(1)

    def __len__(self):
        return self.size

    def __repr__(self):
        name = f"pyarrow.{self.__class__.__name__}"
        return (f"<{name} "
                f"address={hex(self.address)} "
                f"size={self.size} "
                f"is_cpu={self.is_cpu} "
                f"is_mutable={self.is_mutable}>")

    def _assert_cpu(self):
        # Guard for operations that require CPU-accessible memory.
        if not self.is_cpu:
            raise NotImplementedError("Implemented only for data on CPU device")

    @property
    def size(self):
        """
        The buffer size in bytes.
        """
        return self.buffer.get().size()

    @property
    def address(self):
        """
        The buffer's address, as an integer.

        The returned address may point to CPU or device memory.
        Use `is_cpu()` to disambiguate.
        """
        return self.buffer.get().address()

    def hex(self):
        """
        Compute hexadecimal representation of the buffer.

        Returns
        -------
        : bytes
        """
        self._assert_cpu()
        return self.buffer.get().ToHexString()

    @property
    def is_mutable(self):
        """
        Whether the buffer is mutable.
        """
        return self.buffer.get().is_mutable()

    @property
    def is_cpu(self):
        """
        Whether the buffer is CPU-accessible.
        """
        return self.buffer.get().is_cpu()

    @property
    def device(self):
        """
        The device where the buffer resides.

        Returns
        -------
        Device
        """
        return Device.wrap(self.buffer.get().device())

    @property
    def memory_manager(self):
        """
        The memory manager associated with the buffer.

        Returns
        -------
        MemoryManager
        """
        return MemoryManager.wrap(self.buffer.get().memory_manager())

    @property
    def device_type(self):
        """
        The device type where the buffer resides.

        Returns
        -------
        DeviceAllocationType
        """
        return _wrap_device_allocation_type(self.buffer.get().device_type())

    @property
    def parent(self):
        """
        The buffer's parent buffer, or None if it has no parent.
        """
        cdef shared_ptr[CBuffer] parent_buf = self.buffer.get().parent()

        if parent_buf.get() == NULL:
            return None
        else:
            return pyarrow_wrap_buffer(parent_buf)

    def __getitem__(self, key):
        # Slicing returns a zero-copy view; integer indexing returns the
        # byte value at that (normalized, bounds-checked) position.
        if isinstance(key, slice):
            if (key.step or 1) != 1:
                raise IndexError('only slices with step 1 supported')
            return _normalize_slice(self, key)

        return self.getitem(_normalize_index(key, self.size))

    cdef getitem(self, int64_t i):
        self._assert_cpu()
        return self.buffer.get().data()[i]

    def slice(self, offset=0, length=None):
        """
        Slice this buffer. Memory is not copied.

        You can also use the Python slice notation ``buffer[start:stop]``.

        Parameters
        ----------
        offset : int, default 0
            Offset from start of buffer to slice.
        length : int, default None
            Length of slice (default is until end of Buffer starting from
            offset).

        Returns
        -------
        sliced : Buffer
            A logical view over this buffer.
        """
        cdef shared_ptr[CBuffer] result

        if offset < 0:
            raise IndexError('Offset must be non-negative')

        if length is None:
            result = GetResultValue(SliceBufferSafe(self.buffer, offset))
        else:
            result = GetResultValue(SliceBufferSafe(self.buffer, offset,
                                                    length))
        return pyarrow_wrap_buffer(result)

    def equals(self, Buffer other):
        """
        Determine if two buffers contain exactly the same data.

        Parameters
        ----------
        other : Buffer

        Returns
        -------
        are_equal : bool
            True if buffer contents and size are equal
        """
        # Cross-device comparison is not supported; non-CPU buffers can
        # only be compared by identity (same address).
        if self.device != other.device:
            raise ValueError(
                "Device on which the data resides differs between buffers: "
                f"{self.device.type_name} and {other.device.type_name}."
            )
        if not self.is_cpu:
            if self.address != other.address:
                raise NotImplementedError(
                    "Implemented only for data on CPU device or data with equal "
                    "addresses"
                )

        cdef c_bool result = False
        with nogil:
            result = self.buffer.get().Equals(deref(other.buffer.get()))
        return result

    def __eq__(self, other):
        if isinstance(other, Buffer):
            return self.equals(other)
        else:
            # Coerce any buffer-like object to a Buffer before comparing.
            return self.equals(py_buffer(other))

    def __reduce_ex__(self, protocol):
        # Pickle support: protocol 5 allows out-of-band zero-copy transfer;
        # otherwise serialize the contents as bytearray/bytes.
        self._assert_cpu()

        if protocol >= 5:
            bufobj = pickle.PickleBuffer(self)
        elif self.buffer.get().is_mutable():
            # Need to pass a bytearray to recreate a mutable buffer when
            # unpickling.
            bufobj = PyByteArray_FromStringAndSize(
                <const char*>self.buffer.get().data(),
                self.buffer.get().size())
        else:
            bufobj = self.to_pybytes()
        return py_buffer, (bufobj,)

    def to_pybytes(self):
        """
        Return this buffer as a Python bytes object. Memory is copied.
        """
        self._assert_cpu()

        return cp.PyBytes_FromStringAndSize(
            <const char*>self.buffer.get().data(),
            self.buffer.get().size())

    def __getbuffer__(self, cp.Py_buffer* buffer, int flags):
        # Implement the Python buffer protocol (1-D contiguous bytes view).
        self._assert_cpu()

        if self.buffer.get().is_mutable():
            buffer.readonly = 0
        else:
            if flags & cp.PyBUF_WRITABLE:
                raise BufferError("Writable buffer requested but Arrow "
                                  "buffer was not mutable")
            buffer.readonly = 1
        buffer.buf = <char *>self.buffer.get().data()
        buffer.len = self.size
        if buffer.buf == NULL:
            # ARROW-16048: Ensure we don't export a NULL address.
            assert buffer.len == 0
            buffer.buf = cp.PyBytes_AS_STRING(b"")
        buffer.format = 'b'
        buffer.internal = NULL
        buffer.itemsize = 1
        buffer.ndim = 1
        buffer.obj = self
        buffer.shape = self.shape
        buffer.strides = self.strides
        buffer.suboffsets = NULL
1586
+
1587
+
1588
cdef class ResizableBuffer(Buffer):
    """
    A base class for buffers that can be resized.
    """

    cdef void init_rz(self, const shared_ptr[CResizableBuffer]& buffer):
        # Initialize from a resizable C++ buffer via the base-class init.
        self.init(<shared_ptr[CBuffer]> buffer)

    def resize(self, int64_t new_size, shrink_to_fit=False):
        """
        Resize buffer to indicated size.

        Parameters
        ----------
        new_size : int
            New size of buffer (padding may be added internally).
        shrink_to_fit : bool, default False
            If this is true, the buffer is shrunk when new_size is less
            than the current size.
            If this is false, the buffer is never shrunk.
        """
        cdef c_bool c_shrink_to_fit = shrink_to_fit
        with nogil:
            check_status((<CResizableBuffer*> self.buffer.get())
                         .Resize(new_size, c_shrink_to_fit))
1613
+
1614
+
1615
cdef shared_ptr[CResizableBuffer] _allocate_buffer(CMemoryPool* pool) except *:
    # Allocate an empty (zero-length) resizable buffer from the given pool.
    with nogil:
        return to_shared(GetResultValue(AllocateResizableBuffer(0, pool)))
1618
+
1619
+
1620
def allocate_buffer(int64_t size, MemoryPool memory_pool=None,
                    resizable=False):
    """
    Allocate a mutable buffer.

    Parameters
    ----------
    size : int
        Number of bytes to allocate (plus internal padding)
    memory_pool : MemoryPool, optional
        The pool to allocate memory from.
        If not given, the default memory pool is used.
    resizable : bool, default False
        If true, the returned buffer is resizable.

    Returns
    -------
    buffer : Buffer or ResizableBuffer
    """
    cdef:
        CMemoryPool* cpool = maybe_unbox_memory_pool(memory_pool)
        shared_ptr[CResizableBuffer] c_rz_buffer
        shared_ptr[CBuffer] c_buffer

    # The two allocation paths use different C++ types, hence the two
    # distinct wrap calls.
    if resizable:
        with nogil:
            c_rz_buffer = to_shared(GetResultValue(
                AllocateResizableBuffer(size, cpool)))
        return pyarrow_wrap_resizable_buffer(c_rz_buffer)
    else:
        with nogil:
            c_buffer = to_shared(GetResultValue(AllocateBuffer(size, cpool)))
        return pyarrow_wrap_buffer(c_buffer)
1653
+
1654
+
1655
cdef class BufferOutputStream(NativeFile):
    """
    An output stream that writes to a resizable buffer.

    The buffer is produced as a result when ``getvalue()`` is called.

    Examples
    --------
    Create an output stream, write data to it and finalize it with
    ``getvalue()``:

    >>> import pyarrow as pa
    >>> f = pa.BufferOutputStream()
    >>> f.write(b'pyarrow.Buffer')
    14
    >>> f.closed
    False
    >>> f.getvalue()
    <pyarrow.Buffer address=... size=14 is_cpu=True is_mutable=True>
    >>> f.closed
    True
    """

    cdef:
        # The resizable buffer accumulating the written data.
        shared_ptr[CResizableBuffer] buffer

    def __cinit__(self, MemoryPool memory_pool=None):
        self.buffer = _allocate_buffer(maybe_unbox_memory_pool(memory_pool))
        self.output_stream.reset(new CBufferOutputStream(
            <shared_ptr[CResizableBuffer]> self.buffer))
        self.is_writable = True

    def getvalue(self):
        """
        Finalize output stream and return result as pyarrow.Buffer.

        Returns
        -------
        value : Buffer
        """
        # Closing the stream flushes it; the stream cannot be written to
        # afterwards (see the class docstring example).
        with nogil:
            check_status(self.output_stream.get().Close())
        return pyarrow_wrap_buffer(<shared_ptr[CBuffer]> self.buffer)
1698
+
1699
+
1700
cdef class MockOutputStream(NativeFile):
    """
    A helper output stream backed by Arrow's ``CMockOutputStream``, which
    tracks the total number of bytes written to it (see ``size()``).
    """

    def __cinit__(self):
        self.output_stream.reset(new CMockOutputStream())
        self.is_writable = True

    def size(self):
        """
        Return the extent (in bytes) of the data written so far.
        """
        handle = <CMockOutputStream*> self.output_stream.get()
        return handle.GetExtentBytesWritten()
1709
+
1710
+
1711
cdef class BufferReader(NativeFile):
    """
    Zero-copy reader from objects convertible to Arrow buffer.

    Parameters
    ----------
    obj : Python bytes or pyarrow.Buffer

    Examples
    --------
    Create an Arrow input stream and inspect it:

    >>> import pyarrow as pa
    >>> data = b'reader data'
    >>> buf = memoryview(data)
    >>> with pa.input_stream(buf) as stream:
    ...     stream.size()
    ...     stream.read(6)
    ...     stream.seek(7)
    ...     stream.read(15)
    ...
    11
    b'reader'
    7
    b'data'
    """
    cdef:
        # Keeps the source buffer alive for the lifetime of the reader.
        Buffer buffer

    # XXX Needed to make numpydoc happy
    def __init__(self, obj):
        pass

    def __cinit__(self, object obj):
        # Coerce any buffer-like object to an Arrow buffer (zero-copy when
        # possible) and expose it through a random-access reader.
        self.buffer = as_buffer(obj)
        self.set_random_access_file(shared_ptr[CRandomAccessFile](
            new CBufferReader(self.buffer.buffer)))
        self.is_readable = True
1749
+
1750
+
1751
cdef class CompressedInputStream(NativeFile):
    """
    An input stream wrapper which decompresses data on the fly.

    Parameters
    ----------
    stream : string, path, pyarrow.NativeFile, or file-like object
        Input stream object to wrap with the compression.
    compression : str
        The compression type ("bz2", "brotli", "gzip", "lz4" or "zstd").

    Examples
    --------
    Create an output stream which compresses the data:

    >>> import pyarrow as pa
    >>> data = b"Compressed stream"
    >>> raw = pa.BufferOutputStream()
    >>> with pa.CompressedOutputStream(raw, "gzip") as compressed:
    ...     compressed.write(data)
    ...
    17

    Create an input stream with decompression referencing the
    buffer with compressed data:

    >>> cdata = raw.getvalue()
    >>> with pa.input_stream(cdata, compression="gzip") as compressed:
    ...     compressed.read()
    ...
    b'Compressed stream'

    which actually translates to the use of ``BufferReader`` and
    ``CompressedInputStream``:

    >>> raw = pa.BufferReader(cdata)
    >>> with pa.CompressedInputStream(raw, "gzip") as compressed:
    ...     compressed.read()
    ...
    b'Compressed stream'
    """

    def __init__(self, object stream, str compression not None):
        cdef:
            NativeFile nf
            Codec codec = Codec(compression)
            shared_ptr[CInputStream] c_reader
            shared_ptr[CCompressedInputStream] compressed_stream
        # Coerce the source to a NativeFile, then wrap its C++ input
        # stream in a decompressing stream for the chosen codec.
        nf = get_native_file(stream, False)
        c_reader = nf.get_input_stream()
        compressed_stream = GetResultValue(
            CCompressedInputStream.Make(codec.unwrap(), c_reader)
        )
        self.set_input_stream(<shared_ptr[CInputStream]> compressed_stream)
        self.is_readable = True
1806
+
1807
+
1808
cdef class CompressedOutputStream(NativeFile):
    """
    An output stream wrapper which compresses data on the fly.

    Parameters
    ----------
    stream : string, path, pyarrow.NativeFile, or file-like object
        Input stream object to wrap with the compression.
    compression : str
        The compression type ("bz2", "brotli", "gzip", "lz4" or "zstd").

    Examples
    --------
    Create an output stream which compresses the data:

    >>> import pyarrow as pa
    >>> data = b"Compressed stream"
    >>> raw = pa.BufferOutputStream()
    >>> with pa.CompressedOutputStream(raw, "gzip") as compressed:
    ...     compressed.write(data)
    ...
    17
    """

    def __init__(self, object stream, str compression not None):
        cdef:
            Codec codec = Codec(compression)
            shared_ptr[COutputStream] c_writer
            shared_ptr[CCompressedOutputStream] compressed_stream
        # Resolve the destination to a C++ output stream and wrap it in a
        # compressing stream for the chosen codec.
        get_writer(stream, &c_writer)
        compressed_stream = GetResultValue(
            CCompressedOutputStream.Make(codec.unwrap(), c_writer)
        )
        self.set_output_stream(<shared_ptr[COutputStream]> compressed_stream)
        self.is_writable = True
1843
+
1844
+
1845
# Pointer type aliases, needed so that dynamic_cast[...] can be spelled with
# a single type name in the detach() implementations below.
ctypedef CBufferedInputStream* _CBufferedInputStreamPtr
ctypedef CBufferedOutputStream* _CBufferedOutputStreamPtr
ctypedef CRandomAccessFile* _RandomAccessFilePtr
1848
+
1849
+
1850
cdef class BufferedInputStream(NativeFile):
    """
    An input stream that performs buffered reads from
    an unbuffered input stream, which can mitigate the overhead
    of many small reads in some cases.

    Parameters
    ----------
    stream : NativeFile
        The input stream to wrap with the buffer
    buffer_size : int
        Size of the temporary read buffer.
    memory_pool : MemoryPool
        The memory pool used to allocate the buffer.
    """

    def __init__(self, NativeFile stream, int buffer_size,
                 MemoryPool memory_pool=None):
        cdef shared_ptr[CBufferedInputStream] buffered_stream

        if buffer_size <= 0:
            raise ValueError('Buffer size must be larger than zero')
        buffered_stream = GetResultValue(CBufferedInputStream.Create(
            buffer_size, maybe_unbox_memory_pool(memory_pool),
            stream.get_input_stream()))

        self.set_input_stream(<shared_ptr[CInputStream]> buffered_stream)
        self.is_readable = True

    def detach(self):
        """
        Release the raw InputStream.
        Further operations on this stream are invalid.

        Returns
        -------
        raw : NativeFile
            The underlying raw input stream
        """
        cdef:
            shared_ptr[CInputStream] c_raw
            _CBufferedInputStreamPtr buffered
            NativeFile raw

        buffered = dynamic_cast[_CBufferedInputStreamPtr](
            self.input_stream.get())
        assert buffered != nullptr

        with nogil:
            c_raw = GetResultValue(buffered.Detach())

        raw = NativeFile()
        raw.is_readable = True
        # Find out whether the raw stream is a RandomAccessFile
        # or a mere InputStream.  This helps us support seek() etc.
        # selectively.
        if dynamic_cast[_RandomAccessFilePtr](c_raw.get()) != nullptr:
            raw.set_random_access_file(
                static_pointer_cast[CRandomAccessFile, CInputStream](c_raw))
        else:
            raw.set_input_stream(c_raw)
        return raw
1912
+
1913
+
1914
cdef class BufferedOutputStream(NativeFile):
    """
    An output stream that performs buffered writes to
    an unbuffered output stream, which can mitigate the overhead
    of many small writes in some cases.

    Parameters
    ----------
    stream : NativeFile
        The writable output stream to wrap with the buffer
    buffer_size : int
        Size of the buffer that should be added.
    memory_pool : MemoryPool
        The memory pool used to allocate the buffer.
    """

    def __init__(self, NativeFile stream, int buffer_size,
                 MemoryPool memory_pool=None):
        cdef shared_ptr[CBufferedOutputStream] buffered_stream

        if buffer_size <= 0:
            raise ValueError('Buffer size must be larger than zero')
        buffered_stream = GetResultValue(CBufferedOutputStream.Create(
            buffer_size, maybe_unbox_memory_pool(memory_pool),
            stream.get_output_stream()))

        self.set_output_stream(<shared_ptr[COutputStream]> buffered_stream)
        self.is_writable = True

    def detach(self):
        """
        Flush any buffered writes and release the raw OutputStream.
        Further operations on this stream are invalid.

        Returns
        -------
        raw : NativeFile
            The underlying raw output stream.
        """
        cdef:
            shared_ptr[COutputStream] c_raw
            _CBufferedOutputStreamPtr buffered
            NativeFile raw

        buffered = dynamic_cast[_CBufferedOutputStreamPtr](
            self.output_stream.get())
        assert buffered != nullptr

        with nogil:
            c_raw = GetResultValue(buffered.Detach())

        raw = NativeFile()
        raw.is_writable = True
        raw.set_output_stream(c_raw)
        return raw
1969
+
1970
+
1971
cdef void _cb_transform(transform_func, const shared_ptr[CBuffer]& src,
                        shared_ptr[CBuffer]* dest) except *:
    # C callback bridging the transform-stream vtable to a Python callable:
    # wrap the input buffer, call the Python function, unwrap its result.
    py_dest = transform_func(pyarrow_wrap_buffer(src))
    dest[0] = pyarrow_unwrap_buffer(py_buffer(py_dest))
1975
+
1976
+
1977
cdef class TransformInputStream(NativeFile):
    """
    Transform an input stream.

    Parameters
    ----------
    stream : NativeFile
        The stream to transform.
    transform_func : callable
        The transformation to apply.
    """

    def __init__(self, NativeFile stream, transform_func):
        self.set_input_stream(TransformInputStream.make_native(
            stream.get_input_stream(), transform_func))
        self.is_readable = True

    @staticmethod
    cdef shared_ptr[CInputStream] make_native(
            shared_ptr[CInputStream] stream, transform_func) except *:
        # Build the C++ transform stream, routing transform_func through
        # the _cb_transform trampoline.
        cdef:
            shared_ptr[CInputStream] transform_stream
            CTransformInputStreamVTable vtable

        vtable.transform = _cb_transform
        return MakeTransformInputStream(stream, move(vtable),
                                        transform_func)
2004
+
2005
+
2006
class Transcoder:
    """
    Callable that re-encodes byte chunks using a pair of incremental
    codec objects: input is decoded with *decoder*, then re-encoded
    with *encoder*.  An empty chunk signals end of stream.
    """

    def __init__(self, decoder, encoder):
        self._decoder = decoder
        self._encoder = encoder

    def __call__(self, buf):
        # An empty input chunk marks the final call, which lets the
        # incremental codecs flush any pending state.
        is_final = not len(buf)
        decoded = self._decoder.decode(buf, is_final)
        return self._encoder.encode(decoded, is_final)
2015
+
2016
+
2017
cdef shared_ptr[function[StreamWrapFunc]] make_streamwrap_func(
        src_encoding, dest_encoding) except *:
    """
    Create a function that will add a transcoding transformation to a stream.
    Data from that stream will be decoded according to ``src_encoding`` and
    then re-encoded according to ``dest_encoding``.
    The created function can be used to wrap streams.

    Parameters
    ----------
    src_encoding : str
        The codec to use when reading data.
    dest_encoding : str
        The codec to use for emitted data.
    """
    cdef:
        shared_ptr[function[StreamWrapFunc]] empty_func
        CTransformInputStreamVTable vtable

    # Reuse the _cb_transform trampoline with a Transcoder as the Python
    # transformation callable.
    vtable.transform = _cb_transform
    src_codec = codecs.lookup(src_encoding)
    dest_codec = codecs.lookup(dest_encoding)
    return MakeStreamTransformFunc(move(vtable),
                                   Transcoder(src_codec.incrementaldecoder(),
                                              dest_codec.incrementalencoder()))
2042
+
2043
+
2044
def transcoding_input_stream(stream, src_encoding, dest_encoding):
    """
    Add a transcoding transformation to the stream.
    Incoming data will be decoded according to ``src_encoding`` and
    then re-encoded according to ``dest_encoding``.

    Parameters
    ----------
    stream : NativeFile
        The stream to which the transformation should be applied.
    src_encoding : str
        The codec to use when reading data.
    dest_encoding : str
        The codec to use for emitted data.
    """
    source = codecs.lookup(src_encoding)
    target = codecs.lookup(dest_encoding)
    if source.name == target.name:
        # Avoid losing performance on no-op transcoding
        # (encoding errors won't be detected)
        return stream
    transform = Transcoder(source.incrementaldecoder(),
                           target.incrementalencoder())
    return TransformInputStream(stream, transform)
2068
+
2069
+
2070
cdef shared_ptr[CInputStream] native_transcoding_input_stream(
        shared_ptr[CInputStream] stream, src_encoding,
        dest_encoding) except *:
    """
    Like transcoding_input_stream(), but operating on a native C++
    InputStream instead of a NativeFile.

    Data from *stream* is decoded according to ``src_encoding`` and then
    re-encoded according to ``dest_encoding``.
    """
    src_codec = codecs.lookup(src_encoding)
    dest_codec = codecs.lookup(dest_encoding)
    # Compare canonical codec names so aliases (e.g. "utf8" vs "utf-8")
    # are treated as identical.
    if src_codec.name == dest_codec.name:
        # Avoid losing performance on no-op transcoding
        # (encoding errors won't be detected)
        return stream
    return TransformInputStream.make_native(
        stream, Transcoder(src_codec.incrementaldecoder(),
                           dest_codec.incrementalencoder()))
2082
+
2083
+
2084
def py_buffer(object obj):
    """
    Construct an Arrow buffer from a Python bytes-like or buffer-like object

    Parameters
    ----------
    obj : object
        the object from which the buffer should be constructed.

    Returns
    -------
    Buffer
    """
    cdef shared_ptr[CBuffer] buf
    # PyBuffer wraps the object's memory via the buffer protocol;
    # presumably zero-copy, with `obj` kept alive by the wrapper — confirm.
    buf = GetResultValue(PyBuffer.FromPyObject(obj))
    return pyarrow_wrap_buffer(buf)
2096
+
2097
+
2098
def foreign_buffer(address, size, base=None):
    """
    Construct an Arrow buffer with the given *address* and *size*.

    The buffer will be optionally backed by the Python *base* object, if given.
    The *base* object will be kept alive as long as this buffer is alive,
    including across language boundaries (for example if the buffer is
    referenced by C++ code).

    Parameters
    ----------
    address : int
        The starting address of the buffer. The address can
        refer to both device or host memory but it must be
        accessible from device after mapping it with
        `get_device_address` method.
    size : int
        The size of device buffer in bytes.
    base : {None, object}
        Object that owns the referenced memory.

    Returns
    -------
    Buffer
    """
    cdef:
        # Coerce the Python ints to C types up front so an out-of-range
        # address/size raises OverflowError before touching native code.
        uintptr_t c_addr = address
        int64_t c_size = size
        shared_ptr[CBuffer] buf

    # PyForeignBuffer keeps `base` alive for the buffer's lifetime;
    # check_status raises if construction fails.
    check_status(PyForeignBuffer.Make(<uint8_t*> c_addr, c_size,
                                      base, &buf))
    return pyarrow_wrap_buffer(buf)
2127
+
2128
+
2129
def as_buffer(object o):
    """
    Coerce *o* to an Arrow Buffer.

    Existing Buffer instances are returned unchanged; anything else is
    converted through py_buffer() (i.e. via the buffer protocol).
    """
    return o if isinstance(o, Buffer) else py_buffer(o)
2133
+
2134
+
2135
cdef shared_ptr[CBuffer] as_c_buffer(object o) except *:
    """
    Return the C++ buffer underlying *o*.

    If *o* is already a pyarrow Buffer, its wrapped C++ buffer is returned
    directly; otherwise *o* is converted through the buffer protocol.

    Raises
    ------
    ValueError
        If *o* is a Buffer wrapping a null C++ buffer.
    """
    cdef shared_ptr[CBuffer] buf
    if isinstance(o, Buffer):
        buf = (<Buffer> o).buffer
        if buf == nullptr:
            raise ValueError("got null buffer")
    else:
        buf = GetResultValue(PyBuffer.FromPyObject(o))
    return buf
2144
+
2145
+
2146
cdef NativeFile get_native_file(object source, c_bool use_memory_map):
    """
    Coerce *source* to a readable NativeFile.

    Path-like sources are opened from the filesystem (memory-mapped when
    *use_memory_map* is true); Buffers are wrapped in a BufferReader;
    objects with a ``read`` method are wrapped in a PythonFile.
    """
    try:
        source_path = _stringify_path(source)
    except TypeError:
        # Not path-like: wrap buffer or file-like objects directly.
        if isinstance(source, Buffer):
            source = BufferReader(source)
        elif not isinstance(source, NativeFile) and hasattr(source, 'read'):
            # Optimistically hope this is file-like
            source = PythonFile(source, mode='r')
    else:
        # Path-like: open from the filesystem.
        if use_memory_map:
            source = memory_map(source_path, mode='r')
        else:
            source = OSFile(source_path, mode='r')

    return source
2162
+
2163
+
2164
cdef get_reader(object source, c_bool use_memory_map,
                shared_ptr[CRandomAccessFile]* reader):
    """
    Coerce *source* to a NativeFile (see get_native_file) and store its
    native random-access-file handle in the *reader* out-parameter.
    """
    cdef NativeFile nf

    nf = get_native_file(source, use_memory_map)
    reader[0] = nf.get_random_access_file()
2170
+
2171
+
2172
cdef get_input_stream(object source, c_bool use_memory_map,
                      shared_ptr[CInputStream]* out):
    """
    Like get_reader(), but can automatically decompress, and returns
    an InputStream.
    """
    cdef:
        NativeFile nf
        Codec codec
        shared_ptr[CInputStream] input_stream

    try:
        codec = Codec.detect(source)
    except TypeError:
        # TypeError is raised both when `source` is not path-like and when
        # no known compression extension matches (Codec(None) rejects None).
        codec = None

    nf = get_native_file(source, use_memory_map)
    input_stream = nf.get_input_stream()

    # codec is None if compression can't be detected
    if codec is not None:
        # Layer on-the-fly decompression on top of the raw stream.
        input_stream = <shared_ptr[CInputStream]> GetResultValue(
            CCompressedInputStream.Make(codec.unwrap(), input_stream)
        )

    out[0] = input_stream
2198
+
2199
+
2200
cdef get_writer(object source, shared_ptr[COutputStream]* writer):
    """
    Coerce *source* to a writable NativeFile and store its native output
    stream handle in the *writer* out-parameter.

    Raises
    ------
    TypeError
        If *source* is neither path-like, file-like, nor a NativeFile.
    """
    cdef NativeFile nf

    try:
        source_path = _stringify_path(source)
    except TypeError:
        if not isinstance(source, NativeFile) and hasattr(source, 'write'):
            # Optimistically hope this is file-like
            source = PythonFile(source, mode='w')
    else:
        source = OSFile(source_path, mode='w')

    if isinstance(source, NativeFile):
        nf = source
        writer[0] = nf.get_output_stream()
    else:
        raise TypeError('Unable to write to object of type: {0}'
                        .format(type(source)))
2218
+
2219
+
2220
+ # ---------------------------------------------------------------------
2221
+
2222
+
2223
+ def _detect_compression(path):
2224
+ if isinstance(path, str):
2225
+ if path.endswith('.bz2'):
2226
+ return 'bz2'
2227
+ elif path.endswith('.gz'):
2228
+ return 'gzip'
2229
+ elif path.endswith('.lz4'):
2230
+ return 'lz4'
2231
+ elif path.endswith('.zst'):
2232
+ return 'zstd'
2233
+
2234
+
2235
cdef CCompressionType _ensure_compression(str name) except *:
    """
    Map a compression codec name (case-insensitive) to the corresponding
    C-level CCompressionType enum value.

    Raises
    ------
    ValueError
        If *name* is not a recognized codec name.
    """
    uppercase = name.upper()
    if uppercase == 'BZ2':
        return CCompressionType_BZ2
    elif uppercase == 'GZIP':
        return CCompressionType_GZIP
    elif uppercase == 'BROTLI':
        return CCompressionType_BROTLI
    elif uppercase == 'LZ4' or uppercase == 'LZ4_FRAME':
        # 'lz4' is an alias for the LZ4 frame format.
        return CCompressionType_LZ4_FRAME
    elif uppercase == 'LZ4_RAW':
        # Raw (headerless) LZ4 block format.
        return CCompressionType_LZ4
    elif uppercase == 'SNAPPY':
        return CCompressionType_SNAPPY
    elif uppercase == 'ZSTD':
        return CCompressionType_ZSTD
    else:
        raise ValueError('Invalid value for compression: {!r}'.format(name))
2253
+
2254
+
2255
cdef class CacheOptions(_Weakrefable):
    """
    Cache options for a pre-buffered fragment scan.

    Parameters
    ----------
    hole_size_limit : int, default 8KiB
        The maximum distance in bytes between two consecutive ranges; beyond
        this value, ranges are not combined.
    range_size_limit : int, default 32MiB
        The maximum size in bytes of a combined range; if combining two
        consecutive ranges would produce a range of a size greater than this,
        they are not combined
    lazy : bool, default True
        lazy = false: request all byte ranges when PreBuffer or WillNeed is called.
        lazy = True, prefetch_limit = 0: request merged byte ranges only after the reader
        needs them.
        lazy = True, prefetch_limit = k: prefetch up to k merged byte ranges ahead of the
        range that is currently being read.
    prefetch_limit : int, default 0
        The maximum number of ranges to be prefetched. This is only used for
        lazy cache to asynchronously read some ranges after reading the target
        range.
    """

    def __init__(self, *, hole_size_limit=None, range_size_limit=None, lazy=None, prefetch_limit=None):
        # Start from the C++ lazy defaults, then override only the fields
        # the caller supplied.
        self.wrapped = CCacheOptions.LazyDefaults()
        if hole_size_limit is not None:
            self.hole_size_limit = hole_size_limit
        if range_size_limit is not None:
            self.range_size_limit = range_size_limit
        if lazy is not None:
            self.lazy = lazy
        if prefetch_limit is not None:
            self.prefetch_limit = prefetch_limit

    cdef void init(self, CCacheOptions options):
        # Adopt an existing C++ CCacheOptions value.
        self.wrapped = options

    cdef inline CCacheOptions unwrap(self):
        # Return the underlying C++ options struct (by value).
        return self.wrapped

    @staticmethod
    cdef wrap(CCacheOptions options):
        # Wrap a C++ CCacheOptions value in a new Python CacheOptions.
        self = CacheOptions()
        self.init(options)
        return self

    @property
    def hole_size_limit(self):
        return self.wrapped.hole_size_limit

    @hole_size_limit.setter
    def hole_size_limit(self, hole_size_limit):
        self.wrapped.hole_size_limit = hole_size_limit

    @property
    def range_size_limit(self):
        return self.wrapped.range_size_limit

    @range_size_limit.setter
    def range_size_limit(self, range_size_limit):
        self.wrapped.range_size_limit = range_size_limit

    @property
    def lazy(self):
        return self.wrapped.lazy

    @lazy.setter
    def lazy(self, lazy):
        self.wrapped.lazy = lazy

    @property
    def prefetch_limit(self):
        return self.wrapped.prefetch_limit

    @prefetch_limit.setter
    def prefetch_limit(self, prefetch_limit):
        self.wrapped.prefetch_limit = prefetch_limit

    def __eq__(self, CacheOptions other):
        try:
            return self.unwrap().Equals(other.unwrap())
        except TypeError:
            # NOTE(review): presumably TypeError is raised when `other`
            # is not a CacheOptions, in which case compare unequal —
            # confirm against Cython's argument-coercion behavior.
            return False

    @staticmethod
    def from_network_metrics(time_to_first_byte_millis, transfer_bandwidth_mib_per_sec,
                             ideal_bandwidth_utilization_frac=0.9, max_ideal_request_size_mib=64):
        """
        Create suitable CacheOptions based on provided network metrics.

        Typically this will be used with object storage solutions like Amazon S3,
        Google Cloud Storage and Azure Blob Storage.

        Parameters
        ----------
        time_to_first_byte_millis : int
            Seek-time or Time-To-First-Byte (TTFB) in milliseconds, also called call
            setup latency of a new read request. The value is a positive integer.
        transfer_bandwidth_mib_per_sec : int
            Data transfer Bandwidth (BW) in MiB/sec (per connection). The value is a positive
            integer.
        ideal_bandwidth_utilization_frac : float, default 0.9
            Transfer bandwidth utilization fraction (per connection) to maximize the net
            data load. The value is a positive float less than 1.
        max_ideal_request_size_mib : int, default 64
            The maximum single data request size (in MiB) to maximize the net data load.

        Returns
        -------
        CacheOptions
        """
        return CacheOptions.wrap(CCacheOptions.MakeFromNetworkMetrics(
            time_to_first_byte_millis, transfer_bandwidth_mib_per_sec,
            ideal_bandwidth_utilization_frac, max_ideal_request_size_mib))

    @staticmethod
    @binding(True)  # Required for Cython < 3
    def _reconstruct(kwargs):
        # __reduce__ doesn't allow passing named arguments directly to the
        # reconstructor, hence this wrapper.
        return CacheOptions(**kwargs)

    def __reduce__(self):
        # Pickle support: serialize the four configurable fields and rebuild
        # through _reconstruct (keyword-only constructor).
        kwargs = dict(
            hole_size_limit=self.hole_size_limit,
            range_size_limit=self.range_size_limit,
            lazy=self.lazy,
            prefetch_limit=self.prefetch_limit,
        )
        return CacheOptions._reconstruct, (kwargs,)
2387
+
2388
+
2389
cdef class Codec(_Weakrefable):
    """
    Compression codec.

    Parameters
    ----------
    compression : str
        Type of compression codec to initialize, valid values are: 'gzip',
        'bz2', 'brotli', 'lz4' (or 'lz4_frame'), 'lz4_raw', 'zstd' and
        'snappy'.
    compression_level : int, None
        Optional parameter specifying how aggressively to compress. The
        possible ranges and effect of this parameter depend on the specific
        codec chosen. Higher values compress more but typically use more
        resources (CPU/RAM). Some codecs support negative values.

        gzip
            The compression_level maps to the memlevel parameter of
            deflateInit2. Higher levels use more RAM but are faster
            and should have higher compression ratios.

        bz2
            The compression level maps to the blockSize100k parameter of
            the BZ2_bzCompressInit function. Higher levels use more RAM
            but are faster and should have higher compression ratios.

        brotli
            The compression level maps to the BROTLI_PARAM_QUALITY
            parameter. Higher values are slower and should have higher
            compression ratios.

        lz4/lz4_frame/lz4_raw
            The compression level parameter is not supported and must
            be None

        zstd
            The compression level maps to the compressionLevel parameter
            of ZSTD_initCStream. Negative values are supported. Higher
            values are slower and should have higher compression ratios.

        snappy
            The compression level parameter is not supported and must
            be None


    Raises
    ------
    ValueError
        If invalid compression value is passed.

    Examples
    --------
    >>> import pyarrow as pa
    >>> pa.Codec.is_available('gzip')
    True
    >>> codec = pa.Codec('gzip')
    >>> codec.name
    'gzip'
    >>> codec.compression_level
    9
    """

    def __init__(self, str compression not None, compression_level=None):
        cdef CCompressionType typ = _ensure_compression(compression)
        if compression_level is not None:
            self.wrapped = shared_ptr[CCodec](move(GetResultValue(
                CCodec.CreateWithLevel(typ, compression_level))))
        else:
            self.wrapped = shared_ptr[CCodec](move(GetResultValue(
                CCodec.Create(typ))))

    cdef inline CCodec* unwrap(self) nogil:
        # Raw pointer to the underlying C++ codec; owned by self.wrapped.
        return self.wrapped.get()

    @staticmethod
    def detect(path):
        """
        Detect and instantiate compression codec based on file extension.

        Parameters
        ----------
        path : str, path-like
            File-path to detect compression from.

        Raises
        ------
        TypeError
            If the passed value is not path-like.
        ValueError
            If the compression can't be detected from the path.

        Returns
        -------
        Codec
        """
        # NOTE(review): when no known extension matches,
        # _detect_compression returns None and Codec(None) raises
        # TypeError (the `not None` clause), not ValueError — confirm
        # whether the Raises section above is accurate.
        return Codec(_detect_compression(_stringify_path(path)))

    @staticmethod
    def is_available(str compression not None):
        """
        Returns whether the compression support has been built and enabled.

        Parameters
        ----------
        compression : str
            Type of compression codec,
            refer to Codec docstring for a list of supported ones.

        Returns
        -------
        bool
        """
        cdef CCompressionType typ = _ensure_compression(compression)
        return CCodec.IsAvailable(typ)

    @staticmethod
    def supports_compression_level(str compression not None):
        """
        Returns true if the compression level parameter is supported
        for the given codec.

        Parameters
        ----------
        compression : str
            Type of compression codec,
            refer to Codec docstring for a list of supported ones.
        """
        cdef CCompressionType typ = _ensure_compression(compression)
        return CCodec.SupportsCompressionLevel(typ)

    @staticmethod
    def default_compression_level(str compression not None):
        """
        Returns the compression level that Arrow will use for the codec if
        None is specified.

        Parameters
        ----------
        compression : str
            Type of compression codec,
            refer to Codec docstring for a list of supported ones.
        """
        cdef CCompressionType typ = _ensure_compression(compression)
        return GetResultValue(CCodec.DefaultCompressionLevel(typ))

    @staticmethod
    def minimum_compression_level(str compression not None):
        """
        Returns the smallest valid value for the compression level

        Parameters
        ----------
        compression : str
            Type of compression codec,
            refer to Codec docstring for a list of supported ones.
        """
        cdef CCompressionType typ = _ensure_compression(compression)
        return GetResultValue(CCodec.MinimumCompressionLevel(typ))

    @staticmethod
    def maximum_compression_level(str compression not None):
        """
        Returns the largest valid value for the compression level

        Parameters
        ----------
        compression : str
            Type of compression codec,
            refer to Codec docstring for a list of supported ones.
        """
        cdef CCompressionType typ = _ensure_compression(compression)
        return GetResultValue(CCodec.MaximumCompressionLevel(typ))

    @property
    def name(self):
        """Returns the name of the codec"""
        return frombytes(self.unwrap().name())

    @property
    def compression_level(self):
        """Returns the compression level parameter of the codec"""
        # Snappy has no compression-level notion (see class docstring).
        if self.name == 'snappy':
            return None
        return self.unwrap().compression_level()

    def compress(self, object buf, asbytes=False, memory_pool=None):
        """
        Compress data from buffer-like object.

        Parameters
        ----------
        buf : pyarrow.Buffer, bytes, or other object supporting buffer protocol
        asbytes : bool, default False
            Return result as Python bytes object, otherwise Buffer
        memory_pool : MemoryPool, default None
            Memory pool to use for buffer allocations, if any

        Returns
        -------
        compressed : pyarrow.Buffer or bytes (if asbytes=True)
        """
        cdef:
            shared_ptr[CBuffer] owned_buf
            CBuffer* c_buf
            PyObject* pyobj
            ResizableBuffer out_buf
            int64_t max_output_size
            int64_t output_length
            uint8_t* output_buffer = NULL

        owned_buf = as_c_buffer(buf)
        c_buf = owned_buf.get()

        # Ask the codec for a worst-case output size, allocate that much,
        # then shrink to the actual compressed length afterwards.
        max_output_size = self.wrapped.get().MaxCompressedLen(
            c_buf.size(), c_buf.data()
        )

        if asbytes:
            # Allocate an oversized bytes object and resize it in place
            # below, avoiding an extra copy.
            pyobj = PyBytes_FromStringAndSizeNative(NULL, max_output_size)
            output_buffer = <uint8_t*> cp.PyBytes_AS_STRING(<object> pyobj)
        else:
            out_buf = allocate_buffer(
                max_output_size, memory_pool=memory_pool, resizable=True
            )
            output_buffer = out_buf.buffer.get().mutable_data()

        with nogil:
            output_length = GetResultValue(
                self.unwrap().Compress(
                    c_buf.size(),
                    c_buf.data(),
                    max_output_size,
                    output_buffer
                )
            )

        if asbytes:
            # Shrink the bytes object to the actual compressed length.
            cp._PyBytes_Resize(&pyobj, <Py_ssize_t> output_length)
            return PyObject_to_object(pyobj)
        else:
            out_buf.resize(output_length)
            return out_buf

    def decompress(self, object buf, decompressed_size=None, asbytes=False,
                   memory_pool=None):
        """
        Decompress data from buffer-like object.

        Parameters
        ----------
        buf : pyarrow.Buffer, bytes, or memoryview-compatible object
        decompressed_size : int, default None
            Size of the decompressed result
        asbytes : boolean, default False
            Return result as Python bytes object, otherwise Buffer
        memory_pool : MemoryPool, default None
            Memory pool to use for buffer allocations, if any.

        Returns
        -------
        uncompressed : pyarrow.Buffer or bytes (if asbytes=True)
        """
        cdef:
            shared_ptr[CBuffer] owned_buf
            CBuffer* c_buf
            Buffer out_buf
            int64_t output_size
            uint8_t* output_buffer = NULL

        owned_buf = as_c_buffer(buf)
        c_buf = owned_buf.get()

        # The exact output size must be supplied by the caller: the raw
        # compressed payload does not carry it.
        if decompressed_size is None:
            raise ValueError(
                "Must pass decompressed_size"
            )

        output_size = decompressed_size

        if asbytes:
            pybuf = cp.PyBytes_FromStringAndSize(NULL, output_size)
            output_buffer = <uint8_t*> cp.PyBytes_AS_STRING(pybuf)
        else:
            out_buf = allocate_buffer(output_size, memory_pool=memory_pool)
            output_buffer = out_buf.buffer.get().mutable_data()

        with nogil:
            GetResultValue(
                self.unwrap().Decompress(
                    c_buf.size(),
                    c_buf.data(),
                    output_size,
                    output_buffer
                )
            )

        return pybuf if asbytes else out_buf

    def __repr__(self):
        name = f"pyarrow.{self.__class__.__name__}"
        return (f"<{name} "
                f"name={self.name} "
                f"compression_level={self.compression_level}>")
2692
+
2693
+
2694
def compress(object buf, codec='lz4', asbytes=False, memory_pool=None):
    """
    Compress data from buffer-like object.

    Parameters
    ----------
    buf : pyarrow.Buffer, bytes, or other object supporting buffer protocol
    codec : str, default 'lz4'
        Compression codec.
        Supported types: {'brotli, 'gzip', 'lz4', 'lz4_raw', 'snappy', 'zstd'}
    asbytes : bool, default False
        Return result as Python bytes object, otherwise Buffer.
    memory_pool : MemoryPool, default None
        Memory pool to use for buffer allocations, if any.

    Returns
    -------
    compressed : pyarrow.Buffer or bytes (if asbytes=True)
    """
    # Thin convenience wrapper: instantiate the codec and delegate.
    cdef Codec compressor = Codec(codec)
    return compressor.compress(buf, asbytes=asbytes, memory_pool=memory_pool)
2715
+
2716
+
2717
def decompress(object buf, decompressed_size=None, codec='lz4',
               asbytes=False, memory_pool=None):
    """
    Decompress data from buffer-like object.

    Parameters
    ----------
    buf : pyarrow.Buffer, bytes, or memoryview-compatible object
        Input object to decompress data from.
    decompressed_size : int, default None
        Size of the decompressed result
    codec : str, default 'lz4'
        Compression codec.
        Supported types: {'brotli, 'gzip', 'lz4', 'lz4_raw', 'snappy', 'zstd'}
    asbytes : bool, default False
        Return result as Python bytes object, otherwise Buffer.
    memory_pool : MemoryPool, default None
        Memory pool to use for buffer allocations, if any.

    Returns
    -------
    uncompressed : pyarrow.Buffer or bytes (if asbytes=True)
    """
    # Thin convenience wrapper: instantiate the codec and delegate.
    cdef Codec decompressor = Codec(codec)
    return decompressor.decompress(buf,
                                   decompressed_size=decompressed_size,
                                   asbytes=asbytes,
                                   memory_pool=memory_pool)
2743
+
2744
+
2745
def input_stream(source, compression='detect', buffer_size=None):
    """
    Create an Arrow input stream.

    Parameters
    ----------
    source : str, Path, buffer, or file-like object
        The source to open for reading.
    compression : str optional, default 'detect'
        The compression algorithm to use for on-the-fly decompression.
        If "detect" and source is a file path, then compression will be
        chosen based on the file extension.
        If None, no compression will be applied.
        Otherwise, a well-known algorithm name must be supplied (e.g. "gzip").
    buffer_size : int, default None
        If None or 0, no buffering will happen. Otherwise the size of the
        temporary read buffer.

    Examples
    --------
    Create a readable BufferReader (NativeFile) from a Buffer or a memoryview object:

    >>> import pyarrow as pa
    >>> buf = memoryview(b"some data")
    >>> with pa.input_stream(buf) as stream:
    ...     stream.read(4)
    ...
    b'some'

    Create a readable OSFile (NativeFile) from a string or file path:

    >>> import gzip
    >>> with gzip.open('example.gz', 'wb') as f:
    ...     f.write(b'some data')
    ...
    9
    >>> with pa.input_stream('example.gz') as stream:
    ...     stream.read()
    ...
    b'some data'

    Create a readable PythonFile (NativeFile) from a a Python file object:

    >>> with open('example.txt', mode='w') as f:
    ...     f.write('some text')
    ...
    9
    >>> with pa.input_stream('example.txt') as stream:
    ...     stream.read(6)
    ...
    b'some t'
    """
    cdef NativeFile stream

    try:
        source_path = _stringify_path(source)
    except TypeError:
        source_path = None

    # Coerce `source` to a NativeFile, dispatching on its kind.
    if isinstance(source, NativeFile):
        stream = source
    elif source_path is not None:
        stream = OSFile(source_path, 'r')
    elif isinstance(source, (Buffer, memoryview)):
        stream = BufferReader(as_buffer(source))
    elif all(hasattr(source, attr) for attr in ('read', 'close', 'closed')):
        stream = PythonFile(source, 'r')
    else:
        raise TypeError("pa.input_stream() called with instance of '{}'"
                        .format(source.__class__))

    if compression == 'detect':
        # detect for OSFile too
        compression = _detect_compression(source_path)

    if buffer_size is not None and buffer_size != 0:
        stream = BufferedInputStream(stream, buffer_size)

    if compression is not None:
        stream = CompressedInputStream(stream, compression)

    return stream
2829
+
2830
+
2831
def output_stream(source, compression='detect', buffer_size=None):
    """
    Create an Arrow output stream.

    Parameters
    ----------
    source : str, Path, buffer, file-like object
        The source to open for writing.
    compression : str optional, default 'detect'
        The compression algorithm to use for on-the-fly compression.
        If "detect" and source is a file path, then compression will be
        chosen based on the file extension.
        If None, no compression will be applied.
        Otherwise, a well-known algorithm name must be supplied (e.g. "gzip").
    buffer_size : int, default None
        If None or 0, no buffering will happen. Otherwise the size of the
        temporary write buffer.

    Examples
    --------
    Create a writable NativeFile from a pyarrow Buffer:

    >>> import pyarrow as pa
    >>> data = b"buffer data"
    >>> empty_obj = bytearray(11)
    >>> buf = pa.py_buffer(empty_obj)
    >>> with pa.output_stream(buf) as stream:
    ...     stream.write(data)
    ...
    11
    >>> with pa.input_stream(buf) as stream:
    ...     stream.read(6)
    ...
    b'buffer'

    or from a memoryview object:

    >>> buf = memoryview(empty_obj)
    >>> with pa.output_stream(buf) as stream:
    ...     stream.write(data)
    ...
    11
    >>> with pa.input_stream(buf) as stream:
    ...     stream.read()
    ...
    b'buffer data'

    Create a writable NativeFile from a string or file path:

    >>> with pa.output_stream('example_second.txt') as stream:
    ...     stream.write(b'Write some data')
    ...
    15
    >>> with pa.input_stream('example_second.txt') as stream:
    ...     stream.read()
    ...
    b'Write some data'
    """
    cdef NativeFile stream

    try:
        source_path = _stringify_path(source)
    except TypeError:
        source_path = None

    # Coerce `source` to a writable NativeFile, dispatching on its kind.
    if isinstance(source, NativeFile):
        stream = source
    elif source_path is not None:
        stream = OSFile(source_path, 'w')
    elif isinstance(source, (Buffer, memoryview)):
        stream = FixedSizeBufferWriter(as_buffer(source))
    elif all(hasattr(source, attr) for attr in ('write', 'close', 'closed')):
        stream = PythonFile(source, 'w')
    else:
        raise TypeError("pa.output_stream() called with instance of '{}'"
                        .format(source.__class__))

    if compression == 'detect':
        compression = _detect_compression(source_path)

    if buffer_size is not None and buffer_size != 0:
        stream = BufferedOutputStream(stream, buffer_size)

    if compression is not None:
        stream = CompressedOutputStream(stream, compression)

    return stream