ZTWHHH commited on
Commit
9f1d4cd
·
verified ·
1 Parent(s): ab24758

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. llava_next/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/channels.cpython-310.pyc +3 -0
  3. llava_next/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverDn.h +0 -0
  4. llava_next/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverRf.h +339 -0
  5. llava_next/lib/python3.10/site-packages/nvidia/cusolver/include/cusolver_common.h +266 -0
  6. llava_next/lib/python3.10/site-packages/pandas/_libs/sparse.cpython-310-x86_64-linux-gnu.so +3 -0
  7. llava_next/lib/python3.10/site-packages/pandas/_libs/tslib.cpython-310-x86_64-linux-gnu.so +3 -0
  8. llava_next/lib/python3.10/site-packages/pandas/_libs/tslibs/base.cpython-310-x86_64-linux-gnu.so +0 -0
  9. llava_next/lib/python3.10/site-packages/pandas/_libs/tslibs/nattype.pyi +141 -0
  10. llava_next/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.pyi +287 -0
  11. llava_next/lib/python3.10/site-packages/pandas/_libs/tslibs/strptime.pyi +14 -0
  12. llava_next/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.pyi +174 -0
  13. llava_next/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.pyi +241 -0
  14. llava_next/lib/python3.10/site-packages/pandas/core/apply.py +2062 -0
  15. llava_next/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py +90 -0
  16. llava_next/lib/python3.10/site-packages/pandas/core/array_algos/replace.py +152 -0
  17. llava_next/lib/python3.10/site-packages/pandas/core/arraylike.py +530 -0
  18. llava_next/lib/python3.10/site-packages/pandas/core/arrays/__init__.py +43 -0
  19. llava_next/lib/python3.10/site-packages/pandas/core/arrays/_arrow_string_mixins.py +84 -0
  20. llava_next/lib/python3.10/site-packages/pandas/core/arrays/_mixins.py +547 -0
  21. llava_next/lib/python3.10/site-packages/pandas/core/arrays/_ranges.py +207 -0
  22. llava_next/lib/python3.10/site-packages/pandas/core/arrays/_utils.py +63 -0
  23. llava_next/lib/python3.10/site-packages/pandas/core/arrays/datetimelike.py +2556 -0
  24. llava_next/lib/python3.10/site-packages/pandas/core/arrays/integer.py +272 -0
  25. llava_next/lib/python3.10/site-packages/pandas/core/arrays/masked.py +1650 -0
  26. llava_next/lib/python3.10/site-packages/pandas/core/arrays/numeric.py +286 -0
  27. llava_next/lib/python3.10/site-packages/pandas/core/arrays/period.py +1313 -0
  28. llava_next/lib/python3.10/site-packages/pandas/core/config_init.py +924 -0
  29. llava_next/lib/python3.10/site-packages/pandas/core/construction.py +824 -0
  30. llava_next/lib/python3.10/site-packages/pandas/core/flags.py +117 -0
  31. llava_next/lib/python3.10/site-packages/pandas/core/frame.py +0 -0
  32. llava_next/lib/python3.10/site-packages/pandas/core/roperator.py +62 -0
  33. llava_next/lib/python3.10/site-packages/pandas/core/series.py +0 -0
  34. parrot/lib/python3.10/site-packages/transformers/models/canine/__init__.py +67 -0
  35. parrot/lib/python3.10/site-packages/transformers/models/canine/__pycache__/configuration_canine.cpython-310.pyc +0 -0
  36. parrot/lib/python3.10/site-packages/transformers/models/canine/__pycache__/tokenization_canine.cpython-310.pyc +0 -0
  37. parrot/lib/python3.10/site-packages/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py +66 -0
  38. parrot/lib/python3.10/site-packages/transformers/models/canine/modeling_canine.py +1642 -0
  39. parrot/lib/python3.10/site-packages/transformers/models/canine/tokenization_canine.py +241 -0
  40. parrot/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/modeling_clipseg.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/processing_clipseg.cpython-310.pyc +0 -0
  42. parrot/lib/python3.10/site-packages/transformers/models/clipseg/processing_clipseg.py +161 -0
  43. parrot/lib/python3.10/site-packages/transformers/models/codegen/__init__.py +71 -0
  44. parrot/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/__init__.cpython-310.pyc +0 -0
  45. parrot/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/configuration_codegen.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/modeling_codegen.cpython-310.pyc +0 -0
  47. parrot/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen_fast.cpython-310.pyc +0 -0
  48. parrot/lib/python3.10/site-packages/transformers/models/codegen/configuration_codegen.py +226 -0
  49. parrot/lib/python3.10/site-packages/transformers/models/codegen/modeling_codegen.py +724 -0
  50. parrot/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen.py +417 -0
.gitattributes CHANGED
@@ -319,3 +319,6 @@ llava_next/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 filter
319
  llava_next/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text
320
  parrot/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs diff=lfs merge=lfs -text
321
  parrot/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
319
  llava_next/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text
320
  parrot/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs diff=lfs merge=lfs -text
321
  parrot/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
322
+ llava_next/lib/python3.10/site-packages/pandas/_libs/tslib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
323
+ llava_next/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/channels.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
324
+ llava_next/lib/python3.10/site-packages/pandas/_libs/sparse.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
llava_next/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/channels.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ea855103e6064cfe436f33d30a1231403155565b5485d4d2199d6cdbd426950
3
+ size 1002304
llava_next/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverDn.h ADDED
The diff for this file is too large to render. See raw diff
 
llava_next/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverRf.h ADDED
@@ -0,0 +1,339 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(CUSOLVERRF_H_)
51
+ #define CUSOLVERRF_H_
52
+
53
+ #include "driver_types.h"
54
+ #include "cuComplex.h"
55
+ #include "cusolver_common.h"
56
+
57
+ #if defined(__cplusplus)
58
+ extern "C" {
59
+ #endif /* __cplusplus */
60
+
61
+ /* CUSOLVERRF mode */
62
+ typedef enum {
63
+ CUSOLVERRF_RESET_VALUES_FAST_MODE_OFF = 0, // default
64
+ CUSOLVERRF_RESET_VALUES_FAST_MODE_ON = 1
65
+ } cusolverRfResetValuesFastMode_t;
66
+
67
+ /* CUSOLVERRF matrix format */
68
+ typedef enum {
69
+ CUSOLVERRF_MATRIX_FORMAT_CSR = 0, // default
70
+ CUSOLVERRF_MATRIX_FORMAT_CSC = 1
71
+ } cusolverRfMatrixFormat_t;
72
+
73
+ /* CUSOLVERRF unit diagonal */
74
+ typedef enum {
75
+ CUSOLVERRF_UNIT_DIAGONAL_STORED_L = 0, // default
76
+ CUSOLVERRF_UNIT_DIAGONAL_STORED_U = 1,
77
+ CUSOLVERRF_UNIT_DIAGONAL_ASSUMED_L = 2,
78
+ CUSOLVERRF_UNIT_DIAGONAL_ASSUMED_U = 3
79
+ } cusolverRfUnitDiagonal_t;
80
+
81
+ /* CUSOLVERRF factorization algorithm */
82
+ typedef enum {
83
+ CUSOLVERRF_FACTORIZATION_ALG0 = 0, // default
84
+ CUSOLVERRF_FACTORIZATION_ALG1 = 1,
85
+ CUSOLVERRF_FACTORIZATION_ALG2 = 2,
86
+ } cusolverRfFactorization_t;
87
+
88
+ /* CUSOLVERRF triangular solve algorithm */
89
+ typedef enum {
90
+ CUSOLVERRF_TRIANGULAR_SOLVE_ALG1 = 1, // default
91
+ CUSOLVERRF_TRIANGULAR_SOLVE_ALG2 = 2,
92
+ CUSOLVERRF_TRIANGULAR_SOLVE_ALG3 = 3
93
+ } cusolverRfTriangularSolve_t;
94
+
95
+ /* CUSOLVERRF numeric boost report */
96
+ typedef enum {
97
+ CUSOLVERRF_NUMERIC_BOOST_NOT_USED = 0, // default
98
+ CUSOLVERRF_NUMERIC_BOOST_USED = 1
99
+ } cusolverRfNumericBoostReport_t;
100
+
101
+ /* Opaque structure holding CUSOLVERRF library common */
102
+ struct cusolverRfCommon;
103
+ typedef struct cusolverRfCommon* cusolverRfHandle_t;
104
+
105
+ /* CUSOLVERRF create (allocate memory) and destroy (free memory) in the handle
106
+ */
107
+ cusolverStatus_t CUSOLVERAPI cusolverRfCreate(cusolverRfHandle_t* handle);
108
+ cusolverStatus_t CUSOLVERAPI cusolverRfDestroy(cusolverRfHandle_t handle);
109
+
110
+ /* CUSOLVERRF set and get input format */
111
+ cusolverStatus_t CUSOLVERAPI cusolverRfGetMatrixFormat(
112
+ cusolverRfHandle_t handle,
113
+ cusolverRfMatrixFormat_t* format,
114
+ cusolverRfUnitDiagonal_t* diag);
115
+
116
+ cusolverStatus_t CUSOLVERAPI cusolverRfSetMatrixFormat(
117
+ cusolverRfHandle_t handle,
118
+ cusolverRfMatrixFormat_t format,
119
+ cusolverRfUnitDiagonal_t diag);
120
+
121
+ /* CUSOLVERRF set and get numeric properties */
122
+ cusolverStatus_t CUSOLVERAPI cusolverRfSetNumericProperties(
123
+ cusolverRfHandle_t handle,
124
+ double zero,
125
+ double boost);
126
+
127
+ cusolverStatus_t CUSOLVERAPI cusolverRfGetNumericProperties(
128
+ cusolverRfHandle_t handle,
129
+ double* zero,
130
+ double* boost);
131
+
132
+ cusolverStatus_t CUSOLVERAPI cusolverRfGetNumericBoostReport(
133
+ cusolverRfHandle_t handle,
134
+ cusolverRfNumericBoostReport_t* report);
135
+
136
+ /* CUSOLVERRF choose the triangular solve algorithm */
137
+ cusolverStatus_t CUSOLVERAPI cusolverRfSetAlgs(
138
+ cusolverRfHandle_t handle,
139
+ cusolverRfFactorization_t factAlg,
140
+ cusolverRfTriangularSolve_t solveAlg);
141
+
142
+ cusolverStatus_t CUSOLVERAPI cusolverRfGetAlgs(
143
+ cusolverRfHandle_t handle,
144
+ cusolverRfFactorization_t* factAlg,
145
+ cusolverRfTriangularSolve_t* solveAlg);
146
+
147
+ /* CUSOLVERRF set and get fast mode */
148
+ cusolverStatus_t CUSOLVERAPI cusolverRfGetResetValuesFastMode(
149
+ cusolverRfHandle_t handle,
150
+ cusolverRfResetValuesFastMode_t* fastMode);
151
+
152
+ cusolverStatus_t CUSOLVERAPI cusolverRfSetResetValuesFastMode(
153
+ cusolverRfHandle_t handle,
154
+ cusolverRfResetValuesFastMode_t fastMode);
155
+
156
+ /*** Non-Batched Routines ***/
157
+ /* CUSOLVERRF setup of internal structures from host or device memory */
158
+ cusolverStatus_t CUSOLVERAPI
159
+ cusolverRfSetupHost(/* Input (in the host memory) */
160
+ int n,
161
+ int nnzA,
162
+ int* h_csrRowPtrA,
163
+ int* h_csrColIndA,
164
+ double* h_csrValA,
165
+ int nnzL,
166
+ int* h_csrRowPtrL,
167
+ int* h_csrColIndL,
168
+ double* h_csrValL,
169
+ int nnzU,
170
+ int* h_csrRowPtrU,
171
+ int* h_csrColIndU,
172
+ double* h_csrValU,
173
+ int* h_P,
174
+ int* h_Q,
175
+ /* Output */
176
+ cusolverRfHandle_t handle);
177
+
178
+ cusolverStatus_t CUSOLVERAPI
179
+ cusolverRfSetupDevice(/* Input (in the device memory) */
180
+ int n,
181
+ int nnzA,
182
+ int* csrRowPtrA,
183
+ int* csrColIndA,
184
+ double* csrValA,
185
+ int nnzL,
186
+ int* csrRowPtrL,
187
+ int* csrColIndL,
188
+ double* csrValL,
189
+ int nnzU,
190
+ int* csrRowPtrU,
191
+ int* csrColIndU,
192
+ double* csrValU,
193
+ int* P,
194
+ int* Q,
195
+ /* Output */
196
+ cusolverRfHandle_t handle);
197
+
198
+ /* CUSOLVERRF update the matrix values (assuming the reordering, pivoting
199
+ and consequently the sparsity pattern of L and U did not change),
200
+ and zero out the remaining values. */
201
+ cusolverStatus_t CUSOLVERAPI
202
+ cusolverRfResetValues(/* Input (in the device memory) */
203
+ int n,
204
+ int nnzA,
205
+ int* csrRowPtrA,
206
+ int* csrColIndA,
207
+ double* csrValA,
208
+ int* P,
209
+ int* Q,
210
+ /* Output */
211
+ cusolverRfHandle_t handle);
212
+
213
+ /* CUSOLVERRF analysis (for parallelism) */
214
+ cusolverStatus_t CUSOLVERAPI cusolverRfAnalyze(cusolverRfHandle_t handle);
215
+
216
+ /* CUSOLVERRF re-factorization (for parallelism) */
217
+ cusolverStatus_t CUSOLVERAPI cusolverRfRefactor(cusolverRfHandle_t handle);
218
+
219
+ /* CUSOLVERRF extraction: Get L & U packed into a single matrix M */
220
+ cusolverStatus_t CUSOLVERAPI
221
+ cusolverRfAccessBundledFactorsDevice(/* Input */
222
+ cusolverRfHandle_t handle,
223
+ /* Output (in the host memory) */
224
+ int* nnzM,
225
+ /* Output (in the device memory) */
226
+ int** Mp,
227
+ int** Mi,
228
+ double** Mx);
229
+
230
+ cusolverStatus_t CUSOLVERAPI
231
+ cusolverRfExtractBundledFactorsHost(/* Input */
232
+ cusolverRfHandle_t handle,
233
+ /* Output (in the host memory) */
234
+ int* h_nnzM,
235
+ int** h_Mp,
236
+ int** h_Mi,
237
+ double** h_Mx);
238
+
239
+ /* CUSOLVERRF extraction: Get L & U individually */
240
+ cusolverStatus_t CUSOLVERAPI
241
+ cusolverRfExtractSplitFactorsHost(/* Input */
242
+ cusolverRfHandle_t handle,
243
+ /* Output (in the host memory) */
244
+ int* h_nnzL,
245
+ int** h_csrRowPtrL,
246
+ int** h_csrColIndL,
247
+ double** h_csrValL,
248
+ int* h_nnzU,
249
+ int** h_csrRowPtrU,
250
+ int** h_csrColIndU,
251
+ double** h_csrValU);
252
+
253
+ /* CUSOLVERRF (forward and backward triangular) solves */
254
+ cusolverStatus_t CUSOLVERAPI
255
+ cusolverRfSolve(/* Input (in the device memory) */
256
+ cusolverRfHandle_t handle,
257
+ int* P,
258
+ int* Q,
259
+ int nrhs, // only nrhs=1 is supported
260
+ double* Temp, // of size ldt*nrhs (ldt>=n)
261
+ int ldt,
262
+ /* Input/Output (in the device memory) */
263
+ double* XF,
264
+ /* Input */
265
+ int ldxf);
266
+
267
+ /*** Batched Routines ***/
268
+ /* CUSOLVERRF-batch setup of internal structures from host */
269
+ cusolverStatus_t CUSOLVERAPI
270
+ cusolverRfBatchSetupHost(/* Input (in the host memory)*/
271
+ int batchSize,
272
+ int n,
273
+ int nnzA,
274
+ int* h_csrRowPtrA,
275
+ int* h_csrColIndA,
276
+ double* h_csrValA_array[],
277
+ int nnzL,
278
+ int* h_csrRowPtrL,
279
+ int* h_csrColIndL,
280
+ double* h_csrValL,
281
+ int nnzU,
282
+ int* h_csrRowPtrU,
283
+ int* h_csrColIndU,
284
+ double* h_csrValU,
285
+ int* h_P,
286
+ int* h_Q,
287
+ /* Output (in the device memory) */
288
+ cusolverRfHandle_t handle);
289
+
290
+ /* CUSOLVERRF-batch update the matrix values (assuming the reordering,
291
+ pivoting and consequently the sparsity pattern of L and U did not change),
292
+ and zero out the remaining values. */
293
+ cusolverStatus_t CUSOLVERAPI
294
+ cusolverRfBatchResetValues(/* Input (in the device memory) */
295
+ int batchSize,
296
+ int n,
297
+ int nnzA,
298
+ int* csrRowPtrA,
299
+ int* csrColIndA,
300
+ double* csrValA_array[],
301
+ int* P,
302
+ int* Q,
303
+ /* Output */
304
+ cusolverRfHandle_t handle);
305
+
306
+ /* CUSOLVERRF-batch analysis (for parallelism) */
307
+ cusolverStatus_t CUSOLVERAPI
308
+ cusolverRfBatchAnalyze(cusolverRfHandle_t handle);
309
+
310
+ /* CUSOLVERRF-batch re-factorization (for parallelism) */
311
+ cusolverStatus_t CUSOLVERAPI
312
+ cusolverRfBatchRefactor(cusolverRfHandle_t handle);
313
+
314
+ /* CUSOLVERRF-batch (forward and backward triangular) solves */
315
+ cusolverStatus_t CUSOLVERAPI
316
+ cusolverRfBatchSolve(/* Input (in the device memory) */
317
+ cusolverRfHandle_t handle,
318
+ int* P,
319
+ int* Q,
320
+ int nrhs, // only nrhs=1 is supported
321
+ double* Temp, // of size 2*batchSize*(n*nrhs)
322
+ int ldt, // only ldt=n is supported
323
+ /* Input/Output (in the device memory) */
324
+ double* XF_array[],
325
+ /* Input */
326
+ int ldxf);
327
+
328
+ /* CUSOLVERRF-batch obtain the position of zero pivot */
329
+ cusolverStatus_t CUSOLVERAPI
330
+ cusolverRfBatchZeroPivot(/* Input */
331
+ cusolverRfHandle_t handle,
332
+ /* Output (in the host memory) */
333
+ int* position);
334
+
335
+ #if defined(__cplusplus)
336
+ }
337
+ #endif /* __cplusplus */
338
+
339
+ #endif /* CUSOLVERRF_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cusolver/include/cusolver_common.h ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(CUSOLVER_COMMON_H_)
51
+ #define CUSOLVER_COMMON_H_
52
+
53
+ #include "library_types.h"
54
+
55
+ #ifndef CUSOLVERAPI
56
+ #ifdef _WIN32
57
+ #define CUSOLVERAPI __stdcall
58
+ #else
59
+ #define CUSOLVERAPI
60
+ #endif
61
+ #endif
62
+
63
+ #if defined(_MSC_VER)
64
+ typedef __int64 int64_t;
65
+ #else
66
+ #include <inttypes.h>
67
+ #endif
68
+
69
+ typedef int cusolver_int_t;
70
+
71
+ #define CUSOLVER_VER_MAJOR 11
72
+ #define CUSOLVER_VER_MINOR 4
73
+ #define CUSOLVER_VER_PATCH 5
74
+ #define CUSOLVER_VER_BUILD 107
75
+ #define CUSOLVER_VERSION \
76
+ (CUSOLVER_VER_MAJOR * 1000 + CUSOLVER_VER_MINOR * 100 + CUSOLVER_VER_PATCH)
77
+
78
+ /*
79
+ * disable this macro to proceed old API
80
+ */
81
+ #define DISABLE_CUSOLVER_DEPRECATED
82
+
83
+ //------------------------------------------------------------------------------
84
+
85
+ #if !defined(_MSC_VER)
86
+ #define CUSOLVER_CPP_VERSION __cplusplus
87
+ #elif _MSC_FULL_VER >= 190024210 // Visual Studio 2015 Update 3
88
+ #define CUSOLVER_CPP_VERSION _MSVC_LANG
89
+ #else
90
+ #define CUSOLVER_CPP_VERSION 0
91
+ #endif
92
+
93
+ //------------------------------------------------------------------------------
94
+
95
+ #if !defined(DISABLE_CUSOLVER_DEPRECATED)
96
+
97
+ #if CUSOLVER_CPP_VERSION >= 201402L
98
+
99
+ #define CUSOLVER_DEPRECATED(new_func) \
100
+ [[deprecated("please use " #new_func " instead")]]
101
+
102
+ #elif defined(_MSC_VER)
103
+
104
+ #define CUSOLVER_DEPRECATED(new_func) \
105
+ __declspec(deprecated("please use " #new_func " instead"))
106
+
107
+ #elif defined(__INTEL_COMPILER) || defined(__clang__) || \
108
+ (defined(__GNUC__) && \
109
+ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)))
110
+
111
+ #define CUSOLVER_DEPRECATED(new_func) \
112
+ __attribute__((deprecated("please use " #new_func " instead")))
113
+
114
+ #elif defined(__GNUC__) || defined(__xlc__)
115
+
116
+ #define CUSOLVER_DEPRECATED(new_func) __attribute__((deprecated))
117
+
118
+ #else
119
+
120
+ #define CUSOLVER_DEPRECATED(new_func)
121
+
122
+ #endif // defined(__cplusplus) && __cplusplus >= 201402L
123
+ //------------------------------------------------------------------------------
124
+
125
+ #if CUSOLVER_CPP_VERSION >= 201703L
126
+
127
+ #define CUSOLVER_DEPRECATED_ENUM(new_enum) \
128
+ [[deprecated("please use " #new_enum " instead")]]
129
+
130
+ #elif defined(__clang__) || \
131
+ (defined(__GNUC__) && __GNUC__ >= 6 && !defined(__PGI))
132
+
133
+ #define CUSOLVER_DEPRECATED_ENUM(new_enum) \
134
+ __attribute__((deprecated("please use " #new_enum " instead")))
135
+
136
+ #else
137
+
138
+ #define CUSOLVER_DEPRECATED_ENUM(new_enum)
139
+
140
+ #endif // defined(__cplusplus) && __cplusplus >= 201402L
141
+
142
+ #else // defined(DISABLE_CUSOLVER_DEPRECATED)
143
+
144
+ #define CUSOLVER_DEPRECATED(new_func)
145
+ #define CUSOLVER_DEPRECATED_ENUM(new_enum)
146
+
147
+ #endif // !defined(DISABLE_CUSOLVER_DEPRECATED)
148
+
149
+ #undef CUSOLVER_CPP_VERSION
150
+
151
+ #if defined(__cplusplus)
152
+ extern "C" {
153
+ #endif /* __cplusplus */
154
+
155
+ typedef enum {
156
+ CUSOLVER_STATUS_SUCCESS = 0,
157
+ CUSOLVER_STATUS_NOT_INITIALIZED = 1,
158
+ CUSOLVER_STATUS_ALLOC_FAILED = 2,
159
+ CUSOLVER_STATUS_INVALID_VALUE = 3,
160
+ CUSOLVER_STATUS_ARCH_MISMATCH = 4,
161
+ CUSOLVER_STATUS_MAPPING_ERROR = 5,
162
+ CUSOLVER_STATUS_EXECUTION_FAILED = 6,
163
+ CUSOLVER_STATUS_INTERNAL_ERROR = 7,
164
+ CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED = 8,
165
+ CUSOLVER_STATUS_NOT_SUPPORTED = 9,
166
+ CUSOLVER_STATUS_ZERO_PIVOT = 10,
167
+ CUSOLVER_STATUS_INVALID_LICENSE = 11,
168
+ CUSOLVER_STATUS_IRS_PARAMS_NOT_INITIALIZED = 12,
169
+ CUSOLVER_STATUS_IRS_PARAMS_INVALID = 13,
170
+ CUSOLVER_STATUS_IRS_PARAMS_INVALID_PREC = 14,
171
+ CUSOLVER_STATUS_IRS_PARAMS_INVALID_REFINE = 15,
172
+ CUSOLVER_STATUS_IRS_PARAMS_INVALID_MAXITER = 16,
173
+ CUSOLVER_STATUS_IRS_INTERNAL_ERROR = 20,
174
+ CUSOLVER_STATUS_IRS_NOT_SUPPORTED = 21,
175
+ CUSOLVER_STATUS_IRS_OUT_OF_RANGE = 22,
176
+ CUSOLVER_STATUS_IRS_NRHS_NOT_SUPPORTED_FOR_REFINE_GMRES = 23,
177
+ CUSOLVER_STATUS_IRS_INFOS_NOT_INITIALIZED = 25,
178
+ CUSOLVER_STATUS_IRS_INFOS_NOT_DESTROYED = 26,
179
+ CUSOLVER_STATUS_IRS_MATRIX_SINGULAR = 30,
180
+ CUSOLVER_STATUS_INVALID_WORKSPACE = 31
181
+ } cusolverStatus_t;
182
+
183
+ typedef enum {
184
+ CUSOLVER_EIG_TYPE_1 = 1,
185
+ CUSOLVER_EIG_TYPE_2 = 2,
186
+ CUSOLVER_EIG_TYPE_3 = 3
187
+ } cusolverEigType_t;
188
+
189
+ typedef enum {
190
+ CUSOLVER_EIG_MODE_NOVECTOR = 0,
191
+ CUSOLVER_EIG_MODE_VECTOR = 1
192
+ } cusolverEigMode_t;
193
+
194
+ typedef enum {
195
+ CUSOLVER_EIG_RANGE_ALL = 1001,
196
+ CUSOLVER_EIG_RANGE_I = 1002,
197
+ CUSOLVER_EIG_RANGE_V = 1003,
198
+ } cusolverEigRange_t;
199
+
200
+ typedef enum {
201
+ CUSOLVER_INF_NORM = 104,
202
+ CUSOLVER_MAX_NORM = 105,
203
+ CUSOLVER_ONE_NORM = 106,
204
+ CUSOLVER_FRO_NORM = 107,
205
+ } cusolverNorm_t;
206
+
207
+ typedef enum {
208
+ CUSOLVER_IRS_REFINE_NOT_SET = 1100,
209
+ CUSOLVER_IRS_REFINE_NONE = 1101,
210
+ CUSOLVER_IRS_REFINE_CLASSICAL = 1102,
211
+ CUSOLVER_IRS_REFINE_CLASSICAL_GMRES = 1103,
212
+ CUSOLVER_IRS_REFINE_GMRES = 1104,
213
+ CUSOLVER_IRS_REFINE_GMRES_GMRES = 1105,
214
+ CUSOLVER_IRS_REFINE_GMRES_NOPCOND = 1106,
215
+
216
+ CUSOLVER_PREC_DD = 1150,
217
+ CUSOLVER_PREC_SS = 1151,
218
+ CUSOLVER_PREC_SHT = 1152,
219
+
220
+ } cusolverIRSRefinement_t;
221
+
222
+ typedef enum {
223
+ CUSOLVER_R_8I = 1201,
224
+ CUSOLVER_R_8U = 1202,
225
+ CUSOLVER_R_64F = 1203,
226
+ CUSOLVER_R_32F = 1204,
227
+ CUSOLVER_R_16F = 1205,
228
+ CUSOLVER_R_16BF = 1206,
229
+ CUSOLVER_R_TF32 = 1207,
230
+ CUSOLVER_R_AP = 1208,
231
+ CUSOLVER_C_8I = 1211,
232
+ CUSOLVER_C_8U = 1212,
233
+ CUSOLVER_C_64F = 1213,
234
+ CUSOLVER_C_32F = 1214,
235
+ CUSOLVER_C_16F = 1215,
236
+ CUSOLVER_C_16BF = 1216,
237
+ CUSOLVER_C_TF32 = 1217,
238
+ CUSOLVER_C_AP = 1218,
239
+ } cusolverPrecType_t;
240
+
241
+ typedef enum {
242
+ CUSOLVER_ALG_0 = 0, /* default algorithm */
243
+ CUSOLVER_ALG_1 = 1,
244
+ CUSOLVER_ALG_2 = 2
245
+ } cusolverAlgMode_t;
246
+
247
+ typedef enum {
248
+ CUBLAS_STOREV_COLUMNWISE = 0,
249
+ CUBLAS_STOREV_ROWWISE = 1
250
+ } cusolverStorevMode_t;
251
+
252
+ typedef enum {
253
+ CUBLAS_DIRECT_FORWARD = 0,
254
+ CUBLAS_DIRECT_BACKWARD = 1
255
+ } cusolverDirectMode_t;
256
+
257
+ cusolverStatus_t CUSOLVERAPI
258
+ cusolverGetProperty(libraryPropertyType type, int *value);
259
+
260
+ cusolverStatus_t CUSOLVERAPI cusolverGetVersion(int *version);
261
+
262
+ #if defined(__cplusplus)
263
+ }
264
+ #endif /* __cplusplus */
265
+
266
+ #endif // CUSOLVER_COMMON_H_
llava_next/lib/python3.10/site-packages/pandas/_libs/sparse.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9d4b81d1da2d284b1f61370c3e87b54de10f249a3debb14f7ad7bbf2c100523
3
+ size 988968
llava_next/lib/python3.10/site-packages/pandas/_libs/tslib.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f28ec212e94a75d6066e383be4243e1317de1339e61ad445b8a79f0aed216136
3
+ size 340264
llava_next/lib/python3.10/site-packages/pandas/_libs/tslibs/base.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (62.3 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/_libs/tslibs/nattype.pyi ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import (
2
+ datetime,
3
+ timedelta,
4
+ tzinfo as _tzinfo,
5
+ )
6
+ import typing
7
+
8
+ import numpy as np
9
+
10
+ from pandas._libs.tslibs.period import Period
11
+ from pandas._typing import Self
12
+
13
+ NaT: NaTType
14
+ iNaT: int
15
+ nat_strings: set[str]
16
+
17
+ _NaTComparisonTypes: typing.TypeAlias = (
18
+ datetime | timedelta | Period | np.datetime64 | np.timedelta64
19
+ )
20
+
21
+ class _NatComparison:
22
+ def __call__(self, other: _NaTComparisonTypes) -> bool: ...
23
+
24
+ class NaTType:
25
+ _value: np.int64
26
+ @property
27
+ def value(self) -> int: ...
28
+ @property
29
+ def asm8(self) -> np.datetime64: ...
30
+ def to_datetime64(self) -> np.datetime64: ...
31
+ def to_numpy(
32
+ self, dtype: np.dtype | str | None = ..., copy: bool = ...
33
+ ) -> np.datetime64 | np.timedelta64: ...
34
+ @property
35
+ def is_leap_year(self) -> bool: ...
36
+ @property
37
+ def is_month_start(self) -> bool: ...
38
+ @property
39
+ def is_quarter_start(self) -> bool: ...
40
+ @property
41
+ def is_year_start(self) -> bool: ...
42
+ @property
43
+ def is_month_end(self) -> bool: ...
44
+ @property
45
+ def is_quarter_end(self) -> bool: ...
46
+ @property
47
+ def is_year_end(self) -> bool: ...
48
+ @property
49
+ def day_of_year(self) -> float: ...
50
+ @property
51
+ def dayofyear(self) -> float: ...
52
+ @property
53
+ def days_in_month(self) -> float: ...
54
+ @property
55
+ def daysinmonth(self) -> float: ...
56
+ @property
57
+ def day_of_week(self) -> float: ...
58
+ @property
59
+ def dayofweek(self) -> float: ...
60
+ @property
61
+ def week(self) -> float: ...
62
+ @property
63
+ def weekofyear(self) -> float: ...
64
+ def day_name(self) -> float: ...
65
+ def month_name(self) -> float: ...
66
+ def weekday(self) -> float: ...
67
+ def isoweekday(self) -> float: ...
68
+ def total_seconds(self) -> float: ...
69
+ def today(self, *args, **kwargs) -> NaTType: ...
70
+ def now(self, *args, **kwargs) -> NaTType: ...
71
+ def to_pydatetime(self) -> NaTType: ...
72
+ def date(self) -> NaTType: ...
73
+ def round(self) -> NaTType: ...
74
+ def floor(self) -> NaTType: ...
75
+ def ceil(self) -> NaTType: ...
76
+ @property
77
+ def tzinfo(self) -> None: ...
78
+ @property
79
+ def tz(self) -> None: ...
80
+ def tz_convert(self, tz: _tzinfo | str | None) -> NaTType: ...
81
+ def tz_localize(
82
+ self,
83
+ tz: _tzinfo | str | None,
84
+ ambiguous: str = ...,
85
+ nonexistent: str = ...,
86
+ ) -> NaTType: ...
87
+ def replace(
88
+ self,
89
+ year: int | None = ...,
90
+ month: int | None = ...,
91
+ day: int | None = ...,
92
+ hour: int | None = ...,
93
+ minute: int | None = ...,
94
+ second: int | None = ...,
95
+ microsecond: int | None = ...,
96
+ nanosecond: int | None = ...,
97
+ tzinfo: _tzinfo | None = ...,
98
+ fold: int | None = ...,
99
+ ) -> NaTType: ...
100
+ @property
101
+ def year(self) -> float: ...
102
+ @property
103
+ def quarter(self) -> float: ...
104
+ @property
105
+ def month(self) -> float: ...
106
+ @property
107
+ def day(self) -> float: ...
108
+ @property
109
+ def hour(self) -> float: ...
110
+ @property
111
+ def minute(self) -> float: ...
112
+ @property
113
+ def second(self) -> float: ...
114
+ @property
115
+ def millisecond(self) -> float: ...
116
+ @property
117
+ def microsecond(self) -> float: ...
118
+ @property
119
+ def nanosecond(self) -> float: ...
120
+ # inject Timedelta properties
121
+ @property
122
+ def days(self) -> float: ...
123
+ @property
124
+ def microseconds(self) -> float: ...
125
+ @property
126
+ def nanoseconds(self) -> float: ...
127
+ # inject Period properties
128
+ @property
129
+ def qyear(self) -> float: ...
130
+ def __eq__(self, other: object) -> bool: ...
131
+ def __ne__(self, other: object) -> bool: ...
132
+ __lt__: _NatComparison
133
+ __le__: _NatComparison
134
+ __gt__: _NatComparison
135
+ __ge__: _NatComparison
136
+ def __sub__(self, other: Self | timedelta | datetime) -> Self: ...
137
+ def __rsub__(self, other: Self | timedelta | datetime) -> Self: ...
138
+ def __add__(self, other: Self | timedelta | datetime) -> Self: ...
139
+ def __radd__(self, other: Self | timedelta | datetime) -> Self: ...
140
+ def __hash__(self) -> int: ...
141
+ def as_unit(self, unit: str, round_ok: bool = ...) -> NaTType: ...
llava_next/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.pyi ADDED
@@ -0,0 +1,287 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import (
2
+ datetime,
3
+ time,
4
+ timedelta,
5
+ )
6
+ from typing import (
7
+ Any,
8
+ Collection,
9
+ Literal,
10
+ TypeVar,
11
+ overload,
12
+ )
13
+
14
+ import numpy as np
15
+
16
+ from pandas._libs.tslibs.nattype import NaTType
17
+ from pandas._typing import (
18
+ OffsetCalendar,
19
+ Self,
20
+ npt,
21
+ )
22
+
23
+ from .timedeltas import Timedelta
24
+
25
+ _BaseOffsetT = TypeVar("_BaseOffsetT", bound=BaseOffset)
26
+ _DatetimeT = TypeVar("_DatetimeT", bound=datetime)
27
+ _TimedeltaT = TypeVar("_TimedeltaT", bound=timedelta)
28
+
29
+ _relativedelta_kwds: set[str]
30
+ prefix_mapping: dict[str, type]
31
+
32
+ class ApplyTypeError(TypeError): ...
33
+
34
+ class BaseOffset:
35
+ n: int
36
+ normalize: bool
37
+ def __init__(self, n: int = ..., normalize: bool = ...) -> None: ...
38
+ def __eq__(self, other) -> bool: ...
39
+ def __ne__(self, other) -> bool: ...
40
+ def __hash__(self) -> int: ...
41
+ @property
42
+ def kwds(self) -> dict: ...
43
+ @property
44
+ def base(self) -> BaseOffset: ...
45
+ @overload
46
+ def __add__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
47
+ @overload
48
+ def __add__(self, other: BaseOffset) -> Self: ...
49
+ @overload
50
+ def __add__(self, other: _DatetimeT) -> _DatetimeT: ...
51
+ @overload
52
+ def __add__(self, other: _TimedeltaT) -> _TimedeltaT: ...
53
+ @overload
54
+ def __radd__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
55
+ @overload
56
+ def __radd__(self, other: BaseOffset) -> Self: ...
57
+ @overload
58
+ def __radd__(self, other: _DatetimeT) -> _DatetimeT: ...
59
+ @overload
60
+ def __radd__(self, other: _TimedeltaT) -> _TimedeltaT: ...
61
+ @overload
62
+ def __radd__(self, other: NaTType) -> NaTType: ...
63
+ def __sub__(self, other: BaseOffset) -> Self: ...
64
+ @overload
65
+ def __rsub__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
66
+ @overload
67
+ def __rsub__(self, other: BaseOffset): ...
68
+ @overload
69
+ def __rsub__(self, other: _DatetimeT) -> _DatetimeT: ...
70
+ @overload
71
+ def __rsub__(self, other: _TimedeltaT) -> _TimedeltaT: ...
72
+ @overload
73
+ def __mul__(self, other: np.ndarray) -> np.ndarray: ...
74
+ @overload
75
+ def __mul__(self, other: int): ...
76
+ @overload
77
+ def __rmul__(self, other: np.ndarray) -> np.ndarray: ...
78
+ @overload
79
+ def __rmul__(self, other: int) -> Self: ...
80
+ def __neg__(self) -> Self: ...
81
+ def copy(self) -> Self: ...
82
+ @property
83
+ def name(self) -> str: ...
84
+ @property
85
+ def rule_code(self) -> str: ...
86
+ @property
87
+ def freqstr(self) -> str: ...
88
+ def _apply(self, other): ...
89
+ def _apply_array(self, dtarr: np.ndarray) -> np.ndarray: ...
90
+ def rollback(self, dt: datetime) -> datetime: ...
91
+ def rollforward(self, dt: datetime) -> datetime: ...
92
+ def is_on_offset(self, dt: datetime) -> bool: ...
93
+ def __setstate__(self, state) -> None: ...
94
+ def __getstate__(self): ...
95
+ @property
96
+ def nanos(self) -> int: ...
97
+ def is_anchored(self) -> bool: ...
98
+
99
+ def _get_offset(name: str) -> BaseOffset: ...
100
+
101
+ class SingleConstructorOffset(BaseOffset):
102
+ @classmethod
103
+ def _from_name(cls, suffix: None = ...): ...
104
+ def __reduce__(self): ...
105
+
106
+ @overload
107
+ def to_offset(freq: None, is_period: bool = ...) -> None: ...
108
+ @overload
109
+ def to_offset(freq: _BaseOffsetT, is_period: bool = ...) -> _BaseOffsetT: ...
110
+ @overload
111
+ def to_offset(freq: timedelta | str, is_period: bool = ...) -> BaseOffset: ...
112
+
113
+ class Tick(SingleConstructorOffset):
114
+ _creso: int
115
+ _prefix: str
116
+ def __init__(self, n: int = ..., normalize: bool = ...) -> None: ...
117
+ @property
118
+ def delta(self) -> Timedelta: ...
119
+ @property
120
+ def nanos(self) -> int: ...
121
+
122
+ def delta_to_tick(delta: timedelta) -> Tick: ...
123
+
124
+ class Day(Tick): ...
125
+ class Hour(Tick): ...
126
+ class Minute(Tick): ...
127
+ class Second(Tick): ...
128
+ class Milli(Tick): ...
129
+ class Micro(Tick): ...
130
+ class Nano(Tick): ...
131
+
132
+ class RelativeDeltaOffset(BaseOffset):
133
+ def __init__(self, n: int = ..., normalize: bool = ..., **kwds: Any) -> None: ...
134
+
135
+ class BusinessMixin(SingleConstructorOffset):
136
+ def __init__(
137
+ self, n: int = ..., normalize: bool = ..., offset: timedelta = ...
138
+ ) -> None: ...
139
+
140
+ class BusinessDay(BusinessMixin): ...
141
+
142
+ class BusinessHour(BusinessMixin):
143
+ def __init__(
144
+ self,
145
+ n: int = ...,
146
+ normalize: bool = ...,
147
+ start: str | time | Collection[str | time] = ...,
148
+ end: str | time | Collection[str | time] = ...,
149
+ offset: timedelta = ...,
150
+ ) -> None: ...
151
+
152
+ class WeekOfMonthMixin(SingleConstructorOffset):
153
+ def __init__(
154
+ self, n: int = ..., normalize: bool = ..., weekday: int = ...
155
+ ) -> None: ...
156
+
157
+ class YearOffset(SingleConstructorOffset):
158
+ def __init__(
159
+ self, n: int = ..., normalize: bool = ..., month: int | None = ...
160
+ ) -> None: ...
161
+
162
+ class BYearEnd(YearOffset): ...
163
+ class BYearBegin(YearOffset): ...
164
+ class YearEnd(YearOffset): ...
165
+ class YearBegin(YearOffset): ...
166
+
167
+ class QuarterOffset(SingleConstructorOffset):
168
+ def __init__(
169
+ self, n: int = ..., normalize: bool = ..., startingMonth: int | None = ...
170
+ ) -> None: ...
171
+
172
+ class BQuarterEnd(QuarterOffset): ...
173
+ class BQuarterBegin(QuarterOffset): ...
174
+ class QuarterEnd(QuarterOffset): ...
175
+ class QuarterBegin(QuarterOffset): ...
176
+ class MonthOffset(SingleConstructorOffset): ...
177
+ class MonthEnd(MonthOffset): ...
178
+ class MonthBegin(MonthOffset): ...
179
+ class BusinessMonthEnd(MonthOffset): ...
180
+ class BusinessMonthBegin(MonthOffset): ...
181
+
182
+ class SemiMonthOffset(SingleConstructorOffset):
183
+ def __init__(
184
+ self, n: int = ..., normalize: bool = ..., day_of_month: int | None = ...
185
+ ) -> None: ...
186
+
187
+ class SemiMonthEnd(SemiMonthOffset): ...
188
+ class SemiMonthBegin(SemiMonthOffset): ...
189
+
190
+ class Week(SingleConstructorOffset):
191
+ def __init__(
192
+ self, n: int = ..., normalize: bool = ..., weekday: int | None = ...
193
+ ) -> None: ...
194
+
195
+ class WeekOfMonth(WeekOfMonthMixin):
196
+ def __init__(
197
+ self, n: int = ..., normalize: bool = ..., week: int = ..., weekday: int = ...
198
+ ) -> None: ...
199
+
200
+ class LastWeekOfMonth(WeekOfMonthMixin): ...
201
+
202
+ class FY5253Mixin(SingleConstructorOffset):
203
+ def __init__(
204
+ self,
205
+ n: int = ...,
206
+ normalize: bool = ...,
207
+ weekday: int = ...,
208
+ startingMonth: int = ...,
209
+ variation: Literal["nearest", "last"] = ...,
210
+ ) -> None: ...
211
+
212
+ class FY5253(FY5253Mixin): ...
213
+
214
+ class FY5253Quarter(FY5253Mixin):
215
+ def __init__(
216
+ self,
217
+ n: int = ...,
218
+ normalize: bool = ...,
219
+ weekday: int = ...,
220
+ startingMonth: int = ...,
221
+ qtr_with_extra_week: int = ...,
222
+ variation: Literal["nearest", "last"] = ...,
223
+ ) -> None: ...
224
+
225
+ class Easter(SingleConstructorOffset): ...
226
+
227
+ class _CustomBusinessMonth(BusinessMixin):
228
+ def __init__(
229
+ self,
230
+ n: int = ...,
231
+ normalize: bool = ...,
232
+ weekmask: str = ...,
233
+ holidays: list | None = ...,
234
+ calendar: OffsetCalendar | None = ...,
235
+ offset: timedelta = ...,
236
+ ) -> None: ...
237
+
238
+ class CustomBusinessDay(BusinessDay):
239
+ def __init__(
240
+ self,
241
+ n: int = ...,
242
+ normalize: bool = ...,
243
+ weekmask: str = ...,
244
+ holidays: list | None = ...,
245
+ calendar: OffsetCalendar | None = ...,
246
+ offset: timedelta = ...,
247
+ ) -> None: ...
248
+
249
+ class CustomBusinessHour(BusinessHour):
250
+ def __init__(
251
+ self,
252
+ n: int = ...,
253
+ normalize: bool = ...,
254
+ weekmask: str = ...,
255
+ holidays: list | None = ...,
256
+ calendar: OffsetCalendar | None = ...,
257
+ start: str | time | Collection[str | time] = ...,
258
+ end: str | time | Collection[str | time] = ...,
259
+ offset: timedelta = ...,
260
+ ) -> None: ...
261
+
262
+ class CustomBusinessMonthEnd(_CustomBusinessMonth): ...
263
+ class CustomBusinessMonthBegin(_CustomBusinessMonth): ...
264
+ class OffsetMeta(type): ...
265
+ class DateOffset(RelativeDeltaOffset, metaclass=OffsetMeta): ...
266
+
267
+ BDay = BusinessDay
268
+ BMonthEnd = BusinessMonthEnd
269
+ BMonthBegin = BusinessMonthBegin
270
+ CBMonthEnd = CustomBusinessMonthEnd
271
+ CBMonthBegin = CustomBusinessMonthBegin
272
+ CDay = CustomBusinessDay
273
+
274
+ def roll_qtrday(
275
+ other: datetime, n: int, month: int, day_opt: str, modby: int
276
+ ) -> int: ...
277
+
278
+ INVALID_FREQ_ERR_MSG: Literal["Invalid frequency: {0}"]
279
+
280
+ def shift_months(
281
+ dtindex: npt.NDArray[np.int64],
282
+ months: int,
283
+ day_opt: str | None = ...,
284
+ reso: int = ...,
285
+ ) -> npt.NDArray[np.int64]: ...
286
+
287
+ _offset_map: dict[str, BaseOffset]
llava_next/lib/python3.10/site-packages/pandas/_libs/tslibs/strptime.pyi ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from pandas._typing import npt
4
+
5
+ def array_strptime(
6
+ values: npt.NDArray[np.object_],
7
+ fmt: str | None,
8
+ exact: bool = ...,
9
+ errors: str = ...,
10
+ utc: bool = ...,
11
+ creso: int = ..., # NPY_DATETIMEUNIT
12
+ ) -> tuple[np.ndarray, np.ndarray]: ...
13
+
14
+ # first ndarray is M8[ns], second is object ndarray of tzinfo | None
llava_next/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.pyi ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import timedelta
2
+ from typing import (
3
+ ClassVar,
4
+ Literal,
5
+ TypeAlias,
6
+ TypeVar,
7
+ overload,
8
+ )
9
+
10
+ import numpy as np
11
+
12
+ from pandas._libs.tslibs import (
13
+ NaTType,
14
+ Tick,
15
+ )
16
+ from pandas._typing import (
17
+ Frequency,
18
+ Self,
19
+ npt,
20
+ )
21
+
22
+ # This should be kept consistent with the keys in the dict timedelta_abbrevs
23
+ # in pandas/_libs/tslibs/timedeltas.pyx
24
+ UnitChoices: TypeAlias = Literal[
25
+ "Y",
26
+ "y",
27
+ "M",
28
+ "W",
29
+ "w",
30
+ "D",
31
+ "d",
32
+ "days",
33
+ "day",
34
+ "hours",
35
+ "hour",
36
+ "hr",
37
+ "h",
38
+ "m",
39
+ "minute",
40
+ "min",
41
+ "minutes",
42
+ "T",
43
+ "t",
44
+ "s",
45
+ "seconds",
46
+ "sec",
47
+ "second",
48
+ "ms",
49
+ "milliseconds",
50
+ "millisecond",
51
+ "milli",
52
+ "millis",
53
+ "L",
54
+ "l",
55
+ "us",
56
+ "microseconds",
57
+ "microsecond",
58
+ "µs",
59
+ "micro",
60
+ "micros",
61
+ "u",
62
+ "ns",
63
+ "nanoseconds",
64
+ "nano",
65
+ "nanos",
66
+ "nanosecond",
67
+ "n",
68
+ ]
69
+ _S = TypeVar("_S", bound=timedelta)
70
+
71
+ def get_unit_for_round(freq, creso: int) -> int: ...
72
+ def disallow_ambiguous_unit(unit: str | None) -> None: ...
73
+ def ints_to_pytimedelta(
74
+ m8values: npt.NDArray[np.timedelta64],
75
+ box: bool = ...,
76
+ ) -> npt.NDArray[np.object_]: ...
77
+ def array_to_timedelta64(
78
+ values: npt.NDArray[np.object_],
79
+ unit: str | None = ...,
80
+ errors: str = ...,
81
+ ) -> np.ndarray: ... # np.ndarray[m8ns]
82
+ def parse_timedelta_unit(unit: str | None) -> UnitChoices: ...
83
+ def delta_to_nanoseconds(
84
+ delta: np.timedelta64 | timedelta | Tick,
85
+ reso: int = ..., # NPY_DATETIMEUNIT
86
+ round_ok: bool = ...,
87
+ ) -> int: ...
88
+ def floordiv_object_array(
89
+ left: np.ndarray, right: npt.NDArray[np.object_]
90
+ ) -> np.ndarray: ...
91
+ def truediv_object_array(
92
+ left: np.ndarray, right: npt.NDArray[np.object_]
93
+ ) -> np.ndarray: ...
94
+
95
+ class Timedelta(timedelta):
96
+ _creso: int
97
+ min: ClassVar[Timedelta]
98
+ max: ClassVar[Timedelta]
99
+ resolution: ClassVar[Timedelta]
100
+ value: int # np.int64
101
+ _value: int # np.int64
102
+ # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]")
103
+ def __new__( # type: ignore[misc]
104
+ cls: type[_S],
105
+ value=...,
106
+ unit: str | None = ...,
107
+ **kwargs: float | np.integer | np.floating,
108
+ ) -> _S | NaTType: ...
109
+ @classmethod
110
+ def _from_value_and_reso(cls, value: np.int64, reso: int) -> Timedelta: ...
111
+ @property
112
+ def days(self) -> int: ...
113
+ @property
114
+ def seconds(self) -> int: ...
115
+ @property
116
+ def microseconds(self) -> int: ...
117
+ def total_seconds(self) -> float: ...
118
+ def to_pytimedelta(self) -> timedelta: ...
119
+ def to_timedelta64(self) -> np.timedelta64: ...
120
+ @property
121
+ def asm8(self) -> np.timedelta64: ...
122
+ # TODO: round/floor/ceil could return NaT?
123
+ def round(self, freq: Frequency) -> Self: ...
124
+ def floor(self, freq: Frequency) -> Self: ...
125
+ def ceil(self, freq: Frequency) -> Self: ...
126
+ @property
127
+ def resolution_string(self) -> str: ...
128
+ def __add__(self, other: timedelta) -> Timedelta: ...
129
+ def __radd__(self, other: timedelta) -> Timedelta: ...
130
+ def __sub__(self, other: timedelta) -> Timedelta: ...
131
+ def __rsub__(self, other: timedelta) -> Timedelta: ...
132
+ def __neg__(self) -> Timedelta: ...
133
+ def __pos__(self) -> Timedelta: ...
134
+ def __abs__(self) -> Timedelta: ...
135
+ def __mul__(self, other: float) -> Timedelta: ...
136
+ def __rmul__(self, other: float) -> Timedelta: ...
137
+ # error: Signature of "__floordiv__" incompatible with supertype "timedelta"
138
+ @overload # type: ignore[override]
139
+ def __floordiv__(self, other: timedelta) -> int: ...
140
+ @overload
141
+ def __floordiv__(self, other: float) -> Timedelta: ...
142
+ @overload
143
+ def __floordiv__(
144
+ self, other: npt.NDArray[np.timedelta64]
145
+ ) -> npt.NDArray[np.intp]: ...
146
+ @overload
147
+ def __floordiv__(
148
+ self, other: npt.NDArray[np.number]
149
+ ) -> npt.NDArray[np.timedelta64] | Timedelta: ...
150
+ @overload
151
+ def __rfloordiv__(self, other: timedelta | str) -> int: ...
152
+ @overload
153
+ def __rfloordiv__(self, other: None | NaTType) -> NaTType: ...
154
+ @overload
155
+ def __rfloordiv__(self, other: np.ndarray) -> npt.NDArray[np.timedelta64]: ...
156
+ @overload
157
+ def __truediv__(self, other: timedelta) -> float: ...
158
+ @overload
159
+ def __truediv__(self, other: float) -> Timedelta: ...
160
+ def __mod__(self, other: timedelta) -> Timedelta: ...
161
+ def __divmod__(self, other: timedelta) -> tuple[int, Timedelta]: ...
162
+ def __le__(self, other: timedelta) -> bool: ...
163
+ def __lt__(self, other: timedelta) -> bool: ...
164
+ def __ge__(self, other: timedelta) -> bool: ...
165
+ def __gt__(self, other: timedelta) -> bool: ...
166
+ def __hash__(self) -> int: ...
167
+ def isoformat(self) -> str: ...
168
+ def to_numpy(
169
+ self, dtype: npt.DTypeLike = ..., copy: bool = False
170
+ ) -> np.timedelta64: ...
171
+ def view(self, dtype: npt.DTypeLike) -> object: ...
172
+ @property
173
+ def unit(self) -> str: ...
174
+ def as_unit(self, unit: str, round_ok: bool = ...) -> Timedelta: ...
llava_next/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.pyi ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import (
2
+ date as _date,
3
+ datetime,
4
+ time as _time,
5
+ timedelta,
6
+ tzinfo as _tzinfo,
7
+ )
8
+ from time import struct_time
9
+ from typing import (
10
+ ClassVar,
11
+ Literal,
12
+ TypeAlias,
13
+ overload,
14
+ )
15
+
16
+ import numpy as np
17
+
18
+ from pandas._libs.tslibs import (
19
+ BaseOffset,
20
+ NaTType,
21
+ Period,
22
+ Tick,
23
+ Timedelta,
24
+ )
25
+ from pandas._typing import (
26
+ Self,
27
+ TimestampNonexistent,
28
+ )
29
+
30
+ _TimeZones: TypeAlias = str | _tzinfo | None | int
31
+
32
+ def integer_op_not_supported(obj: object) -> TypeError: ...
33
+
34
+ class Timestamp(datetime):
35
+ _creso: int
36
+ min: ClassVar[Timestamp]
37
+ max: ClassVar[Timestamp]
38
+
39
+ resolution: ClassVar[Timedelta]
40
+ _value: int # np.int64
41
+ # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]")
42
+ def __new__( # type: ignore[misc]
43
+ cls: type[Self],
44
+ ts_input: np.integer | float | str | _date | datetime | np.datetime64 = ...,
45
+ year: int | None = ...,
46
+ month: int | None = ...,
47
+ day: int | None = ...,
48
+ hour: int | None = ...,
49
+ minute: int | None = ...,
50
+ second: int | None = ...,
51
+ microsecond: int | None = ...,
52
+ tzinfo: _tzinfo | None = ...,
53
+ *,
54
+ nanosecond: int | None = ...,
55
+ tz: _TimeZones = ...,
56
+ unit: str | int | None = ...,
57
+ fold: int | None = ...,
58
+ ) -> Self | NaTType: ...
59
+ @classmethod
60
+ def _from_value_and_reso(
61
+ cls, value: int, reso: int, tz: _TimeZones
62
+ ) -> Timestamp: ...
63
+ @property
64
+ def value(self) -> int: ... # np.int64
65
+ @property
66
+ def year(self) -> int: ...
67
+ @property
68
+ def month(self) -> int: ...
69
+ @property
70
+ def day(self) -> int: ...
71
+ @property
72
+ def hour(self) -> int: ...
73
+ @property
74
+ def minute(self) -> int: ...
75
+ @property
76
+ def second(self) -> int: ...
77
+ @property
78
+ def microsecond(self) -> int: ...
79
+ @property
80
+ def nanosecond(self) -> int: ...
81
+ @property
82
+ def tzinfo(self) -> _tzinfo | None: ...
83
+ @property
84
+ def tz(self) -> _tzinfo | None: ...
85
+ @property
86
+ def fold(self) -> int: ...
87
+ @classmethod
88
+ def fromtimestamp(cls, ts: float, tz: _TimeZones = ...) -> Self: ...
89
+ @classmethod
90
+ def utcfromtimestamp(cls, ts: float) -> Self: ...
91
+ @classmethod
92
+ def today(cls, tz: _TimeZones = ...) -> Self: ...
93
+ @classmethod
94
+ def fromordinal(
95
+ cls,
96
+ ordinal: int,
97
+ tz: _TimeZones = ...,
98
+ ) -> Self: ...
99
+ @classmethod
100
+ def now(cls, tz: _TimeZones = ...) -> Self: ...
101
+ @classmethod
102
+ def utcnow(cls) -> Self: ...
103
+ # error: Signature of "combine" incompatible with supertype "datetime"
104
+ @classmethod
105
+ def combine( # type: ignore[override]
106
+ cls, date: _date, time: _time
107
+ ) -> datetime: ...
108
+ @classmethod
109
+ def fromisoformat(cls, date_string: str) -> Self: ...
110
+ def strftime(self, format: str) -> str: ...
111
+ def __format__(self, fmt: str) -> str: ...
112
+ def toordinal(self) -> int: ...
113
+ def timetuple(self) -> struct_time: ...
114
+ def timestamp(self) -> float: ...
115
+ def utctimetuple(self) -> struct_time: ...
116
+ def date(self) -> _date: ...
117
+ def time(self) -> _time: ...
118
+ def timetz(self) -> _time: ...
119
+ # LSP violation: nanosecond is not present in datetime.datetime.replace
120
+ # and has positional args following it
121
+ def replace( # type: ignore[override]
122
+ self,
123
+ year: int | None = ...,
124
+ month: int | None = ...,
125
+ day: int | None = ...,
126
+ hour: int | None = ...,
127
+ minute: int | None = ...,
128
+ second: int | None = ...,
129
+ microsecond: int | None = ...,
130
+ nanosecond: int | None = ...,
131
+ tzinfo: _tzinfo | type[object] | None = ...,
132
+ fold: int | None = ...,
133
+ ) -> Self: ...
134
+ # LSP violation: datetime.datetime.astimezone has a default value for tz
135
+ def astimezone(self, tz: _TimeZones) -> Self: ... # type: ignore[override]
136
+ def ctime(self) -> str: ...
137
+ def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ...
138
+ @classmethod
139
+ def strptime(
140
+ # Note: strptime is actually disabled and raises NotImplementedError
141
+ cls,
142
+ date_string: str,
143
+ format: str,
144
+ ) -> Self: ...
145
+ def utcoffset(self) -> timedelta | None: ...
146
+ def tzname(self) -> str | None: ...
147
+ def dst(self) -> timedelta | None: ...
148
+ def __le__(self, other: datetime) -> bool: ... # type: ignore[override]
149
+ def __lt__(self, other: datetime) -> bool: ... # type: ignore[override]
150
+ def __ge__(self, other: datetime) -> bool: ... # type: ignore[override]
151
+ def __gt__(self, other: datetime) -> bool: ... # type: ignore[override]
152
+ # error: Signature of "__add__" incompatible with supertype "date"/"datetime"
153
+ @overload # type: ignore[override]
154
+ def __add__(self, other: np.ndarray) -> np.ndarray: ...
155
+ @overload
156
+ def __add__(self, other: timedelta | np.timedelta64 | Tick) -> Self: ...
157
+ def __radd__(self, other: timedelta) -> Self: ...
158
+ @overload # type: ignore[override]
159
+ def __sub__(self, other: datetime) -> Timedelta: ...
160
+ @overload
161
+ def __sub__(self, other: timedelta | np.timedelta64 | Tick) -> Self: ...
162
+ def __hash__(self) -> int: ...
163
+ def weekday(self) -> int: ...
164
+ def isoweekday(self) -> int: ...
165
+ # Return type "Tuple[int, int, int]" of "isocalendar" incompatible with return
166
+ # type "_IsoCalendarDate" in supertype "date"
167
+ def isocalendar(self) -> tuple[int, int, int]: ... # type: ignore[override]
168
+ @property
169
+ def is_leap_year(self) -> bool: ...
170
+ @property
171
+ def is_month_start(self) -> bool: ...
172
+ @property
173
+ def is_quarter_start(self) -> bool: ...
174
+ @property
175
+ def is_year_start(self) -> bool: ...
176
+ @property
177
+ def is_month_end(self) -> bool: ...
178
+ @property
179
+ def is_quarter_end(self) -> bool: ...
180
+ @property
181
+ def is_year_end(self) -> bool: ...
182
+ def to_pydatetime(self, warn: bool = ...) -> datetime: ...
183
+ def to_datetime64(self) -> np.datetime64: ...
184
+ def to_period(self, freq: BaseOffset | str | None = None) -> Period: ...
185
+ def to_julian_date(self) -> np.float64: ...
186
+ @property
187
+ def asm8(self) -> np.datetime64: ...
188
+ def tz_convert(self, tz: _TimeZones) -> Self: ...
189
+ # TODO: could return NaT?
190
+ def tz_localize(
191
+ self,
192
+ tz: _TimeZones,
193
+ ambiguous: bool | Literal["raise", "NaT"] = ...,
194
+ nonexistent: TimestampNonexistent = ...,
195
+ ) -> Self: ...
196
+ def normalize(self) -> Self: ...
197
+ # TODO: round/floor/ceil could return NaT?
198
+ def round(
199
+ self,
200
+ freq: str,
201
+ ambiguous: bool | Literal["raise", "NaT"] = ...,
202
+ nonexistent: TimestampNonexistent = ...,
203
+ ) -> Self: ...
204
+ def floor(
205
+ self,
206
+ freq: str,
207
+ ambiguous: bool | Literal["raise", "NaT"] = ...,
208
+ nonexistent: TimestampNonexistent = ...,
209
+ ) -> Self: ...
210
+ def ceil(
211
+ self,
212
+ freq: str,
213
+ ambiguous: bool | Literal["raise", "NaT"] = ...,
214
+ nonexistent: TimestampNonexistent = ...,
215
+ ) -> Self: ...
216
+ def day_name(self, locale: str | None = ...) -> str: ...
217
+ def month_name(self, locale: str | None = ...) -> str: ...
218
+ @property
219
+ def day_of_week(self) -> int: ...
220
+ @property
221
+ def dayofweek(self) -> int: ...
222
+ @property
223
+ def day_of_year(self) -> int: ...
224
+ @property
225
+ def dayofyear(self) -> int: ...
226
+ @property
227
+ def quarter(self) -> int: ...
228
+ @property
229
+ def week(self) -> int: ...
230
+ def to_numpy(
231
+ self, dtype: np.dtype | None = ..., copy: bool = ...
232
+ ) -> np.datetime64: ...
233
+ @property
234
+ def _date_repr(self) -> str: ...
235
+ @property
236
+ def days_in_month(self) -> int: ...
237
+ @property
238
+ def daysinmonth(self) -> int: ...
239
+ @property
240
+ def unit(self) -> str: ...
241
+ def as_unit(self, unit: str, round_ok: bool = ...) -> Timestamp: ...
llava_next/lib/python3.10/site-packages/pandas/core/apply.py ADDED
@@ -0,0 +1,2062 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import abc
4
+ from collections import defaultdict
5
+ import functools
6
+ from functools import partial
7
+ import inspect
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ Any,
11
+ Callable,
12
+ Literal,
13
+ cast,
14
+ )
15
+ import warnings
16
+
17
+ import numpy as np
18
+
19
+ from pandas._config import option_context
20
+
21
+ from pandas._libs import lib
22
+ from pandas._libs.internals import BlockValuesRefs
23
+ from pandas._typing import (
24
+ AggFuncType,
25
+ AggFuncTypeBase,
26
+ AggFuncTypeDict,
27
+ AggObjType,
28
+ Axis,
29
+ AxisInt,
30
+ NDFrameT,
31
+ npt,
32
+ )
33
+ from pandas.compat._optional import import_optional_dependency
34
+ from pandas.errors import SpecificationError
35
+ from pandas.util._decorators import cache_readonly
36
+ from pandas.util._exceptions import find_stack_level
37
+
38
+ from pandas.core.dtypes.cast import is_nested_object
39
+ from pandas.core.dtypes.common import (
40
+ is_dict_like,
41
+ is_extension_array_dtype,
42
+ is_list_like,
43
+ is_numeric_dtype,
44
+ is_sequence,
45
+ )
46
+ from pandas.core.dtypes.dtypes import (
47
+ CategoricalDtype,
48
+ ExtensionDtype,
49
+ )
50
+ from pandas.core.dtypes.generic import (
51
+ ABCDataFrame,
52
+ ABCNDFrame,
53
+ ABCSeries,
54
+ )
55
+
56
+ from pandas.core._numba.executor import generate_apply_looper
57
+ import pandas.core.common as com
58
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
59
+
60
+ if TYPE_CHECKING:
61
+ from collections.abc import (
62
+ Generator,
63
+ Hashable,
64
+ Iterable,
65
+ MutableMapping,
66
+ Sequence,
67
+ )
68
+
69
+ from pandas import (
70
+ DataFrame,
71
+ Index,
72
+ Series,
73
+ )
74
+ from pandas.core.groupby import GroupBy
75
+ from pandas.core.resample import Resampler
76
+ from pandas.core.window.rolling import BaseWindow
77
+
78
+
79
+ ResType = dict[int, Any]
80
+
81
+
82
+ def frame_apply(
83
+ obj: DataFrame,
84
+ func: AggFuncType,
85
+ axis: Axis = 0,
86
+ raw: bool = False,
87
+ result_type: str | None = None,
88
+ by_row: Literal[False, "compat"] = "compat",
89
+ engine: str = "python",
90
+ engine_kwargs: dict[str, bool] | None = None,
91
+ args=None,
92
+ kwargs=None,
93
+ ) -> FrameApply:
94
+ """construct and return a row or column based frame apply object"""
95
+ axis = obj._get_axis_number(axis)
96
+ klass: type[FrameApply]
97
+ if axis == 0:
98
+ klass = FrameRowApply
99
+ elif axis == 1:
100
+ klass = FrameColumnApply
101
+
102
+ _, func, _, _ = reconstruct_func(func, **kwargs)
103
+ assert func is not None
104
+
105
+ return klass(
106
+ obj,
107
+ func,
108
+ raw=raw,
109
+ result_type=result_type,
110
+ by_row=by_row,
111
+ engine=engine,
112
+ engine_kwargs=engine_kwargs,
113
+ args=args,
114
+ kwargs=kwargs,
115
+ )
116
+
117
+
118
class Apply(metaclass=abc.ABCMeta):
    # Concrete subclasses define the axis the operation runs along.
    axis: AxisInt

    def __init__(
        self,
        obj: AggObjType,
        func: AggFuncType,
        raw: bool,
        result_type: str | None,
        *,
        by_row: Literal[False, "compat", "_compat"] = "compat",
        engine: str = "python",
        engine_kwargs: dict[str, bool] | None = None,
        args,
        kwargs,
    ) -> None:
        """
        Base class for the apply/agg/transform machinery.

        Parameters
        ----------
        obj : Series, DataFrame, GroupBy, Resampler or Window
            Object the function(s) will be applied to.
        func : callable, str, list or dict
            Function(s) to apply.
        raw : bool
            Whether to pass raw ndarrays instead of Series to ``func``.
        result_type : {None, "reduce", "broadcast", "expand"}
            Hint for how to shape the result; validated below.
        by_row : False, "compat" or "_compat"
            Row-wise compatibility mode used by apply dispatch.
        engine : str
            Execution engine ("python" or "numba").
        engine_kwargs : dict or None
            Keyword options forwarded to the engine.
        args, kwargs
            Extra positional/keyword arguments passed through to ``func``.
        """
        self.obj = obj
        self.raw = raw

        assert by_row is False or by_row in ["compat", "_compat"]
        self.by_row = by_row

        # Normalize falsy args/kwargs to empty containers.
        self.args = args or ()
        self.kwargs = kwargs or {}

        self.engine = engine
        self.engine_kwargs = {} if engine_kwargs is None else engine_kwargs

        if result_type not in [None, "reduce", "broadcast", "expand"]:
            raise ValueError(
                "invalid value for result_type, must be one "
                "of {None, 'reduce', 'broadcast', 'expand'}"
            )

        self.result_type = result_type

        self.func = func
155
+
156
    @abc.abstractmethod
    def apply(self) -> DataFrame | Series:
        # Entry point: compute the apply result for this object.
        pass

    @abc.abstractmethod
    def agg_or_apply_list_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        # Shared agg/apply implementation for list-like ``func``.
        pass

    @abc.abstractmethod
    def agg_or_apply_dict_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        # Shared agg/apply implementation for dict-like ``func``.
        pass
171
+
172
    def agg(self) -> DataFrame | Series | None:
        """
        Provide an implementation for the aggregators.

        Returns
        -------
        Result of aggregation, or None if agg cannot be performed by
        this method.
        """
        obj = self.obj
        func = self.func
        args = self.args
        kwargs = self.kwargs

        # String aliases dispatch to attribute/numpy lookup.
        if isinstance(func, str):
            return self.apply_str()

        # dict-like must be checked before list-like: a dict is list-like too.
        if is_dict_like(func):
            return self.agg_dict_like()
        elif is_list_like(func):
            # we require a list, but not a 'str'
            return self.agg_list_like()

        if callable(func):
            # Swap a known builtin (e.g. np.sum) for the faster cython method,
            # but only when no extra arguments would be dropped.
            f = com.get_cython_func(func)
            if f and not args and not kwargs:
                warn_alias_replacement(obj, func, f)
                return getattr(obj, f)()

        # caller can react
        return None
203
+
204
    def transform(self) -> DataFrame | Series:
        """
        Transform a DataFrame or Series.

        Returns
        -------
        DataFrame or Series
            Result of applying ``func`` along the given axis of the
            Series or DataFrame.

        Raises
        ------
        ValueError
            If the transform function fails or does not transform.
        """
        obj = self.obj
        func = self.func
        axis = self.axis
        args = self.args
        kwargs = self.kwargs

        is_series = obj.ndim == 1

        # axis=1 is implemented by transposing, transforming along axis=0,
        # and transposing back.
        if obj._get_axis_number(axis) == 1:
            assert not is_series
            return obj.T.transform(func, 0, *args, **kwargs).T

        if is_list_like(func) and not is_dict_like(func):
            func = cast(list[AggFuncTypeBase], func)
            # Convert func equivalent dict
            if is_series:
                # Keyed by callable name (falling back to the object itself).
                func = {com.get_callable_name(v) or v: v for v in func}
            else:
                # Same list of funcs applied to every column.
                func = {col: func for col in obj}

        if is_dict_like(func):
            func = cast(AggFuncTypeDict, func)
            return self.transform_dict_like(func)

        # func is either str or callable
        func = cast(AggFuncTypeBase, func)
        try:
            result = self.transform_str_or_callable(func)
        except TypeError:
            # TypeErrors surface as-is (bad signature etc.).
            raise
        except Exception as err:
            raise ValueError("Transform function failed") from err

        # Functions that transform may return empty Series/DataFrame
        # when the dtype is not appropriate
        if (
            isinstance(result, (ABCSeries, ABCDataFrame))
            and result.empty
            and not obj.empty
        ):
            raise ValueError("Transform function failed")
        # error: Argument 1 to "__get__" of "AxisProperty" has incompatible type
        # "Union[Series, DataFrame, GroupBy[Any], SeriesGroupBy,
        # DataFrameGroupBy, BaseWindow, Resampler]"; expected "Union[DataFrame,
        # Series]"
        if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
            obj.index  # type: ignore[arg-type]
        ):
            # A transform must preserve the index; anything else is a reduce.
            raise ValueError("Function did not transform")

        return result
270
+
271
    def transform_dict_like(self, func) -> DataFrame:
        """
        Compute transform in the case of a dict-like func
        """
        from pandas.core.reshape.concat import concat

        obj = self.obj
        args = self.args
        kwargs = self.kwargs

        # transform is currently only for Series/DataFrame
        assert isinstance(obj, ABCNDFrame)

        if len(func) == 0:
            raise ValueError("No transform functions were provided")

        # Validate columns exist and normalize scalar values to lists.
        func = self.normalize_dictlike_arg("transform", obj, func)

        # Transform each selected column independently, then glue the
        # per-column results side by side.
        results: dict[Hashable, DataFrame | Series] = {}
        for name, how in func.items():
            colg = obj._gotitem(name, ndim=1)
            results[name] = colg.transform(how, 0, *args, **kwargs)
        return concat(results, axis=1)
294
+
295
    def transform_str_or_callable(self, func) -> DataFrame | Series:
        """
        Compute transform in the case of a string or callable func
        """
        obj = self.obj
        args = self.args
        kwargs = self.kwargs

        if isinstance(func, str):
            return self._apply_str(obj, func, *args, **kwargs)

        if not args and not kwargs:
            # Prefer the cython-backed method when func aliases one.
            f = com.get_cython_func(func)
            if f:
                warn_alias_replacement(obj, func, f)
                return getattr(obj, f)()

        # Two possible ways to use a UDF - apply or call directly
        try:
            return obj.apply(func, args=args, **kwargs)
        except Exception:
            # Fall back to calling func on the whole object directly.
            return func(obj, *args, **kwargs)
317
+
318
+ def agg_list_like(self) -> DataFrame | Series:
319
+ """
320
+ Compute aggregation in the case of a list-like argument.
321
+
322
+ Returns
323
+ -------
324
+ Result of aggregation.
325
+ """
326
+ return self.agg_or_apply_list_like(op_name="agg")
327
+
328
    def compute_list_like(
        self,
        op_name: Literal["agg", "apply"],
        selected_obj: Series | DataFrame,
        kwargs: dict[str, Any],
    ) -> tuple[list[Hashable] | Index, list[Any]]:
        """
        Compute agg/apply results for list-like input.

        Parameters
        ----------
        op_name : {"agg", "apply"}
            Operation being performed.
        selected_obj : Series or DataFrame
            Data to perform operation on.
        kwargs : dict
            Keyword arguments to pass to the functions.

        Returns
        -------
        keys : list[Hashable] or Index
            Index labels for result.
        results : list
            Data for result. When aggregating with a Series, this can contain any
            Python objects.
        """
        func = cast(list[AggFuncTypeBase], self.func)
        obj = self.obj

        results = []
        keys = []

        # degenerate case: a Series — apply each function in turn
        if selected_obj.ndim == 1:
            for a in func:
                colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
                # Only pass axis through when the target method accepts it.
                args = (
                    [self.axis, *self.args]
                    if include_axis(op_name, colg)
                    else self.args
                )
                new_res = getattr(colg, op_name)(a, *args, **kwargs)
                results.append(new_res)

                # make sure we find a good name
                name = com.get_callable_name(a) or a
                keys.append(name)

        else:
            # DataFrame: apply the whole func list to each column.
            indices = []
            for index, col in enumerate(selected_obj):
                colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
                args = (
                    [self.axis, *self.args]
                    if include_axis(op_name, colg)
                    else self.args
                )
                new_res = getattr(colg, op_name)(func, *args, **kwargs)
                results.append(new_res)
                indices.append(index)
            # error: Incompatible types in assignment (expression has type "Any |
            # Index", variable has type "list[Any | Callable[..., Any] | str]")
            keys = selected_obj.columns.take(indices)  # type: ignore[assignment]

        return keys, results
393
+
394
    def wrap_results_list_like(
        self, keys: Iterable[Hashable], results: list[Series | DataFrame]
    ):
        """Combine per-function results from a list-like agg/apply."""
        from pandas.core.reshape.concat import concat

        obj = self.obj

        try:
            return concat(results, keys=keys, axis=1, sort=False)
        except TypeError as err:
            # we are concatting non-NDFrame objects,
            # e.g. a list of scalars
            from pandas import Series

            result = Series(results, index=keys, name=obj.name)
            if is_nested_object(result):
                # Mixed scalar/NDFrame contents can't be represented sanely.
                raise ValueError(
                    "cannot combine transform and aggregation operations"
                ) from err
            return result
414
+
415
+ def agg_dict_like(self) -> DataFrame | Series:
416
+ """
417
+ Compute aggregation in the case of a dict-like argument.
418
+
419
+ Returns
420
+ -------
421
+ Result of aggregation.
422
+ """
423
+ return self.agg_or_apply_dict_like(op_name="agg")
424
+
425
    def compute_dict_like(
        self,
        op_name: Literal["agg", "apply"],
        selected_obj: Series | DataFrame,
        selection: Hashable | Sequence[Hashable],
        kwargs: dict[str, Any],
    ) -> tuple[list[Hashable], list[Any]]:
        """
        Compute agg/apply results for dict-like input.

        Parameters
        ----------
        op_name : {"agg", "apply"}
            Operation being performed.
        selected_obj : Series or DataFrame
            Data to perform operation on.
        selection : hashable or sequence of hashables
            Used by GroupBy, Window, and Resample if selection is applied to the object.
        kwargs : dict
            Keyword arguments to pass to the functions.

        Returns
        -------
        keys : list[hashable]
            Index labels for result.
        results : list
            Data for result. When aggregating with a Series, this can contain any
            Python object.
        """
        from pandas.core.groupby.generic import (
            DataFrameGroupBy,
            SeriesGroupBy,
        )

        obj = self.obj
        is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))
        func = cast(AggFuncTypeDict, self.func)
        # Validates keys exist and normalizes scalar values to lists.
        func = self.normalize_dictlike_arg(op_name, selected_obj, func)

        is_non_unique_col = (
            selected_obj.ndim == 2
            and selected_obj.columns.nunique() < len(selected_obj.columns)
        )

        if selected_obj.ndim == 1:
            # key only used for output
            colg = obj._gotitem(selection, ndim=1)
            results = [getattr(colg, op_name)(how, **kwargs) for _, how in func.items()]
            keys = list(func.keys())
        elif not is_groupby and is_non_unique_col:
            # key used for column selection and output
            # GH#51099
            # With duplicate column labels, apply the func to every positional
            # occurrence of each key rather than the 2-D selection.
            results = []
            keys = []
            for key, how in func.items():
                indices = selected_obj.columns.get_indexer_for([key])
                labels = selected_obj.columns.take(indices)
                label_to_indices = defaultdict(list)
                for index, label in zip(indices, labels):
                    label_to_indices[label].append(index)

                key_data = [
                    getattr(selected_obj._ixs(indice, axis=1), op_name)(how, **kwargs)
                    for label, indices in label_to_indices.items()
                    for indice in indices
                ]

                # One output entry per duplicate occurrence of the key.
                keys += [key] * len(key_data)
                results += key_data
        else:
            # key used for column selection and output
            results = [
                getattr(obj._gotitem(key, ndim=1), op_name)(how, **kwargs)
                for key, how in func.items()
            ]
            keys = list(func.keys())

        return keys, results
503
+
504
    def wrap_results_dict_like(
        self,
        selected_obj: Series | DataFrame,
        result_index: list[Hashable],
        result_data: list,
    ):
        """Combine per-key results from a dict-like agg/apply."""
        from pandas import Index
        from pandas.core.reshape.concat import concat

        obj = self.obj

        # Avoid making two isinstance calls in all and any below
        is_ndframe = [isinstance(r, ABCNDFrame) for r in result_data]

        if all(is_ndframe):
            results = dict(zip(result_index, result_data))
            keys_to_use: Iterable[Hashable]
            # Drop keys whose result is empty ...
            keys_to_use = [k for k in result_index if not results[k].empty]
            # Have to check, if at least one DataFrame is not empty.
            keys_to_use = keys_to_use if keys_to_use != [] else result_index
            if selected_obj.ndim == 2:
                # keys are columns, so we can preserve names
                ktu = Index(keys_to_use)
                ktu._set_names(selected_obj.columns.names)
                keys_to_use = ktu

            axis: AxisInt = 0 if isinstance(obj, ABCSeries) else 1
            result = concat(
                {k: results[k] for k in keys_to_use},
                axis=axis,
                keys=keys_to_use,
            )
        elif any(is_ndframe):
            # There is a mix of NDFrames and scalars
            raise ValueError(
                "cannot perform both aggregation "
                "and transformation operations "
                "simultaneously"
            )
        else:
            from pandas import Series

            # we have a list of scalars
            # GH 36212 use name only if obj is a series
            if obj.ndim == 1:
                obj = cast("Series", obj)
                name = obj.name
            else:
                name = None

            result = Series(result_data, index=result_index, name=name)

        return result
557
+
558
    def apply_str(self) -> DataFrame | Series:
        """
        Compute apply in case of a string.

        Returns
        -------
        result: Series or DataFrame
        """
        # Caller is responsible for checking isinstance(self.f, str)
        func = cast(str, self.func)

        obj = self.obj

        from pandas.core.groupby.generic import (
            DataFrameGroupBy,
            SeriesGroupBy,
        )

        # Support for `frame.transform('method')`
        # Some methods (shift, etc.) require the axis argument, others
        # don't, so inspect and insert if necessary.
        method = getattr(obj, func, None)
        if callable(method):
            sig = inspect.getfullargspec(method)
            arg_names = (*sig.args, *sig.kwonlyargs)
            # corrwith/skew accept axis but do not actually support axis=1 here.
            if self.axis != 0 and (
                "axis" not in arg_names or func in ("corrwith", "skew")
            ):
                raise ValueError(f"Operation {func} does not support axis=1")
            if "axis" in arg_names:
                if isinstance(obj, (SeriesGroupBy, DataFrameGroupBy)):
                    # Try to avoid FutureWarning for deprecated axis keyword;
                    # If self.axis matches the axis we would get by not passing
                    # axis, we safely exclude the keyword.

                    default_axis = 0
                    if func in ["idxmax", "idxmin"]:
                        # DataFrameGroupBy.idxmax, idxmin axis defaults to self.axis,
                        # whereas other axis keywords default to 0
                        default_axis = self.obj.axis

                    if default_axis != self.axis:
                        self.kwargs["axis"] = self.axis
                else:
                    self.kwargs["axis"] = self.axis
        return self._apply_str(obj, func, *self.args, **self.kwargs)
604
+
605
    def apply_list_or_dict_like(self) -> DataFrame | Series:
        """
        Compute apply in case of a list-like or dict-like.

        Returns
        -------
        result: Series, DataFrame, or None
            Result when self.func is a list-like or dict-like, None otherwise.
        """

        if self.engine == "numba":
            raise NotImplementedError(
                "The 'numba' engine doesn't support list-like/"
                "dict likes of callables yet."
            )

        # axis=1 on a frame is handled by double transposition.
        if self.axis == 1 and isinstance(self.obj, ABCDataFrame):
            return self.obj.T.apply(self.func, 0, args=self.args, **self.kwargs).T

        func = self.func
        kwargs = self.kwargs

        # dict-like must be checked first: a dict is also list-like.
        if is_dict_like(func):
            result = self.agg_or_apply_dict_like(op_name="apply")
        else:
            result = self.agg_or_apply_list_like(op_name="apply")

        # Re-apply any relabeling (named aggregation) to the result.
        result = reconstruct_and_relabel_result(result, func, **kwargs)

        return result
635
+
636
    def normalize_dictlike_arg(
        self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict
    ) -> AggFuncTypeDict:
        """
        Handler for dict-like argument.

        Ensures that necessary columns exist if obj is a DataFrame, and
        that a nested renamer is not passed. Also normalizes to all lists
        when values consists of a mix of list and non-lists.
        """
        assert how in ("apply", "agg", "transform")

        # Can't use func.values(); wouldn't work for a Series
        if (
            how == "agg"
            and isinstance(obj, ABCSeries)
            and any(is_list_like(v) for _, v in func.items())
        ) or (any(is_dict_like(v) for _, v in func.items())):
            # GH 15931 - deprecation of renaming keys
            raise SpecificationError("nested renamer is not supported")

        if obj.ndim != 1:
            # Check for missing columns on a frame
            from pandas import Index

            cols = Index(list(func.keys())).difference(obj.columns, sort=True)
            if len(cols) > 0:
                raise KeyError(f"Column(s) {list(cols)} do not exist")

        aggregator_types = (list, tuple, dict)

        # if we have a dict of any non-scalars
        # eg. {'A' : ['mean']}, normalize all to
        # be list-likes
        # Cannot use func.values() because arg may be a Series
        if any(isinstance(x, aggregator_types) for _, x in func.items()):
            new_func: AggFuncTypeDict = {}
            for k, v in func.items():
                if not isinstance(v, aggregator_types):
                    # Wrap bare scalars/callables so every value is a list.
                    new_func[k] = [v]
                else:
                    new_func[k] = v
            func = new_func
        return func
680
+
681
    def _apply_str(self, obj, func: str, *args, **kwargs):
        """
        if arg is a string, then try to operate on it:
        - try to find a function (or attribute) on obj
        - try to find a numpy function
        - raise
        """
        assert isinstance(func, str)

        if hasattr(obj, func):
            f = getattr(obj, func)
            if callable(f):
                return f(*args, **kwargs)

            # people may aggregate on a non-callable attribute
            # but don't let them think they can pass args to it
            assert len(args) == 0
            # "axis" is the only keyword the dispatcher itself may have added.
            assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0
            return f
        elif hasattr(np, func) and hasattr(obj, "__array__"):
            # in particular exclude Window
            f = getattr(np, func)
            return f(obj, *args, **kwargs)
        else:
            msg = f"'{func}' is not a valid function for '{type(obj).__name__}' object"
            raise AttributeError(msg)
707
+
708
+
709
class NDFrameApply(Apply):
    """
    Methods shared by FrameApply and SeriesApply but
    not GroupByApply or ResamplerWindowApply
    """

    obj: DataFrame | Series

    @property
    def index(self) -> Index:
        # Row index of the underlying Series/DataFrame.
        return self.obj.index

    @property
    def agg_axis(self) -> Index:
        # Labels of the axis being aggregated over.
        return self.obj._get_agg_axis(self.axis)

    def agg_or_apply_list_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        """Shared agg/apply implementation for list-like ``func``."""
        obj = self.obj
        kwargs = self.kwargs

        if op_name == "apply":
            # Forward the by_row mode to the per-column apply calls.
            if isinstance(self, FrameApply):
                by_row = self.by_row

            elif isinstance(self, SeriesApply):
                by_row = "_compat" if self.by_row else False
            else:
                by_row = False
            kwargs = {**kwargs, "by_row": by_row}

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        keys, results = self.compute_list_like(op_name, obj, kwargs)
        result = self.wrap_results_list_like(keys, results)
        return result

    def agg_or_apply_dict_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        """Shared agg/apply implementation for dict-like ``func``."""
        assert op_name in ["agg", "apply"]
        obj = self.obj

        kwargs = {}
        if op_name == "apply":
            by_row = "_compat" if self.by_row else False
            kwargs.update({"by_row": by_row})

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        # No column selection at the NDFrame level (GroupBy overrides this).
        selection = None
        result_index, result_data = self.compute_dict_like(
            op_name, obj, selection, kwargs
        )
        result = self.wrap_results_dict_like(obj, result_index, result_data)
        return result
768
+
769
+
770
class FrameApply(NDFrameApply):
    # Base class for row-wise / column-wise DataFrame apply.
    obj: DataFrame

    def __init__(
        self,
        obj: AggObjType,
        func: AggFuncType,
        raw: bool,
        result_type: str | None,
        *,
        by_row: Literal[False, "compat"] = False,
        engine: str = "python",
        engine_kwargs: dict[str, bool] | None = None,
        args,
        kwargs,
    ) -> None:
        """Validate by_row for frames ("_compat" is Series-only) and delegate."""
        if by_row is not False and by_row != "compat":
            raise ValueError(f"by_row={by_row} not allowed")
        super().__init__(
            obj,
            func,
            raw,
            result_type,
            by_row=by_row,
            engine=engine,
            engine_kwargs=engine_kwargs,
            args=args,
            kwargs=kwargs,
        )
799
+
800
+ # ---------------------------------------------------------------
801
+ # Abstract Methods
802
+
803
    @property
    @abc.abstractmethod
    def result_index(self) -> Index:
        # Labels for the result's index (axis-dependent).
        pass

    @property
    @abc.abstractmethod
    def result_columns(self) -> Index:
        # Labels for the result's columns (axis-dependent).
        pass

    @property
    @abc.abstractmethod
    def series_generator(self) -> Generator[Series, None, None]:
        # Yields the Series the UDF is called on, one per row/column.
        pass

    @staticmethod
    @functools.cache
    @abc.abstractmethod
    def generate_numba_apply_func(
        func, nogil=True, nopython=True, parallel=False
    ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
        # Build (and cache per-func) the jitted looper for the numba engine.
        pass

    @abc.abstractmethod
    def apply_with_numba(self):
        # Run the jitted looper over this frame's values.
        pass
829
+
830
    def validate_values_for_numba(self):
        # Validate column dtypes all OK for the numba engine (numeric,
        # non-extension-backed).
        for colname, dtype in self.obj.dtypes.items():
            if not is_numeric_dtype(dtype):
                raise ValueError(
                    f"Column {colname} must have a numeric dtype. "
                    f"Found '{dtype}' instead"
                )
            if is_extension_array_dtype(dtype):
                raise ValueError(
                    f"Column {colname} is backed by an extension array, "
                    f"which is not supported by the numba engine."
                )
843
+
844
    @abc.abstractmethod
    def wrap_results_for_axis(
        self, results: ResType, res_index: Index
    ) -> DataFrame | Series:
        # Assemble the per-row/column results into a Series/DataFrame.
        pass
849
+
850
+ # ---------------------------------------------------------------
851
+
852
    @property
    def res_columns(self) -> Index:
        # Alias for result_columns.
        return self.result_columns

    @property
    def columns(self) -> Index:
        # Column labels of the underlying DataFrame.
        return self.obj.columns

    @cache_readonly
    def values(self):
        # Materialized ndarray of the frame; cached since it may copy.
        return self.obj.values
863
+
864
    def apply(self) -> DataFrame | Series:
        """compute the results"""

        # dispatch to handle list-like or dict-like
        if is_list_like(self.func):
            if self.engine == "numba":
                raise NotImplementedError(
                    "the 'numba' engine doesn't support lists of callables yet"
                )
            return self.apply_list_or_dict_like()

        # all empty
        if len(self.columns) == 0 and len(self.index) == 0:
            return self.apply_empty_result()

        # string dispatch
        if isinstance(self.func, str):
            if self.engine == "numba":
                raise NotImplementedError(
                    "the 'numba' engine doesn't support using "
                    "a string as the callable function"
                )
            return self.apply_str()

        # ufunc: applied blockwise through the manager, preserving labels
        elif isinstance(self.func, np.ufunc):
            if self.engine == "numba":
                raise NotImplementedError(
                    "the 'numba' engine doesn't support "
                    "using a numpy ufunc as the callable function"
                )
            with np.errstate(all="ignore"):
                results = self.obj._mgr.apply("apply", func=self.func)
            # _constructor will retain self.index and self.columns
            return self.obj._constructor_from_mgr(results, axes=results.axes)

        # broadcasting
        if self.result_type == "broadcast":
            if self.engine == "numba":
                raise NotImplementedError(
                    "the 'numba' engine doesn't support result_type='broadcast'"
                )
            return self.apply_broadcast(self.obj)

        # one axis empty
        elif not all(self.obj.shape):
            return self.apply_empty_result()

        # raw: UDF receives ndarrays instead of Series
        elif self.raw:
            return self.apply_raw(engine=self.engine, engine_kwargs=self.engine_kwargs)

        return self.apply_standard()
917
+
918
    def agg(self):
        """Aggregate, normalizing axis=1 to axis=0 via transposition."""
        obj = self.obj
        axis = self.axis

        # TODO: Avoid having to change state
        self.obj = self.obj if self.axis == 0 else self.obj.T
        self.axis = 0

        result = None
        try:
            result = super().agg()
        finally:
            # Always restore the original object/axis, even on error.
            self.obj = obj
            self.axis = axis

        if axis == 1:
            # Undo the transposition applied above.
            result = result.T if result is not None else result

        if result is None:
            # Base-class agg declined (plain callable): fall back to apply.
            result = self.obj.apply(self.func, axis, args=self.args, **self.kwargs)

        return result
940
+
941
    def apply_empty_result(self):
        """
        we have an empty result; at least 1 axis is 0

        we will try to apply the function to an empty
        series in order to see if this is a reduction function
        """
        assert callable(self.func)

        # we are not asked to reduce or infer reduction
        # so just return a copy of the existing object
        if self.result_type not in ["reduce", None]:
            return self.obj.copy()

        # we may need to infer
        should_reduce = self.result_type == "reduce"

        from pandas import Series

        if not should_reduce:
            # Probe: call func on an empty Series; if it returns a Series
            # it is a transform, otherwise treat it as a reduction.
            try:
                if self.axis == 0:
                    r = self.func(
                        Series([], dtype=np.float64), *self.args, **self.kwargs
                    )
                else:
                    r = self.func(
                        Series(index=self.columns, dtype=np.float64),
                        *self.args,
                        **self.kwargs,
                    )
            except Exception:
                # Probe failure means we cannot infer; keep should_reduce=False.
                pass
            else:
                should_reduce = not isinstance(r, Series)

        if should_reduce:
            if len(self.agg_axis):
                r = self.func(Series([], dtype=np.float64), *self.args, **self.kwargs)
            else:
                r = np.nan

            return self.obj._constructor_sliced(r, index=self.agg_axis)
        else:
            return self.obj.copy()
986
+
987
    def apply_raw(self, engine="python", engine_kwargs=None):
        """apply to the values as a numpy array"""

        def wrap_function(func):
            """
            Wrap user supplied function to work around numpy issue.

            see https://github.com/numpy/numpy/issues/8352
            """

            def wrapper(*args, **kwargs):
                result = func(*args, **kwargs)
                # Box bare str returns so apply_along_axis keeps them whole.
                if isinstance(result, str):
                    result = np.array(result, dtype=object)
                return result

            return wrapper

        if engine == "numba":
            engine_kwargs = {} if engine_kwargs is None else engine_kwargs

            # error: Argument 1 to "__call__" of "_lru_cache_wrapper" has
            # incompatible type "Callable[..., Any] | str | list[Callable
            # [..., Any] | str] | dict[Hashable,Callable[..., Any] | str |
            # list[Callable[..., Any] | str]]"; expected "Hashable"
            nb_looper = generate_apply_looper(
                self.func, **engine_kwargs  # type: ignore[arg-type]
            )
            result = nb_looper(self.values, self.axis)
            # If we made the result 2-D, squeeze it back to 1-D
            result = np.squeeze(result)
        else:
            result = np.apply_along_axis(
                wrap_function(self.func),
                self.axis,
                self.values,
                *self.args,
                **self.kwargs,
            )

        # TODO: mixed type case
        # 2-D result => transform-like; 1-D => reduction along agg_axis.
        if result.ndim == 2:
            return self.obj._constructor(result, index=self.index, columns=self.columns)
        else:
            return self.obj._constructor_sliced(result, index=self.agg_axis)
1032
+
1033
    def apply_broadcast(self, target: DataFrame) -> DataFrame:
        """Apply func per column and broadcast results back to target's shape."""
        assert callable(self.func)

        result_values = np.empty_like(target.values)

        # axis which we want to compare compliance
        result_compare = target.shape[0]

        for i, col in enumerate(target.columns):
            res = self.func(target[col], *self.args, **self.kwargs)
            ares = np.asarray(res).ndim

            # must be a scalar or 1d
            if ares > 1:
                raise ValueError("too many dims to broadcast")
            if ares == 1:
                # must match return dim
                if result_compare != len(res):
                    raise ValueError("cannot broadcast result")

            # Scalars broadcast down the column; 1-D fills it directly.
            result_values[:, i] = res

        # we *always* preserve the original index / columns
        result = self.obj._constructor(
            result_values, index=target.index, columns=target.columns
        )
        return result
1060
+
1061
+ def apply_standard(self):
1062
+ if self.engine == "python":
1063
+ results, res_index = self.apply_series_generator()
1064
+ else:
1065
+ results, res_index = self.apply_series_numba()
1066
+
1067
+ # wrap results
1068
+ return self.wrap_results(results, res_index)
1069
+
1070
    def apply_series_generator(self) -> tuple[ResType, Index]:
        """Call func on each generated Series; return {position: result} map."""
        assert callable(self.func)

        series_gen = self.series_generator
        res_index = self.result_index

        results = {}

        with option_context("mode.chained_assignment", None):
            for i, v in enumerate(series_gen):
                # ignore SettingWithCopy here in case the user mutates
                results[i] = self.func(v, *self.args, **self.kwargs)
                if isinstance(results[i], ABCSeries):
                    # If we have a view on v, we need to make a copy because
                    # series_generator will swap out the underlying data
                    results[i] = results[i].copy(deep=False)

        return results, res_index
1088
+
1089
    def apply_series_numba(self):
        """Numba-engine counterpart of apply_series_generator."""
        if self.engine_kwargs.get("parallel", False):
            raise NotImplementedError(
                "Parallel apply is not supported when raw=False and engine='numba'"
            )
        if not self.obj.index.is_unique or not self.columns.is_unique:
            raise NotImplementedError(
                "The index/columns must be unique when raw=False and engine='numba'"
            )
        # Numba only handles numeric, non-extension dtypes.
        self.validate_values_for_numba()
        results = self.apply_with_numba()
        return results, self.result_index
1101
+
1102
    def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series:
        """Turn the {position: result} map into a Series or DataFrame."""
        from pandas import Series

        # see if we can infer the results
        if len(results) > 0 and 0 in results and is_sequence(results[0]):
            # Sequence-valued results delegate to the axis-specific wrapper.
            return self.wrap_results_for_axis(results, res_index)

        # dict of scalars

        # the default dtype of an empty Series is `object`, but this
        # code can be hit by df.mean() where the result should have dtype
        # float64 even if it's an empty Series.
        constructor_sliced = self.obj._constructor_sliced
        if len(results) == 0 and constructor_sliced is Series:
            result = constructor_sliced(results, dtype=np.float64)
        else:
            result = constructor_sliced(results)
        result.index = res_index

        return result
1122
+
1123
    def apply_str(self) -> DataFrame | Series:
        # Caller is responsible for checking isinstance(self.func, str)
        # TODO: GH#39993 - Avoid special-casing by replacing with lambda
        if self.func == "size":
            # Special-cased because DataFrame.size returns a single scalar
            obj = self.obj
            value = obj.shape[self.axis]
            return obj._constructor_sliced(value, index=self.agg_axis)
        return super().apply_str()
1132
+
1133
+
1134
class FrameRowApply(FrameApply):
    # axis=0: the UDF is applied to each column.
    axis: AxisInt = 0

    @property
    def series_generator(self) -> Generator[Series, None, None]:
        # Yield each column as a Series (positional, via _ixs).
        return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
1140
+
1141
    @staticmethod
    @functools.cache
    def generate_numba_apply_func(
        func, nogil=True, nopython=True, parallel=False
    ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
        """Build (and cache per ``func``) a jitted column-wise apply looper."""
        numba = import_optional_dependency("numba")
        from pandas import Series

        # Import helper from extensions to cast string object -> np strings
        # Note: This also has the side effect of loading our numba extensions
        from pandas.core._numba.extensions import maybe_cast_str

        jitted_udf = numba.extending.register_jitable(func)

        # Currently the parallel argument doesn't get passed through here
        # (it's disabled) since the dicts in numba aren't thread-safe.
        @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel)
        def numba_func(values, col_names, df_index):
            results = {}
            for j in range(values.shape[1]):
                # Create the series
                ser = Series(
                    values[:, j], index=df_index, name=maybe_cast_str(col_names[j])
                )
                results[j] = jitted_udf(ser)
            return results

        return numba_func
1169
+
1170
+ def apply_with_numba(self) -> dict[int, Any]:
1171
+ nb_func = self.generate_numba_apply_func(
1172
+ cast(Callable, self.func), **self.engine_kwargs
1173
+ )
1174
+ from pandas.core._numba.extensions import set_numba_data
1175
+
1176
+ index = self.obj.index
1177
+ if index.dtype == "string":
1178
+ index = index.astype(object)
1179
+
1180
+ columns = self.obj.columns
1181
+ if columns.dtype == "string":
1182
+ columns = columns.astype(object)
1183
+
1184
+ # Convert from numba dict to regular dict
1185
+ # Our isinstance checks in the df constructor don't pass for numbas typed dict
1186
+ with set_numba_data(index) as index, set_numba_data(columns) as columns:
1187
+ res = dict(nb_func(self.values, columns, index))
1188
+ return res
1189
+
1190
+ @property
1191
+ def result_index(self) -> Index:
1192
+ return self.columns
1193
+
1194
+ @property
1195
+ def result_columns(self) -> Index:
1196
+ return self.index
1197
+
1198
+ def wrap_results_for_axis(
1199
+ self, results: ResType, res_index: Index
1200
+ ) -> DataFrame | Series:
1201
+ """return the results for the rows"""
1202
+
1203
+ if self.result_type == "reduce":
1204
+ # e.g. test_apply_dict GH#8735
1205
+ res = self.obj._constructor_sliced(results)
1206
+ res.index = res_index
1207
+ return res
1208
+
1209
+ elif self.result_type is None and all(
1210
+ isinstance(x, dict) for x in results.values()
1211
+ ):
1212
+ # Our operation was a to_dict op e.g.
1213
+ # test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544
1214
+ res = self.obj._constructor_sliced(results)
1215
+ res.index = res_index
1216
+ return res
1217
+
1218
+ try:
1219
+ result = self.obj._constructor(data=results)
1220
+ except ValueError as err:
1221
+ if "All arrays must be of the same length" in str(err):
1222
+ # e.g. result = [[2, 3], [1.5], ['foo', 'bar']]
1223
+ # see test_agg_listlike_result GH#29587
1224
+ res = self.obj._constructor_sliced(results)
1225
+ res.index = res_index
1226
+ return res
1227
+ else:
1228
+ raise
1229
+
1230
+ if not isinstance(results[0], ABCSeries):
1231
+ if len(result.index) == len(self.res_columns):
1232
+ result.index = self.res_columns
1233
+
1234
+ if len(result.columns) == len(res_index):
1235
+ result.columns = res_index
1236
+
1237
+ return result
1238
+
1239
+
1240
class FrameColumnApply(FrameApply):
    """Apply engine for ``DataFrame.apply(..., axis=1)``: func sees each row."""

    axis: AxisInt = 1

    def apply_broadcast(self, target: DataFrame) -> DataFrame:
        # Row-wise broadcast is implemented as the axis=0 broadcast of the
        # transpose, transposed back.
        result = super().apply_broadcast(target.T)
        return result.T

    @property
    def series_generator(self) -> Generator[Series, None, None]:
        values = self.values
        values = ensure_wrapped_if_datetimelike(values)
        assert len(values) > 0

        # We create one Series object, and will swap out the data inside
        # of it.  Kids: don't do this at home.
        ser = self.obj._ixs(0, axis=0)
        mgr = ser._mgr

        is_view = mgr.blocks[0].refs.has_reference()  # type: ignore[union-attr]

        if isinstance(ser.dtype, ExtensionDtype):
            # values will be incorrect for this block
            # TODO(EA2D): special case would be unnecessary with 2D EAs
            obj = self.obj
            for i in range(len(obj)):
                yield obj._ixs(i, axis=0)

        else:
            for arr, name in zip(values, self.index):
                # GH#35462 re-pin mgr in case setitem changed it
                ser._mgr = mgr
                mgr.set_values(arr)
                object.__setattr__(ser, "_name", name)
                if not is_view:
                    # In apply_series_generator we store a shallow copy of the
                    # result, which potentially increases the ref count of this reused
                    # `ser` object (depending on the result of the applied function)
                    # -> if that happened and `ser` is already a copy, then we reset
                    # the refs here to avoid triggering an unnecessary CoW inside the
                    # applied function (https://github.com/pandas-dev/pandas/pull/56212)
                    mgr.blocks[0].refs = BlockValuesRefs(mgr.blocks[0])  # type: ignore[union-attr]
                yield ser

    @staticmethod
    @functools.cache
    def generate_numba_apply_func(
        func, nogil=True, nopython=True, parallel=False
    ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
        """Build (and cache per-func) a numba-jitted row-wise apply kernel."""
        numba = import_optional_dependency("numba")
        from pandas import Series
        from pandas.core._numba.extensions import maybe_cast_str

        jitted_udf = numba.extending.register_jitable(func)

        @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel)
        def numba_func(values, col_names_index, index):
            results = {}
            # Currently the parallel argument doesn't get passed through here
            # (it's disabled) since the dicts in numba aren't thread-safe.
            for i in range(values.shape[0]):
                # Create the series
                # TODO: values corrupted without the copy
                ser = Series(
                    values[i].copy(),
                    index=col_names_index,
                    name=maybe_cast_str(index[i]),
                )
                results[i] = jitted_udf(ser)

            return results

        return numba_func

    def apply_with_numba(self) -> dict[int, Any]:
        """Run ``self.func`` over each row via the cached numba kernel."""
        nb_func = self.generate_numba_apply_func(
            cast(Callable, self.func), **self.engine_kwargs
        )

        from pandas.core._numba.extensions import set_numba_data

        # Convert from numba dict to regular dict
        # Our isinstance checks in the df constructor don't pass for numbas typed dict
        with set_numba_data(self.obj.index) as index, set_numba_data(
            self.columns
        ) as columns:
            res = dict(nb_func(self.values, columns, index))

        return res

    @property
    def result_index(self) -> Index:
        # axis=1 apply yields one result per row.
        return self.index

    @property
    def result_columns(self) -> Index:
        return self.columns

    def wrap_results_for_axis(
        self, results: ResType, res_index: Index
    ) -> DataFrame | Series:
        """return the results for the columns"""
        result: DataFrame | Series

        # we have requested to expand
        if self.result_type == "expand":
            result = self.infer_to_same_shape(results, res_index)

        # we have a non-series and don't want inference
        elif not isinstance(results[0], ABCSeries):
            result = self.obj._constructor_sliced(results)
            result.index = res_index

        # we may want to infer results
        else:
            result = self.infer_to_same_shape(results, res_index)

        return result

    def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:
        """infer the results to the same shape as the input object"""
        result = self.obj._constructor(data=results)
        result = result.T

        # set the index
        result.index = res_index

        # infer dtypes
        result = result.infer_objects(copy=False)

        return result
1370
+
1371
+
1372
class SeriesApply(NDFrameApply):
    """Apply/agg engine for ``Series.apply`` and ``Series.agg``."""

    obj: Series
    axis: AxisInt = 0
    by_row: Literal[False, "compat", "_compat"]  # only relevant for apply()

    def __init__(
        self,
        obj: Series,
        func: AggFuncType,
        *,
        convert_dtype: bool | lib.NoDefault = lib.no_default,
        by_row: Literal[False, "compat", "_compat"] = "compat",
        args,
        kwargs,
    ) -> None:
        # convert_dtype is deprecated (GH); default remains True, and passing
        # it explicitly (either value) triggers the FutureWarning below.
        if convert_dtype is lib.no_default:
            convert_dtype = True
        else:
            warnings.warn(
                "the convert_dtype parameter is deprecated and will be removed in a "
                "future version. Do ``ser.astype(object).apply()`` "
                "instead if you want ``convert_dtype=False``.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        self.convert_dtype = convert_dtype

        super().__init__(
            obj,
            func,
            raw=False,
            result_type=None,
            by_row=by_row,
            args=args,
            kwargs=kwargs,
        )

    def apply(self) -> DataFrame | Series:
        """Top-level dispatch for ``Series.apply``."""
        obj = self.obj

        if len(obj) == 0:
            return self.apply_empty_result()

        # dispatch to handle list-like or dict-like
        if is_list_like(self.func):
            return self.apply_list_or_dict_like()

        if isinstance(self.func, str):
            # if we are a string, try to dispatch
            return self.apply_str()

        if self.by_row == "_compat":
            return self.apply_compat()

        # self.func is Callable
        return self.apply_standard()

    def agg(self):
        """``Series.agg`` with a callable: try apply first, else call directly."""
        result = super().agg()
        if result is None:
            obj = self.obj
            func = self.func
            # string, list-like, and dict-like are entirely handled in super
            assert callable(func)

            # GH53325: The setup below is just to keep current behavior while emitting a
            # deprecation message. In the future this will all be replaced with a simple
            # `result = f(self.obj, *self.args, **self.kwargs)`.
            try:
                result = obj.apply(func, args=self.args, **self.kwargs)
            except (ValueError, AttributeError, TypeError):
                result = func(obj, *self.args, **self.kwargs)
            else:
                # Only warn when the element-wise apply path succeeded; that is
                # the behavior slated for removal.
                msg = (
                    f"using {func} in {type(obj).__name__}.agg cannot aggregate and "
                    f"has been deprecated. Use {type(obj).__name__}.transform to "
                    f"keep behavior unchanged."
                )
                warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())

        return result

    def apply_empty_result(self) -> Series:
        # Empty input: return an empty Series preserving dtype and index.
        obj = self.obj
        return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__(
            obj, method="apply"
        )

    def apply_compat(self):
        """compat apply method for funcs in listlikes and dictlikes.

        Used for each callable when giving listlikes and dictlikes of callables to
        apply. Needed for compatibility with Pandas < v2.1.

        .. versionadded:: 2.1.0
        """
        obj = self.obj
        func = self.func

        if callable(func):
            # Cython-optimized funcs go through the whole-series path.
            f = com.get_cython_func(func)
            if f and not self.args and not self.kwargs:
                return obj.apply(func, by_row=False)

        try:
            result = obj.apply(func, by_row="compat")
        except (ValueError, AttributeError, TypeError):
            # Fall back to calling func on the whole Series.
            result = obj.apply(func, by_row=False)
        return result

    def apply_standard(self) -> DataFrame | Series:
        # caller is responsible for ensuring that f is Callable
        func = cast(Callable, self.func)
        obj = self.obj

        if isinstance(func, np.ufunc):
            # ufuncs operate on the whole array; suppress numpy FP warnings.
            with np.errstate(all="ignore"):
                return func(obj, *self.args, **self.kwargs)
        elif not self.by_row:
            return func(obj, *self.args, **self.kwargs)

        if self.args or self.kwargs:
            # _map_values does not support args/kwargs
            def curried(x):
                return func(x, *self.args, **self.kwargs)

        else:
            curried = func

        # row-wise access
        # apply doesn't have a `na_action` keyword and for backward compat reasons
        # we need to give `na_action="ignore"` for categorical data.
        # TODO: remove the `na_action="ignore"` when that default has been changed in
        # Categorical (GH51645).
        action = "ignore" if isinstance(obj.dtype, CategoricalDtype) else None
        mapped = obj._map_values(
            mapper=curried, na_action=action, convert=self.convert_dtype
        )

        if len(mapped) and isinstance(mapped[0], ABCSeries):
            # GH#43986 Need to do list(mapped) in order to get treated as nested
            # See also GH#25959 regarding EA support
            return obj._constructor_expanddim(list(mapped), index=obj.index)
        else:
            return obj._constructor(mapped, index=obj.index).__finalize__(
                obj, method="apply"
            )
1519
+
1520
+
1521
class GroupByApply(Apply):
    """Apply/agg engine shared by GroupBy (and subclassed for Resampler/Window)."""

    obj: GroupBy | Resampler | BaseWindow

    def __init__(
        self,
        obj: GroupBy[NDFrameT],
        func: AggFuncType,
        *,
        args,
        kwargs,
    ) -> None:
        # Copy so popping/normalizing kwargs does not mutate the caller's dict.
        kwargs = kwargs.copy()
        # Resolve the axis kwarg against the *grouped* (underlying) object.
        self.axis = obj.obj._get_axis_number(kwargs.get("axis", 0))
        super().__init__(
            obj,
            func,
            raw=False,
            result_type=None,
            args=args,
            kwargs=kwargs,
        )

    def apply(self):
        # GroupBy.apply has its own implementation; not routed through here.
        raise NotImplementedError

    def transform(self):
        raise NotImplementedError

    def agg_or_apply_list_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        """Handle a list-like ``func`` for either agg or apply."""
        obj = self.obj
        kwargs = self.kwargs
        if op_name == "apply":
            kwargs = {**kwargs, "by_row": False}

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        if obj._selected_obj.ndim == 1:
            # For SeriesGroupBy this matches _obj_with_exclusions
            selected_obj = obj._selected_obj
        else:
            selected_obj = obj._obj_with_exclusions

        # Only set as_index=True on groupby objects, not Window or Resample
        # that inherit from this class.
        with com.temp_setattr(
            obj, "as_index", True, condition=hasattr(obj, "as_index")
        ):
            keys, results = self.compute_list_like(op_name, selected_obj, kwargs)
        result = self.wrap_results_list_like(keys, results)
        return result

    def agg_or_apply_dict_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        """Handle a dict-like ``func`` for either agg or apply."""
        from pandas.core.groupby.generic import (
            DataFrameGroupBy,
            SeriesGroupBy,
        )

        assert op_name in ["agg", "apply"]

        obj = self.obj
        kwargs = {}
        if op_name == "apply":
            by_row = "_compat" if self.by_row else False
            kwargs.update({"by_row": by_row})

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        selected_obj = obj._selected_obj
        selection = obj._selection

        is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))

        # Numba Groupby engine/engine-kwargs passthrough
        if is_groupby:
            engine = self.kwargs.get("engine", None)
            engine_kwargs = self.kwargs.get("engine_kwargs", None)
            kwargs.update({"engine": engine, "engine_kwargs": engine_kwargs})

        # Only set as_index=True on groupby objects (Window/Resample lack it).
        with com.temp_setattr(
            obj, "as_index", True, condition=hasattr(obj, "as_index")
        ):
            result_index, result_data = self.compute_dict_like(
                op_name, selected_obj, selection, kwargs
            )
        result = self.wrap_results_dict_like(selected_obj, result_index, result_data)
        return result
1613
+
1614
+
1615
class ResamplerWindowApply(GroupByApply):
    """Agg engine for Resampler and Window objects."""

    axis: AxisInt = 0
    obj: Resampler | BaseWindow

    def __init__(
        self,
        obj: Resampler | BaseWindow,
        func: AggFuncType,
        *,
        args,
        kwargs,
    ) -> None:
        # NB: deliberately bypass GroupByApply.__init__ (which resolves an
        # "axis" kwarg via obj.obj._get_axis_number) and call Apply.__init__
        # directly; Resampler/Window fix axis at the class level above.
        super(GroupByApply, self).__init__(
            obj,
            func,
            raw=False,
            result_type=None,
            args=args,
            kwargs=kwargs,
        )

    def apply(self):
        # Only agg is supported through this class.
        raise NotImplementedError

    def transform(self):
        raise NotImplementedError
1641
+
1642
+
1643
def reconstruct_func(
    func: AggFuncType | None, **kwargs
) -> tuple[bool, AggFuncType, tuple[str, ...] | None, npt.NDArray[np.intp] | None]:
    """
    This is the internal function to reconstruct func given if there is relabeling
    or not and also normalize the keyword to get new order of columns.

    If named aggregation is applied, `func` will be None, and kwargs contains the
    column and aggregation function information to be parsed;
    If named aggregation is not applied, `func` is either string (e.g. 'min') or
    Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name
    and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]})

    If relabeling is True, will return relabeling, reconstructed func, column
    names, and the reconstructed order of columns.
    If relabeling is False, the columns and order will be None.

    Parameters
    ----------
    func: agg function (e.g. 'min' or Callable) or list of agg functions
        (e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
    **kwargs: dict, kwargs used in is_multi_agg_with_relabel and
        normalize_keyword_aggregation function for relabelling

    Returns
    -------
    relabelling: bool, if there is relabelling or not
    func: normalized and mangled func
    columns: tuple of column names
    order: array of columns indices

    Examples
    --------
    >>> reconstruct_func(None, **{"foo": ("col", "min")})
    (True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))

    >>> reconstruct_func("min")
    (False, 'min', None, None)
    """
    relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
    columns: tuple[str, ...] | None = None
    order: npt.NDArray[np.intp] | None = None

    if not relabeling:
        if isinstance(func, list) and len(func) > len(set(func)):
            # GH 28426 will raise error if duplicated function names are used and
            # there is no reassigned name
            raise SpecificationError(
                "Function names must be unique if there is no new column names "
                "assigned"
            )
        if func is None:
            # nicer error message
            # (fixed: the closing quote around (column, aggfunc) was missing)
            raise TypeError("Must provide 'func' or tuples of '(column, aggfunc)'.")

    if relabeling:
        # error: Incompatible types in assignment (expression has type
        # "MutableMapping[Hashable, list[Callable[..., Any] | str]]", variable has type
        # "Callable[..., Any] | str | list[Callable[..., Any] | str] |
        # MutableMapping[Hashable, Callable[..., Any] | str | list[Callable[..., Any] |
        # str]] | None")
        func, columns, order = normalize_keyword_aggregation(  # type: ignore[assignment]
            kwargs
        )
        assert func is not None

    return relabeling, func, columns, order
1710
+
1711
+
1712
def is_multi_agg_with_relabel(**kwargs) -> bool:
    """
    Check whether kwargs passed to .agg look like multi-agg with relabeling.

    Parameters
    ----------
    **kwargs : dict

    Returns
    -------
    bool

    Examples
    --------
    >>> is_multi_agg_with_relabel(a="max")
    False
    >>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
    True
    >>> is_multi_agg_with_relabel()
    False
    """
    if not kwargs:
        return False
    # Every value must be a (column, aggfunc) 2-tuple for this to be the
    # named-aggregation ("relabel") form.
    return all(isinstance(spec, tuple) and len(spec) == 2 for spec in kwargs.values())
1736
+
1737
+
1738
def normalize_keyword_aggregation(
    kwargs: dict,
) -> tuple[
    MutableMapping[Hashable, list[AggFuncTypeBase]],
    tuple[str, ...],
    npt.NDArray[np.intp],
]:
    """
    Normalize user-provided "named aggregation" kwargs.
    Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
    to the old Dict[str, List[scalar]]].

    Parameters
    ----------
    kwargs : dict

    Returns
    -------
    aggspec : dict
        The transformed kwargs.
    columns : tuple[str, ...]
        The user-provided keys.
    col_idx_order : List[int]
        List of columns indices.

    Examples
    --------
    >>> normalize_keyword_aggregation({"output": ("input", "sum")})
    (defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
    """
    from pandas.core.indexes.base import Index

    # Normalize the aggregation functions as Mapping[column, List[func]],
    # process normally, then fixup the names.
    # TODO: aggspec type: typing.Dict[str, List[AggScalar]]
    aggspec = defaultdict(list)
    order = []
    columns, pairs = list(zip(*kwargs.items()))

    for column, aggfunc in pairs:
        aggspec[column].append(aggfunc)
        # Record (column, func-name) in user-given order so duplicates can be
        # disambiguated and the original ordering recovered below.
        order.append((column, com.get_callable_name(aggfunc) or aggfunc))

    # uniquify aggfunc name if duplicated in order list
    uniquified_order = _make_unique_kwarg_list(order)

    # GH 25719, due to aggspec will change the order of assigned columns in aggregation
    # uniquified_aggspec will store uniquified order list and will compare it with order
    # based on index
    aggspec_order = [
        (column, com.get_callable_name(aggfunc) or aggfunc)
        for column, aggfuncs in aggspec.items()
        for aggfunc in aggfuncs
    ]
    uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)

    # get the new index of columns by comparison
    col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
    return aggspec, columns, col_idx_order
1797
+
1798
+
1799
+ def _make_unique_kwarg_list(
1800
+ seq: Sequence[tuple[Any, Any]]
1801
+ ) -> Sequence[tuple[Any, Any]]:
1802
+ """
1803
+ Uniquify aggfunc name of the pairs in the order list
1804
+
1805
+ Examples:
1806
+ --------
1807
+ >>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
1808
+ >>> _make_unique_kwarg_list(kwarg_list)
1809
+ [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
1810
+ """
1811
+ return [
1812
+ (pair[0], f"{pair[1]}_{seq[:i].count(pair)}") if seq.count(pair) > 1 else pair
1813
+ for i, pair in enumerate(seq)
1814
+ ]
1815
+
1816
+
1817
def relabel_result(
    result: DataFrame | Series,
    func: dict[str, list[Callable | str]],
    columns: Iterable[Hashable],
    order: Iterable[int],
) -> dict[Hashable, Series]:
    """
    Internal function to reorder result if relabelling is True for
    dataframe.agg, and return the reordered result in dict.

    Parameters
    ----------
    result: Result from aggregation
    func: Dict of (column name, funcs)
    columns: New columns name for relabelling
    order: New order for relabelling

    Examples
    --------
    >>> from pandas.core.apply import relabel_result
    >>> result = pd.DataFrame(
    ...     {"A": [np.nan, 2, np.nan], "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]},
    ...     index=["max", "mean", "min"]
    ... )
    >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
    >>> columns = ("foo", "aab", "bar", "dat")
    >>> order = [0, 1, 2, 3]
    >>> result_in_dict = relabel_result(result, funcs, columns, order)
    >>> pd.DataFrame(result_in_dict, index=columns)
           A    C    B
    foo  2.0  NaN  NaN
    aab  NaN  6.0  NaN
    bar  NaN  NaN  4.0
    dat  NaN  NaN  2.5
    """
    from pandas.core.indexes.base import Index

    # The user-provided output names, re-sorted into the order the funcs were
    # given (``order`` was produced by normalize_keyword_aggregation).
    reordered_indexes = [
        pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
    ]
    reordered_result_in_dict: dict[Hashable, Series] = {}
    idx = 0

    # Only reorder when aggregating multiple columns; see the comment below.
    reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
    for col, fun in func.items():
        s = result[col].dropna()

        # In the `_aggregate`, the callable names are obtained and used in `result`, and
        # these names are ordered alphabetically. e.g.
        #           C2   C1
        # <lambda>   1  NaN
        # amax     NaN  4.0
        # max      NaN  4.0
        # sum     18.0  6.0
        # Therefore, the order of functions for each column could be shuffled
        # accordingly so need to get the callable name if it is not parsed names, and
        # reorder the aggregated result for each column.
        # e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is
        # [sum, <lambda>], but in `result`, it will be [<lambda>, sum], and we need to
        # reorder so that aggregated values map to their functions regarding the order.

        # However there is only one column being used for aggregation, not need to
        # reorder since the index is not sorted, and keep as is in `funcs`, e.g.
        #         A
        # min   1.0
        # mean  1.5
        # mean  1.5
        if reorder_mask:
            fun = [
                com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
            ]
            col_idx_order = Index(s.index).get_indexer(fun)
            s = s.iloc[col_idx_order]

        # assign the new user-provided "named aggregation" as index names, and reindex
        # it based on the whole user-provided names.
        s.index = reordered_indexes[idx : idx + len(fun)]
        reordered_result_in_dict[col] = s.reindex(columns, copy=False)
        idx = idx + len(fun)
    return reordered_result_in_dict
1897
+
1898
+
1899
def reconstruct_and_relabel_result(result, func, **kwargs) -> DataFrame | Series:
    """Apply named-aggregation relabeling to ``result`` when requested via kwargs."""
    from pandas import DataFrame

    relabeling, func, columns, order = reconstruct_func(func, **kwargs)

    if not relabeling:
        # No named aggregation: the result passes through untouched.
        return result

    # This is to keep the order to columns occurrence unchanged, and also
    # keep the order of new columns occurrence unchanged.
    # reconstruct_func guarantees columns/order are non-None when relabeling.
    assert columns is not None
    assert order is not None

    result_in_dict = relabel_result(result, func, columns, order)
    return DataFrame(result_in_dict, index=columns)
1917
+
1918
+
1919
+ # TODO: Can't use, because mypy doesn't like us setting __name__
1920
+ # error: "partial[Any]" has no attribute "__name__"
1921
+ # the type is:
1922
+ # typing.Sequence[Callable[..., ScalarResult]]
1923
+ # -> typing.Sequence[Callable[..., ScalarResult]]:
1924
+
1925
+
1926
+ def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
1927
+ """
1928
+ Possibly mangle a list of aggfuncs.
1929
+
1930
+ Parameters
1931
+ ----------
1932
+ aggfuncs : Sequence
1933
+
1934
+ Returns
1935
+ -------
1936
+ mangled: list-like
1937
+ A new AggSpec sequence, where lambdas have been converted
1938
+ to have unique names.
1939
+
1940
+ Notes
1941
+ -----
1942
+ If just one aggfunc is passed, the name will not be mangled.
1943
+ """
1944
+ if len(aggfuncs) <= 1:
1945
+ # don't mangle for .agg([lambda x: .])
1946
+ return aggfuncs
1947
+ i = 0
1948
+ mangled_aggfuncs = []
1949
+ for aggfunc in aggfuncs:
1950
+ if com.get_callable_name(aggfunc) == "<lambda>":
1951
+ aggfunc = partial(aggfunc)
1952
+ aggfunc.__name__ = f"<lambda_{i}>"
1953
+ i += 1
1954
+ mangled_aggfuncs.append(aggfunc)
1955
+
1956
+ return mangled_aggfuncs
1957
+
1958
+
1959
def maybe_mangle_lambdas(agg_spec: Any) -> Any:
    """
    Make new lambdas with unique names.

    Parameters
    ----------
    agg_spec : Any
        An argument to GroupBy.agg.
        Non-dict-like `agg_spec` are pass through as is.
        For dict-like `agg_spec` a new spec is returned
        with name-mangled lambdas.

    Returns
    -------
    mangled : Any
        Same type as the input.

    Examples
    --------
    >>> maybe_mangle_lambdas('sum')
    'sum'
    >>> maybe_mangle_lambdas([lambda: 1, lambda: 2])  # doctest: +SKIP
    [<function __main__.<lambda_0>,
     <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
    """
    is_dict = is_dict_like(agg_spec)
    if not is_dict and not is_list_like(agg_spec):
        # Scalar specs (e.g. "sum" or a single callable) pass through as-is.
        return agg_spec

    if not is_dict:
        # Plain list-like of aggfuncs.
        return _managle_lambda_list(agg_spec)

    # dict or OrderedDict: mangle each list-like value, preserving the type.
    mangled_aggspec = type(agg_spec)()
    for key, aggfuncs in agg_spec.items():
        if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
            mangled_aggspec[key] = _managle_lambda_list(aggfuncs)
        else:
            mangled_aggspec[key] = aggfuncs
    return mangled_aggspec
2001
+
2002
+
2003
def validate_func_kwargs(
    kwargs: dict,
) -> tuple[list[str], list[str | Callable[..., Any]]]:
    """
    Validates types of user-provided "named aggregation" kwargs.
    `TypeError` is raised if aggfunc is not `str` or callable.

    Parameters
    ----------
    kwargs : dict

    Returns
    -------
    columns : List[str]
        List of user-provided keys.
    func : List[Union[str, callable[...,Any]]]
        List of user-provided aggfuncs

    Examples
    --------
    >>> validate_func_kwargs({'one': 'min', 'two': 'max'})
    (['one', 'two'], ['min', 'max'])
    """
    if not kwargs:
        raise TypeError("Must provide 'func' or named aggregation **kwargs.")

    columns: list[str] = []
    funcs: list[str | Callable[..., Any]] = []
    for name, aggfunc in kwargs.items():
        if not (isinstance(aggfunc, str) or callable(aggfunc)):
            raise TypeError(
                f"func is expected but received {type(aggfunc).__name__} in **kwargs."
            )
        columns.append(name)
        funcs.append(aggfunc)
    return columns, funcs
2037
+
2038
+
2039
def include_axis(op_name: Literal["agg", "apply"], colg: Series | DataFrame) -> bool:
    """Whether an ``axis`` argument should be forwarded for this operation.

    DataFrames always take it; Series only for "agg".
    """
    if isinstance(colg, ABCDataFrame):
        return True
    return isinstance(colg, ABCSeries) and op_name == "agg"
2043
+
2044
+
2045
def warn_alias_replacement(
    obj: AggObjType,
    func: Callable,
    alias: str,
) -> None:
    """Emit a FutureWarning telling the user to pass the string ``alias``
    instead of the callable ``func`` (e.g. "sum" instead of ``np.sum``)."""
    is_np_alias = alias.startswith("np.")
    full_alias = alias if is_np_alias else f"{type(obj).__name__}.{alias}"
    suggestion = alias if is_np_alias else f'"{alias}"'
    msg = (
        f"The provided callable {func} is currently using "
        f"{full_alias}. In a future version of pandas, "
        f"the provided callable will be used directly. To keep current "
        f"behavior pass the string {suggestion} instead."
    )
    warnings.warn(msg, category=FutureWarning, stacklevel=find_stack_level())
2062
+ )
llava_next/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ masked_accumulations.py is for accumulation algorithms using a mask-based approach
3
+ for missing values.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ Callable,
11
+ )
12
+
13
+ import numpy as np
14
+
15
+ if TYPE_CHECKING:
16
+ from pandas._typing import npt
17
+
18
+
19
+ def _cum_func(
20
+ func: Callable,
21
+ values: np.ndarray,
22
+ mask: npt.NDArray[np.bool_],
23
+ *,
24
+ skipna: bool = True,
25
+ ):
26
+ """
27
+ Accumulations for 1D masked array.
28
+
29
+ We will modify values in place to replace NAs with the appropriate fill value.
30
+
31
+ Parameters
32
+ ----------
33
+ func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate
34
+ values : np.ndarray
35
+ Numpy array with the values (can be of any dtype that support the
36
+ operation).
37
+ mask : np.ndarray
38
+ Boolean numpy array (True values indicate missing values).
39
+ skipna : bool, default True
40
+ Whether to skip NA.
41
+ """
42
+ dtype_info: np.iinfo | np.finfo
43
+ if values.dtype.kind == "f":
44
+ dtype_info = np.finfo(values.dtype.type)
45
+ elif values.dtype.kind in "iu":
46
+ dtype_info = np.iinfo(values.dtype.type)
47
+ elif values.dtype.kind == "b":
48
+ # Max value of bool is 1, but since we are setting into a boolean
49
+ # array, 255 is fine as well. Min value has to be 0 when setting
50
+ # into the boolean array.
51
+ dtype_info = np.iinfo(np.uint8)
52
+ else:
53
+ raise NotImplementedError(
54
+ f"No masked accumulation defined for dtype {values.dtype.type}"
55
+ )
56
+ try:
57
+ fill_value = {
58
+ np.cumprod: 1,
59
+ np.maximum.accumulate: dtype_info.min,
60
+ np.cumsum: 0,
61
+ np.minimum.accumulate: dtype_info.max,
62
+ }[func]
63
+ except KeyError:
64
+ raise NotImplementedError(
65
+ f"No accumulation for {func} implemented on BaseMaskedArray"
66
+ )
67
+
68
+ values[mask] = fill_value
69
+
70
+ if not skipna:
71
+ mask = np.maximum.accumulate(mask)
72
+
73
+ values = func(values)
74
+ return values, mask
75
+
76
+
77
def cumsum(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
    # Masked cumulative sum: NA slots are filled with the additive identity (0).
    return _cum_func(np.cumsum, values, mask, skipna=skipna)
79
+
80
+
81
def cumprod(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
    # Masked cumulative product: NA slots are filled with the multiplicative identity (1).
    return _cum_func(np.cumprod, values, mask, skipna=skipna)
83
+
84
+
85
def cummin(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
    # Masked cumulative minimum: NA slots are filled with the dtype's max value.
    return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna)
87
+
88
+
89
def cummax(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
    # Masked cumulative maximum: NA slots are filled with the dtype's min value.
    return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna)
llava_next/lib/python3.10/site-packages/pandas/core/array_algos/replace.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Methods used by Block.replace and related methods.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ import operator
7
+ import re
8
+ from re import Pattern
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Any,
12
+ )
13
+
14
+ import numpy as np
15
+
16
+ from pandas.core.dtypes.common import (
17
+ is_bool,
18
+ is_re,
19
+ is_re_compilable,
20
+ )
21
+ from pandas.core.dtypes.missing import isna
22
+
23
+ if TYPE_CHECKING:
24
+ from pandas._typing import (
25
+ ArrayLike,
26
+ Scalar,
27
+ npt,
28
+ )
29
+
30
+
31
def should_use_regex(regex: bool, to_replace: Any) -> bool:
    """
    Decide whether to treat `to_replace` as a regular expression.
    """
    # A pre-compiled pattern always implies regex matching, regardless of
    # the caller-supplied flag.
    use_regex = regex or is_re(to_replace)

    if not use_regex or not is_re_compilable(to_replace):
        return False

    # An empty pattern would match everywhere; treat it as a literal
    # (non-regex) replacement instead.
    return re.compile(to_replace).pattern != ""
43
+
44
+
45
def compare_or_regex_search(
    a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_]
) -> ArrayLike:
    """
    Compare two array-like inputs of the same shape or two scalar values

    Calls operator.eq or re.search, depending on regex argument. If regex is
    True, perform an element-wise regex matching.

    Parameters
    ----------
    a : array-like
    b : scalar or regex pattern
    regex : bool
    mask : np.ndarray[bool]
        True entries mark positions that participate in the comparison;
        False (masked-out) positions come back False in the result.

    Returns
    -------
    mask : array-like of bool
    """
    if isna(b):
        # Nothing compares equal to NA; only the masked-out positions
        # are reported as matches.
        return ~mask

    def _check_comparison_types(
        result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern
    ):
        """
        Raises an error if the two arrays (a,b) cannot be compared.
        Otherwise, returns the comparison result as expected.
        """
        # A scalar bool result from comparing an ndarray means numpy
        # refused to broadcast the comparison -> incompatible types.
        if is_bool(result) and isinstance(a, np.ndarray):
            type_names = [type(a).__name__, type(b).__name__]

            type_names[0] = f"ndarray(dtype={a.dtype})"

            raise TypeError(
                f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}"
            )

    if not regex or not should_use_regex(regex, b):
        # TODO: should use missing.mask_missing?
        op = lambda x: operator.eq(x, b)
    else:
        # Element-wise regex search; non-strings never match.
        op = np.vectorize(
            lambda x: bool(re.search(b, x))
            if isinstance(x, str) and isinstance(b, (str, Pattern))
            else False
        )

    # GH#32621 use mask to avoid comparing to NAs
    if isinstance(a, np.ndarray):
        a = a[mask]

    result = op(a)

    if isinstance(result, np.ndarray) and mask is not None:
        # The shape of the mask can differ to that of the result
        # since we may compare only a subset of a's or b's elements
        tmp = np.zeros(mask.shape, dtype=np.bool_)
        np.place(tmp, mask, result)
        result = tmp

    _check_comparison_types(result, a, b)
    return result
109
+
110
+
111
def replace_regex(
    values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None
) -> None:
    """
    Replace elements of ``values`` that match ``rx`` with ``value``, in place.

    Parameters
    ----------
    values : ArrayLike
        Object dtype.
    rx : re.Pattern
    value : Any
    mask : np.ndarray[bool], optional

    Notes
    -----
    Alters values in-place.
    """
    if isinstance(value, str) and not isna(value):
        # String replacement: substitute only the matched portion(s) of
        # each string element; non-strings pass through untouched.
        def _replace_one(s):
            if is_re(rx) and isinstance(s, str):
                return rx.sub(value, s)
            return s

    else:
        # Non-string (or NA) replacement: a match swaps out the whole
        # element, since partial substitution is meaningless here.
        def _replace_one(s):
            if is_re(rx) and isinstance(s, str) and rx.search(s) is not None:
                return value
            return s

    vectorized = np.vectorize(_replace_one, otypes=[np.object_])

    if mask is None:
        values[:] = vectorized(values)
    else:
        values[mask] = vectorized(values[mask])
llava_next/lib/python3.10/site-packages/pandas/core/arraylike.py ADDED
@@ -0,0 +1,530 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Methods that can be shared by many array-like classes or subclasses:
3
+ Series
4
+ Index
5
+ ExtensionArray
6
+ """
7
+ from __future__ import annotations
8
+
9
+ import operator
10
+ from typing import Any
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs import lib
15
+ from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
16
+
17
+ from pandas.core.dtypes.generic import ABCNDFrame
18
+
19
+ from pandas.core import roperator
20
+ from pandas.core.construction import extract_array
21
+ from pandas.core.ops.common import unpack_zerodim_and_defer
22
+
23
# Maps a numpy binary-ufunc name to the pandas reduction method invoked when
# that ufunc's ``reduce`` method is dispatched (e.g. ``np.add.reduce`` -> sum).
REDUCTION_ALIASES = {
    "maximum": "max",
    "minimum": "min",
    "add": "sum",
    "multiply": "prod",
}
29
+
30
+
31
class OpsMixin:
    """
    Mixin providing the full set of comparison, logical, and arithmetic
    dunder methods.

    Subclasses implement the actual semantics by overriding ``_cmp_method``,
    ``_logical_method``, and ``_arith_method``; every dunder here simply
    routes to one of those three hooks with the appropriate ``operator`` /
    ``roperator`` callable.  Each dunder is wrapped with
    ``unpack_zerodim_and_defer`` so that zero-dim ndarrays are unboxed and
    the operation defers to higher-priority pandas objects.
    """

    # -------------------------------------------------------------
    # Comparisons

    def _cmp_method(self, other, op):
        # Hook: subclasses override to implement comparisons.
        return NotImplemented

    @unpack_zerodim_and_defer("__eq__")
    def __eq__(self, other):
        return self._cmp_method(other, operator.eq)

    @unpack_zerodim_and_defer("__ne__")
    def __ne__(self, other):
        return self._cmp_method(other, operator.ne)

    @unpack_zerodim_and_defer("__lt__")
    def __lt__(self, other):
        return self._cmp_method(other, operator.lt)

    @unpack_zerodim_and_defer("__le__")
    def __le__(self, other):
        return self._cmp_method(other, operator.le)

    @unpack_zerodim_and_defer("__gt__")
    def __gt__(self, other):
        return self._cmp_method(other, operator.gt)

    @unpack_zerodim_and_defer("__ge__")
    def __ge__(self, other):
        return self._cmp_method(other, operator.ge)

    # -------------------------------------------------------------
    # Logical Methods

    def _logical_method(self, other, op):
        # Hook: subclasses override to implement &, |, ^.
        return NotImplemented

    @unpack_zerodim_and_defer("__and__")
    def __and__(self, other):
        return self._logical_method(other, operator.and_)

    @unpack_zerodim_and_defer("__rand__")
    def __rand__(self, other):
        return self._logical_method(other, roperator.rand_)

    @unpack_zerodim_and_defer("__or__")
    def __or__(self, other):
        return self._logical_method(other, operator.or_)

    @unpack_zerodim_and_defer("__ror__")
    def __ror__(self, other):
        return self._logical_method(other, roperator.ror_)

    @unpack_zerodim_and_defer("__xor__")
    def __xor__(self, other):
        return self._logical_method(other, operator.xor)

    @unpack_zerodim_and_defer("__rxor__")
    def __rxor__(self, other):
        return self._logical_method(other, roperator.rxor)

    # -------------------------------------------------------------
    # Arithmetic Methods

    def _arith_method(self, other, op):
        # Hook: subclasses override to implement +, -, *, /, //, %, **, divmod.
        return NotImplemented

    @unpack_zerodim_and_defer("__add__")
    def __add__(self, other):
        """
        Get Addition of DataFrame and other, column-wise.

        Equivalent to ``DataFrame.add(other)``.

        Parameters
        ----------
        other : scalar, sequence, Series, dict or DataFrame
            Object to be added to the DataFrame.

        Returns
        -------
        DataFrame
            The result of adding ``other`` to DataFrame.

        See Also
        --------
        DataFrame.add : Add a DataFrame and another object, with option for index-
            or column-oriented addition.

        Examples
        --------
        >>> df = pd.DataFrame({'height': [1.5, 2.6], 'weight': [500, 800]},
        ...                   index=['elk', 'moose'])
        >>> df
               height  weight
        elk       1.5     500
        moose     2.6     800

        Adding a scalar affects all rows and columns.

        >>> df[['height', 'weight']] + 1.5
               height  weight
        elk       3.0   501.5
        moose     4.1   801.5

        Each element of a list is added to a column of the DataFrame, in order.

        >>> df[['height', 'weight']] + [0.5, 1.5]
               height  weight
        elk       2.0   501.5
        moose     3.1   801.5

        Keys of a dictionary are aligned to the DataFrame, based on column names;
        each value in the dictionary is added to the corresponding column.

        >>> df[['height', 'weight']] + {'height': 0.5, 'weight': 1.5}
               height  weight
        elk       2.0   501.5
        moose     3.1   801.5

        When `other` is a :class:`Series`, the index of `other` is aligned with the
        columns of the DataFrame.

        >>> s1 = pd.Series([0.5, 1.5], index=['weight', 'height'])
        >>> df[['height', 'weight']] + s1
               height  weight
        elk       3.0   500.5
        moose     4.1   800.5

        Even when the index of `other` is the same as the index of the DataFrame,
        the :class:`Series` will not be reoriented. If index-wise alignment is desired,
        :meth:`DataFrame.add` should be used with `axis='index'`.

        >>> s2 = pd.Series([0.5, 1.5], index=['elk', 'moose'])
        >>> df[['height', 'weight']] + s2
               elk  height  moose  weight
        elk    NaN     NaN    NaN     NaN
        moose  NaN     NaN    NaN     NaN

        >>> df[['height', 'weight']].add(s2, axis='index')
               height  weight
        elk       2.0   500.5
        moose     4.1   801.5

        When `other` is a :class:`DataFrame`, both columns names and the
        index are aligned.

        >>> other = pd.DataFrame({'height': [0.2, 0.4, 0.6]},
        ...                      index=['elk', 'moose', 'deer'])
        >>> df[['height', 'weight']] + other
               height  weight
        deer      NaN     NaN
        elk       1.7     NaN
        moose     3.0     NaN
        """
        return self._arith_method(other, operator.add)

    @unpack_zerodim_and_defer("__radd__")
    def __radd__(self, other):
        return self._arith_method(other, roperator.radd)

    @unpack_zerodim_and_defer("__sub__")
    def __sub__(self, other):
        return self._arith_method(other, operator.sub)

    @unpack_zerodim_and_defer("__rsub__")
    def __rsub__(self, other):
        return self._arith_method(other, roperator.rsub)

    @unpack_zerodim_and_defer("__mul__")
    def __mul__(self, other):
        return self._arith_method(other, operator.mul)

    @unpack_zerodim_and_defer("__rmul__")
    def __rmul__(self, other):
        return self._arith_method(other, roperator.rmul)

    @unpack_zerodim_and_defer("__truediv__")
    def __truediv__(self, other):
        return self._arith_method(other, operator.truediv)

    @unpack_zerodim_and_defer("__rtruediv__")
    def __rtruediv__(self, other):
        return self._arith_method(other, roperator.rtruediv)

    @unpack_zerodim_and_defer("__floordiv__")
    def __floordiv__(self, other):
        return self._arith_method(other, operator.floordiv)

    # Previously "__rfloordiv" (missing trailing underscores); harmless at
    # runtime because the decorator strips leading/trailing "__", but fixed
    # here for consistency with every other dunder name in this class.
    @unpack_zerodim_and_defer("__rfloordiv__")
    def __rfloordiv__(self, other):
        return self._arith_method(other, roperator.rfloordiv)

    @unpack_zerodim_and_defer("__mod__")
    def __mod__(self, other):
        return self._arith_method(other, operator.mod)

    @unpack_zerodim_and_defer("__rmod__")
    def __rmod__(self, other):
        return self._arith_method(other, roperator.rmod)

    @unpack_zerodim_and_defer("__divmod__")
    def __divmod__(self, other):
        return self._arith_method(other, divmod)

    @unpack_zerodim_and_defer("__rdivmod__")
    def __rdivmod__(self, other):
        return self._arith_method(other, roperator.rdivmod)

    @unpack_zerodim_and_defer("__pow__")
    def __pow__(self, other):
        return self._arith_method(other, operator.pow)

    @unpack_zerodim_and_defer("__rpow__")
    def __rpow__(self, other):
        return self._arith_method(other, roperator.rpow)
247
+
248
+
249
+ # -----------------------------------------------------------------------------
250
+ # Helpers to implement __array_ufunc__
251
+
252
+
253
def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any):
    """
    Compatibility with numpy ufuncs.

    See also
    --------
    numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__
    """
    from pandas.core.frame import (
        DataFrame,
        Series,
    )
    from pandas.core.generic import NDFrame
    from pandas.core.internals import (
        ArrayManager,
        BlockManager,
    )

    cls = type(self)

    # Normalize out1=/out2= keywords into a single "out" tuple.
    kwargs = _standardize_out_kwarg(**kwargs)

    # for binary ops, use our custom dunder methods
    result = maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs)
    if result is not NotImplemented:
        return result

    # Determine if we should defer.
    no_defer = (
        np.ndarray.__array_ufunc__,
        cls.__array_ufunc__,
    )

    for item in inputs:
        higher_priority = (
            hasattr(item, "__array_priority__")
            and item.__array_priority__ > self.__array_priority__
        )
        has_array_ufunc = (
            hasattr(item, "__array_ufunc__")
            and type(item).__array_ufunc__ not in no_defer
            and not isinstance(item, self._HANDLED_TYPES)
        )
        if higher_priority or has_array_ufunc:
            # Another operand implements the protocol with precedence;
            # let numpy try its __array_ufunc__ instead.
            return NotImplemented

    # align all the inputs.
    types = tuple(type(x) for x in inputs)
    alignable = [x for x, t in zip(inputs, types) if issubclass(t, NDFrame)]

    if len(alignable) > 1:
        # This triggers alignment.
        # At the moment, there aren't any ufuncs with more than two inputs
        # so this ends up just being x1.index | x2.index, but we write
        # it to handle *args.
        set_types = set(types)
        if len(set_types) > 1 and {DataFrame, Series}.issubset(set_types):
            # We currently don't handle ufunc(DataFrame, Series)
            # well. Previously this raised an internal ValueError. We might
            # support it someday, so raise a NotImplementedError.
            raise NotImplementedError(
                f"Cannot apply ufunc {ufunc} to mixed DataFrame and Series inputs."
            )
        axes = self.axes
        for obj in alignable[1:]:
            # this relies on the fact that we aren't handling mixed
            # series / frame ufuncs.
            for i, (ax1, ax2) in enumerate(zip(axes, obj.axes)):
                axes[i] = ax1.union(ax2)

        reconstruct_axes = dict(zip(self._AXIS_ORDERS, axes))
        inputs = tuple(
            x.reindex(**reconstruct_axes) if issubclass(t, NDFrame) else x
            for x, t in zip(inputs, types)
        )
    else:
        reconstruct_axes = dict(zip(self._AXIS_ORDERS, self.axes))

    if self.ndim == 1:
        # Series: propagate the name only when every named input agrees.
        names = [getattr(x, "name") for x in inputs if hasattr(x, "name")]
        name = names[0] if len(set(names)) == 1 else None
        reconstruct_kwargs = {"name": name}
    else:
        reconstruct_kwargs = {}

    def reconstruct(result):
        # Re-wrap raw ufunc output(s) into pandas objects with the axes
        # computed above.
        if ufunc.nout > 1:
            # np.modf, np.frexp, np.divmod
            return tuple(_reconstruct(x) for x in result)

        return _reconstruct(result)

    def _reconstruct(result):
        if lib.is_scalar(result):
            return result

        if result.ndim != self.ndim:
            if method == "outer":
                raise NotImplementedError
            return result
        if isinstance(result, (BlockManager, ArrayManager)):
            # we went through BlockManager.apply e.g. np.sqrt
            result = self._constructor_from_mgr(result, axes=result.axes)
        else:
            # we converted an array, lost our axes
            result = self._constructor(
                result, **reconstruct_axes, **reconstruct_kwargs, copy=False
            )
        # TODO: When we support multiple values in __finalize__, this
        # should pass alignable to `__finalize__` instead of self.
        # Then `np.add(a, b)` would consider attrs from both a and b
        # when a and b are NDFrames.
        if len(alignable) == 1:
            result = result.__finalize__(self)
        return result

    if "out" in kwargs:
        # e.g. test_multiindex_get_loc
        result = dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs)
        return reconstruct(result)

    if method == "reduce":
        # e.g. test.series.test_ufunc.test_reduce
        result = dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs)
        if result is not NotImplemented:
            return result

    # We still get here with kwargs `axis` for e.g. np.maximum.accumulate
    # and `dtype` and `keepdims` for np.ptp

    if self.ndim > 1 and (len(inputs) > 1 or ufunc.nout > 1):
        # Just give up on preserving types in the complex case.
        # In theory we could preserve them for them.
        # * nout>1 is doable if BlockManager.apply took nout and
        #   returned a Tuple[BlockManager].
        # * len(inputs) > 1 is doable when we know that we have
        #   aligned blocks / dtypes.

        # e.g. my_ufunc, modf, logaddexp, heaviside, subtract, add
        inputs = tuple(np.asarray(x) for x in inputs)
        # Note: we can't use default_array_ufunc here bc reindexing means
        # that `self` may not be among `inputs`
        result = getattr(ufunc, method)(*inputs, **kwargs)
    elif self.ndim == 1:
        # ufunc(series, ...)
        inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
        result = getattr(ufunc, method)(*inputs, **kwargs)
    else:
        # ufunc(dataframe)
        if method == "__call__" and not kwargs:
            # for np.<ufunc>(..) calls
            # kwargs cannot necessarily be handled block-by-block, so only
            # take this path if there are no kwargs
            mgr = inputs[0]._mgr
            result = mgr.apply(getattr(ufunc, method))
        else:
            # otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..))
            # Those can have an axis keyword and thus can't be called block-by-block
            result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs)
            # e.g. np.negative (only one reached), with "where" and "out" in kwargs

    result = reconstruct(result)
    return result
416
+
417
+
418
+ def _standardize_out_kwarg(**kwargs) -> dict:
419
+ """
420
+ If kwargs contain "out1" and "out2", replace that with a tuple "out"
421
+
422
+ np.divmod, np.modf, np.frexp can have either `out=(out1, out2)` or
423
+ `out1=out1, out2=out2)`
424
+ """
425
+ if "out" not in kwargs and "out1" in kwargs and "out2" in kwargs:
426
+ out1 = kwargs.pop("out1")
427
+ out2 = kwargs.pop("out2")
428
+ out = (out1, out2)
429
+ kwargs["out"] = out
430
+ return kwargs
431
+
432
+
433
def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
    """
    If we have an `out` keyword, then call the ufunc without `out` and then
    set the result into the given `out`.
    """

    # Note: we assume _standardize_out_kwarg has already been called.
    out = kwargs.pop("out")
    where = kwargs.pop("where", None)

    result = getattr(ufunc, method)(*inputs, **kwargs)

    if result is NotImplemented:
        return NotImplemented

    if isinstance(result, tuple):
        # i.e. np.divmod, np.modf, np.frexp
        # Multi-output ufunc: `out` must be a matching tuple of buffers.
        if not isinstance(out, tuple) or len(out) != len(result):
            raise NotImplementedError

        for arr, res in zip(out, result):
            _assign_where(arr, res, where)

        return out

    if isinstance(out, tuple):
        # Single-output ufunc given a 1-tuple `out`: unwrap it; longer
        # tuples cannot receive a single result.
        if len(out) == 1:
            out = out[0]
        else:
            raise NotImplementedError

    _assign_where(out, result, where)
    return out
466
+
467
+
468
+ def _assign_where(out, result, where) -> None:
469
+ """
470
+ Set a ufunc result into 'out', masking with a 'where' argument if necessary.
471
+ """
472
+ if where is None:
473
+ # no 'where' arg passed to ufunc
474
+ out[:] = result
475
+ else:
476
+ np.putmask(out, where, result)
477
+
478
+
479
def default_array_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
    """
    Fallback to the behavior we would get if we did not define __array_ufunc__.

    ``self`` is swapped for a plain ndarray so numpy's default machinery
    takes over.

    Notes
    -----
    We are assuming that `self` is among `inputs`.
    """
    if not any(x is self for x in inputs):
        raise NotImplementedError

    replaced = tuple(np.asarray(x) if x is self else x for x in inputs)

    return getattr(ufunc, method)(*replaced, **kwargs)
493
+
494
+
495
def dispatch_reduction_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
    """
    Dispatch ufunc reductions to self's reduction methods.
    """
    assert method == "reduce"

    if len(inputs) != 1 or inputs[0] is not self:
        # Only the single-input ``ufunc.reduce(self)`` form is handled.
        return NotImplemented

    if ufunc.__name__ not in REDUCTION_ALIASES:
        return NotImplemented

    method_name = REDUCTION_ALIASES[ufunc.__name__]

    # NB: we are assuming that min/max represent minimum/maximum methods,
    # which would not be accurate for e.g. Timestamp.min
    if not hasattr(self, method_name):
        return NotImplemented

    if self.ndim > 1:
        if isinstance(self, ABCNDFrame):
            # TODO: test cases where this doesn't hold, i.e. 2D DTA/TDA
            kwargs["numeric_only"] = False

        if "axis" not in kwargs:
            # For DataFrame reductions we don't want the default axis=0
            # Note: np.min is not a ufunc, but uses array_function_dispatch,
            # so calls DataFrame.min (without ever getting here) with the np.min
            # default of axis=None, which DataFrame.min catches and changes to axis=0.
            # np.minimum.reduce(df) gets here bc axis is not in kwargs,
            # so we set axis=0 to match the behavior of np.minimum.reduce(df.values)
            kwargs["axis"] = 0

    # By default, numpy's reductions do not skip NaNs, so we have to
    # pass skipna=False
    return getattr(self, method_name)(skipna=False, **kwargs)
llava_next/lib/python3.10/site-packages/pandas/core/arrays/__init__.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pandas.core.arrays.arrow import ArrowExtensionArray
2
+ from pandas.core.arrays.base import (
3
+ ExtensionArray,
4
+ ExtensionOpsMixin,
5
+ ExtensionScalarOpsMixin,
6
+ )
7
+ from pandas.core.arrays.boolean import BooleanArray
8
+ from pandas.core.arrays.categorical import Categorical
9
+ from pandas.core.arrays.datetimes import DatetimeArray
10
+ from pandas.core.arrays.floating import FloatingArray
11
+ from pandas.core.arrays.integer import IntegerArray
12
+ from pandas.core.arrays.interval import IntervalArray
13
+ from pandas.core.arrays.masked import BaseMaskedArray
14
+ from pandas.core.arrays.numpy_ import NumpyExtensionArray
15
+ from pandas.core.arrays.period import (
16
+ PeriodArray,
17
+ period_array,
18
+ )
19
+ from pandas.core.arrays.sparse import SparseArray
20
+ from pandas.core.arrays.string_ import StringArray
21
+ from pandas.core.arrays.string_arrow import ArrowStringArray
22
+ from pandas.core.arrays.timedeltas import TimedeltaArray
23
+
24
# Public names re-exported at the ``pandas.core.arrays`` level; keep in sync
# with the import statements above.
__all__ = [
    "ArrowExtensionArray",
    "ExtensionArray",
    "ExtensionOpsMixin",
    "ExtensionScalarOpsMixin",
    "ArrowStringArray",
    "BaseMaskedArray",
    "BooleanArray",
    "Categorical",
    "DatetimeArray",
    "FloatingArray",
    "IntegerArray",
    "IntervalArray",
    "NumpyExtensionArray",
    "PeriodArray",
    "period_array",
    "SparseArray",
    "StringArray",
    "TimedeltaArray",
]
llava_next/lib/python3.10/site-packages/pandas/core/arrays/_arrow_string_mixins.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Literal
4
+
5
+ import numpy as np
6
+
7
+ from pandas.compat import pa_version_under10p1
8
+
9
+ if not pa_version_under10p1:
10
+ import pyarrow as pa
11
+ import pyarrow.compute as pc
12
+
13
+
14
class ArrowStringArrayMixin:
    """
    Mixin of string methods implemented via pyarrow compute kernels.

    Subclasses must provide ``_pa_array`` (a pyarrow array/chunked array of
    strings) and a constructor accepting the pyarrow result; this mixin
    itself cannot be instantiated.
    """

    # Backing pyarrow data; set by the concrete subclass.
    _pa_array = None

    def __init__(self, *args, **kwargs) -> None:
        # Abstract: concrete array classes supply their own constructor.
        raise NotImplementedError

    def _str_pad(
        self,
        width: int,
        side: Literal["left", "right", "both"] = "left",
        fillchar: str = " ",
    ):
        """Pad strings to ``width`` with ``fillchar`` on the given side."""
        if side == "left":
            pa_pad = pc.utf8_lpad
        elif side == "right":
            pa_pad = pc.utf8_rpad
        elif side == "both":
            pa_pad = pc.utf8_center
        else:
            raise ValueError(
                f"Invalid side: {side}. Side must be one of 'left', 'right', 'both'"
            )
        return type(self)(pa_pad(self._pa_array, width=width, padding=fillchar))

    def _str_get(self, i: int):
        """
        Return the character at position ``i`` of each string, or null when
        ``i`` is out of bounds for that string.
        """
        lengths = pc.utf8_length(self._pa_array)
        if i >= 0:
            out_of_bounds = pc.greater_equal(i, lengths)
            start = i
            stop = i + 1
            step = 1
        else:
            # Negative index: slice backwards one character starting at i.
            out_of_bounds = pc.greater(-i, lengths)
            start = i
            stop = i - 1
            step = -1
        # Null lengths count as out-of-bounds so the result stays null.
        not_out_of_bounds = pc.invert(out_of_bounds.fill_null(True))
        selected = pc.utf8_slice_codeunits(
            self._pa_array, start=start, stop=stop, step=step
        )
        null_value = pa.scalar(
            None, type=self._pa_array.type  # type: ignore[attr-defined]
        )
        result = pc.if_else(not_out_of_bounds, selected, null_value)
        return type(self)(result)

    def _str_slice_replace(
        self, start: int | None = None, stop: int | None = None, repl: str | None = None
    ):
        """Replace the slice ``[start:stop]`` of each string with ``repl``."""
        if repl is None:
            repl = ""
        if start is None:
            start = 0
        if stop is None:
            # "to the end of the string" for the pyarrow kernel.
            stop = np.iinfo(np.int64).max
        return type(self)(pc.utf8_replace_slice(self._pa_array, start, stop, repl))

    def _str_capitalize(self):
        """Uppercase the first character, lowercase the rest."""
        return type(self)(pc.utf8_capitalize(self._pa_array))

    def _str_title(self):
        """Titlecase each string."""
        return type(self)(pc.utf8_title(self._pa_array))

    def _str_swapcase(self):
        """Swap the case of every character."""
        return type(self)(pc.utf8_swapcase(self._pa_array))

    def _str_removesuffix(self, suffix: str):
        """
        Remove ``suffix`` from the end of each string that has it,
        mirroring :meth:`str.removesuffix`.
        """
        if not suffix:
            # Mirror str.removesuffix(""): nothing to strip. Without this
            # guard, stop=-len("") == 0 below would slice every matching
            # value (ends_with("") is True everywhere) down to "".
            return type(self)(self._pa_array)
        ends_with = pc.ends_with(self._pa_array, pattern=suffix)
        removed = pc.utf8_slice_codeunits(self._pa_array, 0, stop=-len(suffix))
        result = pc.if_else(ends_with, removed, self._pa_array)
        return type(self)(result)
llava_next/lib/python3.10/site-packages/pandas/core/arrays/_mixins.py ADDED
@@ -0,0 +1,547 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from functools import wraps
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Any,
7
+ Literal,
8
+ cast,
9
+ overload,
10
+ )
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs import lib
15
+ from pandas._libs.arrays import NDArrayBacked
16
+ from pandas._libs.tslibs import is_supported_dtype
17
+ from pandas._typing import (
18
+ ArrayLike,
19
+ AxisInt,
20
+ Dtype,
21
+ F,
22
+ FillnaOptions,
23
+ PositionalIndexer2D,
24
+ PositionalIndexerTuple,
25
+ ScalarIndexer,
26
+ Self,
27
+ SequenceIndexer,
28
+ Shape,
29
+ TakeIndexer,
30
+ npt,
31
+ )
32
+ from pandas.errors import AbstractMethodError
33
+ from pandas.util._decorators import doc
34
+ from pandas.util._validators import (
35
+ validate_bool_kwarg,
36
+ validate_fillna_kwargs,
37
+ validate_insert_loc,
38
+ )
39
+
40
+ from pandas.core.dtypes.common import pandas_dtype
41
+ from pandas.core.dtypes.dtypes import (
42
+ DatetimeTZDtype,
43
+ ExtensionDtype,
44
+ PeriodDtype,
45
+ )
46
+ from pandas.core.dtypes.missing import array_equivalent
47
+
48
+ from pandas.core import missing
49
+ from pandas.core.algorithms import (
50
+ take,
51
+ unique,
52
+ value_counts_internal as value_counts,
53
+ )
54
+ from pandas.core.array_algos.quantile import quantile_with_mask
55
+ from pandas.core.array_algos.transforms import shift
56
+ from pandas.core.arrays.base import ExtensionArray
57
+ from pandas.core.construction import extract_array
58
+ from pandas.core.indexers import check_array_indexer
59
+ from pandas.core.sorting import nargminmax
60
+
61
+ if TYPE_CHECKING:
62
+ from collections.abc import Sequence
63
+
64
+ from pandas._typing import (
65
+ NumpySorter,
66
+ NumpyValueArrayLike,
67
+ )
68
+
69
+ from pandas import Series
70
+
71
+
72
def ravel_compat(meth: F) -> F:
    """
    Decorator to ravel a 2D array before passing it to a cython operation,
    then reshape the result to our own shape.
    """

    @wraps(meth)
    def wrapper(self, *args, **kwargs):
        # 1-D arrays need no flattening; call straight through.
        if self.ndim == 1:
            return meth(self, *args, **kwargs)

        # Remember contiguity before flattening so the original memory
        # order can be restored on the way out.
        was_fortran = self._ndarray.flags.f_contiguous
        raveled = self.ravel("K")
        result = meth(raveled, *args, **kwargs)
        return result.reshape(self.shape, order="F" if was_fortran else "C")

    return cast(F, wrapper)
90
+
91
+
92
+ class NDArrayBackedExtensionArray(NDArrayBacked, ExtensionArray):
93
+ """
94
+ ExtensionArray that is backed by a single NumPy ndarray.
95
+ """
96
+
97
+ _ndarray: np.ndarray
98
+
99
+ # scalar used to denote NA value inside our self._ndarray, e.g. -1
100
+ # for Categorical, iNaT for Period. Outside of object dtype,
101
+ # self.isna() should be exactly locations in self._ndarray with
102
+ # _internal_fill_value.
103
+ _internal_fill_value: Any
104
+
105
+ def _box_func(self, x):
106
+ """
107
+ Wrap numpy type in our dtype.type if necessary.
108
+ """
109
+ return x
110
+
111
+ def _validate_scalar(self, value):
112
+ # used by NDArrayBackedExtensionIndex.insert
113
+ raise AbstractMethodError(self)
114
+
115
+ # ------------------------------------------------------------------------
116
+
117
+ def view(self, dtype: Dtype | None = None) -> ArrayLike:
118
+ # We handle datetime64, datetime64tz, timedelta64, and period
119
+ # dtypes here. Everything else we pass through to the underlying
120
+ # ndarray.
121
+ if dtype is None or dtype is self.dtype:
122
+ return self._from_backing_data(self._ndarray)
123
+
124
+ if isinstance(dtype, type):
125
+ # we sometimes pass non-dtype objects, e.g np.ndarray;
126
+ # pass those through to the underlying ndarray
127
+ return self._ndarray.view(dtype)
128
+
129
+ dtype = pandas_dtype(dtype)
130
+ arr = self._ndarray
131
+
132
+ if isinstance(dtype, PeriodDtype):
133
+ cls = dtype.construct_array_type()
134
+ return cls(arr.view("i8"), dtype=dtype)
135
+ elif isinstance(dtype, DatetimeTZDtype):
136
+ dt_cls = dtype.construct_array_type()
137
+ dt64_values = arr.view(f"M8[{dtype.unit}]")
138
+ return dt_cls._simple_new(dt64_values, dtype=dtype)
139
+ elif lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype):
140
+ from pandas.core.arrays import DatetimeArray
141
+
142
+ dt64_values = arr.view(dtype)
143
+ return DatetimeArray._simple_new(dt64_values, dtype=dtype)
144
+
145
+ elif lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype):
146
+ from pandas.core.arrays import TimedeltaArray
147
+
148
+ td64_values = arr.view(dtype)
149
+ return TimedeltaArray._simple_new(td64_values, dtype=dtype)
150
+
151
+ # error: Argument "dtype" to "view" of "_ArrayOrScalarCommon" has incompatible
152
+ # type "Union[ExtensionDtype, dtype[Any]]"; expected "Union[dtype[Any], None,
153
+ # type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int,
154
+ # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
155
+ return arr.view(dtype=dtype) # type: ignore[arg-type]
156
+
157
+ def take(
158
+ self,
159
+ indices: TakeIndexer,
160
+ *,
161
+ allow_fill: bool = False,
162
+ fill_value: Any = None,
163
+ axis: AxisInt = 0,
164
+ ) -> Self:
165
+ if allow_fill:
166
+ fill_value = self._validate_scalar(fill_value)
167
+
168
+ new_data = take(
169
+ self._ndarray,
170
+ indices,
171
+ allow_fill=allow_fill,
172
+ fill_value=fill_value,
173
+ axis=axis,
174
+ )
175
+ return self._from_backing_data(new_data)
176
+
177
+ # ------------------------------------------------------------------------
178
+
179
+ def equals(self, other) -> bool:
180
+ if type(self) is not type(other):
181
+ return False
182
+ if self.dtype != other.dtype:
183
+ return False
184
+ return bool(array_equivalent(self._ndarray, other._ndarray, dtype_equal=True))
185
+
186
+ @classmethod
187
+ def _from_factorized(cls, values, original):
188
+ assert values.dtype == original._ndarray.dtype
189
+ return original._from_backing_data(values)
190
+
191
+ def _values_for_argsort(self) -> np.ndarray:
192
+ return self._ndarray
193
+
194
+ def _values_for_factorize(self):
195
+ return self._ndarray, self._internal_fill_value
196
+
197
+ def _hash_pandas_object(
198
+ self, *, encoding: str, hash_key: str, categorize: bool
199
+ ) -> npt.NDArray[np.uint64]:
200
+ from pandas.core.util.hashing import hash_array
201
+
202
+ values = self._ndarray
203
+ return hash_array(
204
+ values, encoding=encoding, hash_key=hash_key, categorize=categorize
205
+ )
206
+
207
+ # Signature of "argmin" incompatible with supertype "ExtensionArray"
208
+ def argmin(self, axis: AxisInt = 0, skipna: bool = True): # type: ignore[override]
209
+ # override base class by adding axis keyword
210
+ validate_bool_kwarg(skipna, "skipna")
211
+ if not skipna and self._hasna:
212
+ raise NotImplementedError
213
+ return nargminmax(self, "argmin", axis=axis)
214
+
215
+ # Signature of "argmax" incompatible with supertype "ExtensionArray"
216
+ def argmax(self, axis: AxisInt = 0, skipna: bool = True): # type: ignore[override]
217
+ # override base class by adding axis keyword
218
+ validate_bool_kwarg(skipna, "skipna")
219
+ if not skipna and self._hasna:
220
+ raise NotImplementedError
221
+ return nargminmax(self, "argmax", axis=axis)
222
+
223
+ def unique(self) -> Self:
224
+ new_data = unique(self._ndarray)
225
+ return self._from_backing_data(new_data)
226
+
227
+ @classmethod
228
+ @doc(ExtensionArray._concat_same_type)
229
+ def _concat_same_type(
230
+ cls,
231
+ to_concat: Sequence[Self],
232
+ axis: AxisInt = 0,
233
+ ) -> Self:
234
+ if not lib.dtypes_all_equal([x.dtype for x in to_concat]):
235
+ dtypes = {str(x.dtype) for x in to_concat}
236
+ raise ValueError("to_concat must have the same dtype", dtypes)
237
+
238
+ return super()._concat_same_type(to_concat, axis=axis)
239
+
240
+ @doc(ExtensionArray.searchsorted)
241
+ def searchsorted(
242
+ self,
243
+ value: NumpyValueArrayLike | ExtensionArray,
244
+ side: Literal["left", "right"] = "left",
245
+ sorter: NumpySorter | None = None,
246
+ ) -> npt.NDArray[np.intp] | np.intp:
247
+ npvalue = self._validate_setitem_value(value)
248
+ return self._ndarray.searchsorted(npvalue, side=side, sorter=sorter)
249
+
250
+ @doc(ExtensionArray.shift)
251
+ def shift(self, periods: int = 1, fill_value=None):
252
+ # NB: shift is always along axis=0
253
+ axis = 0
254
+ fill_value = self._validate_scalar(fill_value)
255
+ new_values = shift(self._ndarray, periods, axis, fill_value)
256
+
257
+ return self._from_backing_data(new_values)
258
+
259
+ def __setitem__(self, key, value) -> None:
260
+ key = check_array_indexer(self, key)
261
+ value = self._validate_setitem_value(value)
262
+ self._ndarray[key] = value
263
+
264
+ def _validate_setitem_value(self, value):
265
+ return value
266
+
267
+ @overload
268
+ def __getitem__(self, key: ScalarIndexer) -> Any:
269
+ ...
270
+
271
+ @overload
272
+ def __getitem__(
273
+ self,
274
+ key: SequenceIndexer | PositionalIndexerTuple,
275
+ ) -> Self:
276
+ ...
277
+
278
+ def __getitem__(
279
+ self,
280
+ key: PositionalIndexer2D,
281
+ ) -> Self | Any:
282
+ if lib.is_integer(key):
283
+ # fast-path
284
+ result = self._ndarray[key]
285
+ if self.ndim == 1:
286
+ return self._box_func(result)
287
+ return self._from_backing_data(result)
288
+
289
+ # error: Incompatible types in assignment (expression has type "ExtensionArray",
290
+ # variable has type "Union[int, slice, ndarray]")
291
+ key = extract_array(key, extract_numpy=True) # type: ignore[assignment]
292
+ key = check_array_indexer(self, key)
293
+ result = self._ndarray[key]
294
+ if lib.is_scalar(result):
295
+ return self._box_func(result)
296
+
297
+ result = self._from_backing_data(result)
298
+ return result
299
+
300
+ def _fill_mask_inplace(
301
+ self, method: str, limit: int | None, mask: npt.NDArray[np.bool_]
302
+ ) -> None:
303
+ # (for now) when self.ndim == 2, we assume axis=0
304
+ func = missing.get_fill_func(method, ndim=self.ndim)
305
+ func(self._ndarray.T, limit=limit, mask=mask.T)
306
+
307
+ def _pad_or_backfill(
308
+ self,
309
+ *,
310
+ method: FillnaOptions,
311
+ limit: int | None = None,
312
+ limit_area: Literal["inside", "outside"] | None = None,
313
+ copy: bool = True,
314
+ ) -> Self:
315
+ mask = self.isna()
316
+ if mask.any():
317
+ # (for now) when self.ndim == 2, we assume axis=0
318
+ func = missing.get_fill_func(method, ndim=self.ndim)
319
+
320
+ npvalues = self._ndarray.T
321
+ if copy:
322
+ npvalues = npvalues.copy()
323
+ func(npvalues, limit=limit, limit_area=limit_area, mask=mask.T)
324
+ npvalues = npvalues.T
325
+
326
+ if copy:
327
+ new_values = self._from_backing_data(npvalues)
328
+ else:
329
+ new_values = self
330
+
331
+ else:
332
+ if copy:
333
+ new_values = self.copy()
334
+ else:
335
+ new_values = self
336
+ return new_values
337
+
338
+ @doc(ExtensionArray.fillna)
339
+ def fillna(
340
+ self, value=None, method=None, limit: int | None = None, copy: bool = True
341
+ ) -> Self:
342
+ value, method = validate_fillna_kwargs(
343
+ value, method, validate_scalar_dict_value=False
344
+ )
345
+
346
+ mask = self.isna()
347
+ # error: Argument 2 to "check_value_size" has incompatible type
348
+ # "ExtensionArray"; expected "ndarray"
349
+ value = missing.check_value_size(
350
+ value, mask, len(self) # type: ignore[arg-type]
351
+ )
352
+
353
+ if mask.any():
354
+ if method is not None:
355
+ # (for now) when self.ndim == 2, we assume axis=0
356
+ func = missing.get_fill_func(method, ndim=self.ndim)
357
+ npvalues = self._ndarray.T
358
+ if copy:
359
+ npvalues = npvalues.copy()
360
+ func(npvalues, limit=limit, mask=mask.T)
361
+ npvalues = npvalues.T
362
+
363
+ # TODO: NumpyExtensionArray didn't used to copy, need tests
364
+ # for this
365
+ new_values = self._from_backing_data(npvalues)
366
+ else:
367
+ # fill with value
368
+ if copy:
369
+ new_values = self.copy()
370
+ else:
371
+ new_values = self[:]
372
+ new_values[mask] = value
373
+ else:
374
+ # We validate the fill_value even if there is nothing to fill
375
+ if value is not None:
376
+ self._validate_setitem_value(value)
377
+
378
+ if not copy:
379
+ new_values = self[:]
380
+ else:
381
+ new_values = self.copy()
382
+ return new_values
383
+
384
+ # ------------------------------------------------------------------------
385
+ # Reductions
386
+
387
+ def _wrap_reduction_result(self, axis: AxisInt | None, result):
388
+ if axis is None or self.ndim == 1:
389
+ return self._box_func(result)
390
+ return self._from_backing_data(result)
391
+
392
+ # ------------------------------------------------------------------------
393
+ # __array_function__ methods
394
+
395
+ def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
396
+ """
397
+ Analogue to np.putmask(self, mask, value)
398
+
399
+ Parameters
400
+ ----------
401
+ mask : np.ndarray[bool]
402
+ value : scalar or listlike
403
+
404
+ Raises
405
+ ------
406
+ TypeError
407
+ If value cannot be cast to self.dtype.
408
+ """
409
+ value = self._validate_setitem_value(value)
410
+
411
+ np.putmask(self._ndarray, mask, value)
412
+
413
+ def _where(self: Self, mask: npt.NDArray[np.bool_], value) -> Self:
414
+ """
415
+ Analogue to np.where(mask, self, value)
416
+
417
+ Parameters
418
+ ----------
419
+ mask : np.ndarray[bool]
420
+ value : scalar or listlike
421
+
422
+ Raises
423
+ ------
424
+ TypeError
425
+ If value cannot be cast to self.dtype.
426
+ """
427
+ value = self._validate_setitem_value(value)
428
+
429
+ res_values = np.where(mask, self._ndarray, value)
430
+ if res_values.dtype != self._ndarray.dtype:
431
+ raise AssertionError(
432
+ # GH#56410
433
+ "Something has gone wrong, please report a bug at "
434
+ "github.com/pandas-dev/pandas/"
435
+ )
436
+ return self._from_backing_data(res_values)
437
+
438
+ # ------------------------------------------------------------------------
439
+ # Index compat methods
440
+
441
+ def insert(self, loc: int, item) -> Self:
442
+ """
443
+ Make new ExtensionArray inserting new item at location. Follows
444
+ Python list.append semantics for negative values.
445
+
446
+ Parameters
447
+ ----------
448
+ loc : int
449
+ item : object
450
+
451
+ Returns
452
+ -------
453
+ type(self)
454
+ """
455
+ loc = validate_insert_loc(loc, len(self))
456
+
457
+ code = self._validate_scalar(item)
458
+
459
+ new_vals = np.concatenate(
460
+ (
461
+ self._ndarray[:loc],
462
+ np.asarray([code], dtype=self._ndarray.dtype),
463
+ self._ndarray[loc:],
464
+ )
465
+ )
466
+ return self._from_backing_data(new_vals)
467
+
468
+ # ------------------------------------------------------------------------
469
+ # Additional array methods
470
+ # These are not part of the EA API, but we implement them because
471
+ # pandas assumes they're there.
472
+
473
+ def value_counts(self, dropna: bool = True) -> Series:
474
+ """
475
+ Return a Series containing counts of unique values.
476
+
477
+ Parameters
478
+ ----------
479
+ dropna : bool, default True
480
+ Don't include counts of NA values.
481
+
482
+ Returns
483
+ -------
484
+ Series
485
+ """
486
+ if self.ndim != 1:
487
+ raise NotImplementedError
488
+
489
+ from pandas import (
490
+ Index,
491
+ Series,
492
+ )
493
+
494
+ if dropna:
495
+ # error: Unsupported operand type for ~ ("ExtensionArray")
496
+ values = self[~self.isna()]._ndarray # type: ignore[operator]
497
+ else:
498
+ values = self._ndarray
499
+
500
+ result = value_counts(values, sort=False, dropna=dropna)
501
+
502
+ index_arr = self._from_backing_data(np.asarray(result.index._data))
503
+ index = Index(index_arr, name=result.index.name)
504
+ return Series(result._values, index=index, name=result.name, copy=False)
505
+
506
+ def _quantile(
507
+ self,
508
+ qs: npt.NDArray[np.float64],
509
+ interpolation: str,
510
+ ) -> Self:
511
+ # TODO: disable for Categorical if not ordered?
512
+
513
+ mask = np.asarray(self.isna())
514
+ arr = self._ndarray
515
+ fill_value = self._internal_fill_value
516
+
517
+ res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation)
518
+
519
+ res_values = self._cast_quantile_result(res_values)
520
+ return self._from_backing_data(res_values)
521
+
522
+ # TODO: see if we can share this with other dispatch-wrapping methods
523
+ def _cast_quantile_result(self, res_values: np.ndarray) -> np.ndarray:
524
+ """
525
+ Cast the result of quantile_with_mask to an appropriate dtype
526
+ to pass to _from_backing_data in _quantile.
527
+ """
528
+ return res_values
529
+
530
+ # ------------------------------------------------------------------------
531
+ # numpy-like methods
532
+
533
+ @classmethod
534
+ def _empty(cls, shape: Shape, dtype: ExtensionDtype) -> Self:
535
+ """
536
+ Analogous to np.empty(shape, dtype=dtype)
537
+
538
+ Parameters
539
+ ----------
540
+ shape : tuple[int]
541
+ dtype : ExtensionDtype
542
+ """
543
+ # The base implementation uses a naive approach to find the dtype
544
+ # for the backing ndarray
545
+ arr = cls._from_sequence([], dtype=dtype)
546
+ backing = np.empty(shape, dtype=arr._ndarray.dtype)
547
+ return arr._from_backing_data(backing)
llava_next/lib/python3.10/site-packages/pandas/core/arrays/_ranges.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Helper functions to generate range-like data for DatetimeArray
3
+ (and possibly TimedeltaArray/PeriodArray)
4
+ """
5
+ from __future__ import annotations
6
+
7
+ from typing import TYPE_CHECKING
8
+
9
+ import numpy as np
10
+
11
+ from pandas._libs.lib import i8max
12
+ from pandas._libs.tslibs import (
13
+ BaseOffset,
14
+ OutOfBoundsDatetime,
15
+ Timedelta,
16
+ Timestamp,
17
+ iNaT,
18
+ )
19
+
20
+ if TYPE_CHECKING:
21
+ from pandas._typing import npt
22
+
23
+
24
+ def generate_regular_range(
25
+ start: Timestamp | Timedelta | None,
26
+ end: Timestamp | Timedelta | None,
27
+ periods: int | None,
28
+ freq: BaseOffset,
29
+ unit: str = "ns",
30
+ ) -> npt.NDArray[np.intp]:
31
+ """
32
+ Generate a range of dates or timestamps with the spans between dates
33
+ described by the given `freq` DateOffset.
34
+
35
+ Parameters
36
+ ----------
37
+ start : Timedelta, Timestamp or None
38
+ First point of produced date range.
39
+ end : Timedelta, Timestamp or None
40
+ Last point of produced date range.
41
+ periods : int or None
42
+ Number of periods in produced date range.
43
+ freq : Tick
44
+ Describes space between dates in produced date range.
45
+ unit : str, default "ns"
46
+ The resolution the output is meant to represent.
47
+
48
+ Returns
49
+ -------
50
+ ndarray[np.int64]
51
+ Representing the given resolution.
52
+ """
53
+ istart = start._value if start is not None else None
54
+ iend = end._value if end is not None else None
55
+ freq.nanos # raises if non-fixed frequency
56
+ td = Timedelta(freq)
57
+ b: int
58
+ e: int
59
+ try:
60
+ td = td.as_unit(unit, round_ok=False)
61
+ except ValueError as err:
62
+ raise ValueError(
63
+ f"freq={freq} is incompatible with unit={unit}. "
64
+ "Use a lower freq or a higher unit instead."
65
+ ) from err
66
+ stride = int(td._value)
67
+
68
+ if periods is None and istart is not None and iend is not None:
69
+ b = istart
70
+ # cannot just use e = Timestamp(end) + 1 because arange breaks when
71
+ # stride is too large, see GH10887
72
+ e = b + (iend - b) // stride * stride + stride // 2 + 1
73
+ elif istart is not None and periods is not None:
74
+ b = istart
75
+ e = _generate_range_overflow_safe(b, periods, stride, side="start")
76
+ elif iend is not None and periods is not None:
77
+ e = iend + stride
78
+ b = _generate_range_overflow_safe(e, periods, stride, side="end")
79
+ else:
80
+ raise ValueError(
81
+ "at least 'start' or 'end' should be specified if a 'period' is given."
82
+ )
83
+
84
+ with np.errstate(over="raise"):
85
+ # If the range is sufficiently large, np.arange may overflow
86
+ # and incorrectly return an empty array if not caught.
87
+ try:
88
+ values = np.arange(b, e, stride, dtype=np.int64)
89
+ except FloatingPointError:
90
+ xdr = [b]
91
+ while xdr[-1] != e:
92
+ xdr.append(xdr[-1] + stride)
93
+ values = np.array(xdr[:-1], dtype=np.int64)
94
+ return values
95
+
96
+
97
+ def _generate_range_overflow_safe(
98
+ endpoint: int, periods: int, stride: int, side: str = "start"
99
+ ) -> int:
100
+ """
101
+ Calculate the second endpoint for passing to np.arange, checking
102
+ to avoid an integer overflow. Catch OverflowError and re-raise
103
+ as OutOfBoundsDatetime.
104
+
105
+ Parameters
106
+ ----------
107
+ endpoint : int
108
+ nanosecond timestamp of the known endpoint of the desired range
109
+ periods : int
110
+ number of periods in the desired range
111
+ stride : int
112
+ nanoseconds between periods in the desired range
113
+ side : {'start', 'end'}
114
+ which end of the range `endpoint` refers to
115
+
116
+ Returns
117
+ -------
118
+ other_end : int
119
+
120
+ Raises
121
+ ------
122
+ OutOfBoundsDatetime
123
+ """
124
+ # GH#14187 raise instead of incorrectly wrapping around
125
+ assert side in ["start", "end"]
126
+
127
+ i64max = np.uint64(i8max)
128
+ msg = f"Cannot generate range with {side}={endpoint} and periods={periods}"
129
+
130
+ with np.errstate(over="raise"):
131
+ # if periods * strides cannot be multiplied within the *uint64* bounds,
132
+ # we cannot salvage the operation by recursing, so raise
133
+ try:
134
+ addend = np.uint64(periods) * np.uint64(np.abs(stride))
135
+ except FloatingPointError as err:
136
+ raise OutOfBoundsDatetime(msg) from err
137
+
138
+ if np.abs(addend) <= i64max:
139
+ # relatively easy case without casting concerns
140
+ return _generate_range_overflow_safe_signed(endpoint, periods, stride, side)
141
+
142
+ elif (endpoint > 0 and side == "start" and stride > 0) or (
143
+ endpoint < 0 < stride and side == "end"
144
+ ):
145
+ # no chance of not-overflowing
146
+ raise OutOfBoundsDatetime(msg)
147
+
148
+ elif side == "end" and endpoint - stride <= i64max < endpoint:
149
+ # in _generate_regular_range we added `stride` thereby overflowing
150
+ # the bounds. Adjust to fix this.
151
+ return _generate_range_overflow_safe(
152
+ endpoint - stride, periods - 1, stride, side
153
+ )
154
+
155
+ # split into smaller pieces
156
+ mid_periods = periods // 2
157
+ remaining = periods - mid_periods
158
+ assert 0 < remaining < periods, (remaining, periods, endpoint, stride)
159
+
160
+ midpoint = int(_generate_range_overflow_safe(endpoint, mid_periods, stride, side))
161
+ return _generate_range_overflow_safe(midpoint, remaining, stride, side)
162
+
163
+
164
+ def _generate_range_overflow_safe_signed(
165
+ endpoint: int, periods: int, stride: int, side: str
166
+ ) -> int:
167
+ """
168
+ A special case for _generate_range_overflow_safe where `periods * stride`
169
+ can be calculated without overflowing int64 bounds.
170
+ """
171
+ assert side in ["start", "end"]
172
+ if side == "end":
173
+ stride *= -1
174
+
175
+ with np.errstate(over="raise"):
176
+ addend = np.int64(periods) * np.int64(stride)
177
+ try:
178
+ # easy case with no overflows
179
+ result = np.int64(endpoint) + addend
180
+ if result == iNaT:
181
+ # Putting this into a DatetimeArray/TimedeltaArray
182
+ # would incorrectly be interpreted as NaT
183
+ raise OverflowError
184
+ return int(result)
185
+ except (FloatingPointError, OverflowError):
186
+ # with endpoint negative and addend positive we risk
187
+ # FloatingPointError; with reversed signed we risk OverflowError
188
+ pass
189
+
190
+ # if stride and endpoint had opposite signs, then endpoint + addend
191
+ # should never overflow. so they must have the same signs
192
+ assert (stride > 0 and endpoint >= 0) or (stride < 0 and endpoint <= 0)
193
+
194
+ if stride > 0:
195
+ # watch out for very special case in which we just slightly
196
+ # exceed implementation bounds, but when passing the result to
197
+ # np.arange will get a result slightly within the bounds
198
+
199
+ uresult = np.uint64(endpoint) + np.uint64(addend)
200
+ i64max = np.uint64(i8max)
201
+ assert uresult > i64max
202
+ if uresult <= i64max + np.uint64(stride):
203
+ return int(uresult)
204
+
205
+ raise OutOfBoundsDatetime(
206
+ f"Cannot generate range with {side}={endpoint} and periods={periods}"
207
+ )
llava_next/lib/python3.10/site-packages/pandas/core/arrays/_utils.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Any,
6
+ )
7
+
8
+ import numpy as np
9
+
10
+ from pandas._libs import lib
11
+ from pandas.errors import LossySetitemError
12
+
13
+ from pandas.core.dtypes.cast import np_can_hold_element
14
+ from pandas.core.dtypes.common import is_numeric_dtype
15
+
16
+ if TYPE_CHECKING:
17
+ from pandas._typing import (
18
+ ArrayLike,
19
+ npt,
20
+ )
21
+
22
+
23
+ def to_numpy_dtype_inference(
24
+ arr: ArrayLike, dtype: npt.DTypeLike | None, na_value, hasna: bool
25
+ ) -> tuple[npt.DTypeLike, Any]:
26
+ if dtype is None and is_numeric_dtype(arr.dtype):
27
+ dtype_given = False
28
+ if hasna:
29
+ if arr.dtype.kind == "b":
30
+ dtype = np.dtype(np.object_)
31
+ else:
32
+ if arr.dtype.kind in "iu":
33
+ dtype = np.dtype(np.float64)
34
+ else:
35
+ dtype = arr.dtype.numpy_dtype # type: ignore[union-attr]
36
+ if na_value is lib.no_default:
37
+ na_value = np.nan
38
+ else:
39
+ dtype = arr.dtype.numpy_dtype # type: ignore[union-attr]
40
+ elif dtype is not None:
41
+ dtype = np.dtype(dtype)
42
+ dtype_given = True
43
+ else:
44
+ dtype_given = True
45
+
46
+ if na_value is lib.no_default:
47
+ if dtype is None or not hasna:
48
+ na_value = arr.dtype.na_value
49
+ elif dtype.kind == "f": # type: ignore[union-attr]
50
+ na_value = np.nan
51
+ elif dtype.kind == "M": # type: ignore[union-attr]
52
+ na_value = np.datetime64("nat")
53
+ elif dtype.kind == "m": # type: ignore[union-attr]
54
+ na_value = np.timedelta64("nat")
55
+ else:
56
+ na_value = arr.dtype.na_value
57
+
58
+ if not dtype_given and hasna:
59
+ try:
60
+ np_can_hold_element(dtype, na_value) # type: ignore[arg-type]
61
+ except LossySetitemError:
62
+ dtype = np.dtype(np.object_)
63
+ return dtype, na_value
llava_next/lib/python3.10/site-packages/pandas/core/arrays/datetimelike.py ADDED
@@ -0,0 +1,2556 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from datetime import (
4
+ datetime,
5
+ timedelta,
6
+ )
7
+ from functools import wraps
8
+ import operator
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Any,
12
+ Callable,
13
+ Literal,
14
+ Union,
15
+ cast,
16
+ final,
17
+ overload,
18
+ )
19
+ import warnings
20
+
21
+ import numpy as np
22
+
23
+ from pandas._libs import (
24
+ algos,
25
+ lib,
26
+ )
27
+ from pandas._libs.arrays import NDArrayBacked
28
+ from pandas._libs.tslibs import (
29
+ BaseOffset,
30
+ IncompatibleFrequency,
31
+ NaT,
32
+ NaTType,
33
+ Period,
34
+ Resolution,
35
+ Tick,
36
+ Timedelta,
37
+ Timestamp,
38
+ add_overflowsafe,
39
+ astype_overflowsafe,
40
+ get_unit_from_dtype,
41
+ iNaT,
42
+ ints_to_pydatetime,
43
+ ints_to_pytimedelta,
44
+ periods_per_day,
45
+ to_offset,
46
+ )
47
+ from pandas._libs.tslibs.fields import (
48
+ RoundTo,
49
+ round_nsint64,
50
+ )
51
+ from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions
52
+ from pandas._libs.tslibs.timedeltas import get_unit_for_round
53
+ from pandas._libs.tslibs.timestamps import integer_op_not_supported
54
+ from pandas._typing import (
55
+ ArrayLike,
56
+ AxisInt,
57
+ DatetimeLikeScalar,
58
+ Dtype,
59
+ DtypeObj,
60
+ F,
61
+ InterpolateOptions,
62
+ NpDtype,
63
+ PositionalIndexer2D,
64
+ PositionalIndexerTuple,
65
+ ScalarIndexer,
66
+ Self,
67
+ SequenceIndexer,
68
+ TimeAmbiguous,
69
+ TimeNonexistent,
70
+ npt,
71
+ )
72
+ from pandas.compat.numpy import function as nv
73
+ from pandas.errors import (
74
+ AbstractMethodError,
75
+ InvalidComparison,
76
+ PerformanceWarning,
77
+ )
78
+ from pandas.util._decorators import (
79
+ Appender,
80
+ Substitution,
81
+ cache_readonly,
82
+ )
83
+ from pandas.util._exceptions import find_stack_level
84
+
85
+ from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
86
+ from pandas.core.dtypes.common import (
87
+ is_all_strings,
88
+ is_integer_dtype,
89
+ is_list_like,
90
+ is_object_dtype,
91
+ is_string_dtype,
92
+ pandas_dtype,
93
+ )
94
+ from pandas.core.dtypes.dtypes import (
95
+ ArrowDtype,
96
+ CategoricalDtype,
97
+ DatetimeTZDtype,
98
+ ExtensionDtype,
99
+ PeriodDtype,
100
+ )
101
+ from pandas.core.dtypes.generic import (
102
+ ABCCategorical,
103
+ ABCMultiIndex,
104
+ )
105
+ from pandas.core.dtypes.missing import (
106
+ is_valid_na_for_dtype,
107
+ isna,
108
+ )
109
+
110
+ from pandas.core import (
111
+ algorithms,
112
+ missing,
113
+ nanops,
114
+ ops,
115
+ )
116
+ from pandas.core.algorithms import (
117
+ isin,
118
+ map_array,
119
+ unique1d,
120
+ )
121
+ from pandas.core.array_algos import datetimelike_accumulations
122
+ from pandas.core.arraylike import OpsMixin
123
+ from pandas.core.arrays._mixins import (
124
+ NDArrayBackedExtensionArray,
125
+ ravel_compat,
126
+ )
127
+ from pandas.core.arrays.arrow.array import ArrowExtensionArray
128
+ from pandas.core.arrays.base import ExtensionArray
129
+ from pandas.core.arrays.integer import IntegerArray
130
+ import pandas.core.common as com
131
+ from pandas.core.construction import (
132
+ array as pd_array,
133
+ ensure_wrapped_if_datetimelike,
134
+ extract_array,
135
+ )
136
+ from pandas.core.indexers import (
137
+ check_array_indexer,
138
+ check_setitem_lengths,
139
+ )
140
+ from pandas.core.ops.common import unpack_zerodim_and_defer
141
+ from pandas.core.ops.invalid import (
142
+ invalid_comparison,
143
+ make_invalid_op,
144
+ )
145
+
146
+ from pandas.tseries import frequencies
147
+
148
+ if TYPE_CHECKING:
149
+ from collections.abc import (
150
+ Iterator,
151
+ Sequence,
152
+ )
153
+
154
+ from pandas import Index
155
+ from pandas.core.arrays import (
156
+ DatetimeArray,
157
+ PeriodArray,
158
+ TimedeltaArray,
159
+ )
160
+
161
+ DTScalarOrNaT = Union[DatetimeLikeScalar, NaTType]
162
+
163
+
164
+ def _make_unpacked_invalid_op(op_name: str):
165
+ op = make_invalid_op(op_name)
166
+ return unpack_zerodim_and_defer(op_name)(op)
167
+
168
+
169
+ def _period_dispatch(meth: F) -> F:
170
+ """
171
+ For PeriodArray methods, dispatch to DatetimeArray and re-wrap the results
172
+ in PeriodArray. We cannot use ._ndarray directly for the affected
173
+ methods because the i8 data has different semantics on NaT values.
174
+ """
175
+
176
+ @wraps(meth)
177
+ def new_meth(self, *args, **kwargs):
178
+ if not isinstance(self.dtype, PeriodDtype):
179
+ return meth(self, *args, **kwargs)
180
+
181
+ arr = self.view("M8[ns]")
182
+ result = meth(arr, *args, **kwargs)
183
+ if result is NaT:
184
+ return NaT
185
+ elif isinstance(result, Timestamp):
186
+ return self._box_func(result._value)
187
+
188
+ res_i8 = result.view("i8")
189
+ return self._from_backing_data(res_i8)
190
+
191
+ return cast(F, new_meth)
192
+
193
+
194
+ # error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
195
+ # incompatible with definition in base class "ExtensionArray"
196
+ class DatetimeLikeArrayMixin( # type: ignore[misc]
197
+ OpsMixin, NDArrayBackedExtensionArray
198
+ ):
199
+ """
200
+ Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
201
+
202
+ Assumes that __new__/__init__ defines:
203
+ _ndarray
204
+
205
+ and that inheriting subclass implements:
206
+ freq
207
+ """
208
+
209
+ # _infer_matches -> which infer_dtype strings are close enough to our own
210
+ _infer_matches: tuple[str, ...]
211
+ _is_recognized_dtype: Callable[[DtypeObj], bool]
212
+ _recognized_scalars: tuple[type, ...]
213
+ _ndarray: np.ndarray
214
+ freq: BaseOffset | None
215
+
216
+ @cache_readonly
217
+ def _can_hold_na(self) -> bool:
218
+ return True
219
+
220
+ def __init__(
221
+ self, data, dtype: Dtype | None = None, freq=None, copy: bool = False
222
+ ) -> None:
223
+ raise AbstractMethodError(self)
224
+
225
+ @property
226
+ def _scalar_type(self) -> type[DatetimeLikeScalar]:
227
+ """
228
+ The scalar associated with this datelike
229
+
230
+ * PeriodArray : Period
231
+ * DatetimeArray : Timestamp
232
+ * TimedeltaArray : Timedelta
233
+ """
234
+ raise AbstractMethodError(self)
235
+
236
+ def _scalar_from_string(self, value: str) -> DTScalarOrNaT:
237
+ """
238
+ Construct a scalar type from a string.
239
+
240
+ Parameters
241
+ ----------
242
+ value : str
243
+
244
+ Returns
245
+ -------
246
+ Period, Timestamp, or Timedelta, or NaT
247
+ Whatever the type of ``self._scalar_type`` is.
248
+
249
+ Notes
250
+ -----
251
+ This should call ``self._check_compatible_with`` before
252
+ unboxing the result.
253
+ """
254
+ raise AbstractMethodError(self)
255
+
256
+ def _unbox_scalar(
257
+ self, value: DTScalarOrNaT
258
+ ) -> np.int64 | np.datetime64 | np.timedelta64:
259
+ """
260
+ Unbox the integer value of a scalar `value`.
261
+
262
+ Parameters
263
+ ----------
264
+ value : Period, Timestamp, Timedelta, or NaT
265
+ Depending on subclass.
266
+
267
+ Returns
268
+ -------
269
+ int
270
+
271
+ Examples
272
+ --------
273
+ >>> arr = pd.array(np.array(['1970-01-01'], 'datetime64[ns]'))
274
+ >>> arr._unbox_scalar(arr[0])
275
+ numpy.datetime64('1970-01-01T00:00:00.000000000')
276
+ """
277
+ raise AbstractMethodError(self)
278
+
279
+ def _check_compatible_with(self, other: DTScalarOrNaT) -> None:
280
+ """
281
+ Verify that `self` and `other` are compatible.
282
+
283
+ * DatetimeArray verifies that the timezones (if any) match
284
+ * PeriodArray verifies that the freq matches
285
+ * Timedelta has no verification
286
+
287
+ In each case, NaT is considered compatible.
288
+
289
+ Parameters
290
+ ----------
291
+ other
292
+
293
+ Raises
294
+ ------
295
+ Exception
296
+ """
297
+ raise AbstractMethodError(self)
298
+
299
+ # ------------------------------------------------------------------
300
+
301
+ def _box_func(self, x):
302
+ """
303
+ box function to get object from internal representation
304
+ """
305
+ raise AbstractMethodError(self)
306
+
307
+ def _box_values(self, values) -> np.ndarray:
308
+ """
309
+ apply box func to passed values
310
+ """
311
+ return lib.map_infer(values, self._box_func, convert=False)
312
+
313
+ def __iter__(self) -> Iterator:
314
+ if self.ndim > 1:
315
+ return (self[n] for n in range(len(self)))
316
+ else:
317
+ return (self._box_func(v) for v in self.asi8)
318
+
319
+ @property
320
+ def asi8(self) -> npt.NDArray[np.int64]:
321
+ """
322
+ Integer representation of the values.
323
+
324
+ Returns
325
+ -------
326
+ ndarray
327
+ An ndarray with int64 dtype.
328
+ """
329
+ # do not cache or you'll create a memory leak
330
+ return self._ndarray.view("i8")
331
+
332
+ # ----------------------------------------------------------------
333
+ # Rendering Methods
334
+
335
+ def _format_native_types(
336
+ self, *, na_rep: str | float = "NaT", date_format=None
337
+ ) -> npt.NDArray[np.object_]:
338
+ """
339
+ Helper method for astype when converting to strings.
340
+
341
+ Returns
342
+ -------
343
+ ndarray[str]
344
+ """
345
+ raise AbstractMethodError(self)
346
+
347
+ def _formatter(self, boxed: bool = False):
348
+ # TODO: Remove Datetime & DatetimeTZ formatters.
349
+ return "'{}'".format
350
+
351
+ # ----------------------------------------------------------------
352
+ # Array-Like / EA-Interface Methods
353
+
354
+ def __array__(
355
+ self, dtype: NpDtype | None = None, copy: bool | None = None
356
+ ) -> np.ndarray:
357
+ # used for Timedelta/DatetimeArray, overwritten by PeriodArray
358
+ if is_object_dtype(dtype):
359
+ return np.array(list(self), dtype=object)
360
+ return self._ndarray
361
+
362
+ @overload
363
+ def __getitem__(self, item: ScalarIndexer) -> DTScalarOrNaT:
364
+ ...
365
+
366
+ @overload
367
+ def __getitem__(
368
+ self,
369
+ item: SequenceIndexer | PositionalIndexerTuple,
370
+ ) -> Self:
371
+ ...
372
+
373
+ def __getitem__(self, key: PositionalIndexer2D) -> Self | DTScalarOrNaT:
374
+ """
375
+ This getitem defers to the underlying array, which by-definition can
376
+ only handle list-likes, slices, and integer scalars
377
+ """
378
+ # Use cast as we know we will get back a DatetimeLikeArray or DTScalar,
379
+ # but skip evaluating the Union at runtime for performance
380
+ # (see https://github.com/pandas-dev/pandas/pull/44624)
381
+ result = cast("Union[Self, DTScalarOrNaT]", super().__getitem__(key))
382
+ if lib.is_scalar(result):
383
+ return result
384
+ else:
385
+ # At this point we know the result is an array.
386
+ result = cast(Self, result)
387
+ result._freq = self._get_getitem_freq(key)
388
+ return result
389
+
390
+ def _get_getitem_freq(self, key) -> BaseOffset | None:
391
+ """
392
+ Find the `freq` attribute to assign to the result of a __getitem__ lookup.
393
+ """
394
+ is_period = isinstance(self.dtype, PeriodDtype)
395
+ if is_period:
396
+ freq = self.freq
397
+ elif self.ndim != 1:
398
+ freq = None
399
+ else:
400
+ key = check_array_indexer(self, key) # maybe ndarray[bool] -> slice
401
+ freq = None
402
+ if isinstance(key, slice):
403
+ if self.freq is not None and key.step is not None:
404
+ freq = key.step * self.freq
405
+ else:
406
+ freq = self.freq
407
+ elif key is Ellipsis:
408
+ # GH#21282 indexing with Ellipsis is similar to a full slice,
409
+ # should preserve `freq` attribute
410
+ freq = self.freq
411
+ elif com.is_bool_indexer(key):
412
+ new_key = lib.maybe_booleans_to_slice(key.view(np.uint8))
413
+ if isinstance(new_key, slice):
414
+ return self._get_getitem_freq(new_key)
415
+ return freq
416
+
417
+ # error: Argument 1 of "__setitem__" is incompatible with supertype
418
+ # "ExtensionArray"; supertype defines the argument type as "Union[int,
419
+ # ndarray]"
420
+ def __setitem__(
421
+ self,
422
+ key: int | Sequence[int] | Sequence[bool] | slice,
423
+ value: NaTType | Any | Sequence[Any],
424
+ ) -> None:
425
+ # I'm fudging the types a bit here. "Any" above really depends
426
+ # on type(self). For PeriodArray, it's Period (or stuff coercible
427
+ # to a period in from_sequence). For DatetimeArray, it's Timestamp...
428
+ # I don't know if mypy can do that, possibly with Generics.
429
+ # https://mypy.readthedocs.io/en/latest/generics.html
430
+
431
+ no_op = check_setitem_lengths(key, value, self)
432
+
433
+ # Calling super() before the no_op short-circuit means that we raise
434
+ # on invalid 'value' even if this is a no-op, e.g. wrong-dtype empty array.
435
+ super().__setitem__(key, value)
436
+
437
+ if no_op:
438
+ return
439
+
440
+ self._maybe_clear_freq()
441
+
442
+ def _maybe_clear_freq(self) -> None:
443
+ # inplace operations like __setitem__ may invalidate the freq of
444
+ # DatetimeArray and TimedeltaArray
445
+ pass
446
+
447
+ def astype(self, dtype, copy: bool = True):
448
+ # Some notes on cases we don't have to handle here in the base class:
449
+ # 1. PeriodArray.astype handles period -> period
450
+ # 2. DatetimeArray.astype handles conversion between tz.
451
+ # 3. DatetimeArray.astype handles datetime -> period
452
+ dtype = pandas_dtype(dtype)
453
+
454
+ if dtype == object:
455
+ if self.dtype.kind == "M":
456
+ self = cast("DatetimeArray", self)
457
+ # *much* faster than self._box_values
458
+ # for e.g. test_get_loc_tuple_monotonic_above_size_cutoff
459
+ i8data = self.asi8
460
+ converted = ints_to_pydatetime(
461
+ i8data,
462
+ tz=self.tz,
463
+ box="timestamp",
464
+ reso=self._creso,
465
+ )
466
+ return converted
467
+
468
+ elif self.dtype.kind == "m":
469
+ return ints_to_pytimedelta(self._ndarray, box=True)
470
+
471
+ return self._box_values(self.asi8.ravel()).reshape(self.shape)
472
+
473
+ elif isinstance(dtype, ExtensionDtype):
474
+ return super().astype(dtype, copy=copy)
475
+ elif is_string_dtype(dtype):
476
+ return self._format_native_types()
477
+ elif dtype.kind in "iu":
478
+ # we deliberately ignore int32 vs. int64 here.
479
+ # See https://github.com/pandas-dev/pandas/issues/24381 for more.
480
+ values = self.asi8
481
+ if dtype != np.int64:
482
+ raise TypeError(
483
+ f"Converting from {self.dtype} to {dtype} is not supported. "
484
+ "Do obj.astype('int64').astype(dtype) instead"
485
+ )
486
+
487
+ if copy:
488
+ values = values.copy()
489
+ return values
490
+ elif (dtype.kind in "mM" and self.dtype != dtype) or dtype.kind == "f":
491
+ # disallow conversion between datetime/timedelta,
492
+ # and conversions for any datetimelike to float
493
+ msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
494
+ raise TypeError(msg)
495
+ else:
496
+ return np.asarray(self, dtype=dtype)
497
+
498
+ @overload
499
+ def view(self) -> Self:
500
+ ...
501
+
502
+ @overload
503
+ def view(self, dtype: Literal["M8[ns]"]) -> DatetimeArray:
504
+ ...
505
+
506
+ @overload
507
+ def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray:
508
+ ...
509
+
510
+ @overload
511
+ def view(self, dtype: Dtype | None = ...) -> ArrayLike:
512
+ ...
513
+
514
+ # pylint: disable-next=useless-parent-delegation
515
+ def view(self, dtype: Dtype | None = None) -> ArrayLike:
516
+ # we need to explicitly call super() method as long as the `@overload`s
517
+ # are present in this file.
518
+ return super().view(dtype)
519
+
520
+ # ------------------------------------------------------------------
521
+ # Validation Methods
522
+ # TODO: try to de-duplicate these, ensure identical behavior
523
+
524
+ def _validate_comparison_value(self, other):
525
+ if isinstance(other, str):
526
+ try:
527
+ # GH#18435 strings get a pass from tzawareness compat
528
+ other = self._scalar_from_string(other)
529
+ except (ValueError, IncompatibleFrequency):
530
+ # failed to parse as Timestamp/Timedelta/Period
531
+ raise InvalidComparison(other)
532
+
533
+ if isinstance(other, self._recognized_scalars) or other is NaT:
534
+ other = self._scalar_type(other)
535
+ try:
536
+ self._check_compatible_with(other)
537
+ except (TypeError, IncompatibleFrequency) as err:
538
+ # e.g. tzawareness mismatch
539
+ raise InvalidComparison(other) from err
540
+
541
+ elif not is_list_like(other):
542
+ raise InvalidComparison(other)
543
+
544
+ elif len(other) != len(self):
545
+ raise ValueError("Lengths must match")
546
+
547
+ else:
548
+ try:
549
+ other = self._validate_listlike(other, allow_object=True)
550
+ self._check_compatible_with(other)
551
+ except (TypeError, IncompatibleFrequency) as err:
552
+ if is_object_dtype(getattr(other, "dtype", None)):
553
+ # We will have to operate element-wise
554
+ pass
555
+ else:
556
+ raise InvalidComparison(other) from err
557
+
558
+ return other
559
+
560
+ def _validate_scalar(
561
+ self,
562
+ value,
563
+ *,
564
+ allow_listlike: bool = False,
565
+ unbox: bool = True,
566
+ ):
567
+ """
568
+ Validate that the input value can be cast to our scalar_type.
569
+
570
+ Parameters
571
+ ----------
572
+ value : object
573
+ allow_listlike: bool, default False
574
+ When raising an exception, whether the message should say
575
+ listlike inputs are allowed.
576
+ unbox : bool, default True
577
+ Whether to unbox the result before returning. Note: unbox=False
578
+ skips the setitem compatibility check.
579
+
580
+ Returns
581
+ -------
582
+ self._scalar_type or NaT
583
+ """
584
+ if isinstance(value, self._scalar_type):
585
+ pass
586
+
587
+ elif isinstance(value, str):
588
+ # NB: Careful about tzawareness
589
+ try:
590
+ value = self._scalar_from_string(value)
591
+ except ValueError as err:
592
+ msg = self._validation_error_message(value, allow_listlike)
593
+ raise TypeError(msg) from err
594
+
595
+ elif is_valid_na_for_dtype(value, self.dtype):
596
+ # GH#18295
597
+ value = NaT
598
+
599
+ elif isna(value):
600
+ # if we are dt64tz and value is dt64("NaT"), dont cast to NaT,
601
+ # or else we'll fail to raise in _unbox_scalar
602
+ msg = self._validation_error_message(value, allow_listlike)
603
+ raise TypeError(msg)
604
+
605
+ elif isinstance(value, self._recognized_scalars):
606
+ # error: Argument 1 to "Timestamp" has incompatible type "object"; expected
607
+ # "integer[Any] | float | str | date | datetime | datetime64"
608
+ value = self._scalar_type(value) # type: ignore[arg-type]
609
+
610
+ else:
611
+ msg = self._validation_error_message(value, allow_listlike)
612
+ raise TypeError(msg)
613
+
614
+ if not unbox:
615
+ # NB: In general NDArrayBackedExtensionArray will unbox here;
616
+ # this option exists to prevent a performance hit in
617
+ # TimedeltaIndex.get_loc
618
+ return value
619
+ return self._unbox_scalar(value)
620
+
621
+ def _validation_error_message(self, value, allow_listlike: bool = False) -> str:
622
+ """
623
+ Construct an exception message on validation error.
624
+
625
+ Some methods allow only scalar inputs, while others allow either scalar
626
+ or listlike.
627
+
628
+ Parameters
629
+ ----------
630
+ allow_listlike: bool, default False
631
+
632
+ Returns
633
+ -------
634
+ str
635
+ """
636
+ if hasattr(value, "dtype") and getattr(value, "ndim", 0) > 0:
637
+ msg_got = f"{value.dtype} array"
638
+ else:
639
+ msg_got = f"'{type(value).__name__}'"
640
+ if allow_listlike:
641
+ msg = (
642
+ f"value should be a '{self._scalar_type.__name__}', 'NaT', "
643
+ f"or array of those. Got {msg_got} instead."
644
+ )
645
+ else:
646
+ msg = (
647
+ f"value should be a '{self._scalar_type.__name__}' or 'NaT'. "
648
+ f"Got {msg_got} instead."
649
+ )
650
+ return msg
651
+
652
+ def _validate_listlike(self, value, allow_object: bool = False):
653
+ if isinstance(value, type(self)):
654
+ if self.dtype.kind in "mM" and not allow_object:
655
+ # error: "DatetimeLikeArrayMixin" has no attribute "as_unit"
656
+ value = value.as_unit(self.unit, round_ok=False) # type: ignore[attr-defined]
657
+ return value
658
+
659
+ if isinstance(value, list) and len(value) == 0:
660
+ # We treat empty list as our own dtype.
661
+ return type(self)._from_sequence([], dtype=self.dtype)
662
+
663
+ if hasattr(value, "dtype") and value.dtype == object:
664
+ # `array` below won't do inference if value is an Index or Series.
665
+ # so do so here. in the Index case, inferred_type may be cached.
666
+ if lib.infer_dtype(value) in self._infer_matches:
667
+ try:
668
+ value = type(self)._from_sequence(value)
669
+ except (ValueError, TypeError):
670
+ if allow_object:
671
+ return value
672
+ msg = self._validation_error_message(value, True)
673
+ raise TypeError(msg)
674
+
675
+ # Do type inference if necessary up front (after unpacking
676
+ # NumpyExtensionArray)
677
+ # e.g. we passed PeriodIndex.values and got an ndarray of Periods
678
+ value = extract_array(value, extract_numpy=True)
679
+ value = pd_array(value)
680
+ value = extract_array(value, extract_numpy=True)
681
+
682
+ if is_all_strings(value):
683
+ # We got a StringArray
684
+ try:
685
+ # TODO: Could use from_sequence_of_strings if implemented
686
+ # Note: passing dtype is necessary for PeriodArray tests
687
+ value = type(self)._from_sequence(value, dtype=self.dtype)
688
+ except ValueError:
689
+ pass
690
+
691
+ if isinstance(value.dtype, CategoricalDtype):
692
+ # e.g. we have a Categorical holding self.dtype
693
+ if value.categories.dtype == self.dtype:
694
+ # TODO: do we need equal dtype or just comparable?
695
+ value = value._internal_get_values()
696
+ value = extract_array(value, extract_numpy=True)
697
+
698
+ if allow_object and is_object_dtype(value.dtype):
699
+ pass
700
+
701
+ elif not type(self)._is_recognized_dtype(value.dtype):
702
+ msg = self._validation_error_message(value, True)
703
+ raise TypeError(msg)
704
+
705
+ if self.dtype.kind in "mM" and not allow_object:
706
+ # error: "DatetimeLikeArrayMixin" has no attribute "as_unit"
707
+ value = value.as_unit(self.unit, round_ok=False) # type: ignore[attr-defined]
708
+ return value
709
+
710
+ def _validate_setitem_value(self, value):
711
+ if is_list_like(value):
712
+ value = self._validate_listlike(value)
713
+ else:
714
+ return self._validate_scalar(value, allow_listlike=True)
715
+
716
+ return self._unbox(value)
717
+
718
+ @final
719
+ def _unbox(self, other) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray:
720
+ """
721
+ Unbox either a scalar with _unbox_scalar or an instance of our own type.
722
+ """
723
+ if lib.is_scalar(other):
724
+ other = self._unbox_scalar(other)
725
+ else:
726
+ # same type as self
727
+ self._check_compatible_with(other)
728
+ other = other._ndarray
729
+ return other
730
+
731
+ # ------------------------------------------------------------------
732
+ # Additional array methods
733
+ # These are not part of the EA API, but we implement them because
734
+ # pandas assumes they're there.
735
+
736
+ @ravel_compat
737
+ def map(self, mapper, na_action=None):
738
+ from pandas import Index
739
+
740
+ result = map_array(self, mapper, na_action=na_action)
741
+ result = Index(result)
742
+
743
+ if isinstance(result, ABCMultiIndex):
744
+ return result.to_numpy()
745
+ else:
746
+ return result.array
747
+
748
+ def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
749
+ """
750
+ Compute boolean array of whether each value is found in the
751
+ passed set of values.
752
+
753
+ Parameters
754
+ ----------
755
+ values : np.ndarray or ExtensionArray
756
+
757
+ Returns
758
+ -------
759
+ ndarray[bool]
760
+ """
761
+ if values.dtype.kind in "fiuc":
762
+ # TODO: de-duplicate with equals, validate_comparison_value
763
+ return np.zeros(self.shape, dtype=bool)
764
+
765
+ values = ensure_wrapped_if_datetimelike(values)
766
+
767
+ if not isinstance(values, type(self)):
768
+ inferable = [
769
+ "timedelta",
770
+ "timedelta64",
771
+ "datetime",
772
+ "datetime64",
773
+ "date",
774
+ "period",
775
+ ]
776
+ if values.dtype == object:
777
+ values = lib.maybe_convert_objects(
778
+ values, # type: ignore[arg-type]
779
+ convert_non_numeric=True,
780
+ dtype_if_all_nat=self.dtype,
781
+ )
782
+ if values.dtype != object:
783
+ return self.isin(values)
784
+
785
+ inferred = lib.infer_dtype(values, skipna=False)
786
+ if inferred not in inferable:
787
+ if inferred == "string":
788
+ pass
789
+
790
+ elif "mixed" in inferred:
791
+ return isin(self.astype(object), values)
792
+ else:
793
+ return np.zeros(self.shape, dtype=bool)
794
+
795
+ try:
796
+ values = type(self)._from_sequence(values)
797
+ except ValueError:
798
+ return isin(self.astype(object), values)
799
+ else:
800
+ warnings.warn(
801
+ # GH#53111
802
+ f"The behavior of 'isin' with dtype={self.dtype} and "
803
+ "castable values (e.g. strings) is deprecated. In a "
804
+ "future version, these will not be considered matching "
805
+ "by isin. Explicitly cast to the appropriate dtype before "
806
+ "calling isin instead.",
807
+ FutureWarning,
808
+ stacklevel=find_stack_level(),
809
+ )
810
+
811
+ if self.dtype.kind in "mM":
812
+ self = cast("DatetimeArray | TimedeltaArray", self)
813
+ # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"
814
+ # has no attribute "as_unit"
815
+ values = values.as_unit(self.unit) # type: ignore[union-attr]
816
+
817
+ try:
818
+ # error: Argument 1 to "_check_compatible_with" of "DatetimeLikeArrayMixin"
819
+ # has incompatible type "ExtensionArray | ndarray[Any, Any]"; expected
820
+ # "Period | Timestamp | Timedelta | NaTType"
821
+ self._check_compatible_with(values) # type: ignore[arg-type]
822
+ except (TypeError, ValueError):
823
+ # Includes tzawareness mismatch and IncompatibleFrequencyError
824
+ return np.zeros(self.shape, dtype=bool)
825
+
826
+ # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"
827
+ # has no attribute "asi8"
828
+ return isin(self.asi8, values.asi8) # type: ignore[union-attr]
829
+
830
+ # ------------------------------------------------------------------
831
+ # Null Handling
832
+
833
+ def isna(self) -> npt.NDArray[np.bool_]:
834
+ return self._isnan
835
+
836
+ @property # NB: override with cache_readonly in immutable subclasses
837
+ def _isnan(self) -> npt.NDArray[np.bool_]:
838
+ """
839
+ return if each value is nan
840
+ """
841
+ return self.asi8 == iNaT
842
+
843
+ @property # NB: override with cache_readonly in immutable subclasses
844
+ def _hasna(self) -> bool:
845
+ """
846
+ return if I have any nans; enables various perf speedups
847
+ """
848
+ return bool(self._isnan.any())
849
+
850
+ def _maybe_mask_results(
851
+ self, result: np.ndarray, fill_value=iNaT, convert=None
852
+ ) -> np.ndarray:
853
+ """
854
+ Parameters
855
+ ----------
856
+ result : np.ndarray
857
+ fill_value : object, default iNaT
858
+ convert : str, dtype or None
859
+
860
+ Returns
861
+ -------
862
+ result : ndarray with values replace by the fill_value
863
+
864
+ mask the result if needed, convert to the provided dtype if its not
865
+ None
866
+
867
+ This is an internal routine.
868
+ """
869
+ if self._hasna:
870
+ if convert:
871
+ result = result.astype(convert)
872
+ if fill_value is None:
873
+ fill_value = np.nan
874
+ np.putmask(result, self._isnan, fill_value)
875
+ return result
876
+
877
+ # ------------------------------------------------------------------
878
+ # Frequency Properties/Methods
879
+
880
+ @property
881
+ def freqstr(self) -> str | None:
882
+ """
883
+ Return the frequency object as a string if it's set, otherwise None.
884
+
885
+ Examples
886
+ --------
887
+ For DatetimeIndex:
888
+
889
+ >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00"], freq="D")
890
+ >>> idx.freqstr
891
+ 'D'
892
+
893
+ The frequency can be inferred if there are more than 2 points:
894
+
895
+ >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"],
896
+ ... freq="infer")
897
+ >>> idx.freqstr
898
+ '2D'
899
+
900
+ For PeriodIndex:
901
+
902
+ >>> idx = pd.PeriodIndex(["2023-1", "2023-2", "2023-3"], freq="M")
903
+ >>> idx.freqstr
904
+ 'M'
905
+ """
906
+ if self.freq is None:
907
+ return None
908
+ return self.freq.freqstr
909
+
910
+ @property # NB: override with cache_readonly in immutable subclasses
911
+ def inferred_freq(self) -> str | None:
912
+ """
913
+ Tries to return a string representing a frequency generated by infer_freq.
914
+
915
+ Returns None if it can't autodetect the frequency.
916
+
917
+ Examples
918
+ --------
919
+ For DatetimeIndex:
920
+
921
+ >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"])
922
+ >>> idx.inferred_freq
923
+ '2D'
924
+
925
+ For TimedeltaIndex:
926
+
927
+ >>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"])
928
+ >>> tdelta_idx
929
+ TimedeltaIndex(['0 days', '10 days', '20 days'],
930
+ dtype='timedelta64[ns]', freq=None)
931
+ >>> tdelta_idx.inferred_freq
932
+ '10D'
933
+ """
934
+ if self.ndim != 1:
935
+ return None
936
+ try:
937
+ return frequencies.infer_freq(self)
938
+ except ValueError:
939
+ return None
940
+
941
+ @property # NB: override with cache_readonly in immutable subclasses
942
+ def _resolution_obj(self) -> Resolution | None:
943
+ freqstr = self.freqstr
944
+ if freqstr is None:
945
+ return None
946
+ try:
947
+ return Resolution.get_reso_from_freqstr(freqstr)
948
+ except KeyError:
949
+ return None
950
+
951
+ @property # NB: override with cache_readonly in immutable subclasses
952
+ def resolution(self) -> str:
953
+ """
954
+ Returns day, hour, minute, second, millisecond or microsecond
955
+ """
956
+ # error: Item "None" of "Optional[Any]" has no attribute "attrname"
957
+ return self._resolution_obj.attrname # type: ignore[union-attr]
958
+
959
+ # monotonicity/uniqueness properties are called via frequencies.infer_freq,
960
+ # see GH#23789
961
+
962
+ @property
963
+ def _is_monotonic_increasing(self) -> bool:
964
+ return algos.is_monotonic(self.asi8, timelike=True)[0]
965
+
966
+ @property
967
+ def _is_monotonic_decreasing(self) -> bool:
968
+ return algos.is_monotonic(self.asi8, timelike=True)[1]
969
+
970
+ @property
971
+ def _is_unique(self) -> bool:
972
+ return len(unique1d(self.asi8.ravel("K"))) == self.size
973
+
974
+ # ------------------------------------------------------------------
975
+ # Arithmetic Methods
976
+
977
+ def _cmp_method(self, other, op):
978
+ if self.ndim > 1 and getattr(other, "shape", None) == self.shape:
979
+ # TODO: handle 2D-like listlikes
980
+ return op(self.ravel(), other.ravel()).reshape(self.shape)
981
+
982
+ try:
983
+ other = self._validate_comparison_value(other)
984
+ except InvalidComparison:
985
+ return invalid_comparison(self, other, op)
986
+
987
+ dtype = getattr(other, "dtype", None)
988
+ if is_object_dtype(dtype):
989
+ # We have to use comp_method_OBJECT_ARRAY instead of numpy
990
+ # comparison otherwise it would raise when comparing to None
991
+ result = ops.comp_method_OBJECT_ARRAY(
992
+ op, np.asarray(self.astype(object)), other
993
+ )
994
+ return result
995
+ if other is NaT:
996
+ if op is operator.ne:
997
+ result = np.ones(self.shape, dtype=bool)
998
+ else:
999
+ result = np.zeros(self.shape, dtype=bool)
1000
+ return result
1001
+
1002
+ if not isinstance(self.dtype, PeriodDtype):
1003
+ self = cast(TimelikeOps, self)
1004
+ if self._creso != other._creso:
1005
+ if not isinstance(other, type(self)):
1006
+ # i.e. Timedelta/Timestamp, cast to ndarray and let
1007
+ # compare_mismatched_resolutions handle broadcasting
1008
+ try:
1009
+ # GH#52080 see if we can losslessly cast to shared unit
1010
+ other = other.as_unit(self.unit, round_ok=False)
1011
+ except ValueError:
1012
+ other_arr = np.array(other.asm8)
1013
+ return compare_mismatched_resolutions(
1014
+ self._ndarray, other_arr, op
1015
+ )
1016
+ else:
1017
+ other_arr = other._ndarray
1018
+ return compare_mismatched_resolutions(self._ndarray, other_arr, op)
1019
+
1020
+ other_vals = self._unbox(other)
1021
+ # GH#37462 comparison on i8 values is almost 2x faster than M8/m8
1022
+ result = op(self._ndarray.view("i8"), other_vals.view("i8"))
1023
+
1024
+ o_mask = isna(other)
1025
+ mask = self._isnan | o_mask
1026
+ if mask.any():
1027
+ nat_result = op is operator.ne
1028
+ np.putmask(result, mask, nat_result)
1029
+
1030
+ return result
1031
+
1032
+ # pow is invalid for all three subclasses; TimedeltaArray will override
1033
+ # the multiplication and division ops
1034
+ __pow__ = _make_unpacked_invalid_op("__pow__")
1035
+ __rpow__ = _make_unpacked_invalid_op("__rpow__")
1036
+ __mul__ = _make_unpacked_invalid_op("__mul__")
1037
+ __rmul__ = _make_unpacked_invalid_op("__rmul__")
1038
+ __truediv__ = _make_unpacked_invalid_op("__truediv__")
1039
+ __rtruediv__ = _make_unpacked_invalid_op("__rtruediv__")
1040
+ __floordiv__ = _make_unpacked_invalid_op("__floordiv__")
1041
+ __rfloordiv__ = _make_unpacked_invalid_op("__rfloordiv__")
1042
+ __mod__ = _make_unpacked_invalid_op("__mod__")
1043
+ __rmod__ = _make_unpacked_invalid_op("__rmod__")
1044
+ __divmod__ = _make_unpacked_invalid_op("__divmod__")
1045
+ __rdivmod__ = _make_unpacked_invalid_op("__rdivmod__")
1046
+
1047
+ @final
1048
+ def _get_i8_values_and_mask(
1049
+ self, other
1050
+ ) -> tuple[int | npt.NDArray[np.int64], None | npt.NDArray[np.bool_]]:
1051
+ """
1052
+ Get the int64 values and b_mask to pass to add_overflowsafe.
1053
+ """
1054
+ if isinstance(other, Period):
1055
+ i8values = other.ordinal
1056
+ mask = None
1057
+ elif isinstance(other, (Timestamp, Timedelta)):
1058
+ i8values = other._value
1059
+ mask = None
1060
+ else:
1061
+ # PeriodArray, DatetimeArray, TimedeltaArray
1062
+ mask = other._isnan
1063
+ i8values = other.asi8
1064
+ return i8values, mask
1065
+
1066
+ @final
1067
+ def _get_arithmetic_result_freq(self, other) -> BaseOffset | None:
1068
+ """
1069
+ Check if we can preserve self.freq in addition or subtraction.
1070
+ """
1071
+ # Adding or subtracting a Timedelta/Timestamp scalar is freq-preserving
1072
+ # whenever self.freq is a Tick
1073
+ if isinstance(self.dtype, PeriodDtype):
1074
+ return self.freq
1075
+ elif not lib.is_scalar(other):
1076
+ return None
1077
+ elif isinstance(self.freq, Tick):
1078
+ # In these cases
1079
+ return self.freq
1080
+ return None
1081
+
1082
+ @final
1083
+ def _add_datetimelike_scalar(self, other) -> DatetimeArray:
1084
+ if not lib.is_np_dtype(self.dtype, "m"):
1085
+ raise TypeError(
1086
+ f"cannot add {type(self).__name__} and {type(other).__name__}"
1087
+ )
1088
+
1089
+ self = cast("TimedeltaArray", self)
1090
+
1091
+ from pandas.core.arrays import DatetimeArray
1092
+ from pandas.core.arrays.datetimes import tz_to_dtype
1093
+
1094
+ assert other is not NaT
1095
+ if isna(other):
1096
+ # i.e. np.datetime64("NaT")
1097
+ # In this case we specifically interpret NaT as a datetime, not
1098
+ # the timedelta interpretation we would get by returning self + NaT
1099
+ result = self._ndarray + NaT.to_datetime64().astype(f"M8[{self.unit}]")
1100
+ # Preserve our resolution
1101
+ return DatetimeArray._simple_new(result, dtype=result.dtype)
1102
+
1103
+ other = Timestamp(other)
1104
+ self, other = self._ensure_matching_resos(other)
1105
+ self = cast("TimedeltaArray", self)
1106
+
1107
+ other_i8, o_mask = self._get_i8_values_and_mask(other)
1108
+ result = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype="i8"))
1109
+ res_values = result.view(f"M8[{self.unit}]")
1110
+
1111
+ dtype = tz_to_dtype(tz=other.tz, unit=self.unit)
1112
+ res_values = result.view(f"M8[{self.unit}]")
1113
+ new_freq = self._get_arithmetic_result_freq(other)
1114
+ return DatetimeArray._simple_new(res_values, dtype=dtype, freq=new_freq)
1115
+
1116
+ @final
1117
+ def _add_datetime_arraylike(self, other: DatetimeArray) -> DatetimeArray:
1118
+ if not lib.is_np_dtype(self.dtype, "m"):
1119
+ raise TypeError(
1120
+ f"cannot add {type(self).__name__} and {type(other).__name__}"
1121
+ )
1122
+
1123
+ # defer to DatetimeArray.__add__
1124
+ return other + self
1125
+
1126
    @final
    def _sub_datetimelike_scalar(
        self, other: datetime | np.datetime64
    ) -> TimedeltaArray:
        """
        Subtract a datetime-like scalar from a datetime64 array,
        producing a TimedeltaArray.

        Raises
        ------
        TypeError
            If ``self`` is not datetime64-dtyped.
        """
        if self.dtype.kind != "M":
            raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")

        self = cast("DatetimeArray", self)
        # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]

        if isna(other):
            # i.e. np.datetime64("NaT"); defer to __sub__(NaT), which fills
            # the result with NaT (see _sub_nat).
            return self - NaT

        ts = Timestamp(other)

        # Cast both operands to a common resolution before subtracting.
        self, ts = self._ensure_matching_resos(ts)
        return self._sub_datetimelike(ts)
1144
+
1145
    @final
    def _sub_datetime_arraylike(self, other: DatetimeArray) -> TimedeltaArray:
        """
        Elementwise datetime - datetime subtraction, producing a
        TimedeltaArray.

        Raises
        ------
        TypeError
            If ``self`` is not datetime64-dtyped.
        ValueError
            If the operands have different lengths.
        """
        if self.dtype.kind != "M":
            raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")

        if len(self) != len(other):
            raise ValueError("cannot add indices of unequal length")

        self = cast("DatetimeArray", self)

        # Cast both operands to a common resolution before subtracting.
        self, other = self._ensure_matching_resos(other)
        return self._sub_datetimelike(other)
1157
+
1158
    @final
    def _sub_datetimelike(self, other: Timestamp | DatetimeArray) -> TimedeltaArray:
        """
        Shared implementation of datetime - datetime subtraction.

        Assumes resolutions already match (callers go through
        _ensure_matching_resos first).
        """
        self = cast("DatetimeArray", self)

        from pandas.core.arrays import TimedeltaArray

        try:
            # Mixing tz-aware and tz-naive operands is not well-defined.
            self._assert_tzawareness_compat(other)
        except TypeError as err:
            # Re-raise with the message rephrased for subtraction rather
            # than comparison, preserving the original exception type.
            new_message = str(err).replace("compare", "subtract")
            raise type(err)(new_message) from err

        # NOTE(review): o_mask is unused here; NaT propagation appears to be
        # handled inside add_overflowsafe via iNaT values — confirm.
        other_i8, o_mask = self._get_i8_values_and_mask(other)
        # Negate and add so overflow checking lives in one helper.
        res_values = add_overflowsafe(self.asi8, np.asarray(-other_i8, dtype="i8"))
        res_m8 = res_values.view(f"timedelta64[{self.unit}]")

        new_freq = self._get_arithmetic_result_freq(other)
        new_freq = cast("Tick | None", new_freq)
        return TimedeltaArray._simple_new(res_m8, dtype=res_m8.dtype, freq=new_freq)
1177
+
1178
+ @final
1179
+ def _add_period(self, other: Period) -> PeriodArray:
1180
+ if not lib.is_np_dtype(self.dtype, "m"):
1181
+ raise TypeError(f"cannot add Period to a {type(self).__name__}")
1182
+
1183
+ # We will wrap in a PeriodArray and defer to the reversed operation
1184
+ from pandas.core.arrays.period import PeriodArray
1185
+
1186
+ i8vals = np.broadcast_to(other.ordinal, self.shape)
1187
+ dtype = PeriodDtype(other.freq)
1188
+ parr = PeriodArray(i8vals, dtype=dtype)
1189
+ return parr + self
1190
+
1191
    def _add_offset(self, offset):
        # Abstract hook for adding a non-Tick DateOffset; concrete
        # subclasses must override.
        raise AbstractMethodError(self)
1193
+
1194
    def _add_timedeltalike_scalar(self, other):
        """
        Add a delta of a timedeltalike

        Returns
        -------
        Same type as self
        """
        if isna(other):
            # i.e np.timedelta64("NaT"): the result is all-NaT, built
            # directly without going through the arithmetic path.
            new_values = np.empty(self.shape, dtype="i8").view(self._ndarray.dtype)
            new_values.fill(iNaT)
            return type(self)._simple_new(new_values, dtype=self.dtype)

        # PeriodArray overrides, so we only get here with DTA/TDA
        self = cast("DatetimeArray | TimedeltaArray", self)
        other = Timedelta(other)
        # Cast both operands to a common resolution before i8 arithmetic.
        self, other = self._ensure_matching_resos(other)
        return self._add_timedeltalike(other)
1213
+
1214
    def _add_timedelta_arraylike(self, other: TimedeltaArray):
        """
        Add a delta of a TimedeltaIndex

        Returns
        -------
        Same type as self

        Raises
        ------
        ValueError
            If the operands have different lengths.
        """
        # overridden by PeriodArray

        if len(self) != len(other):
            raise ValueError("cannot add indices of unequal length")

        self = cast("DatetimeArray | TimedeltaArray", self)

        # Cast both operands to a common resolution before i8 arithmetic.
        self, other = self._ensure_matching_resos(other)
        return self._add_timedeltalike(other)
1231
+
1232
    @final
    def _add_timedeltalike(self, other: Timedelta | TimedeltaArray):
        """
        Shared implementation of timedelta addition for DTA/TDA.

        Assumes resolutions already match (callers go through
        _ensure_matching_resos first).
        """
        self = cast("DatetimeArray | TimedeltaArray", self)

        # NOTE(review): o_mask is unused here; NaT propagation appears to be
        # handled inside add_overflowsafe via iNaT values — confirm.
        other_i8, o_mask = self._get_i8_values_and_mask(other)
        new_values = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype="i8"))
        res_values = new_values.view(self._ndarray.dtype)

        new_freq = self._get_arithmetic_result_freq(other)

        # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
        # incompatible type "Union[dtype[datetime64], DatetimeTZDtype,
        # dtype[timedelta64]]"; expected "Union[dtype[datetime64], DatetimeTZDtype]"
        return type(self)._simple_new(
            res_values, dtype=self.dtype, freq=new_freq  # type: ignore[arg-type]
        )
1248
+
1249
    @final
    def _add_nat(self):
        """
        Add pd.NaT to self

        Returns an all-NaT array of the same type and dtype as self,
        with freq dropped.
        """
        if isinstance(self.dtype, PeriodDtype):
            raise TypeError(
                f"Cannot add {type(self).__name__} and {type(NaT).__name__}"
            )
        self = cast("TimedeltaArray | DatetimeArray", self)

        # GH#19124 pd.NaT is treated like a timedelta for both timedelta
        # and datetime dtypes
        result = np.empty(self.shape, dtype=np.int64)
        result.fill(iNaT)
        result = result.view(self._ndarray.dtype)  # preserve reso
        # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
        # incompatible type "Union[dtype[timedelta64], dtype[datetime64],
        # DatetimeTZDtype]"; expected "Union[dtype[datetime64], DatetimeTZDtype]"
        return type(self)._simple_new(
            result, dtype=self.dtype, freq=None  # type: ignore[arg-type]
        )
1271
+
1272
    @final
    def _sub_nat(self):
        """
        Subtract pd.NaT from self

        Returns an all-NaT timedelta64 ndarray, preserving the unit
        for datetime64/timedelta64 dtypes.
        """
        # GH#19124 Timedelta - datetime is not in general well-defined.
        # We make an exception for pd.NaT, which in this case quacks
        # like a timedelta.
        # For datetime64 dtypes by convention we treat NaT as a datetime, so
        # this subtraction returns a timedelta64 dtype.
        # For period dtype, timedelta64 is a close-enough return dtype.
        result = np.empty(self.shape, dtype=np.int64)
        result.fill(iNaT)
        if self.dtype.kind in "mM":
            # We can retain unit in dtype
            self = cast("DatetimeArray| TimedeltaArray", self)
            return result.view(f"timedelta64[{self.unit}]")
        else:
            # e.g. PeriodDtype: fall back to nanosecond resolution.
            return result.view("timedelta64[ns]")
1291
+
1292
    @final
    def _sub_periodlike(self, other: Period | PeriodArray) -> npt.NDArray[np.object_]:
        """
        Subtract a Period scalar or PeriodArray from a PeriodArray.
        """
        # If the operation is well-defined, we return an object-dtype ndarray
        # of DateOffsets.  Null entries are filled with pd.NaT
        if not isinstance(self.dtype, PeriodDtype):
            raise TypeError(
                f"cannot subtract {type(other).__name__} from {type(self).__name__}"
            )

        self = cast("PeriodArray", self)
        # Raises if freqs are incompatible.
        self._check_compatible_with(other)

        other_i8, o_mask = self._get_i8_values_and_mask(other)
        new_i8_data = add_overflowsafe(self.asi8, np.asarray(-other_i8, dtype="i8"))
        # Each entry becomes (ordinal difference) * base offset -> DateOffset.
        new_data = np.array([self.freq.base * x for x in new_i8_data])

        if o_mask is None:
            # i.e. Period scalar
            mask = self._isnan
        else:
            # i.e. PeriodArray
            mask = self._isnan | o_mask
        new_data[mask] = NaT
        return new_data
1316
+
1317
    @final
    def _addsub_object_array(self, other: npt.NDArray[np.object_], op):
        """
        Add or subtract array-like of DateOffset objects

        Parameters
        ----------
        other : np.ndarray[object]
        op : {operator.add, operator.sub}

        Returns
        -------
        np.ndarray[object]
            Except in fastpath case with length 1 where we operate on the
            contained scalar.
        """
        assert op in [operator.add, operator.sub]
        if len(other) == 1 and self.ndim == 1:
            # Note: without this special case, we could annotate return type
            # as ndarray[object]
            # If both 1D then broadcasting is unambiguous
            return op(self, other[0])

        # Elementwise object-dtype arithmetic is a Python-level loop, so
        # warn that this path is slow.
        warnings.warn(
            "Adding/subtracting object-dtype array to "
            f"{type(self).__name__} not vectorized.",
            PerformanceWarning,
            stacklevel=find_stack_level(),
        )

        # Caller is responsible for broadcasting if necessary
        assert self.shape == other.shape, (self.shape, other.shape)

        res_values = op(self.astype("O"), np.asarray(other))
        return res_values
1352
+
1353
+ def _accumulate(self, name: str, *, skipna: bool = True, **kwargs) -> Self:
1354
+ if name not in {"cummin", "cummax"}:
1355
+ raise TypeError(f"Accumulation {name} not supported for {type(self)}")
1356
+
1357
+ op = getattr(datetimelike_accumulations, name)
1358
+ result = op(self.copy(), skipna=skipna, **kwargs)
1359
+
1360
+ return type(self)._simple_new(result, dtype=self.dtype)
1361
+
1362
    @unpack_zerodim_and_defer("__add__")
    def __add__(self, other):
        # Capture the dtype BEFORE wrapping: wrapping may change `other`'s
        # type but dispatch below keys off the original array dtype.
        other_dtype = getattr(other, "dtype", None)
        other = ensure_wrapped_if_datetimelike(other)

        # scalar others
        if other is NaT:
            result = self._add_nat()
        elif isinstance(other, (Tick, timedelta, np.timedelta64)):
            result = self._add_timedeltalike_scalar(other)
        elif isinstance(other, BaseOffset):
            # specifically _not_ a Tick
            result = self._add_offset(other)
        elif isinstance(other, (datetime, np.datetime64)):
            result = self._add_datetimelike_scalar(other)
        elif isinstance(other, Period) and lib.is_np_dtype(self.dtype, "m"):
            result = self._add_period(other)
        elif lib.is_integer(other):
            # This check must come after the check for np.timedelta64
            # as is_integer returns True for these
            if not isinstance(self.dtype, PeriodDtype):
                raise integer_op_not_supported(self)
            obj = cast("PeriodArray", self)
            result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add)

        # array-like others
        elif lib.is_np_dtype(other_dtype, "m"):
            # TimedeltaIndex, ndarray[timedelta64]
            result = self._add_timedelta_arraylike(other)
        elif is_object_dtype(other_dtype):
            # e.g. Array/Index of DateOffset objects
            result = self._addsub_object_array(other, operator.add)
        elif lib.is_np_dtype(other_dtype, "M") or isinstance(
            other_dtype, DatetimeTZDtype
        ):
            # DatetimeIndex, ndarray[datetime64]
            return self._add_datetime_arraylike(other)
        elif is_integer_dtype(other_dtype):
            if not isinstance(self.dtype, PeriodDtype):
                raise integer_op_not_supported(self)
            obj = cast("PeriodArray", self)
            result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add)
        else:
            # Includes Categorical, other ExtensionArrays
            # For PeriodDtype, if self is a TimedeltaArray and other is a
            # PeriodArray with a timedelta-like (i.e. Tick) freq, this
            # operation is valid. Defer to the PeriodArray implementation.
            # In remaining cases, this will end up raising TypeError.
            return NotImplemented

        # A raw timedelta64 ndarray result gets re-wrapped so callers always
        # receive a pandas array.
        if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, "m"):
            from pandas.core.arrays import TimedeltaArray

            return TimedeltaArray._from_sequence(result)
        return result
1417
+
1418
    def __radd__(self, other):
        # alias for __add__; addition is symmetric for all supported
        # operand types, and calling __add__ directly preserves the
        # NotImplemented return for unsupported ones.
        return self.__add__(other)
1421
+
1422
    @unpack_zerodim_and_defer("__sub__")
    def __sub__(self, other):
        # Capture the dtype BEFORE wrapping: wrapping may change `other`'s
        # type but dispatch below keys off the original array dtype.
        other_dtype = getattr(other, "dtype", None)
        other = ensure_wrapped_if_datetimelike(other)

        # scalar others
        if other is NaT:
            result = self._sub_nat()
        elif isinstance(other, (Tick, timedelta, np.timedelta64)):
            # Subtraction is implemented as addition of the negation.
            result = self._add_timedeltalike_scalar(-other)
        elif isinstance(other, BaseOffset):
            # specifically _not_ a Tick
            result = self._add_offset(-other)
        elif isinstance(other, (datetime, np.datetime64)):
            result = self._sub_datetimelike_scalar(other)
        elif lib.is_integer(other):
            # This check must come after the check for np.timedelta64
            # as is_integer returns True for these
            if not isinstance(self.dtype, PeriodDtype):
                raise integer_op_not_supported(self)
            obj = cast("PeriodArray", self)
            result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub)

        elif isinstance(other, Period):
            result = self._sub_periodlike(other)

        # array-like others
        elif lib.is_np_dtype(other_dtype, "m"):
            # TimedeltaIndex, ndarray[timedelta64]
            result = self._add_timedelta_arraylike(-other)
        elif is_object_dtype(other_dtype):
            # e.g. Array/Index of DateOffset objects
            result = self._addsub_object_array(other, operator.sub)
        elif lib.is_np_dtype(other_dtype, "M") or isinstance(
            other_dtype, DatetimeTZDtype
        ):
            # DatetimeIndex, ndarray[datetime64]
            result = self._sub_datetime_arraylike(other)
        elif isinstance(other_dtype, PeriodDtype):
            # PeriodIndex
            result = self._sub_periodlike(other)
        elif is_integer_dtype(other_dtype):
            if not isinstance(self.dtype, PeriodDtype):
                raise integer_op_not_supported(self)
            obj = cast("PeriodArray", self)
            result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub)
        else:
            # Includes ExtensionArrays, float_dtype
            return NotImplemented

        # A raw timedelta64 ndarray result gets re-wrapped so callers always
        # receive a pandas array.
        if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, "m"):
            from pandas.core.arrays import TimedeltaArray

            return TimedeltaArray._from_sequence(result)
        return result
1477
+
1478
    def __rsub__(self, other):
        """
        Reflected subtraction: ``other - self``.
        """
        other_dtype = getattr(other, "dtype", None)
        other_is_dt64 = lib.is_np_dtype(other_dtype, "M") or isinstance(
            other_dtype, DatetimeTZDtype
        )

        if other_is_dt64 and lib.is_np_dtype(self.dtype, "m"):
            # ndarray[datetime64] cannot be subtracted from self, so
            # we need to wrap in DatetimeArray/Index and flip the operation
            if lib.is_scalar(other):
                # i.e. np.datetime64 object
                return Timestamp(other) - self
            if not isinstance(other, DatetimeLikeArrayMixin):
                # Avoid down-casting DatetimeIndex
                from pandas.core.arrays import DatetimeArray

                other = DatetimeArray._from_sequence(other)
            return other - self
        elif self.dtype.kind == "M" and hasattr(other, "dtype") and not other_is_dt64:
            # GH#19959 datetime - datetime is well-defined as timedelta,
            # but any other type - datetime is not well-defined.
            raise TypeError(
                f"cannot subtract {type(self).__name__} from {type(other).__name__}"
            )
        elif isinstance(self.dtype, PeriodDtype) and lib.is_np_dtype(other_dtype, "m"):
            # TODO: Can we simplify/generalize these cases at all?
            raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}")
        elif lib.is_np_dtype(self.dtype, "m"):
            # timedelta arrays negate cleanly, so flip via negation.
            self = cast("TimedeltaArray", self)
            return (-self) + other

        # We get here with e.g. datetime objects
        return -(self - other)
1511
+
1512
    def __iadd__(self, other) -> Self:
        """
        In-place addition: compute ``self + other`` then write the result
        back into this array's buffer.
        """
        result = self + other
        self[:] = result[:]

        if not isinstance(self.dtype, PeriodDtype):
            # restore freq, which is invalidated by setitem
            self._freq = result.freq
        return self
1520
+
1521
    def __isub__(self, other) -> Self:
        """
        In-place subtraction: compute ``self - other`` then write the
        result back into this array's buffer.
        """
        result = self - other
        self[:] = result[:]

        if not isinstance(self.dtype, PeriodDtype):
            # restore freq, which is invalidated by setitem
            self._freq = result.freq
        return self
1529
+
1530
+ # --------------------------------------------------------------
1531
+ # Reductions
1532
+
1533
    @_period_dispatch
    def _quantile(
        self,
        qs: npt.NDArray[np.float64],
        interpolation: str,
    ) -> Self:
        # Defer to the parent implementation; the _period_dispatch wrapper
        # (defined earlier in this module) adapts Period-dtype arrays.
        return super()._quantile(qs=qs, interpolation=interpolation)
1540
+
1541
    @_period_dispatch
    def min(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs):
        """
        Return the minimum value of the Array or minimum along
        an axis.

        See Also
        --------
        numpy.ndarray.min
        Index.min : Return the minimum value in an Index.
        Series.min : Return the minimum value in a Series.
        """
        # Reject unsupported numpy-compat kwargs and out-of-bounds axis.
        nv.validate_min((), kwargs)
        nv.validate_minmax_axis(axis, self.ndim)

        result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)
1558
+
1559
    @_period_dispatch
    def max(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs):
        """
        Return the maximum value of the Array or maximum along
        an axis.

        See Also
        --------
        numpy.ndarray.max
        Index.max : Return the maximum value in an Index.
        Series.max : Return the maximum value in a Series.
        """
        # Reject unsupported numpy-compat kwargs and out-of-bounds axis.
        nv.validate_max((), kwargs)
        nv.validate_minmax_axis(axis, self.ndim)

        result = nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)
1576
+
1577
    def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0):
        """
        Return the mean value of the Array.

        Parameters
        ----------
        skipna : bool, default True
            Whether to ignore any NaT elements.
        axis : int, optional, default 0

        Returns
        -------
        scalar
            Timestamp or Timedelta.

        See Also
        --------
        numpy.ndarray.mean : Returns the average of array elements along a given axis.
        Series.mean : Return the mean value in a Series.

        Notes
        -----
        mean is only defined for Datetime and Timedelta dtypes, not for Period.

        Examples
        --------
        For :class:`pandas.DatetimeIndex`:

        >>> idx = pd.date_range('2001-01-01 00:00', periods=3)
        >>> idx
        DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
                      dtype='datetime64[ns]', freq='D')
        >>> idx.mean()
        Timestamp('2001-01-02 00:00:00')

        For :class:`pandas.TimedeltaIndex`:

        >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D')
        >>> tdelta_idx
        TimedeltaIndex(['1 days', '2 days', '3 days'],
                        dtype='timedelta64[ns]', freq=None)
        >>> tdelta_idx.mean()
        Timedelta('2 days 00:00:00')
        """
        if isinstance(self.dtype, PeriodDtype):
            # See discussion in GH#24757
            raise TypeError(
                f"mean is not implemented for {type(self).__name__} since the "
                "meaning is ambiguous. An alternative is "
                "obj.to_timestamp(how='start').mean()"
            )

        # Pass NaT positions as a mask so nanops can honor skipna.
        result = nanops.nanmean(
            self._ndarray, axis=axis, skipna=skipna, mask=self.isna()
        )
        return self._wrap_reduction_result(axis, result)
1633
+
1634
    @_period_dispatch
    def median(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs):
        """
        Return the median value of the Array along the given axis.
        """
        # Reject unsupported numpy-compat kwargs.
        nv.validate_median((), kwargs)

        if axis is not None and abs(axis) >= self.ndim:
            raise ValueError("abs(axis) must be less than ndim")

        result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)
1643
+
1644
+ def _mode(self, dropna: bool = True):
1645
+ mask = None
1646
+ if dropna:
1647
+ mask = self.isna()
1648
+
1649
+ i8modes = algorithms.mode(self.view("i8"), mask=mask)
1650
+ npmodes = i8modes.view(self._ndarray.dtype)
1651
+ npmodes = cast(np.ndarray, npmodes)
1652
+ return self._from_backing_data(npmodes)
1653
+
1654
+ # ------------------------------------------------------------------
1655
+ # GroupBy Methods
1656
+
1657
    def _groupby_op(
        self,
        *,
        how: str,
        has_dropped_na: bool,
        min_count: int,
        ngroups: int,
        ids: npt.NDArray[np.intp],
        **kwargs,
    ):
        """
        Execute a groupby aggregation/transform (`how`) on this array,
        rejecting operations that are invalid for the dtype.
        """
        dtype = self.dtype
        if dtype.kind == "M":
            # Adding/multiplying datetimes is not valid
            if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]:
                raise TypeError(f"datetime64 type does not support {how} operations")
            if how in ["any", "all"]:
                # GH#34479
                warnings.warn(
                    f"'{how}' with datetime64 dtypes is deprecated and will raise in a "
                    f"future version. Use (obj != pd.Timestamp(0)).{how}() instead.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )

        elif isinstance(dtype, PeriodDtype):
            # Adding/multiplying Periods is not valid
            if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]:
                raise TypeError(f"Period type does not support {how} operations")
            if how in ["any", "all"]:
                # GH#34479
                warnings.warn(
                    f"'{how}' with PeriodDtype is deprecated and will raise in a "
                    f"future version. Use (obj != pd.Period(0, freq)).{how}() instead.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
        else:
            # timedeltas we can add but not multiply
            if how in ["prod", "cumprod", "skew", "var"]:
                raise TypeError(f"timedelta64 type does not support {how} operations")

        # All of the functions implemented here are ordinal, so we can
        # operate on the tz-naive equivalents
        npvalues = self._ndarray.view("M8[ns]")

        from pandas.core.groupby.ops import WrappedCythonOp

        kind = WrappedCythonOp.get_kind_from_how(how)
        op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)

        res_values = op._cython_op_ndim_compat(
            npvalues,
            min_count=min_count,
            ngroups=ngroups,
            comp_ids=ids,
            mask=None,
            **kwargs,
        )

        if op.how in op.cast_blocklist:
            # i.e. how in ["rank"], since other cast_blocklist methods don't go
            # through cython_operation
            return res_values

        # We did a view to M8[ns] above, now we go the other direction
        assert res_values.dtype == "M8[ns]"
        if how in ["std", "sem"]:
            # std/sem of datetimes are timedeltas, so rewrap accordingly.
            from pandas.core.arrays import TimedeltaArray

            if isinstance(self.dtype, PeriodDtype):
                raise TypeError("'std' and 'sem' are not valid for PeriodDtype")
            self = cast("DatetimeArray | TimedeltaArray", self)
            new_dtype = f"m8[{self.unit}]"
            res_values = res_values.view(new_dtype)
            return TimedeltaArray._simple_new(res_values, dtype=res_values.dtype)

        res_values = res_values.view(self._ndarray.dtype)
        return self._from_backing_data(res_values)
1735
+
1736
+
1737
class DatelikeOps(DatetimeLikeArrayMixin):
    """
    Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
    """

    # NOTE: the docstring below goes through @Substitution, so literal
    # percent signs must be escaped as %% and %(URL)s is interpolated.
    @Substitution(
        URL="https://docs.python.org/3/library/datetime.html"
        "#strftime-and-strptime-behavior"
    )
    def strftime(self, date_format: str) -> npt.NDArray[np.object_]:
        """
        Convert to Index using specified date_format.

        Return an Index of formatted strings specified by date_format, which
        supports the same string format as the python standard library. Details
        of the string format can be found in `python string format
        doc <%(URL)s>`__.

        Formats supported by the C `strftime` API but not by the python string format
        doc (such as `"%%R"`, `"%%r"`) are not officially supported and should be
        preferably replaced with their supported equivalents (such as `"%%H:%%M"`,
        `"%%I:%%M:%%S %%p"`).

        Note that `PeriodIndex` support additional directives, detailed in
        `Period.strftime`.

        Parameters
        ----------
        date_format : str
            Date format string (e.g. "%%Y-%%m-%%d").

        Returns
        -------
        ndarray[object]
            NumPy ndarray of formatted strings.

        See Also
        --------
        to_datetime : Convert the given argument to datetime.
        DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
        DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
        DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
        Timestamp.strftime : Format a single Timestamp.
        Period.strftime : Format a single Period.

        Examples
        --------
        >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
        ...                     periods=3, freq='s')
        >>> rng.strftime('%%B %%d, %%Y, %%r')
        Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
               'March 10, 2018, 09:00:02 AM'],
              dtype='object')
        """
        # Format via the shared native-types formatter, then coerce the
        # result to object dtype without copying when possible.
        result = self._format_native_types(date_format=date_format, na_rep=np.nan)
        return result.astype(object, copy=False)
1793
+
1794
+
1795
+ _round_doc = """
1796
+ Perform {op} operation on the data to the specified `freq`.
1797
+
1798
+ Parameters
1799
+ ----------
1800
+ freq : str or Offset
1801
+ The frequency level to {op} the index to. Must be a fixed
1802
+ frequency like 'S' (second) not 'ME' (month end). See
1803
+ :ref:`frequency aliases <timeseries.offset_aliases>` for
1804
+ a list of possible `freq` values.
1805
+ ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
1806
+ Only relevant for DatetimeIndex:
1807
+
1808
+ - 'infer' will attempt to infer fall dst-transition hours based on
1809
+ order
1810
+ - bool-ndarray where True signifies a DST time, False designates
1811
+ a non-DST time (note that this flag is only applicable for
1812
+ ambiguous times)
1813
+ - 'NaT' will return NaT where there are ambiguous times
1814
+ - 'raise' will raise an AmbiguousTimeError if there are ambiguous
1815
+ times.
1816
+
1817
+ nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise'
1818
+ A nonexistent time does not exist in a particular timezone
1819
+ where clocks moved forward due to DST.
1820
+
1821
+ - 'shift_forward' will shift the nonexistent time forward to the
1822
+ closest existing time
1823
+ - 'shift_backward' will shift the nonexistent time backward to the
1824
+ closest existing time
1825
+ - 'NaT' will return NaT where there are nonexistent times
1826
+ - timedelta objects will shift nonexistent times by the timedelta
1827
+ - 'raise' will raise an NonExistentTimeError if there are
1828
+ nonexistent times.
1829
+
1830
+ Returns
1831
+ -------
1832
+ DatetimeIndex, TimedeltaIndex, or Series
1833
+ Index of the same type for a DatetimeIndex or TimedeltaIndex,
1834
+ or a Series with the same index for a Series.
1835
+
1836
+ Raises
1837
+ ------
1838
+ ValueError if the `freq` cannot be converted.
1839
+
1840
+ Notes
1841
+ -----
1842
+ If the timestamps have a timezone, {op}ing will take place relative to the
1843
+ local ("wall") time and re-localized to the same timezone. When {op}ing
1844
+ near daylight savings time, use ``nonexistent`` and ``ambiguous`` to
1845
+ control the re-localization behavior.
1846
+
1847
+ Examples
1848
+ --------
1849
+ **DatetimeIndex**
1850
+
1851
+ >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
1852
+ >>> rng
1853
+ DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
1854
+ '2018-01-01 12:01:00'],
1855
+ dtype='datetime64[ns]', freq='min')
1856
+ """
1857
+
1858
+ _round_example = """>>> rng.round('h')
1859
+ DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
1860
+ '2018-01-01 12:00:00'],
1861
+ dtype='datetime64[ns]', freq=None)
1862
+
1863
+ **Series**
1864
+
1865
+ >>> pd.Series(rng).dt.round("h")
1866
+ 0 2018-01-01 12:00:00
1867
+ 1 2018-01-01 12:00:00
1868
+ 2 2018-01-01 12:00:00
1869
+ dtype: datetime64[ns]
1870
+
1871
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
1872
+ ``nonexistent`` to control how the timestamp should be re-localized.
1873
+
1874
+ >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam")
1875
+
1876
+ >>> rng_tz.floor("2h", ambiguous=False)
1877
+ DatetimeIndex(['2021-10-31 02:00:00+01:00'],
1878
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
1879
+
1880
+ >>> rng_tz.floor("2h", ambiguous=True)
1881
+ DatetimeIndex(['2021-10-31 02:00:00+02:00'],
1882
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
1883
+ """
1884
+
1885
+ _floor_example = """>>> rng.floor('h')
1886
+ DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
1887
+ '2018-01-01 12:00:00'],
1888
+ dtype='datetime64[ns]', freq=None)
1889
+
1890
+ **Series**
1891
+
1892
+ >>> pd.Series(rng).dt.floor("h")
1893
+ 0 2018-01-01 11:00:00
1894
+ 1 2018-01-01 12:00:00
1895
+ 2 2018-01-01 12:00:00
1896
+ dtype: datetime64[ns]
1897
+
1898
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
1899
+ ``nonexistent`` to control how the timestamp should be re-localized.
1900
+
1901
+ >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam")
1902
+
1903
+ >>> rng_tz.floor("2h", ambiguous=False)
1904
+ DatetimeIndex(['2021-10-31 02:00:00+01:00'],
1905
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
1906
+
1907
+ >>> rng_tz.floor("2h", ambiguous=True)
1908
+ DatetimeIndex(['2021-10-31 02:00:00+02:00'],
1909
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
1910
+ """
1911
+
1912
+ _ceil_example = """>>> rng.ceil('h')
1913
+ DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
1914
+ '2018-01-01 13:00:00'],
1915
+ dtype='datetime64[ns]', freq=None)
1916
+
1917
+ **Series**
1918
+
1919
+ >>> pd.Series(rng).dt.ceil("h")
1920
+ 0 2018-01-01 12:00:00
1921
+ 1 2018-01-01 12:00:00
1922
+ 2 2018-01-01 13:00:00
1923
+ dtype: datetime64[ns]
1924
+
1925
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
1926
+ ``nonexistent`` to control how the timestamp should be re-localized.
1927
+
1928
+ >>> rng_tz = pd.DatetimeIndex(["2021-10-31 01:30:00"], tz="Europe/Amsterdam")
1929
+
1930
+ >>> rng_tz.ceil("h", ambiguous=False)
1931
+ DatetimeIndex(['2021-10-31 02:00:00+01:00'],
1932
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
1933
+
1934
+ >>> rng_tz.ceil("h", ambiguous=True)
1935
+ DatetimeIndex(['2021-10-31 02:00:00+02:00'],
1936
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
1937
+ """
1938
+
1939
+
1940
+ class TimelikeOps(DatetimeLikeArrayMixin):
1941
+ """
1942
+ Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
1943
+ """
1944
+
1945
+ _default_dtype: np.dtype
1946
+
1947
    def __init__(
        self, values, dtype=None, freq=lib.no_default, copy: bool = False
    ) -> None:
        """
        Deprecated constructor; normalizes `values`/`dtype`/`freq` and
        initializes the NDArrayBacked state.
        """
        warnings.warn(
            # GH#55623
            f"{type(self).__name__}.__init__ is deprecated and will be "
            "removed in a future version. Use pd.array instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        if dtype is not None:
            dtype = pandas_dtype(dtype)

        values = extract_array(values, extract_numpy=True)
        if isinstance(values, IntegerArray):
            # NA entries become iNaT in the i8 representation.
            values = values.to_numpy("int64", na_value=iNaT)

        inferred_freq = getattr(values, "_freq", None)
        explicit_none = freq is None
        freq = freq if freq is not lib.no_default else None

        if isinstance(values, type(self)):
            if explicit_none:
                # don't inherit from values
                pass
            elif freq is None:
                freq = values.freq
            elif freq and values.freq:
                freq = to_offset(freq)
                freq = _validate_inferred_freq(freq, values.freq)

            if dtype is not None and dtype != values.dtype:
                # TODO: we only have tests for this for DTA, not TDA (2022-07-01)
                raise TypeError(
                    f"dtype={dtype} does not match data dtype {values.dtype}"
                )

            dtype = values.dtype
            values = values._ndarray

        elif dtype is None:
            if isinstance(values, np.ndarray) and values.dtype.kind in "Mm":
                dtype = values.dtype
            else:
                dtype = self._default_dtype
                if isinstance(values, np.ndarray) and values.dtype == "i8":
                    values = values.view(dtype)

        if not isinstance(values, np.ndarray):
            raise ValueError(
                f"Unexpected type '{type(values).__name__}'. 'values' must be a "
                f"{type(self).__name__}, ndarray, or Series or Index "
                "containing one of those."
            )
        if values.ndim not in [1, 2]:
            # NOTE(review): the message says "1-dimensional" but the check
            # accepts 2D as well — confirm whether the message should mention 2D.
            raise ValueError("Only 1-dimensional input arrays are supported.")

        if values.dtype == "i8":
            # for compat with datetime/timedelta/period shared methods,
            # we can sometimes get here with int64 values. These represent
            # nanosecond UTC (or tz-naive) unix timestamps
            if dtype is None:
                dtype = self._default_dtype
                values = values.view(self._default_dtype)
            elif lib.is_np_dtype(dtype, "mM"):
                values = values.view(dtype)
            elif isinstance(dtype, DatetimeTZDtype):
                kind = self._default_dtype.kind
                new_dtype = f"{kind}8[{dtype.unit}]"
                values = values.view(new_dtype)

        dtype = self._validate_dtype(values, dtype)

        if freq == "infer":
            raise ValueError(
                f"Frequency inference not allowed in {type(self).__name__}.__init__. "
                "Use 'pd.array()' instead."
            )

        if copy:
            values = values.copy()
        if freq:
            freq = to_offset(freq)
            if values.dtype.kind == "m" and not isinstance(freq, Tick):
                raise TypeError("TimedeltaArray/Index freq must be a Tick")

        NDArrayBacked.__init__(self, values=values, dtype=dtype)
        self._freq = freq

        if inferred_freq is None and freq is not None:
            # Only validate a freq the caller supplied; inherited/inferred
            # freqs were already validated on the source array.
            type(self)._validate_frequency(self, freq)
2038
+
2039
+ @classmethod
2040
+ def _validate_dtype(cls, values, dtype):
2041
+ raise AbstractMethodError(cls)
2042
+
2043
+ @property
2044
+ def freq(self):
2045
+ """
2046
+ Return the frequency object if it is set, otherwise None.
2047
+ """
2048
+ return self._freq
2049
+
2050
+ @freq.setter
2051
+ def freq(self, value) -> None:
2052
+ if value is not None:
2053
+ value = to_offset(value)
2054
+ self._validate_frequency(self, value)
2055
+ if self.dtype.kind == "m" and not isinstance(value, Tick):
2056
+ raise TypeError("TimedeltaArray/Index freq must be a Tick")
2057
+
2058
+ if self.ndim > 1:
2059
+ raise ValueError("Cannot set freq with ndim > 1")
2060
+
2061
+ self._freq = value
2062
+
2063
+ @final
2064
+ def _maybe_pin_freq(self, freq, validate_kwds: dict):
2065
+ """
2066
+ Constructor helper to pin the appropriate `freq` attribute. Assumes
2067
+ that self._freq is currently set to any freq inferred in
2068
+ _from_sequence_not_strict.
2069
+ """
2070
+ if freq is None:
2071
+ # user explicitly passed None -> override any inferred_freq
2072
+ self._freq = None
2073
+ elif freq == "infer":
2074
+ # if self._freq is *not* None then we already inferred a freq
2075
+ # and there is nothing left to do
2076
+ if self._freq is None:
2077
+ # Set _freq directly to bypass duplicative _validate_frequency
2078
+ # check.
2079
+ self._freq = to_offset(self.inferred_freq)
2080
+ elif freq is lib.no_default:
2081
+ # user did not specify anything, keep inferred freq if the original
2082
+ # data had one, otherwise do nothing
2083
+ pass
2084
+ elif self._freq is None:
2085
+ # We cannot inherit a freq from the data, so we need to validate
2086
+ # the user-passed freq
2087
+ freq = to_offset(freq)
2088
+ type(self)._validate_frequency(self, freq, **validate_kwds)
2089
+ self._freq = freq
2090
+ else:
2091
+ # Otherwise we just need to check that the user-passed freq
2092
+ # doesn't conflict with the one we already have.
2093
+ freq = to_offset(freq)
2094
+ _validate_inferred_freq(freq, self._freq)
2095
+
2096
+ @final
2097
+ @classmethod
2098
+ def _validate_frequency(cls, index, freq: BaseOffset, **kwargs):
2099
+ """
2100
+ Validate that a frequency is compatible with the values of a given
2101
+ Datetime Array/Index or Timedelta Array/Index
2102
+
2103
+ Parameters
2104
+ ----------
2105
+ index : DatetimeIndex or TimedeltaIndex
2106
+ The index on which to determine if the given frequency is valid
2107
+ freq : DateOffset
2108
+ The frequency to validate
2109
+ """
2110
+ inferred = index.inferred_freq
2111
+ if index.size == 0 or inferred == freq.freqstr:
2112
+ return None
2113
+
2114
+ try:
2115
+ on_freq = cls._generate_range(
2116
+ start=index[0],
2117
+ end=None,
2118
+ periods=len(index),
2119
+ freq=freq,
2120
+ unit=index.unit,
2121
+ **kwargs,
2122
+ )
2123
+ if not np.array_equal(index.asi8, on_freq.asi8):
2124
+ raise ValueError
2125
+ except ValueError as err:
2126
+ if "non-fixed" in str(err):
2127
+ # non-fixed frequencies are not meaningful for timedelta64;
2128
+ # we retain that error message
2129
+ raise err
2130
+ # GH#11587 the main way this is reached is if the `np.array_equal`
2131
+ # check above is False. This can also be reached if index[0]
2132
+ # is `NaT`, in which case the call to `cls._generate_range` will
2133
+ # raise a ValueError, which we re-raise with a more targeted
2134
+ # message.
2135
+ raise ValueError(
2136
+ f"Inferred frequency {inferred} from passed values "
2137
+ f"does not conform to passed frequency {freq.freqstr}"
2138
+ ) from err
2139
+
2140
+ @classmethod
2141
+ def _generate_range(
2142
+ cls, start, end, periods: int | None, freq, *args, **kwargs
2143
+ ) -> Self:
2144
+ raise AbstractMethodError(cls)
2145
+
2146
+ # --------------------------------------------------------------
2147
+
2148
+ @cache_readonly
2149
+ def _creso(self) -> int:
2150
+ return get_unit_from_dtype(self._ndarray.dtype)
2151
+
2152
+ @cache_readonly
2153
+ def unit(self) -> str:
2154
+ # e.g. "ns", "us", "ms"
2155
+ # error: Argument 1 to "dtype_to_unit" has incompatible type
2156
+ # "ExtensionDtype"; expected "Union[DatetimeTZDtype, dtype[Any]]"
2157
+ return dtype_to_unit(self.dtype) # type: ignore[arg-type]
2158
+
2159
+ def as_unit(self, unit: str, round_ok: bool = True) -> Self:
2160
+ if unit not in ["s", "ms", "us", "ns"]:
2161
+ raise ValueError("Supported units are 's', 'ms', 'us', 'ns'")
2162
+
2163
+ dtype = np.dtype(f"{self.dtype.kind}8[{unit}]")
2164
+ new_values = astype_overflowsafe(self._ndarray, dtype, round_ok=round_ok)
2165
+
2166
+ if isinstance(self.dtype, np.dtype):
2167
+ new_dtype = new_values.dtype
2168
+ else:
2169
+ tz = cast("DatetimeArray", self).tz
2170
+ new_dtype = DatetimeTZDtype(tz=tz, unit=unit)
2171
+
2172
+ # error: Unexpected keyword argument "freq" for "_simple_new" of
2173
+ # "NDArrayBacked" [call-arg]
2174
+ return type(self)._simple_new(
2175
+ new_values, dtype=new_dtype, freq=self.freq # type: ignore[call-arg]
2176
+ )
2177
+
2178
+ # TODO: annotate other as DatetimeArray | TimedeltaArray | Timestamp | Timedelta
2179
+ # with the return type matching input type. TypeVar?
2180
+ def _ensure_matching_resos(self, other):
2181
+ if self._creso != other._creso:
2182
+ # Just as with Timestamp/Timedelta, we cast to the higher resolution
2183
+ if self._creso < other._creso:
2184
+ self = self.as_unit(other.unit)
2185
+ else:
2186
+ other = other.as_unit(self.unit)
2187
+ return self, other
2188
+
2189
+ # --------------------------------------------------------------
2190
+
2191
+ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
2192
+ if (
2193
+ ufunc in [np.isnan, np.isinf, np.isfinite]
2194
+ and len(inputs) == 1
2195
+ and inputs[0] is self
2196
+ ):
2197
+ # numpy 1.18 changed isinf and isnan to not raise on dt64/td64
2198
+ return getattr(ufunc, method)(self._ndarray, **kwargs)
2199
+
2200
+ return super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
2201
+
2202
+ def _round(self, freq, mode, ambiguous, nonexistent):
2203
+ # round the local times
2204
+ if isinstance(self.dtype, DatetimeTZDtype):
2205
+ # operate on naive timestamps, then convert back to aware
2206
+ self = cast("DatetimeArray", self)
2207
+ naive = self.tz_localize(None)
2208
+ result = naive._round(freq, mode, ambiguous, nonexistent)
2209
+ return result.tz_localize(
2210
+ self.tz, ambiguous=ambiguous, nonexistent=nonexistent
2211
+ )
2212
+
2213
+ values = self.view("i8")
2214
+ values = cast(np.ndarray, values)
2215
+ nanos = get_unit_for_round(freq, self._creso)
2216
+ if nanos == 0:
2217
+ # GH 52761
2218
+ return self.copy()
2219
+ result_i8 = round_nsint64(values, mode, nanos)
2220
+ result = self._maybe_mask_results(result_i8, fill_value=iNaT)
2221
+ result = result.view(self._ndarray.dtype)
2222
+ return self._simple_new(result, dtype=self.dtype)
2223
+
2224
+ @Appender((_round_doc + _round_example).format(op="round"))
2225
+ def round(
2226
+ self,
2227
+ freq,
2228
+ ambiguous: TimeAmbiguous = "raise",
2229
+ nonexistent: TimeNonexistent = "raise",
2230
+ ) -> Self:
2231
+ return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)
2232
+
2233
+ @Appender((_round_doc + _floor_example).format(op="floor"))
2234
+ def floor(
2235
+ self,
2236
+ freq,
2237
+ ambiguous: TimeAmbiguous = "raise",
2238
+ nonexistent: TimeNonexistent = "raise",
2239
+ ) -> Self:
2240
+ return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
2241
+
2242
+ @Appender((_round_doc + _ceil_example).format(op="ceil"))
2243
+ def ceil(
2244
+ self,
2245
+ freq,
2246
+ ambiguous: TimeAmbiguous = "raise",
2247
+ nonexistent: TimeNonexistent = "raise",
2248
+ ) -> Self:
2249
+ return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
2250
+
2251
+ # --------------------------------------------------------------
2252
+ # Reductions
2253
+
2254
+ def any(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool:
2255
+ # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype
2256
+ return nanops.nanany(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())
2257
+
2258
+ def all(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool:
2259
+ # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype
2260
+
2261
+ return nanops.nanall(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())
2262
+
2263
+ # --------------------------------------------------------------
2264
+ # Frequency Methods
2265
+
2266
+ def _maybe_clear_freq(self) -> None:
2267
+ self._freq = None
2268
+
2269
+ def _with_freq(self, freq) -> Self:
2270
+ """
2271
+ Helper to get a view on the same data, with a new freq.
2272
+
2273
+ Parameters
2274
+ ----------
2275
+ freq : DateOffset, None, or "infer"
2276
+
2277
+ Returns
2278
+ -------
2279
+ Same type as self
2280
+ """
2281
+ # GH#29843
2282
+ if freq is None:
2283
+ # Always valid
2284
+ pass
2285
+ elif len(self) == 0 and isinstance(freq, BaseOffset):
2286
+ # Always valid. In the TimedeltaArray case, we require a Tick offset
2287
+ if self.dtype.kind == "m" and not isinstance(freq, Tick):
2288
+ raise TypeError("TimedeltaArray/Index freq must be a Tick")
2289
+ else:
2290
+ # As an internal method, we can ensure this assertion always holds
2291
+ assert freq == "infer"
2292
+ freq = to_offset(self.inferred_freq)
2293
+
2294
+ arr = self.view()
2295
+ arr._freq = freq
2296
+ return arr
2297
+
2298
+ # --------------------------------------------------------------
2299
+ # ExtensionArray Interface
2300
+
2301
+ def _values_for_json(self) -> np.ndarray:
2302
+ # Small performance bump vs the base class which calls np.asarray(self)
2303
+ if isinstance(self.dtype, np.dtype):
2304
+ return self._ndarray
2305
+ return super()._values_for_json()
2306
+
2307
+ def factorize(
2308
+ self,
2309
+ use_na_sentinel: bool = True,
2310
+ sort: bool = False,
2311
+ ):
2312
+ if self.freq is not None:
2313
+ # We must be unique, so can short-circuit (and retain freq)
2314
+ codes = np.arange(len(self), dtype=np.intp)
2315
+ uniques = self.copy() # TODO: copy or view?
2316
+ if sort and self.freq.n < 0:
2317
+ codes = codes[::-1]
2318
+ uniques = uniques[::-1]
2319
+ return codes, uniques
2320
+
2321
+ if sort:
2322
+ # algorithms.factorize only passes sort=True here when freq is
2323
+ # not None, so this should not be reached.
2324
+ raise NotImplementedError(
2325
+ f"The 'sort' keyword in {type(self).__name__}.factorize is "
2326
+ "ignored unless arr.freq is not None. To factorize with sort, "
2327
+ "call pd.factorize(obj, sort=True) instead."
2328
+ )
2329
+ return super().factorize(use_na_sentinel=use_na_sentinel)
2330
+
2331
+ @classmethod
2332
+ def _concat_same_type(
2333
+ cls,
2334
+ to_concat: Sequence[Self],
2335
+ axis: AxisInt = 0,
2336
+ ) -> Self:
2337
+ new_obj = super()._concat_same_type(to_concat, axis)
2338
+
2339
+ obj = to_concat[0]
2340
+
2341
+ if axis == 0:
2342
+ # GH 3232: If the concat result is evenly spaced, we can retain the
2343
+ # original frequency
2344
+ to_concat = [x for x in to_concat if len(x)]
2345
+
2346
+ if obj.freq is not None and all(x.freq == obj.freq for x in to_concat):
2347
+ pairs = zip(to_concat[:-1], to_concat[1:])
2348
+ if all(pair[0][-1] + obj.freq == pair[1][0] for pair in pairs):
2349
+ new_freq = obj.freq
2350
+ new_obj._freq = new_freq
2351
+ return new_obj
2352
+
2353
+ def copy(self, order: str = "C") -> Self:
2354
+ new_obj = super().copy(order=order)
2355
+ new_obj._freq = self.freq
2356
+ return new_obj
2357
+
2358
+ def interpolate(
2359
+ self,
2360
+ *,
2361
+ method: InterpolateOptions,
2362
+ axis: int,
2363
+ index: Index,
2364
+ limit,
2365
+ limit_direction,
2366
+ limit_area,
2367
+ copy: bool,
2368
+ **kwargs,
2369
+ ) -> Self:
2370
+ """
2371
+ See NDFrame.interpolate.__doc__.
2372
+ """
2373
+ # NB: we return type(self) even if copy=False
2374
+ if method != "linear":
2375
+ raise NotImplementedError
2376
+
2377
+ if not copy:
2378
+ out_data = self._ndarray
2379
+ else:
2380
+ out_data = self._ndarray.copy()
2381
+
2382
+ missing.interpolate_2d_inplace(
2383
+ out_data,
2384
+ method=method,
2385
+ axis=axis,
2386
+ index=index,
2387
+ limit=limit,
2388
+ limit_direction=limit_direction,
2389
+ limit_area=limit_area,
2390
+ **kwargs,
2391
+ )
2392
+ if not copy:
2393
+ return self
2394
+ return type(self)._simple_new(out_data, dtype=self.dtype)
2395
+
2396
+ # --------------------------------------------------------------
2397
+ # Unsorted
2398
+
2399
+ @property
2400
+ def _is_dates_only(self) -> bool:
2401
+ """
2402
+ Check if we are round times at midnight (and no timezone), which will
2403
+ be given a more compact __repr__ than other cases. For TimedeltaArray
2404
+ we are checking for multiples of 24H.
2405
+ """
2406
+ if not lib.is_np_dtype(self.dtype):
2407
+ # i.e. we have a timezone
2408
+ return False
2409
+
2410
+ values_int = self.asi8
2411
+ consider_values = values_int != iNaT
2412
+ reso = get_unit_from_dtype(self.dtype)
2413
+ ppd = periods_per_day(reso)
2414
+
2415
+ # TODO: can we reuse is_date_array_normalized? would need a skipna kwd
2416
+ # (first attempt at this was less performant than this implementation)
2417
+ even_days = np.logical_and(consider_values, values_int % ppd != 0).sum() == 0
2418
+ return even_days
2419
+
2420
+
2421
+ # -------------------------------------------------------------------
2422
+ # Shared Constructor Helpers
2423
+
2424
+
2425
+ def ensure_arraylike_for_datetimelike(
2426
+ data, copy: bool, cls_name: str
2427
+ ) -> tuple[ArrayLike, bool]:
2428
+ if not hasattr(data, "dtype"):
2429
+ # e.g. list, tuple
2430
+ if not isinstance(data, (list, tuple)) and np.ndim(data) == 0:
2431
+ # i.e. generator
2432
+ data = list(data)
2433
+
2434
+ data = construct_1d_object_array_from_listlike(data)
2435
+ copy = False
2436
+ elif isinstance(data, ABCMultiIndex):
2437
+ raise TypeError(f"Cannot create a {cls_name} from a MultiIndex.")
2438
+ else:
2439
+ data = extract_array(data, extract_numpy=True)
2440
+
2441
+ if isinstance(data, IntegerArray) or (
2442
+ isinstance(data, ArrowExtensionArray) and data.dtype.kind in "iu"
2443
+ ):
2444
+ data = data.to_numpy("int64", na_value=iNaT)
2445
+ copy = False
2446
+ elif isinstance(data, ArrowExtensionArray):
2447
+ data = data._maybe_convert_datelike_array()
2448
+ data = data.to_numpy()
2449
+ copy = False
2450
+ elif not isinstance(data, (np.ndarray, ExtensionArray)):
2451
+ # GH#24539 e.g. xarray, dask object
2452
+ data = np.asarray(data)
2453
+
2454
+ elif isinstance(data, ABCCategorical):
2455
+ # GH#18664 preserve tz in going DTI->Categorical->DTI
2456
+ # TODO: cases where we need to do another pass through maybe_convert_dtype,
2457
+ # e.g. the categories are timedelta64s
2458
+ data = data.categories.take(data.codes, fill_value=NaT)._values
2459
+ copy = False
2460
+
2461
+ return data, copy
2462
+
2463
+
2464
+ @overload
2465
+ def validate_periods(periods: None) -> None:
2466
+ ...
2467
+
2468
+
2469
+ @overload
2470
+ def validate_periods(periods: int | float) -> int:
2471
+ ...
2472
+
2473
+
2474
+ def validate_periods(periods: int | float | None) -> int | None:
2475
+ """
2476
+ If a `periods` argument is passed to the Datetime/Timedelta Array/Index
2477
+ constructor, cast it to an integer.
2478
+
2479
+ Parameters
2480
+ ----------
2481
+ periods : None, float, int
2482
+
2483
+ Returns
2484
+ -------
2485
+ periods : None or int
2486
+
2487
+ Raises
2488
+ ------
2489
+ TypeError
2490
+ if periods is None, float, or int
2491
+ """
2492
+ if periods is not None:
2493
+ if lib.is_float(periods):
2494
+ warnings.warn(
2495
+ # GH#56036
2496
+ "Non-integer 'periods' in pd.date_range, pd.timedelta_range, "
2497
+ "pd.period_range, and pd.interval_range are deprecated and "
2498
+ "will raise in a future version.",
2499
+ FutureWarning,
2500
+ stacklevel=find_stack_level(),
2501
+ )
2502
+ periods = int(periods)
2503
+ elif not lib.is_integer(periods):
2504
+ raise TypeError(f"periods must be a number, got {periods}")
2505
+ return periods
2506
+
2507
+
2508
+ def _validate_inferred_freq(
2509
+ freq: BaseOffset | None, inferred_freq: BaseOffset | None
2510
+ ) -> BaseOffset | None:
2511
+ """
2512
+ If the user passes a freq and another freq is inferred from passed data,
2513
+ require that they match.
2514
+
2515
+ Parameters
2516
+ ----------
2517
+ freq : DateOffset or None
2518
+ inferred_freq : DateOffset or None
2519
+
2520
+ Returns
2521
+ -------
2522
+ freq : DateOffset or None
2523
+ """
2524
+ if inferred_freq is not None:
2525
+ if freq is not None and freq != inferred_freq:
2526
+ raise ValueError(
2527
+ f"Inferred frequency {inferred_freq} from passed "
2528
+ "values does not conform to passed frequency "
2529
+ f"{freq.freqstr}"
2530
+ )
2531
+ if freq is None:
2532
+ freq = inferred_freq
2533
+
2534
+ return freq
2535
+
2536
+
2537
+ def dtype_to_unit(dtype: DatetimeTZDtype | np.dtype | ArrowDtype) -> str:
2538
+ """
2539
+ Return the unit str corresponding to the dtype's resolution.
2540
+
2541
+ Parameters
2542
+ ----------
2543
+ dtype : DatetimeTZDtype or np.dtype
2544
+ If np.dtype, we assume it is a datetime64 dtype.
2545
+
2546
+ Returns
2547
+ -------
2548
+ str
2549
+ """
2550
+ if isinstance(dtype, DatetimeTZDtype):
2551
+ return dtype.unit
2552
+ elif isinstance(dtype, ArrowDtype):
2553
+ if dtype.kind not in "mM":
2554
+ raise ValueError(f"{dtype=} does not have a resolution.")
2555
+ return dtype.pyarrow_dtype.unit
2556
+ return np.datetime_data(dtype)[0]
llava_next/lib/python3.10/site-packages/pandas/core/arrays/integer.py ADDED
@@ -0,0 +1,272 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import ClassVar
4
+
5
+ import numpy as np
6
+
7
+ from pandas.core.dtypes.base import register_extension_dtype
8
+ from pandas.core.dtypes.common import is_integer_dtype
9
+
10
+ from pandas.core.arrays.numeric import (
11
+ NumericArray,
12
+ NumericDtype,
13
+ )
14
+
15
+
16
+ class IntegerDtype(NumericDtype):
17
+ """
18
+ An ExtensionDtype to hold a single size & kind of integer dtype.
19
+
20
+ These specific implementations are subclasses of the non-public
21
+ IntegerDtype. For example, we have Int8Dtype to represent signed int 8s.
22
+
23
+ The attributes name & type are set when these subclasses are created.
24
+ """
25
+
26
+ _default_np_dtype = np.dtype(np.int64)
27
+ _checker = is_integer_dtype
28
+
29
+ @classmethod
30
+ def construct_array_type(cls) -> type[IntegerArray]:
31
+ """
32
+ Return the array type associated with this dtype.
33
+
34
+ Returns
35
+ -------
36
+ type
37
+ """
38
+ return IntegerArray
39
+
40
+ @classmethod
41
+ def _get_dtype_mapping(cls) -> dict[np.dtype, IntegerDtype]:
42
+ return NUMPY_INT_TO_DTYPE
43
+
44
+ @classmethod
45
+ def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
46
+ """
47
+ Safely cast the values to the given dtype.
48
+
49
+ "safe" in this context means the casting is lossless. e.g. if 'values'
50
+ has a floating dtype, each value must be an integer.
51
+ """
52
+ try:
53
+ return values.astype(dtype, casting="safe", copy=copy)
54
+ except TypeError as err:
55
+ casted = values.astype(dtype, copy=copy)
56
+ if (casted == values).all():
57
+ return casted
58
+
59
+ raise TypeError(
60
+ f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}"
61
+ ) from err
62
+
63
+
64
+ class IntegerArray(NumericArray):
65
+ """
66
+ Array of integer (optional missing) values.
67
+
68
+ Uses :attr:`pandas.NA` as the missing value.
69
+
70
+ .. warning::
71
+
72
+ IntegerArray is currently experimental, and its API or internal
73
+ implementation may change without warning.
74
+
75
+ We represent an IntegerArray with 2 numpy arrays:
76
+
77
+ - data: contains a numpy integer array of the appropriate dtype
78
+ - mask: a boolean array holding a mask on the data, True is missing
79
+
80
+ To construct an IntegerArray from generic array-like input, use
81
+ :func:`pandas.array` with one of the integer dtypes (see examples).
82
+
83
+ See :ref:`integer_na` for more.
84
+
85
+ Parameters
86
+ ----------
87
+ values : numpy.ndarray
88
+ A 1-d integer-dtype array.
89
+ mask : numpy.ndarray
90
+ A 1-d boolean-dtype array indicating missing values.
91
+ copy : bool, default False
92
+ Whether to copy the `values` and `mask`.
93
+
94
+ Attributes
95
+ ----------
96
+ None
97
+
98
+ Methods
99
+ -------
100
+ None
101
+
102
+ Returns
103
+ -------
104
+ IntegerArray
105
+
106
+ Examples
107
+ --------
108
+ Create an IntegerArray with :func:`pandas.array`.
109
+
110
+ >>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())
111
+ >>> int_array
112
+ <IntegerArray>
113
+ [1, <NA>, 3]
114
+ Length: 3, dtype: Int32
115
+
116
+ String aliases for the dtypes are also available. They are capitalized.
117
+
118
+ >>> pd.array([1, None, 3], dtype='Int32')
119
+ <IntegerArray>
120
+ [1, <NA>, 3]
121
+ Length: 3, dtype: Int32
122
+
123
+ >>> pd.array([1, None, 3], dtype='UInt16')
124
+ <IntegerArray>
125
+ [1, <NA>, 3]
126
+ Length: 3, dtype: UInt16
127
+ """
128
+
129
+ _dtype_cls = IntegerDtype
130
+
131
+ # The value used to fill '_data' to avoid upcasting
132
+ _internal_fill_value = 1
133
+ # Fill values used for any/all
134
+ # Incompatible types in assignment (expression has type "int", base class
135
+ # "BaseMaskedArray" defined the type as "<typing special form>")
136
+ _truthy_value = 1 # type: ignore[assignment]
137
+ _falsey_value = 0 # type: ignore[assignment]
138
+
139
+
140
+ _dtype_docstring = """
141
+ An ExtensionDtype for {dtype} integer data.
142
+
143
+ Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`.
144
+
145
+ Attributes
146
+ ----------
147
+ None
148
+
149
+ Methods
150
+ -------
151
+ None
152
+
153
+ Examples
154
+ --------
155
+ For Int8Dtype:
156
+
157
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype())
158
+ >>> ser.dtype
159
+ Int8Dtype()
160
+
161
+ For Int16Dtype:
162
+
163
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype())
164
+ >>> ser.dtype
165
+ Int16Dtype()
166
+
167
+ For Int32Dtype:
168
+
169
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype())
170
+ >>> ser.dtype
171
+ Int32Dtype()
172
+
173
+ For Int64Dtype:
174
+
175
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype())
176
+ >>> ser.dtype
177
+ Int64Dtype()
178
+
179
+ For UInt8Dtype:
180
+
181
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype())
182
+ >>> ser.dtype
183
+ UInt8Dtype()
184
+
185
+ For UInt16Dtype:
186
+
187
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype())
188
+ >>> ser.dtype
189
+ UInt16Dtype()
190
+
191
+ For UInt32Dtype:
192
+
193
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype())
194
+ >>> ser.dtype
195
+ UInt32Dtype()
196
+
197
+ For UInt64Dtype:
198
+
199
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype())
200
+ >>> ser.dtype
201
+ UInt64Dtype()
202
+ """
203
+
204
+ # create the Dtype
205
+
206
+
207
+ @register_extension_dtype
208
+ class Int8Dtype(IntegerDtype):
209
+ type = np.int8
210
+ name: ClassVar[str] = "Int8"
211
+ __doc__ = _dtype_docstring.format(dtype="int8")
212
+
213
+
214
+ @register_extension_dtype
215
+ class Int16Dtype(IntegerDtype):
216
+ type = np.int16
217
+ name: ClassVar[str] = "Int16"
218
+ __doc__ = _dtype_docstring.format(dtype="int16")
219
+
220
+
221
+ @register_extension_dtype
222
+ class Int32Dtype(IntegerDtype):
223
+ type = np.int32
224
+ name: ClassVar[str] = "Int32"
225
+ __doc__ = _dtype_docstring.format(dtype="int32")
226
+
227
+
228
+ @register_extension_dtype
229
+ class Int64Dtype(IntegerDtype):
230
+ type = np.int64
231
+ name: ClassVar[str] = "Int64"
232
+ __doc__ = _dtype_docstring.format(dtype="int64")
233
+
234
+
235
+ @register_extension_dtype
236
+ class UInt8Dtype(IntegerDtype):
237
+ type = np.uint8
238
+ name: ClassVar[str] = "UInt8"
239
+ __doc__ = _dtype_docstring.format(dtype="uint8")
240
+
241
+
242
+ @register_extension_dtype
243
+ class UInt16Dtype(IntegerDtype):
244
+ type = np.uint16
245
+ name: ClassVar[str] = "UInt16"
246
+ __doc__ = _dtype_docstring.format(dtype="uint16")
247
+
248
+
249
+ @register_extension_dtype
250
+ class UInt32Dtype(IntegerDtype):
251
+ type = np.uint32
252
+ name: ClassVar[str] = "UInt32"
253
+ __doc__ = _dtype_docstring.format(dtype="uint32")
254
+
255
+
256
+ @register_extension_dtype
257
+ class UInt64Dtype(IntegerDtype):
258
+ type = np.uint64
259
+ name: ClassVar[str] = "UInt64"
260
+ __doc__ = _dtype_docstring.format(dtype="uint64")
261
+
262
+
263
+ NUMPY_INT_TO_DTYPE: dict[np.dtype, IntegerDtype] = {
264
+ np.dtype(np.int8): Int8Dtype(),
265
+ np.dtype(np.int16): Int16Dtype(),
266
+ np.dtype(np.int32): Int32Dtype(),
267
+ np.dtype(np.int64): Int64Dtype(),
268
+ np.dtype(np.uint8): UInt8Dtype(),
269
+ np.dtype(np.uint16): UInt16Dtype(),
270
+ np.dtype(np.uint32): UInt32Dtype(),
271
+ np.dtype(np.uint64): UInt64Dtype(),
272
+ }
llava_next/lib/python3.10/site-packages/pandas/core/arrays/masked.py ADDED
@@ -0,0 +1,1650 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Any,
6
+ Callable,
7
+ Literal,
8
+ overload,
9
+ )
10
+ import warnings
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs import (
15
+ lib,
16
+ missing as libmissing,
17
+ )
18
+ from pandas._libs.tslibs import is_supported_dtype
19
+ from pandas._typing import (
20
+ ArrayLike,
21
+ AstypeArg,
22
+ AxisInt,
23
+ DtypeObj,
24
+ FillnaOptions,
25
+ InterpolateOptions,
26
+ NpDtype,
27
+ PositionalIndexer,
28
+ Scalar,
29
+ ScalarIndexer,
30
+ Self,
31
+ SequenceIndexer,
32
+ Shape,
33
+ npt,
34
+ )
35
+ from pandas.compat import (
36
+ IS64,
37
+ is_platform_windows,
38
+ )
39
+ from pandas.errors import AbstractMethodError
40
+ from pandas.util._decorators import doc
41
+ from pandas.util._validators import validate_fillna_kwargs
42
+
43
+ from pandas.core.dtypes.base import ExtensionDtype
44
+ from pandas.core.dtypes.common import (
45
+ is_bool,
46
+ is_integer_dtype,
47
+ is_list_like,
48
+ is_scalar,
49
+ is_string_dtype,
50
+ pandas_dtype,
51
+ )
52
+ from pandas.core.dtypes.dtypes import BaseMaskedDtype
53
+ from pandas.core.dtypes.missing import (
54
+ array_equivalent,
55
+ is_valid_na_for_dtype,
56
+ isna,
57
+ notna,
58
+ )
59
+
60
+ from pandas.core import (
61
+ algorithms as algos,
62
+ arraylike,
63
+ missing,
64
+ nanops,
65
+ ops,
66
+ )
67
+ from pandas.core.algorithms import (
68
+ factorize_array,
69
+ isin,
70
+ map_array,
71
+ mode,
72
+ take,
73
+ )
74
+ from pandas.core.array_algos import (
75
+ masked_accumulations,
76
+ masked_reductions,
77
+ )
78
+ from pandas.core.array_algos.quantile import quantile_with_mask
79
+ from pandas.core.arraylike import OpsMixin
80
+ from pandas.core.arrays._utils import to_numpy_dtype_inference
81
+ from pandas.core.arrays.base import ExtensionArray
82
+ from pandas.core.construction import (
83
+ array as pd_array,
84
+ ensure_wrapped_if_datetimelike,
85
+ extract_array,
86
+ )
87
+ from pandas.core.indexers import check_array_indexer
88
+ from pandas.core.ops import invalid_comparison
89
+ from pandas.core.util.hashing import hash_array
90
+
91
+ if TYPE_CHECKING:
92
+ from collections.abc import (
93
+ Iterator,
94
+ Sequence,
95
+ )
96
+ from pandas import Series
97
+ from pandas.core.arrays import BooleanArray
98
+ from pandas._typing import (
99
+ NumpySorter,
100
+ NumpyValueArrayLike,
101
+ )
102
+ from pandas.core.arrays import FloatingArray
103
+
104
+ from pandas.compat.numpy import function as nv
105
+
106
+
107
+ class BaseMaskedArray(OpsMixin, ExtensionArray):
108
+ """
109
+ Base class for masked arrays (which use _data and _mask to store the data).
110
+
111
+ numpy based
112
+ """
113
+
114
+ # The value used to fill '_data' to avoid upcasting
115
+ _internal_fill_value: Scalar
116
+ # our underlying data and mask are each ndarrays
117
+ _data: np.ndarray
118
+ _mask: npt.NDArray[np.bool_]
119
+
120
+ # Fill values used for any/all
121
+ _truthy_value = Scalar # bool(_truthy_value) = True
122
+ _falsey_value = Scalar # bool(_falsey_value) = False
123
+
124
+ @classmethod
125
+ def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self:
126
+ result = BaseMaskedArray.__new__(cls)
127
+ result._data = values
128
+ result._mask = mask
129
+ return result
130
+
131
+ def __init__(
132
+ self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False
133
+ ) -> None:
134
+ # values is supposed to already be validated in the subclass
135
+ if not (isinstance(mask, np.ndarray) and mask.dtype == np.bool_):
136
+ raise TypeError(
137
+ "mask should be boolean numpy array. Use "
138
+ "the 'pd.array' function instead"
139
+ )
140
+ if values.shape != mask.shape:
141
+ raise ValueError("values.shape must match mask.shape")
142
+
143
+ if copy:
144
+ values = values.copy()
145
+ mask = mask.copy()
146
+
147
+ self._data = values
148
+ self._mask = mask
149
+
150
+ @classmethod
151
+ def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False) -> Self:
152
+ values, mask = cls._coerce_to_array(scalars, dtype=dtype, copy=copy)
153
+ return cls(values, mask)
154
+
155
+ @classmethod
156
+ @doc(ExtensionArray._empty)
157
+ def _empty(cls, shape: Shape, dtype: ExtensionDtype):
158
+ values = np.empty(shape, dtype=dtype.type)
159
+ values.fill(cls._internal_fill_value)
160
+ mask = np.ones(shape, dtype=bool)
161
+ result = cls(values, mask)
162
+ if not isinstance(result, cls) or dtype != result.dtype:
163
+ raise NotImplementedError(
164
+ f"Default 'empty' implementation is invalid for dtype='{dtype}'"
165
+ )
166
+ return result
167
+
168
+ def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
169
+ # NEP 51: https://github.com/numpy/numpy/pull/22449
170
+ return str
171
+
172
+ @property
173
+ def dtype(self) -> BaseMaskedDtype:
174
+ raise AbstractMethodError(self)
175
+
176
+ @overload
177
+ def __getitem__(self, item: ScalarIndexer) -> Any:
178
+ ...
179
+
180
+ @overload
181
+ def __getitem__(self, item: SequenceIndexer) -> Self:
182
+ ...
183
+
184
+ def __getitem__(self, item: PositionalIndexer) -> Self | Any:
185
+ item = check_array_indexer(self, item)
186
+
187
+ newmask = self._mask[item]
188
+ if is_bool(newmask):
189
+ # This is a scalar indexing
190
+ if newmask:
191
+ return self.dtype.na_value
192
+ return self._data[item]
193
+
194
+ return self._simple_new(self._data[item], newmask)
195
+
196
    def _pad_or_backfill(
        self,
        *,
        method: FillnaOptions,
        limit: int | None = None,
        limit_area: Literal["inside", "outside"] | None = None,
        copy: bool = True,
    ) -> Self:
        """
        Fill NA positions by propagating adjacent valid values (ffill/bfill).

        Parameters
        ----------
        method : {"pad", "backfill", ...}
            Resolved by ``missing.get_fill_func``.
        limit : int, optional
            Maximum number of consecutive NAs to fill.
        limit_area : {"inside", "outside"}, optional
            Restrict filling to positions between ("inside") or outside of
            the first/last valid values.
        copy : bool, default True
            If False, fill in place and return ``self``.
        """
        mask = self._mask

        if mask.any():
            func = missing.get_fill_func(method, ndim=self.ndim)

            # Work on transposed views; the fill funcs operate row-wise.
            npvalues = self._data.T
            new_mask = mask.T
            if copy:
                npvalues = npvalues.copy()
                new_mask = new_mask.copy()
            elif limit_area is not None:
                # Keep a pristine copy of the pre-fill mask for the
                # limit_area restoration below (new_mask is mutated in place).
                mask = mask.copy()
            func(npvalues, limit=limit, mask=new_mask)

            if limit_area is not None and not mask.all():
                mask = mask.T
                neg_mask = ~mask
                # First and last originally-valid positions.
                first = neg_mask.argmax()
                last = len(neg_mask) - neg_mask[::-1].argmax() - 1
                if limit_area == "inside":
                    # Re-mask anything filled before `first` / after `last`.
                    new_mask[:first] |= mask[:first]
                    new_mask[last + 1 :] |= mask[last + 1 :]
                elif limit_area == "outside":
                    # Re-mask anything filled strictly between first and last.
                    new_mask[first + 1 : last] |= mask[first + 1 : last]

            if copy:
                return self._simple_new(npvalues.T, new_mask.T)
            else:
                return self
        else:
            # Nothing to fill.
            if copy:
                new_values = self.copy()
            else:
                new_values = self
        return new_values
239
+
240
    @doc(ExtensionArray.fillna)
    def fillna(
        self, value=None, method=None, limit: int | None = None, copy: bool = True
    ) -> Self:
        # Exactly one of value/method may be given.
        value, method = validate_fillna_kwargs(value, method)

        mask = self._mask

        # Broadcast/validate array-like `value` against the NA positions.
        value = missing.check_value_size(value, mask, len(self))

        if mask.any():
            if method is not None:
                # ffill/bfill path: propagate neighbors via the fill func,
                # working on transposed (row-wise) views.
                func = missing.get_fill_func(method, ndim=self.ndim)
                npvalues = self._data.T
                new_mask = mask.T
                if copy:
                    npvalues = npvalues.copy()
                    new_mask = new_mask.copy()
                func(npvalues, limit=limit, mask=new_mask)
                return self._simple_new(npvalues.T, new_mask.T)
            else:
                # fill with value
                if copy:
                    new_values = self.copy()
                else:
                    # self[:] returns a view-like wrapper sharing the buffers.
                    new_values = self[:]
                new_values[mask] = value
        else:
            # No NAs present; return self (or a copy of it).
            if copy:
                new_values = self.copy()
            else:
                new_values = self[:]
        return new_values
273
+
274
+ @classmethod
275
+ def _coerce_to_array(
276
+ cls, values, *, dtype: DtypeObj, copy: bool = False
277
+ ) -> tuple[np.ndarray, np.ndarray]:
278
+ raise AbstractMethodError(cls)
279
+
280
    def _validate_setitem_value(self, value):
        """
        Check if we have a scalar that we can cast losslessly.

        Parameters
        ----------
        value : object
            Candidate scalar to store in ``self._data``.

        Returns
        -------
        object
            The validated value, unchanged.

        Raises
        ------
        TypeError
            If the value cannot be stored without loss for this dtype.
        """
        kind = self.dtype.kind
        # TODO: get this all from np_can_hold_element?
        if kind == "b":
            # Boolean dtype accepts only true bools.
            if lib.is_bool(value):
                return value

        elif kind == "f":
            # Float dtype accepts ints and floats.
            if lib.is_integer(value) or lib.is_float(value):
                return value

        else:
            # Integer dtypes accept ints and integral floats (e.g. 3.0).
            if lib.is_integer(value) or (lib.is_float(value) and value.is_integer()):
                return value
            # TODO: unsigned checks

        # Note: without the "str" here, the f-string rendering raises in
        # py38 builds.
        raise TypeError(f"Invalid value '{str(value)}' for dtype {self.dtype}")
306
+
307
+ def __setitem__(self, key, value) -> None:
308
+ key = check_array_indexer(self, key)
309
+
310
+ if is_scalar(value):
311
+ if is_valid_na_for_dtype(value, self.dtype):
312
+ self._mask[key] = True
313
+ else:
314
+ value = self._validate_setitem_value(value)
315
+ self._data[key] = value
316
+ self._mask[key] = False
317
+ return
318
+
319
+ value, mask = self._coerce_to_array(value, dtype=self.dtype)
320
+
321
+ self._data[key] = value
322
+ self._mask[key] = mask
323
+
324
    def __contains__(self, key) -> bool:
        """
        Membership test.

        A non-NA-singleton missing value (e.g. np.nan) matches only actual,
        unmasked NaNs in float data; otherwise defer to the base class.
        """
        if isna(key) and key is not self.dtype.na_value:
            # GH#52840
            if self._data.dtype.kind == "f" and lib.is_float(key):
                # True iff a real (unmasked) NaN is stored in the data.
                return bool((np.isnan(self._data) & ~self._mask).any())

        return bool(super().__contains__(key))
331
+
332
+ def __iter__(self) -> Iterator:
333
+ if self.ndim == 1:
334
+ if not self._hasna:
335
+ for val in self._data:
336
+ yield val
337
+ else:
338
+ na_value = self.dtype.na_value
339
+ for isna_, val in zip(self._mask, self._data):
340
+ if isna_:
341
+ yield na_value
342
+ else:
343
+ yield val
344
+ else:
345
+ for i in range(len(self)):
346
+ yield self[i]
347
+
348
+ def __len__(self) -> int:
349
+ return len(self._data)
350
+
351
+ @property
352
+ def shape(self) -> Shape:
353
+ return self._data.shape
354
+
355
+ @property
356
+ def ndim(self) -> int:
357
+ return self._data.ndim
358
+
359
+ def swapaxes(self, axis1, axis2) -> Self:
360
+ data = self._data.swapaxes(axis1, axis2)
361
+ mask = self._mask.swapaxes(axis1, axis2)
362
+ return self._simple_new(data, mask)
363
+
364
+ def delete(self, loc, axis: AxisInt = 0) -> Self:
365
+ data = np.delete(self._data, loc, axis=axis)
366
+ mask = np.delete(self._mask, loc, axis=axis)
367
+ return self._simple_new(data, mask)
368
+
369
+ def reshape(self, *args, **kwargs) -> Self:
370
+ data = self._data.reshape(*args, **kwargs)
371
+ mask = self._mask.reshape(*args, **kwargs)
372
+ return self._simple_new(data, mask)
373
+
374
+ def ravel(self, *args, **kwargs) -> Self:
375
+ # TODO: need to make sure we have the same order for data/mask
376
+ data = self._data.ravel(*args, **kwargs)
377
+ mask = self._mask.ravel(*args, **kwargs)
378
+ return type(self)(data, mask)
379
+
380
+ @property
381
+ def T(self) -> Self:
382
+ return self._simple_new(self._data.T, self._mask.T)
383
+
384
+ def round(self, decimals: int = 0, *args, **kwargs):
385
+ """
386
+ Round each value in the array a to the given number of decimals.
387
+
388
+ Parameters
389
+ ----------
390
+ decimals : int, default 0
391
+ Number of decimal places to round to. If decimals is negative,
392
+ it specifies the number of positions to the left of the decimal point.
393
+ *args, **kwargs
394
+ Additional arguments and keywords have no effect but might be
395
+ accepted for compatibility with NumPy.
396
+
397
+ Returns
398
+ -------
399
+ NumericArray
400
+ Rounded values of the NumericArray.
401
+
402
+ See Also
403
+ --------
404
+ numpy.around : Round values of an np.array.
405
+ DataFrame.round : Round values of a DataFrame.
406
+ Series.round : Round values of a Series.
407
+ """
408
+ if self.dtype.kind == "b":
409
+ return self
410
+ nv.validate_round(args, kwargs)
411
+ values = np.round(self._data, decimals=decimals, **kwargs)
412
+
413
+ # Usually we'll get same type as self, but ndarray[bool] casts to float
414
+ return self._maybe_mask_result(values, self._mask.copy())
415
+
416
+ # ------------------------------------------------------------------
417
+ # Unary Methods
418
+
419
+ def __invert__(self) -> Self:
420
+ return self._simple_new(~self._data, self._mask.copy())
421
+
422
+ def __neg__(self) -> Self:
423
+ return self._simple_new(-self._data, self._mask.copy())
424
+
425
+ def __pos__(self) -> Self:
426
+ return self.copy()
427
+
428
+ def __abs__(self) -> Self:
429
+ return self._simple_new(abs(self._data), self._mask.copy())
430
+
431
+ # ------------------------------------------------------------------
432
+
433
+ def _values_for_json(self) -> np.ndarray:
434
+ return np.asarray(self, dtype=object)
435
+
436
    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = None,
        copy: bool = False,
        na_value: object = lib.no_default,
    ) -> np.ndarray:
        """
        Convert to a NumPy Array.

        By default converts to an object-dtype NumPy array. Specify the `dtype` and
        `na_value` keywords to customize the conversion.

        Parameters
        ----------
        dtype : dtype, default object
            The numpy dtype to convert to.
        copy : bool, default False
            Whether to ensure that the returned value is a not a view on
            the array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
            a copy is made, even if not strictly necessary. This is typically
            only possible when no missing values are present and `dtype`
            is the equivalent numpy dtype.
        na_value : scalar, optional
            Scalar missing value indicator to use in numpy array. Defaults
            to the native missing value indicator of this array (pd.NA).

        Returns
        -------
        numpy.ndarray

        Examples
        --------
        An object-dtype is the default result

        >>> a = pd.array([True, False, pd.NA], dtype="boolean")
        >>> a.to_numpy()
        array([True, False, <NA>], dtype=object)

        When no missing values are present, an equivalent dtype can be used.

        >>> pd.array([True, False], dtype="boolean").to_numpy(dtype="bool")
        array([ True, False])
        >>> pd.array([1, 2], dtype="Int64").to_numpy("int64")
        array([1, 2])

        However, requesting such dtype will raise a ValueError if
        missing values are present and the default missing value :attr:`NA`
        is used.

        >>> a = pd.array([True, False, pd.NA], dtype="boolean")
        >>> a
        <BooleanArray>
        [True, False, <NA>]
        Length: 3, dtype: boolean

        >>> a.to_numpy(dtype="bool")
        Traceback (most recent call last):
        ...
        ValueError: cannot convert to bool numpy array in presence of missing values

        Specify a valid `na_value` instead

        >>> a.to_numpy(dtype="bool", na_value=False)
        array([ True, False, False])
        """
        hasna = self._hasna
        # Infer target dtype / effective na_value from the array contents.
        dtype, na_value = to_numpy_dtype_inference(self, dtype, na_value, hasna)
        if dtype is None:
            dtype = object

        if hasna:
            if (
                dtype != object
                and not is_string_dtype(dtype)
                and na_value is libmissing.NA
            ):
                # pd.NA cannot be stored in a non-object, non-string ndarray.
                raise ValueError(
                    f"cannot convert to '{dtype}'-dtype NumPy array "
                    "with missing values. Specify an appropriate 'na_value' "
                    "for this dtype."
                )
            # don't pass copy to astype -> always need a copy since we are mutating
            with warnings.catch_warnings():
                # Suppress RuntimeWarnings from casting garbage under the mask.
                warnings.filterwarnings("ignore", category=RuntimeWarning)
                data = self._data.astype(dtype)
            data[self._mask] = na_value
        else:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=RuntimeWarning)
                data = self._data.astype(dtype, copy=copy)
        return data
528
+
529
+ @doc(ExtensionArray.tolist)
530
+ def tolist(self):
531
+ if self.ndim > 1:
532
+ return [x.tolist() for x in self]
533
+ dtype = None if self._hasna else self._data.dtype
534
+ return self.to_numpy(dtype=dtype, na_value=libmissing.NA).tolist()
535
+
536
    @overload
    def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
        ...

    @overload
    def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
        ...

    @overload
    def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
        ...

    def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
        """
        Cast to the given dtype.

        Fast-paths: identity cast (returns self or a copy) and casts between
        masked dtypes (keeps the mask, converts only the data buffer).
        Other extension dtypes go through ``_from_sequence``; plain numpy
        dtypes go through ``to_numpy`` with a dtype-appropriate ``na_value``.
        """
        dtype = pandas_dtype(dtype)

        if dtype == self.dtype:
            # Identity cast.
            if copy:
                return self.copy()
            return self

        # if we are astyping to another nullable masked dtype, we can fastpath
        if isinstance(dtype, BaseMaskedDtype):
            # TODO deal with NaNs for FloatingArray case
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=RuntimeWarning)
                # TODO: Is rounding what we want long term?
                data = self._data.astype(dtype.numpy_dtype, copy=copy)
            # mask is copied depending on whether the data was copied, and
            # not directly depending on the `copy` keyword
            mask = self._mask if data is self._data else self._mask.copy()
            cls = dtype.construct_array_type()
            return cls(data, mask, copy=False)

        if isinstance(dtype, ExtensionDtype):
            eacls = dtype.construct_array_type()
            return eacls._from_sequence(self, dtype=dtype, copy=copy)

        na_value: float | np.datetime64 | lib.NoDefault

        # coerce
        if dtype.kind == "f":
            # In astype, we consider dtype=float to also mean na_value=np.nan
            na_value = np.nan
        elif dtype.kind == "M":
            na_value = np.datetime64("NaT")
        else:
            na_value = lib.no_default

        # to_numpy will also raise, but we get somewhat nicer exception messages here
        if dtype.kind in "iu" and self._hasna:
            raise ValueError("cannot convert NA to integer")
        if dtype.kind == "b" and self._hasna:
            # careful: astype_nansafe converts np.nan to True
            raise ValueError("cannot convert float NaN to bool")

        data = self.to_numpy(dtype=dtype, na_value=na_value, copy=copy)
        return data
593
+
594
+ __array_priority__ = 1000 # higher than ndarray so ops dispatch to us
595
+
596
    def __array__(
        self, dtype: NpDtype | None = None, copy: bool | None = None
    ) -> np.ndarray:
        """
        the array interface, return my values
        We return an object array here to preserve our scalar values
        """
        # NOTE(review): ``copy`` is accepted for the numpy array-protocol
        # signature but is not forwarded to ``to_numpy`` here — confirm
        # whether callers depend on copy semantics.
        return self.to_numpy(dtype=dtype)
604
+
605
+ _HANDLED_TYPES: tuple[type, ...]
606
+
607
    def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
        """
        NumPy ufunc protocol: apply the ufunc to the raw ``_data`` buffers and
        re-attach the OR-ed mask to the result.

        Dispatch order: defer to binary-op dunders first, then ``out=``
        handling, then reductions, and only then apply the ufunc elementwise.
        """
        # For MaskedArray inputs, we apply the ufunc to ._data
        # and mask the result.

        out = kwargs.get("out", ())

        for x in inputs + out:
            if not isinstance(x, self._HANDLED_TYPES + (BaseMaskedArray,)):
                # Unknown operand type: let NumPy try other implementations.
                return NotImplemented

        # for binary ops, use our custom dunder methods
        result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
            self, ufunc, method, *inputs, **kwargs
        )
        if result is not NotImplemented:
            return result

        if "out" in kwargs:
            # e.g. test_ufunc_with_out
            return arraylike.dispatch_ufunc_with_out(
                self, ufunc, method, *inputs, **kwargs
            )

        if method == "reduce":
            result = arraylike.dispatch_reduction_ufunc(
                self, ufunc, method, *inputs, **kwargs
            )
            if result is not NotImplemented:
                return result

        # OR together the masks of all masked-array inputs.
        mask = np.zeros(len(self), dtype=bool)
        inputs2 = []
        for x in inputs:
            if isinstance(x, BaseMaskedArray):
                mask |= x._mask
                inputs2.append(x._data)
            else:
                inputs2.append(x)

        def reconstruct(x: np.ndarray):
            # Wrap a raw ufunc result back into the appropriate masked array.
            # we don't worry about scalar `x` here, since we
            # raise for reduce up above.
            from pandas.core.arrays import (
                BooleanArray,
                FloatingArray,
                IntegerArray,
            )

            if x.dtype.kind == "b":
                m = mask.copy()
                return BooleanArray(x, m)
            elif x.dtype.kind in "iu":
                m = mask.copy()
                return IntegerArray(x, m)
            elif x.dtype.kind == "f":
                m = mask.copy()
                if x.dtype == np.float16:
                    # reached in e.g. np.sqrt on BooleanArray
                    # we don't support float16
                    x = x.astype(np.float32)
                return FloatingArray(x, m)
            else:
                # Unsupported result dtype: fall back to a plain ndarray
                # with NaN at masked positions.
                x[mask] = np.nan
                return x

        result = getattr(ufunc, method)(*inputs2, **kwargs)
        if ufunc.nout > 1:
            # e.g. np.divmod
            return tuple(reconstruct(x) for x in result)
        elif method == "reduce":
            # e.g. np.add.reduce; test_ufunc_reduce_raises
            if self._mask.any():
                return self._na_value
            return result
        else:
            return reconstruct(result)
683
+
684
+ def __arrow_array__(self, type=None):
685
+ """
686
+ Convert myself into a pyarrow Array.
687
+ """
688
+ import pyarrow as pa
689
+
690
+ return pa.array(self._data, mask=self._mask, type=type)
691
+
692
    @property
    def _hasna(self) -> bool:
        """True if any position is masked (i.e. missing)."""
        # Note: this is expensive right now! The hope is that we can
        # make this faster by having an optional mask, but not have to change
        # source code using it..

        # error: Incompatible return value type (got "bool_", expected "bool")
        return self._mask.any()  # type: ignore[return-value]
700
+
701
    def _propagate_mask(
        self, mask: npt.NDArray[np.bool_] | None, other
    ) -> npt.NDArray[np.bool_]:
        """
        Combine this array's mask with the other operand's missingness.

        Parameters
        ----------
        mask : ndarray[bool] or None
            The other masked array's mask, or None if `other` is not a
            masked array.
        other : object
            The other operand (scalar, NA, or array-like).

        Returns
        -------
        ndarray[bool]
            A mask safe to mutate (never aliases ``self._mask`` unmodified).
        """
        if mask is None:
            mask = self._mask.copy()  # TODO: need test for BooleanArray needing a copy
            if other is libmissing.NA:
                # GH#45421 don't alter inplace
                mask = mask | True
            elif is_list_like(other) and len(other) == len(mask):
                mask = mask | isna(other)
        else:
            # `|` allocates a fresh array, so self._mask is not mutated.
            mask = self._mask | mask
        # Incompatible return value type (got "Optional[ndarray[Any, dtype[bool_]]]",
        # expected "ndarray[Any, dtype[bool_]]")
        return mask  # type: ignore[return-value]
716
+
717
    def _arith_method(self, other, op):
        """
        Shared implementation of arithmetic (and, via the alias below,
        logical) dunder ops: operate on raw buffers, then re-mask.

        The statement order here is load-bearing: mask propagation must
        happen before the NA-operand shortcut, and the pow/rpow mask fixups
        must run after the result is computed.
        """
        op_name = op.__name__
        omask = None

        if (
            not hasattr(other, "dtype")
            and is_list_like(other)
            and len(other) == len(self)
        ):
            # Try inferring masked dtype instead of casting to object
            other = pd_array(other)
            other = extract_array(other, extract_numpy=True)

        if isinstance(other, BaseMaskedArray):
            other, omask = other._data, other._mask

        elif is_list_like(other):
            if not isinstance(other, ExtensionArray):
                other = np.asarray(other)
            if other.ndim > 1:
                raise NotImplementedError("can only perform ops with 1-d structures")

        # We wrap the non-masked arithmetic logic used for numpy dtypes
        # in Series/Index arithmetic ops.
        other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
        pd_op = ops.get_array_op(op)
        other = ensure_wrapped_if_datetimelike(other)

        if op_name in {"pow", "rpow"} and isinstance(other, np.bool_):
            # Avoid DeprecationWarning: In future, it will be an error
            # for 'np.bool_' scalars to be interpreted as an index
            # e.g. test_array_scalar_like_equivalence
            other = bool(other)

        mask = self._propagate_mask(omask, other)

        if other is libmissing.NA:
            # All-NA result: fabricate placeholder data of the right dtype.
            result = np.ones_like(self._data)
            if self.dtype.kind == "b":
                if op_name in {
                    "floordiv",
                    "rfloordiv",
                    "pow",
                    "rpow",
                    "truediv",
                    "rtruediv",
                }:
                    # GH#41165 Try to match non-masked Series behavior
                    # This is still imperfect GH#46043
                    raise NotImplementedError(
                        f"operator '{op_name}' not implemented for bool dtypes"
                    )
                if op_name in {"mod", "rmod"}:
                    dtype = "int8"
                else:
                    dtype = "bool"
                result = result.astype(dtype)
            elif "truediv" in op_name and self.dtype.kind != "f":
                # The actual data here doesn't matter since the mask
                # will be all-True, but since this is division, we want
                # to end up with floating dtype.
                result = result.astype(np.float64)
        else:
            # Make sure we do this before the "pow" mask checks
            # to get an expected exception message on shape mismatch.
            if self.dtype.kind in "iu" and op_name in ["floordiv", "mod"]:
                # TODO(GH#30188) ATM we don't match the behavior of non-masked
                # types with respect to floordiv-by-zero
                pd_op = op

            with np.errstate(all="ignore"):
                result = pd_op(self._data, other)

        if op_name == "pow":
            # 1 ** x is 1.
            mask = np.where((self._data == 1) & ~self._mask, False, mask)
            # x ** 0 is 1.
            if omask is not None:
                mask = np.where((other == 0) & ~omask, False, mask)
            elif other is not libmissing.NA:
                mask = np.where(other == 0, False, mask)

        elif op_name == "rpow":
            # 1 ** x is 1.
            if omask is not None:
                mask = np.where((other == 1) & ~omask, False, mask)
            elif other is not libmissing.NA:
                mask = np.where(other == 1, False, mask)
            # x ** 0 is 1.
            mask = np.where((self._data == 0) & ~self._mask, False, mask)

        return self._maybe_mask_result(result, mask)

    # Logical ops (&, |, ^) share the same machinery as arithmetic ops.
    _logical_method = _arith_method
811
+
812
    def _cmp_method(self, other, op) -> BooleanArray:
        """
        Shared implementation of comparison dunders; always returns a
        BooleanArray whose mask marks positions where either operand was NA.
        """
        from pandas.core.arrays import BooleanArray

        mask = None

        if isinstance(other, BaseMaskedArray):
            other, mask = other._data, other._mask

        elif is_list_like(other):
            other = np.asarray(other)
            if other.ndim > 1:
                raise NotImplementedError("can only perform ops with 1-d structures")
            if len(self) != len(other):
                raise ValueError("Lengths must match to compare")

        if other is libmissing.NA:
            # numpy does not handle pd.NA well as "other" scalar (it returns
            # a scalar False instead of an array)
            # This may be fixed by NA.__array_ufunc__. Revisit this check
            # once that's implemented.
            result = np.zeros(self._data.shape, dtype="bool")
            mask = np.ones(self._data.shape, dtype="bool")
        else:
            with warnings.catch_warnings():
                # numpy may show a FutureWarning or DeprecationWarning:
                #     elementwise comparison failed; returning scalar instead,
                #     but in the future will perform elementwise comparison
                # before returning NotImplemented. We fall back to the correct
                # behavior today, so that should be fine to ignore.
                warnings.filterwarnings("ignore", "elementwise", FutureWarning)
                warnings.filterwarnings("ignore", "elementwise", DeprecationWarning)
                method = getattr(self._data, f"__{op.__name__}__")
                result = method(other)

                if result is NotImplemented:
                    # Incomparable types: produce the conventional result
                    # (all-False for ==, all-True for !=, raise otherwise).
                    result = invalid_comparison(self._data, other, op)

        mask = self._propagate_mask(mask, other)
        return BooleanArray(result, mask, copy=False)
851
+
852
    def _maybe_mask_result(
        self, result: np.ndarray | tuple[np.ndarray, np.ndarray], mask: np.ndarray
    ):
        """
        Wrap a raw op result into the masked array type matching its dtype.

        Parameters
        ----------
        result : array-like or tuple[array-like]
        mask : array-like bool

        Returns
        -------
        BooleanArray, IntegerArray, FloatingArray, TimedeltaArray, tuple of
        those (for divmod), or a plain ndarray with NaN at masked positions
        when no masked type matches the result dtype.
        """
        if isinstance(result, tuple):
            # i.e. divmod
            div, mod = result
            return (
                self._maybe_mask_result(div, mask),
                self._maybe_mask_result(mod, mask),
            )

        if result.dtype.kind == "f":
            from pandas.core.arrays import FloatingArray

            return FloatingArray(result, mask, copy=False)

        elif result.dtype.kind == "b":
            from pandas.core.arrays import BooleanArray

            return BooleanArray(result, mask, copy=False)

        elif lib.is_np_dtype(result.dtype, "m") and is_supported_dtype(result.dtype):
            # e.g. test_numeric_arr_mul_tdscalar_numexpr_path
            from pandas.core.arrays import TimedeltaArray

            # Timedelta has a native NaT; no separate mask needed.
            result[mask] = result.dtype.type("NaT")

            if not isinstance(result, TimedeltaArray):
                return TimedeltaArray._simple_new(result, dtype=result.dtype)

            return result

        elif result.dtype.kind in "iu":
            from pandas.core.arrays import IntegerArray

            return IntegerArray(result, mask, copy=False)

        else:
            # Fallback: mutate the raw ndarray in place with NaN.
            result[mask] = np.nan
            return result
898
+
899
+ def isna(self) -> np.ndarray:
900
+ return self._mask.copy()
901
+
902
+ @property
903
+ def _na_value(self):
904
+ return self.dtype.na_value
905
+
906
+ @property
907
+ def nbytes(self) -> int:
908
+ return self._data.nbytes + self._mask.nbytes
909
+
910
+ @classmethod
911
+ def _concat_same_type(
912
+ cls,
913
+ to_concat: Sequence[Self],
914
+ axis: AxisInt = 0,
915
+ ) -> Self:
916
+ data = np.concatenate([x._data for x in to_concat], axis=axis)
917
+ mask = np.concatenate([x._mask for x in to_concat], axis=axis)
918
+ return cls(data, mask)
919
+
920
+ def _hash_pandas_object(
921
+ self, *, encoding: str, hash_key: str, categorize: bool
922
+ ) -> npt.NDArray[np.uint64]:
923
+ hashed_array = hash_array(
924
+ self._data, encoding=encoding, hash_key=hash_key, categorize=categorize
925
+ )
926
+ hashed_array[self.isna()] = hash(self.dtype.na_value)
927
+ return hashed_array
928
+
929
    def take(
        self,
        indexer,
        *,
        allow_fill: bool = False,
        fill_value: Scalar | None = None,
        axis: AxisInt = 0,
    ) -> Self:
        """
        Take elements at ``indexer`` positions, optionally filling -1 slots.

        With ``allow_fill=True``: an NA ``fill_value`` marks the filled slots
        in the mask; a non-NA ``fill_value`` is written into the data and the
        filled slots are unmasked.
        """
        # we always fill with 1 internally
        # to avoid upcasting
        data_fill_value = self._internal_fill_value if isna(fill_value) else fill_value
        result = take(
            self._data,
            indexer,
            fill_value=data_fill_value,
            allow_fill=allow_fill,
            axis=axis,
        )

        # fill_value=True marks every filled slot as missing by default.
        mask = take(
            self._mask, indexer, fill_value=True, allow_fill=allow_fill, axis=axis
        )

        # if we are filling
        # we only fill where the indexer is null
        # not existing missing values
        # TODO(jreback) what if we have a non-na float as a fill value?
        if allow_fill and notna(fill_value):
            fill_mask = np.asarray(indexer) == -1
            result[fill_mask] = fill_value
            # XOR clears the mask exactly at the filled positions.
            mask = mask ^ fill_mask

        return self._simple_new(result, mask)
962
+
963
    # error: Return type "BooleanArray" of "isin" incompatible with return type
    # "ndarray" in supertype "ExtensionArray"
    def isin(self, values: ArrayLike) -> BooleanArray:  # type: ignore[override]
        """
        Elementwise membership test against ``values``.

        Returns a BooleanArray with an all-False mask: NA does not propagate;
        NA positions are True iff ``values`` literally contains the NA
        singleton.
        """
        from pandas.core.arrays import BooleanArray

        # algorithms.isin will eventually convert values to an ndarray, so no extra
        # cost to doing it here first
        values_arr = np.asarray(values)
        result = isin(self._data, values_arr)

        if self._hasna:
            # Identity check: only the exact NA singleton counts.
            values_have_NA = values_arr.dtype == object and any(
                val is self.dtype.na_value for val in values_arr
            )

            # For now, NA does not propagate so set result according to presence of NA,
            # see https://github.com/pandas-dev/pandas/pull/38379 for some discussion
            result[self._mask] = values_have_NA

        mask = np.zeros(self._data.shape, dtype=bool)
        return BooleanArray(result, mask, copy=False)
984
+
985
+ def copy(self) -> Self:
986
+ data = self._data.copy()
987
+ mask = self._mask.copy()
988
+ return self._simple_new(data, mask)
989
+
990
+ @doc(ExtensionArray.duplicated)
991
+ def duplicated(
992
+ self, keep: Literal["first", "last", False] = "first"
993
+ ) -> npt.NDArray[np.bool_]:
994
+ values = self._data
995
+ mask = self._mask
996
+ return algos.duplicated(values, keep=keep, mask=mask)
997
+
998
+ def unique(self) -> Self:
999
+ """
1000
+ Compute the BaseMaskedArray of unique values.
1001
+
1002
+ Returns
1003
+ -------
1004
+ uniques : BaseMaskedArray
1005
+ """
1006
+ uniques, mask = algos.unique_with_mask(self._data, self._mask)
1007
+ return self._simple_new(uniques, mask)
1008
+
1009
+ @doc(ExtensionArray.searchsorted)
1010
+ def searchsorted(
1011
+ self,
1012
+ value: NumpyValueArrayLike | ExtensionArray,
1013
+ side: Literal["left", "right"] = "left",
1014
+ sorter: NumpySorter | None = None,
1015
+ ) -> npt.NDArray[np.intp] | np.intp:
1016
+ if self._hasna:
1017
+ raise ValueError(
1018
+ "searchsorted requires array to be sorted, which is impossible "
1019
+ "with NAs present."
1020
+ )
1021
+ if isinstance(value, ExtensionArray):
1022
+ value = value.astype(object)
1023
+ # Base class searchsorted would cast to object, which is *much* slower.
1024
+ return self._data.searchsorted(value, side=side, sorter=sorter)
1025
+
1026
    @doc(ExtensionArray.factorize)
    def factorize(
        self,
        use_na_sentinel: bool = True,
    ) -> tuple[np.ndarray, ExtensionArray]:
        arr = self._data
        mask = self._mask

        # Use a sentinel for na; recode and add NA to uniques if necessary below
        codes, uniques = factorize_array(arr, use_na_sentinel=True, mask=mask)

        # check that factorize_array correctly preserves dtype.
        assert uniques.dtype == self.dtype.numpy_dtype, (uniques.dtype, self.dtype)

        has_na = mask.any()
        if use_na_sentinel or not has_na:
            size = len(uniques)
        else:
            # Make room for an NA value
            size = len(uniques) + 1
        uniques_mask = np.zeros(size, dtype=bool)
        if not use_na_sentinel and has_na:
            # Recode so NA gets a real (first-appearance ordered) code
            # instead of the -1 sentinel.
            na_index = mask.argmax()
            # Insert na with the proper code
            if na_index == 0:
                na_code = np.intp(0)
            else:
                # One past the largest code seen before the first NA.
                na_code = codes[:na_index].max() + 1
            # Shift codes >= na_code up to make room, then assign NA's code.
            codes[codes >= na_code] += 1
            codes[codes == -1] = na_code
            # dummy value for uniques; not used since uniques_mask will be True
            uniques = np.insert(uniques, na_code, 0)
            uniques_mask[na_code] = True
        uniques_ea = self._simple_new(uniques, uniques_mask)

        return codes, uniques_ea
1062
+
1063
+ @doc(ExtensionArray._values_for_argsort)
1064
+ def _values_for_argsort(self) -> np.ndarray:
1065
+ return self._data
1066
+
1067
    def value_counts(self, dropna: bool = True) -> Series:
        """
        Returns a Series containing counts of each unique value.

        Parameters
        ----------
        dropna : bool, default True
            Don't include counts of missing values.

        Returns
        -------
        counts : Series

        See Also
        --------
        Series.value_counts
        """
        from pandas import (
            Index,
            Series,
        )
        from pandas.arrays import IntegerArray

        # value_counts_arraylike returns the observed keys, their counts and
        # the number of NA entries (when dropna=False the NA bucket is
        # appended as the last element of `keys`).
        keys, value_counts, na_counter = algos.value_counts_arraylike(
            self._data, dropna=dropna, mask=self._mask
        )
        mask_index = np.zeros((len(value_counts),), dtype=np.bool_)
        mask = mask_index.copy()

        if na_counter > 0:
            # Mark the trailing NA bucket in the index mask so the index
            # renders that position as <NA>.
            mask_index[-1] = True

        arr = IntegerArray(value_counts, mask)
        index = Index(
            self.dtype.construct_array_type()(
                keys, mask_index  # type: ignore[arg-type]
            )
        )
        return Series(arr, index=index, name="count", copy=False)
1106
+
1107
+ def _mode(self, dropna: bool = True) -> Self:
1108
+ if dropna:
1109
+ result = mode(self._data, dropna=dropna, mask=self._mask)
1110
+ res_mask = np.zeros(result.shape, dtype=np.bool_)
1111
+ else:
1112
+ result, res_mask = mode(self._data, dropna=dropna, mask=self._mask)
1113
+ result = type(self)(result, res_mask) # type: ignore[arg-type]
1114
+ return result[result.argsort()]
1115
+
1116
+ @doc(ExtensionArray.equals)
1117
+ def equals(self, other) -> bool:
1118
+ if type(self) != type(other):
1119
+ return False
1120
+ if other.dtype != self.dtype:
1121
+ return False
1122
+
1123
+ # GH#44382 if e.g. self[1] is np.nan and other[1] is pd.NA, we are NOT
1124
+ # equal.
1125
+ if not np.array_equal(self._mask, other._mask):
1126
+ return False
1127
+
1128
+ left = self._data[~self._mask]
1129
+ right = other._data[~other._mask]
1130
+ return array_equivalent(left, right, strict_nan=True, dtype_equal=True)
1131
+
1132
    def _quantile(
        self, qs: npt.NDArray[np.float64], interpolation: str
    ) -> BaseMaskedArray:
        """
        Dispatch to quantile_with_mask, needed because we do not have
        _from_factorized.

        Notes
        -----
        We assume that all impacted cases are 1D-only.
        """
        res = quantile_with_mask(
            self._data,
            mask=self._mask,
            # TODO(GH#40932): na_value_for_dtype(self.dtype.numpy_dtype)
            # instead of np.nan
            fill_value=np.nan,
            qs=qs,
            interpolation=interpolation,
        )

        if self._hasna:
            # Our result mask is all-False unless we are all-NA, in which
            # case it is all-True.
            if self.ndim == 2:
                # I think this should be out_mask=self.isna().all(axis=1)
                # but am holding off until we have tests
                raise NotImplementedError
            if self.isna().all():
                out_mask = np.ones(res.shape, dtype=bool)

                if is_integer_dtype(self.dtype):
                    # We try to maintain int dtype if possible for not all-na case
                    # as well; the zeros are placeholders since everything is
                    # masked anyway.
                    res = np.zeros(res.shape, dtype=self.dtype.numpy_dtype)
            else:
                out_mask = np.zeros(res.shape, dtype=bool)
        else:
            out_mask = np.zeros(res.shape, dtype=bool)
        return self._maybe_mask_result(res, mask=out_mask)
1172
+
1173
+ # ------------------------------------------------------------------
1174
+ # Reductions
1175
+
1176
    def _reduce(
        self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
    ):
        # Dispatch a named reduction. Masked-aware reductions are implemented
        # as methods on this class; everything else falls through to nanops.
        if name in {"any", "all", "min", "max", "sum", "prod", "mean", "var", "std"}:
            result = getattr(self, name)(skipna=skipna, **kwargs)
        else:
            # median, skew, kurt, sem
            data = self._data
            mask = self._mask
            op = getattr(nanops, f"nan{name}")
            axis = kwargs.pop("axis", None)
            result = op(data, axis=axis, skipna=skipna, mask=mask, **kwargs)

        if keepdims:
            # Return a length-1 masked array instead of a scalar.
            if isna(result):
                return self._wrap_na_result(name=name, axis=0, mask_size=(1,))
            else:
                result = result.reshape(1)
                mask = np.zeros(1, dtype=bool)
                return self._maybe_mask_result(result, mask)

        # Scalar path: a missing scalar result is normalized to pd.NA.
        if isna(result):
            return libmissing.NA
        else:
            return result
1201
+
1202
+ def _wrap_reduction_result(self, name: str, result, *, skipna, axis):
1203
+ if isinstance(result, np.ndarray):
1204
+ if skipna:
1205
+ # we only retain mask for all-NA rows/columns
1206
+ mask = self._mask.all(axis=axis)
1207
+ else:
1208
+ mask = self._mask.any(axis=axis)
1209
+
1210
+ return self._maybe_mask_result(result, mask)
1211
+ return result
1212
+
1213
    def _wrap_na_result(self, *, name, axis, mask_size):
        # Build a fully-masked (all-NA) result whose dtype matches what the
        # named reduction would have produced on non-NA data.
        mask = np.ones(mask_size, dtype=bool)

        float_dtyp = "float32" if self.dtype == "Float32" else "float64"
        if name in ["mean", "median", "var", "std", "skew", "kurt"]:
            np_dtype = float_dtyp
        elif name in ["min", "max"] or self.dtype.itemsize == 8:
            np_dtype = self.dtype.numpy_dtype.name
        else:
            # Narrower integer dtypes upcast platform-dependently, mirroring
            # the dtype the regular (non-NA) reduction path returns.
            is_windows_or_32bit = is_platform_windows() or not IS64
            int_dtyp = "int32" if is_windows_or_32bit else "int64"
            uint_dtyp = "uint32" if is_windows_or_32bit else "uint64"
            np_dtype = {"b": int_dtyp, "i": int_dtyp, "u": uint_dtyp, "f": float_dtyp}[
                self.dtype.kind
            ]

        # The value 1 is a placeholder: every entry is masked, only the
        # dtype of the result matters.
        value = np.array([1], dtype=np_dtype)
        return self._maybe_mask_result(value, mask=mask)
1231
+
1232
+ def _wrap_min_count_reduction_result(
1233
+ self, name: str, result, *, skipna, min_count, axis
1234
+ ):
1235
+ if min_count == 0 and isinstance(result, np.ndarray):
1236
+ return self._maybe_mask_result(result, np.zeros(result.shape, dtype=bool))
1237
+ return self._wrap_reduction_result(name, result, skipna=skipna, axis=axis)
1238
+
1239
+ def sum(
1240
+ self,
1241
+ *,
1242
+ skipna: bool = True,
1243
+ min_count: int = 0,
1244
+ axis: AxisInt | None = 0,
1245
+ **kwargs,
1246
+ ):
1247
+ nv.validate_sum((), kwargs)
1248
+
1249
+ result = masked_reductions.sum(
1250
+ self._data,
1251
+ self._mask,
1252
+ skipna=skipna,
1253
+ min_count=min_count,
1254
+ axis=axis,
1255
+ )
1256
+ return self._wrap_min_count_reduction_result(
1257
+ "sum", result, skipna=skipna, min_count=min_count, axis=axis
1258
+ )
1259
+
1260
+ def prod(
1261
+ self,
1262
+ *,
1263
+ skipna: bool = True,
1264
+ min_count: int = 0,
1265
+ axis: AxisInt | None = 0,
1266
+ **kwargs,
1267
+ ):
1268
+ nv.validate_prod((), kwargs)
1269
+
1270
+ result = masked_reductions.prod(
1271
+ self._data,
1272
+ self._mask,
1273
+ skipna=skipna,
1274
+ min_count=min_count,
1275
+ axis=axis,
1276
+ )
1277
+ return self._wrap_min_count_reduction_result(
1278
+ "prod", result, skipna=skipna, min_count=min_count, axis=axis
1279
+ )
1280
+
1281
+ def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
1282
+ nv.validate_mean((), kwargs)
1283
+ result = masked_reductions.mean(
1284
+ self._data,
1285
+ self._mask,
1286
+ skipna=skipna,
1287
+ axis=axis,
1288
+ )
1289
+ return self._wrap_reduction_result("mean", result, skipna=skipna, axis=axis)
1290
+
1291
+ def var(
1292
+ self, *, skipna: bool = True, axis: AxisInt | None = 0, ddof: int = 1, **kwargs
1293
+ ):
1294
+ nv.validate_stat_ddof_func((), kwargs, fname="var")
1295
+ result = masked_reductions.var(
1296
+ self._data,
1297
+ self._mask,
1298
+ skipna=skipna,
1299
+ axis=axis,
1300
+ ddof=ddof,
1301
+ )
1302
+ return self._wrap_reduction_result("var", result, skipna=skipna, axis=axis)
1303
+
1304
+ def std(
1305
+ self, *, skipna: bool = True, axis: AxisInt | None = 0, ddof: int = 1, **kwargs
1306
+ ):
1307
+ nv.validate_stat_ddof_func((), kwargs, fname="std")
1308
+ result = masked_reductions.std(
1309
+ self._data,
1310
+ self._mask,
1311
+ skipna=skipna,
1312
+ axis=axis,
1313
+ ddof=ddof,
1314
+ )
1315
+ return self._wrap_reduction_result("std", result, skipna=skipna, axis=axis)
1316
+
1317
+ def min(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
1318
+ nv.validate_min((), kwargs)
1319
+ result = masked_reductions.min(
1320
+ self._data,
1321
+ self._mask,
1322
+ skipna=skipna,
1323
+ axis=axis,
1324
+ )
1325
+ return self._wrap_reduction_result("min", result, skipna=skipna, axis=axis)
1326
+
1327
+ def max(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
1328
+ nv.validate_max((), kwargs)
1329
+ result = masked_reductions.max(
1330
+ self._data,
1331
+ self._mask,
1332
+ skipna=skipna,
1333
+ axis=axis,
1334
+ )
1335
+ return self._wrap_reduction_result("max", result, skipna=skipna, axis=axis)
1336
+
1337
+ def map(self, mapper, na_action=None):
1338
+ return map_array(self.to_numpy(), mapper, na_action=na_action)
1339
+
1340
    def any(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
        """
        Return whether any element is truthy.

        Returns False unless there is at least one element that is truthy.
        By default, NAs are skipped. If ``skipna=False`` is specified and
        missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
        is used as for logical operations.

        .. versionchanged:: 1.4.0

        Parameters
        ----------
        skipna : bool, default True
            Exclude NA values. If the entire array is NA and `skipna` is
            True, then the result will be False, as for an empty array.
            If `skipna` is False, the result will still be True if there is
            at least one element that is truthy, otherwise NA will be returned
            if there are NA's present.
        axis : int, optional, default 0
        **kwargs : any, default None
            Additional keywords have no effect but might be accepted for
            compatibility with NumPy.

        Returns
        -------
        bool or :attr:`pandas.NA`

        See Also
        --------
        numpy.any : Numpy version of this method.
        BaseMaskedArray.all : Return whether all elements are truthy.

        Examples
        --------
        The result indicates whether any element is truthy (and by default
        skips NAs):

        >>> pd.array([True, False, True]).any()
        True
        >>> pd.array([True, False, pd.NA]).any()
        True
        >>> pd.array([False, False, pd.NA]).any()
        False
        >>> pd.array([], dtype="boolean").any()
        False
        >>> pd.array([pd.NA], dtype="boolean").any()
        False
        >>> pd.array([pd.NA], dtype="Float64").any()
        False

        With ``skipna=False``, the result can be NA if this is logically
        required (whether ``pd.NA`` is True or False influences the result):

        >>> pd.array([True, False, pd.NA]).any(skipna=False)
        True
        >>> pd.array([1, 0, pd.NA]).any(skipna=False)
        True
        >>> pd.array([False, False, pd.NA]).any(skipna=False)
        <NA>
        >>> pd.array([0, 0, pd.NA]).any(skipna=False)
        <NA>
        """
        nv.validate_any((), kwargs)

        # Fill NA slots with the dtype's falsey value so they cannot make
        # the "any" true on their own.
        values = self._data.copy()
        # error: Argument 3 to "putmask" has incompatible type "object";
        # expected "Union[_SupportsArray[dtype[Any]],
        # _NestedSequence[_SupportsArray[dtype[Any]]],
        # bool, int, float, complex, str, bytes,
        # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
        np.putmask(values, self._mask, self._falsey_value)  # type: ignore[arg-type]
        # NOTE(review): unlike ``all`` below, ``axis`` is not forwarded to
        # ``values.any()`` here — confirm whether 2D/axis support is intended.
        result = values.any()
        if skipna:
            return result
        else:
            # Kleene logic: a definite True wins; otherwise the presence of
            # any NA makes the result NA.
            if result or len(self) == 0 or not self._mask.any():
                return result
            else:
                return self.dtype.na_value
1420
+
1421
    def all(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
        """
        Return whether all elements are truthy.

        Returns True unless there is at least one element that is falsey.
        By default, NAs are skipped. If ``skipna=False`` is specified and
        missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
        is used as for logical operations.

        .. versionchanged:: 1.4.0

        Parameters
        ----------
        skipna : bool, default True
            Exclude NA values. If the entire array is NA and `skipna` is
            True, then the result will be True, as for an empty array.
            If `skipna` is False, the result will still be False if there is
            at least one element that is falsey, otherwise NA will be returned
            if there are NA's present.
        axis : int, optional, default 0
        **kwargs : any, default None
            Additional keywords have no effect but might be accepted for
            compatibility with NumPy.

        Returns
        -------
        bool or :attr:`pandas.NA`

        See Also
        --------
        numpy.all : Numpy version of this method.
        BooleanArray.any : Return whether any element is truthy.

        Examples
        --------
        The result indicates whether all elements are truthy (and by default
        skips NAs):

        >>> pd.array([True, True, pd.NA]).all()
        True
        >>> pd.array([1, 1, pd.NA]).all()
        True
        >>> pd.array([True, False, pd.NA]).all()
        False
        >>> pd.array([], dtype="boolean").all()
        True
        >>> pd.array([pd.NA], dtype="boolean").all()
        True
        >>> pd.array([pd.NA], dtype="Float64").all()
        True

        With ``skipna=False``, the result can be NA if this is logically
        required (whether ``pd.NA`` is True or False influences the result):

        >>> pd.array([True, True, pd.NA]).all(skipna=False)
        <NA>
        >>> pd.array([1, 1, pd.NA]).all(skipna=False)
        <NA>
        >>> pd.array([True, False, pd.NA]).all(skipna=False)
        False
        >>> pd.array([1, 0, pd.NA]).all(skipna=False)
        False
        """
        nv.validate_all((), kwargs)

        # Fill NA slots with the dtype's truthy value so they cannot make
        # the "all" false on their own.
        values = self._data.copy()
        # error: Argument 3 to "putmask" has incompatible type "object";
        # expected "Union[_SupportsArray[dtype[Any]],
        # _NestedSequence[_SupportsArray[dtype[Any]]],
        # bool, int, float, complex, str, bytes,
        # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
        np.putmask(values, self._mask, self._truthy_value)  # type: ignore[arg-type]
        result = values.all(axis=axis)

        if skipna:
            return result
        else:
            # Kleene logic: a definite False wins; otherwise the presence of
            # any NA makes the result NA.
            if not result or len(self) == 0 or not self._mask.any():
                return result
            else:
                return self.dtype.na_value
1502
+
1503
    def interpolate(
        self,
        *,
        method: InterpolateOptions,
        axis: int,
        index,
        limit,
        limit_direction,
        limit_area,
        copy: bool,
        **kwargs,
    ) -> FloatingArray:
        """
        See NDFrame.interpolate.__doc__.
        """
        # NB: we return type(self) even if copy=False
        if self.dtype.kind == "f":
            # Float dtypes can be interpolated in place (or on copies).
            if copy:
                data = self._data.copy()
                mask = self._mask.copy()
            else:
                data = self._data
                mask = self._mask
        elif self.dtype.kind in "iu":
            # Integer dtypes must be upcast to float64 for interpolation,
            # which forces a copy regardless of the `copy` argument.
            copy = True
            data = self._data.astype("f8")
            mask = self._mask.copy()
        else:
            raise NotImplementedError(
                f"interpolate is not implemented for dtype={self.dtype}"
            )

        # Fills `data`/`mask` in place.
        missing.interpolate_2d_inplace(
            data,
            method=method,
            axis=0,
            index=index,
            limit=limit,
            limit_direction=limit_direction,
            limit_area=limit_area,
            mask=mask,
            **kwargs,
        )
        if not copy:
            # In-place float path: self was mutated directly.
            return self  # type: ignore[return-value]
        if self.dtype.kind == "f":
            return type(self)._simple_new(data, mask)  # type: ignore[return-value]
        else:
            # Integer input was upcast; wrap the float64 result.
            from pandas.core.arrays import FloatingArray

            return FloatingArray._simple_new(data, mask)
1554
+
1555
+ def _accumulate(
1556
+ self, name: str, *, skipna: bool = True, **kwargs
1557
+ ) -> BaseMaskedArray:
1558
+ data = self._data
1559
+ mask = self._mask
1560
+
1561
+ op = getattr(masked_accumulations, name)
1562
+ data, mask = op(data, mask, skipna=skipna, **kwargs)
1563
+
1564
+ return self._simple_new(data, mask)
1565
+
1566
+ # ------------------------------------------------------------------
1567
+ # GroupBy Methods
1568
+
1569
    def _groupby_op(
        self,
        *,
        how: str,
        has_dropped_na: bool,
        min_count: int,
        ngroups: int,
        ids: npt.NDArray[np.intp],
        **kwargs,
    ):
        # Run a cython groupby kernel on the raw data/mask and rewrap the
        # result as a masked array (except for index-returning kernels).
        from pandas.core.groupby.ops import WrappedCythonOp

        kind = WrappedCythonOp.get_kind_from_how(how)
        op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)

        # libgroupby functions are responsible for NOT altering mask
        mask = self._mask
        if op.kind != "aggregate":
            # Transforms produce one entry per input row, so start from a
            # copy of the input mask; aggregations get one entry per group.
            result_mask = mask.copy()
        else:
            result_mask = np.zeros(ngroups, dtype=bool)

        if how == "rank" and kwargs.get("na_option") in ["top", "bottom"]:
            # In these modes NAs receive a rank, so nothing stays masked.
            result_mask[:] = False

        res_values = op._cython_op_ndim_compat(
            self._data,
            min_count=min_count,
            ngroups=ngroups,
            comp_ids=ids,
            mask=mask,
            result_mask=result_mask,
            **kwargs,
        )

        if op.how == "ohlc":
            # ohlc yields multiple columns per group; broadcast the per-group
            # mask across them.
            arity = op._cython_arity.get(op.how, 1)
            result_mask = np.tile(result_mask, (arity, 1)).T

        if op.how in ["idxmin", "idxmax"]:
            # Result values are indexes to take, keep as ndarray
            return res_values
        else:
            # res_values should already have the correct dtype, we just need to
            # wrap in a MaskedArray
            return self._maybe_mask_result(res_values, result_mask)
1615
+
1616
+
1617
def transpose_homogeneous_masked_arrays(
    masked_arrays: Sequence[BaseMaskedArray],
) -> list[BaseMaskedArray]:
    """Transpose masked arrays in a list, but faster.

    Input should be a list of 1-dim masked arrays of equal length and all have the
    same dtype. The caller is responsible for ensuring validity of input data.
    """
    arrays = list(masked_arrays)
    dtype = arrays[0].dtype

    # Stack row data into a single Fortran-ordered buffer so that each output
    # column is a simple slice of contiguous memory.
    data_buf = np.empty(
        (len(arrays), len(arrays[0])), order="F", dtype=dtype.numpy_dtype
    )
    np.concatenate(
        [arr._data.reshape(1, -1) for arr in arrays], axis=0, out=data_buf
    )

    mask_buf = np.empty_like(data_buf, dtype=bool)
    np.concatenate(
        [arr._mask.reshape(1, -1) for arr in arrays], axis=0, out=mask_buf
    )

    arr_type = dtype.construct_array_type()
    return [
        arr_type(data_buf[:, j], mask=mask_buf[:, j])
        for j in range(data_buf.shape[1])
    ]
llava_next/lib/python3.10/site-packages/pandas/core/arrays/numeric.py ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import numbers
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Any,
7
+ Callable,
8
+ )
9
+
10
+ import numpy as np
11
+
12
+ from pandas._libs import (
13
+ lib,
14
+ missing as libmissing,
15
+ )
16
+ from pandas.errors import AbstractMethodError
17
+ from pandas.util._decorators import cache_readonly
18
+
19
+ from pandas.core.dtypes.common import (
20
+ is_integer_dtype,
21
+ is_string_dtype,
22
+ pandas_dtype,
23
+ )
24
+
25
+ from pandas.core.arrays.masked import (
26
+ BaseMaskedArray,
27
+ BaseMaskedDtype,
28
+ )
29
+
30
+ if TYPE_CHECKING:
31
+ from collections.abc import Mapping
32
+
33
+ import pyarrow
34
+
35
+ from pandas._typing import (
36
+ Dtype,
37
+ DtypeObj,
38
+ Self,
39
+ npt,
40
+ )
41
+
42
+
43
class NumericDtype(BaseMaskedDtype):
    """Base masked dtype shared by the integer and floating dtype families."""

    _default_np_dtype: np.dtype  # numpy dtype used when none is specified
    _checker: Callable[[Any], bool]  # is_foo_dtype

    def __repr__(self) -> str:
        return f"{self.name}Dtype()"

    @cache_readonly
    def is_signed_integer(self) -> bool:
        """Whether this dtype is a signed-integer dtype (kind 'i')."""
        return self.kind == "i"

    @cache_readonly
    def is_unsigned_integer(self) -> bool:
        """Whether this dtype is an unsigned-integer dtype (kind 'u')."""
        return self.kind == "u"

    @property
    def _is_numeric(self) -> bool:
        return True

    def __from_arrow__(
        self, array: pyarrow.Array | pyarrow.ChunkedArray
    ) -> BaseMaskedArray:
        """
        Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.
        """
        import pyarrow

        from pandas.core.arrays.arrow._arrow_utils import (
            pyarrow_array_to_numpy_and_mask,
        )

        array_class = self.construct_array_type()

        pyarrow_type = pyarrow.from_numpy_dtype(self.type)
        if not array.type.equals(pyarrow_type) and not pyarrow.types.is_null(
            array.type
        ):
            # test_from_arrow_type_error raise for string, but allow
            # through itemsize conversion GH#31896
            rt_dtype = pandas_dtype(array.type.to_pandas_dtype())
            if rt_dtype.kind not in "iuf":
                # Could allow "c" or potentially disallow float<->int conversion,
                # but at the moment we specifically test that uint<->int works
                raise TypeError(
                    f"Expected array of {self} type, got {array.type} instead"
                )

            array = array.cast(pyarrow_type)

        if isinstance(array, pyarrow.ChunkedArray):
            # TODO this "if" can be removed when requiring pyarrow >= 10.0, which fixed
            # combine_chunks for empty arrays https://github.com/apache/arrow/pull/13757
            if array.num_chunks == 0:
                array = pyarrow.array([], type=array.type)
            else:
                array = array.combine_chunks()

        data, mask = pyarrow_array_to_numpy_and_mask(array, dtype=self.numpy_dtype)
        # pyarrow's mask marks *valid* entries; pandas masks *invalid* ones,
        # hence the inversion.
        return array_class(data.copy(), ~mask, copy=False)

    @classmethod
    def _get_dtype_mapping(cls) -> Mapping[np.dtype, NumericDtype]:
        # Subclasses map each supported numpy dtype to the matching masked
        # dtype instance.
        raise AbstractMethodError(cls)

    @classmethod
    def _standardize_dtype(cls, dtype: NumericDtype | str | np.dtype) -> NumericDtype:
        """
        Convert a string representation or a numpy dtype to NumericDtype.
        """
        if isinstance(dtype, str) and (dtype.startswith(("Int", "UInt", "Float"))):
            # Avoid DeprecationWarning from NumPy about np.dtype("Int64")
            # https://github.com/numpy/numpy/pull/7476
            dtype = dtype.lower()

        if not isinstance(dtype, NumericDtype):
            mapping = cls._get_dtype_mapping()
            try:
                dtype = mapping[np.dtype(dtype)]
            except KeyError as err:
                raise ValueError(f"invalid dtype specified {dtype}") from err
        return dtype

    @classmethod
    def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
        """
        Safely cast the values to the given dtype.

        "safe" in this context means the casting is lossless.
        """
        raise AbstractMethodError(cls)
133
+
134
+
135
def _coerce_to_data_and_mask(
    values, dtype, copy: bool, dtype_cls: type[NumericDtype], default_dtype: np.dtype
):
    """
    Coerce arbitrary input into ``(data, mask, dtype, inferred_type)``
    suitable for constructing a masked numeric array.
    """
    checker = dtype_cls._checker

    mask = None
    inferred_type = None

    # Adopt the input's own dtype when it already belongs to this family.
    if dtype is None and hasattr(values, "dtype"):
        if checker(values.dtype):
            dtype = values.dtype

    if dtype is not None:
        dtype = dtype_cls._standardize_dtype(dtype)

    cls = dtype_cls.construct_array_type()
    if isinstance(values, cls):
        # Fast path: already a masked array of the right family; reuse its
        # data and mask directly.
        values, mask = values._data, values._mask
        if dtype is not None:
            values = values.astype(dtype.numpy_dtype, copy=False)

        if copy:
            values = values.copy()
            mask = mask.copy()
        return values, mask, dtype, inferred_type

    # Keep a reference to the pre-conversion input for the precision check
    # further below.
    original = values
    if not copy:
        values = np.asarray(values)
    else:
        values = np.array(values, copy=copy)
    inferred_type = None
    if values.dtype == object or is_string_dtype(values.dtype):
        inferred_type = lib.infer_dtype(values, skipna=True)
        if inferred_type == "boolean" and dtype is None:
            name = dtype_cls.__name__.strip("_")
            raise TypeError(f"{values.dtype} cannot be converted to {name}")

    elif values.dtype.kind == "b" and checker(dtype):
        # bool input is allowed when an explicit numeric dtype was requested.
        if not copy:
            values = np.asarray(values, dtype=default_dtype)
        else:
            values = np.array(values, dtype=default_dtype, copy=copy)

    elif values.dtype.kind not in "iuf":
        name = dtype_cls.__name__.strip("_")
        raise TypeError(f"{values.dtype} cannot be converted to {name}")

    if values.ndim != 1:
        raise TypeError("values must be a 1D list-like")

    if mask is None:
        if values.dtype.kind in "iu":
            # fastpath: integer ndarrays cannot hold NA
            mask = np.zeros(len(values), dtype=np.bool_)
        else:
            mask = libmissing.is_numeric_na(values)
    else:
        assert len(mask) == len(values)

    if mask.ndim != 1:
        raise TypeError("mask must be a 1D list-like")

    # infer dtype if needed
    if dtype is None:
        dtype = default_dtype
    else:
        dtype = dtype.numpy_dtype

    if is_integer_dtype(dtype) and values.dtype.kind == "f" and len(values) > 0:
        if mask.all():
            # All-NA: contents are irrelevant, only the shape/dtype matter.
            values = np.ones(values.shape, dtype=dtype)
        else:
            idx = np.nanargmax(values)
            if int(values[idx]) != original[idx]:
                # We have ints that lost precision during the cast.
                inferred_type = lib.infer_dtype(original, skipna=True)
                if (
                    inferred_type not in ["floating", "mixed-integer-float"]
                    and not mask.any()
                ):
                    values = np.asarray(original, dtype=dtype)
                else:
                    values = np.asarray(original, dtype="object")

    # we copy as need to coerce here
    if mask.any():
        values = values.copy()
        values[mask] = cls._internal_fill_value
    if inferred_type in ("string", "unicode"):
        # casts from str are always safe since they raise
        # a ValueError if the str cannot be parsed into a float
        values = values.astype(dtype, copy=copy)
    else:
        values = dtype_cls._safe_cast(values, dtype, copy=False)

    return values, mask, dtype, inferred_type
232
+
233
+
234
class NumericArray(BaseMaskedArray):
    """
    Base class for IntegerArray and FloatingArray.
    """

    _dtype_cls: type[NumericDtype]  # dtype subclass providing checker/mapping

    def __init__(
        self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False
    ) -> None:
        checker = self._dtype_cls._checker
        if not (isinstance(values, np.ndarray) and checker(values.dtype)):
            descr = (
                "floating"
                if self._dtype_cls.kind == "f"  # type: ignore[comparison-overlap]
                else "integer"
            )
            raise TypeError(
                f"values should be {descr} numpy array. Use "
                "the 'pd.array' function instead"
            )
        if values.dtype == np.float16:
            # If we don't raise here, then accessing self.dtype would raise
            raise TypeError("FloatingArray does not support np.float16 dtype.")

        super().__init__(values, mask, copy=copy)

    @cache_readonly
    def dtype(self) -> NumericDtype:
        """Masked dtype corresponding to the backing numpy dtype."""
        mapping = self._dtype_cls._get_dtype_mapping()
        return mapping[self._data.dtype]

    @classmethod
    def _coerce_to_array(
        cls, value, *, dtype: DtypeObj, copy: bool = False
    ) -> tuple[np.ndarray, np.ndarray]:
        """Coerce ``value`` into (data, mask) ndarrays for this array type."""
        dtype_cls = cls._dtype_cls
        default_dtype = dtype_cls._default_np_dtype
        values, mask, _, _ = _coerce_to_data_and_mask(
            value, dtype, copy, dtype_cls, default_dtype
        )
        return values, mask

    @classmethod
    def _from_sequence_of_strings(
        cls, strings, *, dtype: Dtype | None = None, copy: bool = False
    ) -> Self:
        """Construct from an iterable of numeric strings via ``to_numeric``."""
        from pandas.core.tools.numeric import to_numeric

        scalars = to_numeric(strings, errors="raise", dtype_backend="numpy_nullable")
        return cls._from_sequence(scalars, dtype=dtype, copy=copy)

    # Types accepted by the ufunc/arithmetic machinery alongside this array.
    _HANDLED_TYPES = (np.ndarray, numbers.Number)
llava_next/lib/python3.10/site-packages/pandas/core/arrays/period.py ADDED
@@ -0,0 +1,1313 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from datetime import timedelta
4
+ import operator
5
+ from typing import (
6
+ TYPE_CHECKING,
7
+ Any,
8
+ Callable,
9
+ Literal,
10
+ TypeVar,
11
+ cast,
12
+ overload,
13
+ )
14
+ import warnings
15
+
16
+ import numpy as np
17
+
18
+ from pandas._libs import (
19
+ algos as libalgos,
20
+ lib,
21
+ )
22
+ from pandas._libs.arrays import NDArrayBacked
23
+ from pandas._libs.tslibs import (
24
+ BaseOffset,
25
+ NaT,
26
+ NaTType,
27
+ Timedelta,
28
+ add_overflowsafe,
29
+ astype_overflowsafe,
30
+ dt64arr_to_periodarr as c_dt64arr_to_periodarr,
31
+ get_unit_from_dtype,
32
+ iNaT,
33
+ parsing,
34
+ period as libperiod,
35
+ to_offset,
36
+ )
37
+ from pandas._libs.tslibs.dtypes import (
38
+ FreqGroup,
39
+ PeriodDtypeBase,
40
+ freq_to_period_freqstr,
41
+ )
42
+ from pandas._libs.tslibs.fields import isleapyear_arr
43
+ from pandas._libs.tslibs.offsets import (
44
+ Tick,
45
+ delta_to_tick,
46
+ )
47
+ from pandas._libs.tslibs.period import (
48
+ DIFFERENT_FREQ,
49
+ IncompatibleFrequency,
50
+ Period,
51
+ get_period_field_arr,
52
+ period_asfreq_arr,
53
+ )
54
+ from pandas.util._decorators import (
55
+ cache_readonly,
56
+ doc,
57
+ )
58
+ from pandas.util._exceptions import find_stack_level
59
+
60
+ from pandas.core.dtypes.common import (
61
+ ensure_object,
62
+ pandas_dtype,
63
+ )
64
+ from pandas.core.dtypes.dtypes import (
65
+ DatetimeTZDtype,
66
+ PeriodDtype,
67
+ )
68
+ from pandas.core.dtypes.generic import (
69
+ ABCIndex,
70
+ ABCPeriodIndex,
71
+ ABCSeries,
72
+ ABCTimedeltaArray,
73
+ )
74
+ from pandas.core.dtypes.missing import isna
75
+
76
+ from pandas.core.arrays import datetimelike as dtl
77
+ import pandas.core.common as com
78
+
79
+ if TYPE_CHECKING:
80
+ from collections.abc import Sequence
81
+
82
+ from pandas._typing import (
83
+ AnyArrayLike,
84
+ Dtype,
85
+ FillnaOptions,
86
+ NpDtype,
87
+ NumpySorter,
88
+ NumpyValueArrayLike,
89
+ Self,
90
+ npt,
91
+ )
92
+
93
+ from pandas.core.arrays import (
94
+ DatetimeArray,
95
+ TimedeltaArray,
96
+ )
97
+ from pandas.core.arrays.base import ExtensionArray
98
+
99
+
100
+ BaseOffsetT = TypeVar("BaseOffsetT", bound=BaseOffset)
101
+
102
+
103
+ _shared_doc_kwargs = {
104
+ "klass": "PeriodArray",
105
+ }
106
+
107
+
108
+ def _field_accessor(name: str, docstring: str | None = None):
109
+ def f(self):
110
+ base = self.dtype._dtype_code
111
+ result = get_period_field_arr(name, self.asi8, base)
112
+ return result
113
+
114
+ f.__name__ = name
115
+ f.__doc__ = docstring
116
+ return property(f)
117
+
118
+
119
# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
# incompatible with definition in base class "ExtensionArray"
class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin):  # type: ignore[misc]
    """
    Pandas ExtensionArray for storing Period data.

    Users should use :func:`~pandas.array` to create new instances.

    Parameters
    ----------
    values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
        Data directly convertible to ordinals without inference or copy
        (PeriodArray, ndarray[int64]) or a box around such an array
        (Series[period], PeriodIndex).
    dtype : PeriodDtype, optional
        A PeriodDtype instance from which to extract a `freq`. If both
        `freq` and `dtype` are specified, the frequencies must match.
    freq : str or DateOffset
        The `freq` to use for the array (deprecated; pass ``dtype`` instead).
    copy : bool, default False
        Whether to copy the ordinals before storing.

    See Also
    --------
    Period: Represents a period of time.
    PeriodIndex : Immutable Index for period data.
    period_range: Create a fixed-frequency PeriodArray.
    array: Construct a pandas array.

    Notes
    -----
    The data are physically stored as a 1-D int64 ndarray of "ordinals";
    ``freq`` — shared by every element — determines the span each ordinal
    represents.

    Examples
    --------
    >>> pd.arrays.PeriodArray(pd.PeriodIndex(['2023-01-01',
    ...                                       '2023-01-02'], freq='D'))
    <PeriodArray>
    ['2023-01-01', '2023-01-02']
    Length: 2, dtype: period[D]
    """

    # array priority higher than numpy scalars
    __array_priority__ = 1000
    _typ = "periodarray"  # ABCPeriodArray
    _internal_fill_value = np.int64(iNaT)
    _recognized_scalars = (Period,)
    _is_recognized_dtype = lambda x: isinstance(
        x, PeriodDtype
    )  # check_compatible_with checks freq match
    _infer_matches = ("period",)

    @property
    def _scalar_type(self) -> type[Period]:
        return Period

    # Names others delegate to us
    _other_ops: list[str] = []
    _bool_ops: list[str] = ["is_leap_year"]
    _object_ops: list[str] = ["start_time", "end_time", "freq"]
    _field_ops: list[str] = [
        "year",
        "month",
        "day",
        "hour",
        "minute",
        "second",
        "weekofyear",
        "weekday",
        "week",
        "dayofweek",
        "day_of_week",
        "dayofyear",
        "day_of_year",
        "quarter",
        "qyear",
        "days_in_month",
        "daysinmonth",
    ]
    _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops
    _datetimelike_methods: list[str] = ["strftime", "to_timestamp", "asfreq"]

    # The PeriodDtype (and hence freq) shared by all elements.
    _dtype: PeriodDtype
223
+
224
+ # --------------------------------------------------------------------
225
+ # Constructors
226
+
227
+ def __init__(
228
+ self, values, dtype: Dtype | None = None, freq=None, copy: bool = False
229
+ ) -> None:
230
+ if freq is not None:
231
+ # GH#52462
232
+ warnings.warn(
233
+ "The 'freq' keyword in the PeriodArray constructor is deprecated "
234
+ "and will be removed in a future version. Pass 'dtype' instead",
235
+ FutureWarning,
236
+ stacklevel=find_stack_level(),
237
+ )
238
+ freq = validate_dtype_freq(dtype, freq)
239
+ dtype = PeriodDtype(freq)
240
+
241
+ if dtype is not None:
242
+ dtype = pandas_dtype(dtype)
243
+ if not isinstance(dtype, PeriodDtype):
244
+ raise ValueError(f"Invalid dtype {dtype} for PeriodArray")
245
+
246
+ if isinstance(values, ABCSeries):
247
+ values = values._values
248
+ if not isinstance(values, type(self)):
249
+ raise TypeError("Incorrect dtype")
250
+
251
+ elif isinstance(values, ABCPeriodIndex):
252
+ values = values._values
253
+
254
+ if isinstance(values, type(self)):
255
+ if dtype is not None and dtype != values.dtype:
256
+ raise raise_on_incompatible(values, dtype.freq)
257
+ values, dtype = values._ndarray, values.dtype
258
+
259
+ if not copy:
260
+ values = np.asarray(values, dtype="int64")
261
+ else:
262
+ values = np.array(values, dtype="int64", copy=copy)
263
+ if dtype is None:
264
+ raise ValueError("dtype is not specified and cannot be inferred")
265
+ dtype = cast(PeriodDtype, dtype)
266
+ NDArrayBacked.__init__(self, values, dtype)
267
+
268
+ # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
269
+ @classmethod
270
+ def _simple_new( # type: ignore[override]
271
+ cls,
272
+ values: npt.NDArray[np.int64],
273
+ dtype: PeriodDtype,
274
+ ) -> Self:
275
+ # alias for PeriodArray.__init__
276
+ assertion_msg = "Should be numpy array of type i8"
277
+ assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg
278
+ return cls(values, dtype=dtype)
279
+
280
+ @classmethod
281
+ def _from_sequence(
282
+ cls,
283
+ scalars,
284
+ *,
285
+ dtype: Dtype | None = None,
286
+ copy: bool = False,
287
+ ) -> Self:
288
+ if dtype is not None:
289
+ dtype = pandas_dtype(dtype)
290
+ if dtype and isinstance(dtype, PeriodDtype):
291
+ freq = dtype.freq
292
+ else:
293
+ freq = None
294
+
295
+ if isinstance(scalars, cls):
296
+ validate_dtype_freq(scalars.dtype, freq)
297
+ if copy:
298
+ scalars = scalars.copy()
299
+ return scalars
300
+
301
+ periods = np.asarray(scalars, dtype=object)
302
+
303
+ freq = freq or libperiod.extract_freq(periods)
304
+ ordinals = libperiod.extract_ordinals(periods, freq)
305
+ dtype = PeriodDtype(freq)
306
+ return cls(ordinals, dtype=dtype)
307
+
308
+ @classmethod
309
+ def _from_sequence_of_strings(
310
+ cls, strings, *, dtype: Dtype | None = None, copy: bool = False
311
+ ) -> Self:
312
+ return cls._from_sequence(strings, dtype=dtype, copy=copy)
313
+
314
+ @classmethod
315
+ def _from_datetime64(cls, data, freq, tz=None) -> Self:
316
+ """
317
+ Construct a PeriodArray from a datetime64 array
318
+
319
+ Parameters
320
+ ----------
321
+ data : ndarray[datetime64[ns], datetime64[ns, tz]]
322
+ freq : str or Tick
323
+ tz : tzinfo, optional
324
+
325
+ Returns
326
+ -------
327
+ PeriodArray[freq]
328
+ """
329
+ if isinstance(freq, BaseOffset):
330
+ freq = freq_to_period_freqstr(freq.n, freq.name)
331
+ data, freq = dt64arr_to_periodarr(data, freq, tz)
332
+ dtype = PeriodDtype(freq)
333
+ return cls(data, dtype=dtype)
334
+
335
+ @classmethod
336
+ def _generate_range(cls, start, end, periods, freq):
337
+ periods = dtl.validate_periods(periods)
338
+
339
+ if freq is not None:
340
+ freq = Period._maybe_convert_freq(freq)
341
+
342
+ if start is not None or end is not None:
343
+ subarr, freq = _get_ordinal_range(start, end, periods, freq)
344
+ else:
345
+ raise ValueError("Not enough parameters to construct Period range")
346
+
347
+ return subarr, freq
348
+
349
+ @classmethod
350
+ def _from_fields(cls, *, fields: dict, freq) -> Self:
351
+ subarr, freq = _range_from_fields(freq=freq, **fields)
352
+ dtype = PeriodDtype(freq)
353
+ return cls._simple_new(subarr, dtype=dtype)
354
+
355
+ # -----------------------------------------------------------------
356
+ # DatetimeLike Interface
357
+
358
+ # error: Argument 1 of "_unbox_scalar" is incompatible with supertype
359
+ # "DatetimeLikeArrayMixin"; supertype defines the argument type as
360
+ # "Union[Union[Period, Any, Timedelta], NaTType]"
361
+ def _unbox_scalar( # type: ignore[override]
362
+ self,
363
+ value: Period | NaTType,
364
+ ) -> np.int64:
365
+ if value is NaT:
366
+ # error: Item "Period" of "Union[Period, NaTType]" has no attribute "value"
367
+ return np.int64(value._value) # type: ignore[union-attr]
368
+ elif isinstance(value, self._scalar_type):
369
+ self._check_compatible_with(value)
370
+ return np.int64(value.ordinal)
371
+ else:
372
+ raise ValueError(f"'value' should be a Period. Got '{value}' instead.")
373
+
374
+ def _scalar_from_string(self, value: str) -> Period:
375
+ return Period(value, freq=self.freq)
376
+
377
+ # error: Argument 1 of "_check_compatible_with" is incompatible with
378
+ # supertype "DatetimeLikeArrayMixin"; supertype defines the argument type
379
+ # as "Period | Timestamp | Timedelta | NaTType"
380
+ def _check_compatible_with(self, other: Period | NaTType | PeriodArray) -> None: # type: ignore[override]
381
+ if other is NaT:
382
+ return
383
+ # error: Item "NaTType" of "Period | NaTType | PeriodArray" has no
384
+ # attribute "freq"
385
+ self._require_matching_freq(other.freq) # type: ignore[union-attr]
386
+
387
+ # --------------------------------------------------------------------
388
+ # Data / Attributes
389
+
390
+ @cache_readonly
391
+ def dtype(self) -> PeriodDtype:
392
+ return self._dtype
393
+
394
+ # error: Cannot override writeable attribute with read-only property
395
+ @property # type: ignore[override]
396
+ def freq(self) -> BaseOffset:
397
+ """
398
+ Return the frequency object for this PeriodArray.
399
+ """
400
+ return self.dtype.freq
401
+
402
+ @property
403
+ def freqstr(self) -> str:
404
+ return freq_to_period_freqstr(self.freq.n, self.freq.name)
405
+
406
+ def __array__(
407
+ self, dtype: NpDtype | None = None, copy: bool | None = None
408
+ ) -> np.ndarray:
409
+ if dtype == "i8":
410
+ return self.asi8
411
+ elif dtype == bool:
412
+ return ~self._isnan
413
+
414
+ # This will raise TypeError for non-object dtypes
415
+ return np.array(list(self), dtype=object)
416
+
417
+ def __arrow_array__(self, type=None):
418
+ """
419
+ Convert myself into a pyarrow Array.
420
+ """
421
+ import pyarrow
422
+
423
+ from pandas.core.arrays.arrow.extension_types import ArrowPeriodType
424
+
425
+ if type is not None:
426
+ if pyarrow.types.is_integer(type):
427
+ return pyarrow.array(self._ndarray, mask=self.isna(), type=type)
428
+ elif isinstance(type, ArrowPeriodType):
429
+ # ensure we have the same freq
430
+ if self.freqstr != type.freq:
431
+ raise TypeError(
432
+ "Not supported to convert PeriodArray to array with different "
433
+ f"'freq' ({self.freqstr} vs {type.freq})"
434
+ )
435
+ else:
436
+ raise TypeError(
437
+ f"Not supported to convert PeriodArray to '{type}' type"
438
+ )
439
+
440
+ period_type = ArrowPeriodType(self.freqstr)
441
+ storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type="int64")
442
+ return pyarrow.ExtensionArray.from_storage(period_type, storage_array)
443
+
444
+ # --------------------------------------------------------------------
445
+ # Vectorized analogues of Period properties
446
+
447
+ year = _field_accessor(
448
+ "year",
449
+ """
450
+ The year of the period.
451
+
452
+ Examples
453
+ --------
454
+ >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
455
+ >>> idx.year
456
+ Index([2023, 2024, 2025], dtype='int64')
457
+ """,
458
+ )
459
+ month = _field_accessor(
460
+ "month",
461
+ """
462
+ The month as January=1, December=12.
463
+
464
+ Examples
465
+ --------
466
+ >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
467
+ >>> idx.month
468
+ Index([1, 2, 3], dtype='int64')
469
+ """,
470
+ )
471
+ day = _field_accessor(
472
+ "day",
473
+ """
474
+ The days of the period.
475
+
476
+ Examples
477
+ --------
478
+ >>> idx = pd.PeriodIndex(['2020-01-31', '2020-02-28'], freq='D')
479
+ >>> idx.day
480
+ Index([31, 28], dtype='int64')
481
+ """,
482
+ )
483
+ hour = _field_accessor(
484
+ "hour",
485
+ """
486
+ The hour of the period.
487
+
488
+ Examples
489
+ --------
490
+ >>> idx = pd.PeriodIndex(["2023-01-01 10:00", "2023-01-01 11:00"], freq='h')
491
+ >>> idx.hour
492
+ Index([10, 11], dtype='int64')
493
+ """,
494
+ )
495
+ minute = _field_accessor(
496
+ "minute",
497
+ """
498
+ The minute of the period.
499
+
500
+ Examples
501
+ --------
502
+ >>> idx = pd.PeriodIndex(["2023-01-01 10:30:00",
503
+ ... "2023-01-01 11:50:00"], freq='min')
504
+ >>> idx.minute
505
+ Index([30, 50], dtype='int64')
506
+ """,
507
+ )
508
+ second = _field_accessor(
509
+ "second",
510
+ """
511
+ The second of the period.
512
+
513
+ Examples
514
+ --------
515
+ >>> idx = pd.PeriodIndex(["2023-01-01 10:00:30",
516
+ ... "2023-01-01 10:00:31"], freq='s')
517
+ >>> idx.second
518
+ Index([30, 31], dtype='int64')
519
+ """,
520
+ )
521
+ weekofyear = _field_accessor(
522
+ "week",
523
+ """
524
+ The week ordinal of the year.
525
+
526
+ Examples
527
+ --------
528
+ >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
529
+ >>> idx.week # It can be written `weekofyear`
530
+ Index([5, 9, 13], dtype='int64')
531
+ """,
532
+ )
533
+ week = weekofyear
534
+ day_of_week = _field_accessor(
535
+ "day_of_week",
536
+ """
537
+ The day of the week with Monday=0, Sunday=6.
538
+
539
+ Examples
540
+ --------
541
+ >>> idx = pd.PeriodIndex(["2023-01-01", "2023-01-02", "2023-01-03"], freq="D")
542
+ >>> idx.weekday
543
+ Index([6, 0, 1], dtype='int64')
544
+ """,
545
+ )
546
+ dayofweek = day_of_week
547
+ weekday = dayofweek
548
+ dayofyear = day_of_year = _field_accessor(
549
+ "day_of_year",
550
+ """
551
+ The ordinal day of the year.
552
+
553
+ Examples
554
+ --------
555
+ >>> idx = pd.PeriodIndex(["2023-01-10", "2023-02-01", "2023-03-01"], freq="D")
556
+ >>> idx.dayofyear
557
+ Index([10, 32, 60], dtype='int64')
558
+
559
+ >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
560
+ >>> idx
561
+ PeriodIndex(['2023', '2024', '2025'], dtype='period[Y-DEC]')
562
+ >>> idx.dayofyear
563
+ Index([365, 366, 365], dtype='int64')
564
+ """,
565
+ )
566
+ quarter = _field_accessor(
567
+ "quarter",
568
+ """
569
+ The quarter of the date.
570
+
571
+ Examples
572
+ --------
573
+ >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
574
+ >>> idx.quarter
575
+ Index([1, 1, 1], dtype='int64')
576
+ """,
577
+ )
578
+ qyear = _field_accessor("qyear")
579
+ days_in_month = _field_accessor(
580
+ "days_in_month",
581
+ """
582
+ The number of days in the month.
583
+
584
+ Examples
585
+ --------
586
+ For Series:
587
+
588
+ >>> period = pd.period_range('2020-1-1 00:00', '2020-3-1 00:00', freq='M')
589
+ >>> s = pd.Series(period)
590
+ >>> s
591
+ 0 2020-01
592
+ 1 2020-02
593
+ 2 2020-03
594
+ dtype: period[M]
595
+ >>> s.dt.days_in_month
596
+ 0 31
597
+ 1 29
598
+ 2 31
599
+ dtype: int64
600
+
601
+ For PeriodIndex:
602
+
603
+ >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
604
+ >>> idx.days_in_month # It can be also entered as `daysinmonth`
605
+ Index([31, 28, 31], dtype='int64')
606
+ """,
607
+ )
608
+ daysinmonth = days_in_month
609
+
610
+ @property
611
+ def is_leap_year(self) -> npt.NDArray[np.bool_]:
612
+ """
613
+ Logical indicating if the date belongs to a leap year.
614
+
615
+ Examples
616
+ --------
617
+ >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
618
+ >>> idx.is_leap_year
619
+ array([False, True, False])
620
+ """
621
+ return isleapyear_arr(np.asarray(self.year))
622
+
623
+ def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray:
624
+ """
625
+ Cast to DatetimeArray/Index.
626
+
627
+ Parameters
628
+ ----------
629
+ freq : str or DateOffset, optional
630
+ Target frequency. The default is 'D' for week or longer,
631
+ 's' otherwise.
632
+ how : {'s', 'e', 'start', 'end'}
633
+ Whether to use the start or end of the time period being converted.
634
+
635
+ Returns
636
+ -------
637
+ DatetimeArray/Index
638
+
639
+ Examples
640
+ --------
641
+ >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
642
+ >>> idx.to_timestamp()
643
+ DatetimeIndex(['2023-01-01', '2023-02-01', '2023-03-01'],
644
+ dtype='datetime64[ns]', freq='MS')
645
+ """
646
+ from pandas.core.arrays import DatetimeArray
647
+
648
+ how = libperiod.validate_end_alias(how)
649
+
650
+ end = how == "E"
651
+ if end:
652
+ if freq == "B" or self.freq == "B":
653
+ # roll forward to ensure we land on B date
654
+ adjust = Timedelta(1, "D") - Timedelta(1, "ns")
655
+ return self.to_timestamp(how="start") + adjust
656
+ else:
657
+ adjust = Timedelta(1, "ns")
658
+ return (self + self.freq).to_timestamp(how="start") - adjust
659
+
660
+ if freq is None:
661
+ freq_code = self._dtype._get_to_timestamp_base()
662
+ dtype = PeriodDtypeBase(freq_code, 1)
663
+ freq = dtype._freqstr
664
+ base = freq_code
665
+ else:
666
+ freq = Period._maybe_convert_freq(freq)
667
+ base = freq._period_dtype_code
668
+
669
+ new_parr = self.asfreq(freq, how=how)
670
+
671
+ new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base)
672
+ dta = DatetimeArray._from_sequence(new_data)
673
+
674
+ if self.freq.name == "B":
675
+ # See if we can retain BDay instead of Day in cases where
676
+ # len(self) is too small for infer_freq to distinguish between them
677
+ diffs = libalgos.unique_deltas(self.asi8)
678
+ if len(diffs) == 1:
679
+ diff = diffs[0]
680
+ if diff == self.dtype._n:
681
+ dta._freq = self.freq
682
+ elif diff == 1:
683
+ dta._freq = self.freq.base
684
+ # TODO: other cases?
685
+ return dta
686
+ else:
687
+ return dta._with_freq("infer")
688
+
689
+ # --------------------------------------------------------------------
690
+
691
+ def _box_func(self, x) -> Period | NaTType:
692
+ return Period._from_ordinal(ordinal=x, freq=self.freq)
693
+
694
+ @doc(**_shared_doc_kwargs, other="PeriodIndex", other_name="PeriodIndex")
695
+ def asfreq(self, freq=None, how: str = "E") -> Self:
696
+ """
697
+ Convert the {klass} to the specified frequency `freq`.
698
+
699
+ Equivalent to applying :meth:`pandas.Period.asfreq` with the given arguments
700
+ to each :class:`~pandas.Period` in this {klass}.
701
+
702
+ Parameters
703
+ ----------
704
+ freq : str
705
+ A frequency.
706
+ how : str {{'E', 'S'}}, default 'E'
707
+ Whether the elements should be aligned to the end
708
+ or start within pa period.
709
+
710
+ * 'E', 'END', or 'FINISH' for end,
711
+ * 'S', 'START', or 'BEGIN' for start.
712
+
713
+ January 31st ('END') vs. January 1st ('START') for example.
714
+
715
+ Returns
716
+ -------
717
+ {klass}
718
+ The transformed {klass} with the new frequency.
719
+
720
+ See Also
721
+ --------
722
+ {other}.asfreq: Convert each Period in a {other_name} to the given frequency.
723
+ Period.asfreq : Convert a :class:`~pandas.Period` object to the given frequency.
724
+
725
+ Examples
726
+ --------
727
+ >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='Y')
728
+ >>> pidx
729
+ PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
730
+ dtype='period[Y-DEC]')
731
+
732
+ >>> pidx.asfreq('M')
733
+ PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
734
+ '2015-12'], dtype='period[M]')
735
+
736
+ >>> pidx.asfreq('M', how='S')
737
+ PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
738
+ '2015-01'], dtype='period[M]')
739
+ """
740
+ how = libperiod.validate_end_alias(how)
741
+ if isinstance(freq, BaseOffset) and hasattr(freq, "_period_dtype_code"):
742
+ freq = PeriodDtype(freq)._freqstr
743
+ freq = Period._maybe_convert_freq(freq)
744
+
745
+ base1 = self._dtype._dtype_code
746
+ base2 = freq._period_dtype_code
747
+
748
+ asi8 = self.asi8
749
+ # self.freq.n can't be negative or 0
750
+ end = how == "E"
751
+ if end:
752
+ ordinal = asi8 + self.dtype._n - 1
753
+ else:
754
+ ordinal = asi8
755
+
756
+ new_data = period_asfreq_arr(ordinal, base1, base2, end)
757
+
758
+ if self._hasna:
759
+ new_data[self._isnan] = iNaT
760
+
761
+ dtype = PeriodDtype(freq)
762
+ return type(self)(new_data, dtype=dtype)
763
+
764
+ # ------------------------------------------------------------------
765
+ # Rendering Methods
766
+
767
+ def _formatter(self, boxed: bool = False):
768
+ if boxed:
769
+ return str
770
+ return "'{}'".format
771
+
772
+ def _format_native_types(
773
+ self, *, na_rep: str | float = "NaT", date_format=None, **kwargs
774
+ ) -> npt.NDArray[np.object_]:
775
+ """
776
+ actually format my specific types
777
+ """
778
+ return libperiod.period_array_strftime(
779
+ self.asi8, self.dtype._dtype_code, na_rep, date_format
780
+ )
781
+
782
+ # ------------------------------------------------------------------
783
+
784
+ def astype(self, dtype, copy: bool = True):
785
+ # We handle Period[T] -> Period[U]
786
+ # Our parent handles everything else.
787
+ dtype = pandas_dtype(dtype)
788
+ if dtype == self._dtype:
789
+ if not copy:
790
+ return self
791
+ else:
792
+ return self.copy()
793
+ if isinstance(dtype, PeriodDtype):
794
+ return self.asfreq(dtype.freq)
795
+
796
+ if lib.is_np_dtype(dtype, "M") or isinstance(dtype, DatetimeTZDtype):
797
+ # GH#45038 match PeriodIndex behavior.
798
+ tz = getattr(dtype, "tz", None)
799
+ unit = dtl.dtype_to_unit(dtype)
800
+ return self.to_timestamp().tz_localize(tz).as_unit(unit)
801
+
802
+ return super().astype(dtype, copy=copy)
803
+
804
+ def searchsorted(
805
+ self,
806
+ value: NumpyValueArrayLike | ExtensionArray,
807
+ side: Literal["left", "right"] = "left",
808
+ sorter: NumpySorter | None = None,
809
+ ) -> npt.NDArray[np.intp] | np.intp:
810
+ npvalue = self._validate_setitem_value(value).view("M8[ns]")
811
+
812
+ # Cast to M8 to get datetime-like NaT placement,
813
+ # similar to dtl._period_dispatch
814
+ m8arr = self._ndarray.view("M8[ns]")
815
+ return m8arr.searchsorted(npvalue, side=side, sorter=sorter)
816
+
817
+ def _pad_or_backfill(
818
+ self,
819
+ *,
820
+ method: FillnaOptions,
821
+ limit: int | None = None,
822
+ limit_area: Literal["inside", "outside"] | None = None,
823
+ copy: bool = True,
824
+ ) -> Self:
825
+ # view as dt64 so we get treated as timelike in core.missing,
826
+ # similar to dtl._period_dispatch
827
+ dta = self.view("M8[ns]")
828
+ result = dta._pad_or_backfill(
829
+ method=method, limit=limit, limit_area=limit_area, copy=copy
830
+ )
831
+ if copy:
832
+ return cast("Self", result.view(self.dtype))
833
+ else:
834
+ return self
835
+
836
+ def fillna(
837
+ self, value=None, method=None, limit: int | None = None, copy: bool = True
838
+ ) -> Self:
839
+ if method is not None:
840
+ # view as dt64 so we get treated as timelike in core.missing,
841
+ # similar to dtl._period_dispatch
842
+ dta = self.view("M8[ns]")
843
+ result = dta.fillna(value=value, method=method, limit=limit, copy=copy)
844
+ # error: Incompatible return value type (got "Union[ExtensionArray,
845
+ # ndarray[Any, Any]]", expected "PeriodArray")
846
+ return result.view(self.dtype) # type: ignore[return-value]
847
+ return super().fillna(value=value, method=method, limit=limit, copy=copy)
848
+
849
+ # ------------------------------------------------------------------
850
+ # Arithmetic Methods
851
+
852
+ def _addsub_int_array_or_scalar(
853
+ self, other: np.ndarray | int, op: Callable[[Any, Any], Any]
854
+ ) -> Self:
855
+ """
856
+ Add or subtract array of integers.
857
+
858
+ Parameters
859
+ ----------
860
+ other : np.ndarray[int64] or int
861
+ op : {operator.add, operator.sub}
862
+
863
+ Returns
864
+ -------
865
+ result : PeriodArray
866
+ """
867
+ assert op in [operator.add, operator.sub]
868
+ if op is operator.sub:
869
+ other = -other
870
+ res_values = add_overflowsafe(self.asi8, np.asarray(other, dtype="i8"))
871
+ return type(self)(res_values, dtype=self.dtype)
872
+
873
+ def _add_offset(self, other: BaseOffset):
874
+ assert not isinstance(other, Tick)
875
+
876
+ self._require_matching_freq(other, base=True)
877
+ return self._addsub_int_array_or_scalar(other.n, operator.add)
878
+
879
+ # TODO: can we de-duplicate with Period._add_timedeltalike_scalar?
880
+ def _add_timedeltalike_scalar(self, other):
881
+ """
882
+ Parameters
883
+ ----------
884
+ other : timedelta, Tick, np.timedelta64
885
+
886
+ Returns
887
+ -------
888
+ PeriodArray
889
+ """
890
+ if not isinstance(self.freq, Tick):
891
+ # We cannot add timedelta-like to non-tick PeriodArray
892
+ raise raise_on_incompatible(self, other)
893
+
894
+ if isna(other):
895
+ # i.e. np.timedelta64("NaT")
896
+ return super()._add_timedeltalike_scalar(other)
897
+
898
+ td = np.asarray(Timedelta(other).asm8)
899
+ return self._add_timedelta_arraylike(td)
900
+
901
+ def _add_timedelta_arraylike(
902
+ self, other: TimedeltaArray | npt.NDArray[np.timedelta64]
903
+ ) -> Self:
904
+ """
905
+ Parameters
906
+ ----------
907
+ other : TimedeltaArray or ndarray[timedelta64]
908
+
909
+ Returns
910
+ -------
911
+ PeriodArray
912
+ """
913
+ if not self.dtype._is_tick_like():
914
+ # We cannot add timedelta-like to non-tick PeriodArray
915
+ raise TypeError(
916
+ f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}"
917
+ )
918
+
919
+ dtype = np.dtype(f"m8[{self.dtype._td64_unit}]")
920
+
921
+ # Similar to _check_timedeltalike_freq_compat, but we raise with a
922
+ # more specific exception message if necessary.
923
+ try:
924
+ delta = astype_overflowsafe(
925
+ np.asarray(other), dtype=dtype, copy=False, round_ok=False
926
+ )
927
+ except ValueError as err:
928
+ # e.g. if we have minutes freq and try to add 30s
929
+ # "Cannot losslessly convert units"
930
+ raise IncompatibleFrequency(
931
+ "Cannot add/subtract timedelta-like from PeriodArray that is "
932
+ "not an integer multiple of the PeriodArray's freq."
933
+ ) from err
934
+
935
+ res_values = add_overflowsafe(self.asi8, np.asarray(delta.view("i8")))
936
+ return type(self)(res_values, dtype=self.dtype)
937
+
938
+ def _check_timedeltalike_freq_compat(self, other):
939
+ """
940
+ Arithmetic operations with timedelta-like scalars or array `other`
941
+ are only valid if `other` is an integer multiple of `self.freq`.
942
+ If the operation is valid, find that integer multiple. Otherwise,
943
+ raise because the operation is invalid.
944
+
945
+ Parameters
946
+ ----------
947
+ other : timedelta, np.timedelta64, Tick,
948
+ ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
949
+
950
+ Returns
951
+ -------
952
+ multiple : int or ndarray[int64]
953
+
954
+ Raises
955
+ ------
956
+ IncompatibleFrequency
957
+ """
958
+ assert self.dtype._is_tick_like() # checked by calling function
959
+
960
+ dtype = np.dtype(f"m8[{self.dtype._td64_unit}]")
961
+
962
+ if isinstance(other, (timedelta, np.timedelta64, Tick)):
963
+ td = np.asarray(Timedelta(other).asm8)
964
+ else:
965
+ td = np.asarray(other)
966
+
967
+ try:
968
+ delta = astype_overflowsafe(td, dtype=dtype, copy=False, round_ok=False)
969
+ except ValueError as err:
970
+ raise raise_on_incompatible(self, other) from err
971
+
972
+ delta = delta.view("i8")
973
+ return lib.item_from_zerodim(delta)
974
+
975
+
976
+ def raise_on_incompatible(left, right) -> IncompatibleFrequency:
977
+ """
978
+ Helper function to render a consistent error message when raising
979
+ IncompatibleFrequency.
980
+
981
+ Parameters
982
+ ----------
983
+ left : PeriodArray
984
+ right : None, DateOffset, Period, ndarray, or timedelta-like
985
+
986
+ Returns
987
+ -------
988
+ IncompatibleFrequency
989
+ Exception to be raised by the caller.
990
+ """
991
+ # GH#24283 error message format depends on whether right is scalar
992
+ if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None:
993
+ other_freq = None
994
+ elif isinstance(right, BaseOffset):
995
+ other_freq = freq_to_period_freqstr(right.n, right.name)
996
+ elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period)):
997
+ other_freq = right.freqstr
998
+ else:
999
+ other_freq = delta_to_tick(Timedelta(right)).freqstr
1000
+
1001
+ own_freq = freq_to_period_freqstr(left.freq.n, left.freq.name)
1002
+ msg = DIFFERENT_FREQ.format(
1003
+ cls=type(left).__name__, own_freq=own_freq, other_freq=other_freq
1004
+ )
1005
+ return IncompatibleFrequency(msg)
1006
+
1007
+
1008
+ # -------------------------------------------------------------------
1009
+ # Constructor Helpers
1010
+
1011
+
1012
def period_array(
    data: Sequence[Period | str | None] | AnyArrayLike,
    freq: str | Tick | BaseOffset | None = None,
    copy: bool = False,
) -> PeriodArray:
    """
    Construct a new PeriodArray from a sequence of Period scalars.

    Parameters
    ----------
    data : Sequence of Period objects
        A sequence of Period objects. These are required to all have
        the same ``freq.`` Missing values can be indicated by ``None``
        or ``pandas.NaT``.
    freq : str, Tick, or Offset
        The frequency of every element of the array. This can be specified
        to avoid inferring the `freq` from `data`.
    copy : bool, default False
        Whether to ensure a copy of the data is made.

    Returns
    -------
    PeriodArray

    See Also
    --------
    PeriodArray
    pandas.PeriodIndex

    Examples
    --------
    >>> period_array([pd.Period('2017', freq='Y'),
    ...               pd.Period('2018', freq='Y')])
    <PeriodArray>
    ['2017', '2018']
    Length: 2, dtype: period[Y-DEC]

    >>> period_array([pd.Period('2017', freq='Y'),
    ...               pd.Period('2018', freq='Y'),
    ...               pd.NaT])
    <PeriodArray>
    ['2017', '2018', 'NaT']
    Length: 3, dtype: period[Y-DEC]

    Integers that look like years are handled

    >>> period_array([2000, 2001, 2002], freq='D')
    <PeriodArray>
    ['2000-01-01', '2001-01-01', '2002-01-01']
    Length: 3, dtype: period[D]

    Datetime-like strings may also be passed

    >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
    <PeriodArray>
    ['2000Q1', '2000Q2', '2000Q3', '2000Q4']
    Length: 4, dtype: period[Q-DEC]
    """
    inferred_dtype = getattr(data, "dtype", None)

    # datetime64 input has a dedicated constructor path
    if lib.is_np_dtype(inferred_dtype, "M"):
        return PeriodArray._from_datetime64(data, freq)

    # already period-dtyped: wrap, converting frequency only when it differs
    if isinstance(inferred_dtype, PeriodDtype):
        result = PeriodArray(data)
        if freq is None or freq == inferred_dtype.freq:
            return result
        return result.asfreq(freq)

    if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)):
        # other iterable of some kind
        data = list(data)

    raw = np.asarray(data)

    dtype: PeriodDtype | None = PeriodDtype(freq) if freq else None

    if raw.dtype.kind == "f" and len(raw) > 0:
        raise TypeError("PeriodIndex does not allow floating point in construction")

    if raw.dtype.kind in "iu":
        # integers are interpreted as ordinals for the given freq
        # error: Argument 2 to "from_ordinals" has incompatible type "Union[str,
        # Tick, None]"; expected "Union[timedelta, BaseOffset, str]"
        ordinals = libperiod.from_ordinals(
            raw.astype(np.int64, copy=False), freq  # type: ignore[arg-type]
        )
        return PeriodArray(ordinals, dtype=dtype)

    objs = ensure_object(raw)
    if freq is None:
        # infer the frequency from the Period scalars themselves
        freq = libperiod.extract_freq(objs)
    return PeriodArray._from_sequence(objs, dtype=PeriodDtype(freq))
1109
+
1110
+
1111
@overload
def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT:
    ...


@overload
def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset:
    ...


def validate_dtype_freq(
    dtype, freq: BaseOffsetT | BaseOffset | timedelta | str | None
) -> BaseOffsetT:
    """
    If both a dtype and a freq are available, ensure they match.  If only
    dtype is available, extract the implied freq.

    Parameters
    ----------
    dtype : dtype
    freq : DateOffset or None

    Returns
    -------
    freq : DateOffset

    Raises
    ------
    ValueError : non-period dtype
    IncompatibleFrequency : mismatch between dtype and freq
    """
    if freq is not None:
        freq = to_offset(freq, is_period=True)

    if dtype is None:
        # nothing to reconcile against; hand back the (converted) freq
        # error: Incompatible return value type (got "Union[BaseOffset, Any, None]",
        # expected "BaseOffset")
        return freq  # type: ignore[return-value]

    dtype = pandas_dtype(dtype)
    if not isinstance(dtype, PeriodDtype):
        raise ValueError("dtype must be PeriodDtype")
    if freq is None:
        freq = dtype.freq
    elif freq != dtype.freq:
        raise IncompatibleFrequency("specified freq and dtype are different")
    return freq  # type: ignore[return-value]
1156
+
1157
+
1158
def dt64arr_to_periodarr(
    data, freq, tz=None
) -> tuple[npt.NDArray[np.int64], BaseOffset]:
    """
    Convert an datetime-like array to values Period ordinals.

    Parameters
    ----------
    data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
    freq : Optional[Union[str, Tick]]
        Must match the `freq` on the `data` if `data` is a DatetimeIndex
        or Series.
    tz : Optional[tzinfo]

    Returns
    -------
    ordinals : ndarray[int64]
    freq : Tick
        The frequency extracted from the Series or DatetimeIndex if that's
        used.
    """
    if not isinstance(data.dtype, np.dtype) or data.dtype.kind != "M":
        raise ValueError(f"Wrong dtype: {data.dtype}")

    if isinstance(data, (ABCIndex, ABCSeries)):
        if freq is None:
            # infer the frequency from the Index/Series before unwrapping
            freq = data.freq if isinstance(data, ABCIndex) else data.dt.freq
        data = data._values

    reso = get_unit_from_dtype(data.dtype)
    freq = Period._maybe_convert_freq(freq)
    base = freq._period_dtype_code
    ordinals = c_dt64arr_to_periodarr(data.view("i8"), base, tz, reso=reso)
    return ordinals, freq
1196
+
1197
+
1198
def _get_ordinal_range(start, end, periods, freq, mult: int = 1):
    """
    Resolve start/end/periods (exactly two must be given) into a contiguous
    int64 array of Period ordinals plus the resolved frequency.
    """
    if com.count_not_none(start, end, periods) != 2:
        raise ValueError(
            "Of the three parameters: start, end, and periods, "
            "exactly two must be specified"
        )

    if freq is not None:
        freq = to_offset(freq, is_period=True)
        mult = freq.n

    if start is not None:
        start = Period(start, freq)
    if end is not None:
        end = Period(end, freq)

    start_is_period = isinstance(start, Period)
    end_is_period = isinstance(end, Period)

    if start_is_period and end_is_period and start.freq != end.freq:
        raise ValueError("start and end must have same freq")
    if start is NaT or end is NaT:
        raise ValueError("start and end must not be NaT")

    if freq is None:
        # infer the frequency from whichever endpoint carries one
        if start_is_period:
            freq = start.freq
        elif end_is_period:
            freq = end.freq
        else:  # pragma: no cover
            raise ValueError("Could not infer freq from start/end")
        mult = freq.n

    if periods is None:
        ordinals = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
    else:
        periods = periods * mult
        if start is None:
            # count backwards from the endpoint
            ordinals = np.arange(
                end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64
            )
        else:
            ordinals = np.arange(
                start.ordinal, start.ordinal + periods, mult, dtype=np.int64
            )

    return ordinals, freq
1245
+
1246
+
1247
def _range_from_fields(
    year=None,
    month=None,
    quarter=None,
    day=None,
    hour=None,
    minute=None,
    second=None,
    freq=None,
) -> tuple[np.ndarray, BaseOffset]:
    """
    Build an int64 array of Period ordinals from broadcastable date/time
    field arrays, returning the ordinals and the resolved frequency.
    """
    # unspecified time-of-day fields default to midnight on the first
    hour = 0 if hour is None else hour
    minute = 0 if minute is None else minute
    second = 0 if second is None else second
    day = 1 if day is None else day

    ordinals = []

    if quarter is not None:
        if freq is None:
            freq = to_offset("Q", is_period=True)
            base = FreqGroup.FR_QTR.value
        else:
            freq = to_offset(freq, is_period=True)
            base = libperiod.freq_to_dtype_code(freq)
            if base != FreqGroup.FR_QTR.value:
                raise AssertionError("base must equal FR_QTR")

        freqstr = freq.freqstr
        year, quarter = _make_field_arrays(year, quarter)
        for yr, qtr in zip(year, quarter):
            # map fiscal (year, quarter) onto a calendar (year, month)
            cal_year, cal_month = parsing.quarter_to_myear(yr, qtr, freqstr)
            ordinals.append(
                libperiod.period_ordinal(cal_year, cal_month, 1, 1, 1, 1, 0, 0, base)
            )
    else:
        freq = to_offset(freq, is_period=True)
        base = libperiod.freq_to_dtype_code(freq)
        fields = _make_field_arrays(year, month, day, hour, minute, second)
        for yr, mo, dy, hr, mi, sc in zip(*fields):
            ordinals.append(
                libperiod.period_ordinal(yr, mo, dy, hr, mi, sc, 0, 0, base)
            )

    return np.array(ordinals, dtype=np.int64), freq
1294
+
1295
+
1296
+ def _make_field_arrays(*fields) -> list[np.ndarray]:
1297
+ length = None
1298
+ for x in fields:
1299
+ if isinstance(x, (list, np.ndarray, ABCSeries)):
1300
+ if length is not None and len(x) != length:
1301
+ raise ValueError("Mismatched Period array lengths")
1302
+ if length is None:
1303
+ length = len(x)
1304
+
1305
+ # error: Argument 2 to "repeat" has incompatible type "Optional[int]"; expected
1306
+ # "Union[Union[int, integer[Any]], Union[bool, bool_], ndarray, Sequence[Union[int,
1307
+ # integer[Any]]], Sequence[Union[bool, bool_]], Sequence[Sequence[Any]]]"
1308
+ return [
1309
+ np.asarray(x)
1310
+ if isinstance(x, (np.ndarray, list, ABCSeries))
1311
+ else np.repeat(x, length) # type: ignore[arg-type]
1312
+ for x in fields
1313
+ ]
llava_next/lib/python3.10/site-packages/pandas/core/config_init.py ADDED
@@ -0,0 +1,924 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module is imported from the pandas package __init__.py file
3
+ in order to ensure that the core.config options registered here will
4
+ be available as soon as the user loads the package. if register_option
5
+ is invoked inside specific modules, they will not be registered until that
6
+ module is imported, which may or may not be a problem.
7
+
8
+ If you need to make sure options are available even before a certain
9
+ module is imported, register them here rather than in the module.
10
+
11
+ """
12
+ from __future__ import annotations
13
+
14
+ import os
15
+ from typing import Callable
16
+
17
+ import pandas._config.config as cf
18
+ from pandas._config.config import (
19
+ is_bool,
20
+ is_callable,
21
+ is_instance_factory,
22
+ is_int,
23
+ is_nonnegative_int,
24
+ is_one_of_factory,
25
+ is_str,
26
+ is_text,
27
+ )
28
+
29
+ # compute
30
+
31
use_bottleneck_doc = """
: bool
    Use the bottleneck library to accelerate if it is installed,
    the default is True
    Valid values: False,True
"""


def use_bottleneck_cb(key) -> None:
    """Propagate the option value into pandas.core.nanops when it changes."""
    # imported lazily to avoid import cycles at module load time
    from pandas.core import nanops

    enabled = cf.get_option(key)
    nanops.set_use_bottleneck(enabled)
43
+
44
+
45
use_numexpr_doc = """
: bool
    Use the numexpr library to accelerate computation if it is installed,
    the default is True
    Valid values: False,True
"""


def use_numexpr_cb(key) -> None:
    """Propagate the option value into the expressions engine when it changes."""
    # imported lazily to avoid import cycles at module load time
    from pandas.core.computation import expressions

    enabled = cf.get_option(key)
    expressions.set_use_numexpr(enabled)
57
+
58
+
59
use_numba_doc = """
: bool
    Use the numba engine option for select operations if it is installed,
    the default is False
    Valid values: False,True
"""


def use_numba_cb(key) -> None:
    """Propagate the option value into the numba helpers when it changes."""
    # imported lazily to avoid import cycles at module load time
    from pandas.core.util import numba_

    enabled = cf.get_option(key)
    numba_.set_use_numba(enabled)
71
+
72
+
73
# Register the "compute.*" options; each callback immediately pushes the new
# value into the relevant module-level switch.
with cf.config_prefix("compute"):
    cf.register_option(
        "use_bottleneck",
        True,
        use_bottleneck_doc,
        validator=is_bool,
        cb=use_bottleneck_cb,
    )
    cf.register_option(
        "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb
    )
    cf.register_option(
        "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb
    )
87
+ #
88
+ # options from the "display" namespace
89
+
90
# ---------------------------------------------------------------------
# Docstrings for the "display.*" options registered further below.  The
# leading ": <type>" line is the convention used when rendering option
# help via ``pd.describe_option``.
pc_precision_doc = """
: int
    Floating point output precision in terms of number of places after the
    decimal, for regular formatting as well as scientific notation. Similar
    to ``precision`` in :meth:`numpy.set_printoptions`.
"""

pc_colspace_doc = """
: int
    Default space for DataFrame columns.
"""

pc_max_rows_doc = """
: int
    If max_rows is exceeded, switch to truncate view. Depending on
    `large_repr`, objects are either centrally truncated or printed as
    a summary view. 'None' value means unlimited.

    In case python/IPython is running in a terminal and `large_repr`
    equals 'truncate' this can be set to 0 and pandas will auto-detect
    the height of the terminal and print a truncated object which fits
    the screen height. The IPython notebook, IPython qtconsole, or
    IDLE do not run in a terminal and hence it is not possible to do
    correct auto-detection.
"""

pc_min_rows_doc = """
: int
    The numbers of rows to show in a truncated view (when `max_rows` is
    exceeded). Ignored when `max_rows` is set to None or 0. When set to
    None, follows the value of `max_rows`.
"""

pc_max_cols_doc = """
: int
    If max_cols is exceeded, switch to truncate view. Depending on
    `large_repr`, objects are either centrally truncated or printed as
    a summary view. 'None' value means unlimited.

    In case python/IPython is running in a terminal and `large_repr`
    equals 'truncate' this can be set to 0 or None and pandas will auto-detect
    the width of the terminal and print a truncated object which fits
    the screen width. The IPython notebook, IPython qtconsole, or IDLE
    do not run in a terminal and hence it is not possible to do
    correct auto-detection and defaults to 20.
"""

pc_max_categories_doc = """
: int
    This sets the maximum number of categories pandas should output when
    printing out a `Categorical` or a Series of dtype "category".
"""

pc_max_info_cols_doc = """
: int
    max_info_columns is used in DataFrame.info method to decide if
    per column information will be printed.
"""

pc_nb_repr_h_doc = """
: boolean
    When True, IPython notebook will use html representation for
    pandas objects (if it is available).
"""

pc_pprint_nest_depth = """
: int
    Controls the number of nested levels to process when pretty-printing
"""

pc_multi_sparse_doc = """
: boolean
    "sparsify" MultiIndex display (don't display repeated
    elements in outer levels within groups)
"""

float_format_doc = """
: callable
    The callable should accept a floating point number and return
    a string with the desired format of the number. This is used
    in some places like SeriesFormatter.
    See formats.format.EngFormatter for an example.
"""

max_colwidth_doc = """
: int or None
    The maximum width in characters of a column in the repr of
    a pandas data structure. When the column overflows, a "..."
    placeholder is embedded in the output. A 'None' value means unlimited.
"""

colheader_justify_doc = """
: 'left'/'right'
    Controls the justification of column headers. used by DataFrameFormatter.
"""

pc_expand_repr_doc = """
: boolean
    Whether to print out the full DataFrame repr for wide DataFrames across
    multiple lines, `max_columns` is still respected, but the output will
    wrap-around across multiple "pages" if its width exceeds `display.width`.
"""

pc_show_dimensions_doc = """
: boolean or 'truncate'
    Whether to print out dimensions at the end of DataFrame repr.
    If 'truncate' is specified, only print out the dimensions if the
    frame is truncated (e.g. not display all rows and/or columns)
"""

pc_east_asian_width_doc = """
: boolean
    Whether to use the Unicode East Asian Width to calculate the display text
    width.
    Enabling this may affect to the performance (default: False)
"""

pc_ambiguous_as_wide_doc = """
: boolean
    Whether to handle Unicode characters belong to Ambiguous as Wide (width=2)
    (default: False)
"""

pc_table_schema_doc = """
: boolean
    Whether to publish a Table Schema representation for frontends
    that support it.
    (default: False)
"""

pc_html_border_doc = """
: int
    A ``border=value`` attribute is inserted in the ``<table>`` tag
    for the DataFrame HTML repr.
"""

pc_html_use_mathjax_doc = """\
: boolean
    When True, Jupyter notebook will process table contents using MathJax,
    rendering mathematical expressions enclosed by the dollar symbol.
    (default: True)
"""

pc_max_dir_items = """\
: int
    The number of items that will be added to `dir(...)`. 'None' value means
    unlimited. Because dir is cached, changing this option will not immediately
    affect already existing dataframes until a column is deleted or added.

    This is for instance used to suggest columns from a dataframe to tab
    completion.
"""

pc_width_doc = """
: int
    Width of the display in characters. In case python/IPython is running in
    a terminal this can be set to None and pandas will correctly auto-detect
    the width.
    Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a
    terminal and hence it is not possible to correctly detect the width.
"""

pc_chop_threshold_doc = """
: float or None
    if set to a float value, all float values smaller than the given threshold
    will be displayed as exactly 0 by repr and friends.
"""

pc_max_seq_items = """
: int or None
    When pretty-printing a long sequence, no more then `max_seq_items`
    will be printed. If items are omitted, they will be denoted by the
    addition of "..." to the resulting string.

    If set to None, the number of items to be printed is unlimited.
"""

pc_max_info_rows_doc = """
: int
    df.info() will usually show null-counts for each column.
    For large frames this can be quite slow. max_info_rows and max_info_cols
    limit this null check only to frames with smaller dimensions than
    specified.
"""

pc_large_repr_doc = """
: 'truncate'/'info'
    For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can
    show a truncated table, or switch to the view from
    df.info() (the behaviour in earlier versions of pandas).
"""

pc_memory_usage_doc = """
: bool, string or None
    This specifies if the memory usage of a DataFrame should be displayed when
    df.info() is called. Valid values True,False,'deep'
"""
287
+
288
+
289
def table_schema_cb(key) -> None:
    """Toggle the Table Schema repr hook when the option changes."""
    # imported lazily to avoid import cycles at module load time
    from pandas.io.formats.printing import enable_data_resource_formatter

    enabled = cf.get_option(key)
    enable_data_resource_formatter(enabled)
293
+
294
+
295
def is_terminal() -> bool:
    """
    Detect if Python is running in a terminal.

    Returns True if Python is running in a terminal or False if not.
    """
    try:
        # get_ipython is only injected into builtins by IPython itself
        # error: Name 'get_ipython' is not defined
        shell = get_ipython()  # type: ignore[name-defined]
    except NameError:
        # plain Python interpreter: assume a terminal
        return True
    # a Jupyter kernel exposes ``kernel``; a terminal IPython session does not
    return not hasattr(shell, "kernel")
311
+
312
+
313
# Register all "display.*" options using the docstrings defined above.
with cf.config_prefix("display"):
    cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int)
    cf.register_option(
        "float_format",
        None,
        float_format_doc,
        validator=is_one_of_factory([None, is_callable]),
    )
    cf.register_option(
        "max_info_rows",
        1690785,
        pc_max_info_rows_doc,
        validator=is_int,
    )
    cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int)
    cf.register_option(
        "min_rows",
        10,
        pc_min_rows_doc,
        validator=is_instance_factory([type(None), int]),
    )
    cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int)

    cf.register_option(
        "max_colwidth",
        50,
        max_colwidth_doc,
        validator=is_nonnegative_int,
    )
    if is_terminal():
        max_cols = 0  # automatically determine optimal number of columns
    else:
        max_cols = 20  # cannot determine optimal number of columns
    cf.register_option(
        "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int
    )
    cf.register_option(
        "large_repr",
        "truncate",
        pc_large_repr_doc,
        validator=is_one_of_factory(["truncate", "info"]),
    )
    cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int)
    cf.register_option(
        "colheader_justify", "right", colheader_justify_doc, validator=is_text
    )
    cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool)
    cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int)
    cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool)
    cf.register_option("expand_frame_repr", True, pc_expand_repr_doc)
    cf.register_option(
        "show_dimensions",
        "truncate",
        pc_show_dimensions_doc,
        validator=is_one_of_factory([True, False, "truncate"]),
    )
    cf.register_option("chop_threshold", None, pc_chop_threshold_doc)
    cf.register_option("max_seq_items", 100, pc_max_seq_items)
    cf.register_option(
        "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int])
    )
    cf.register_option(
        "memory_usage",
        True,
        pc_memory_usage_doc,
        validator=is_one_of_factory([None, True, False, "deep"]),
    )
    cf.register_option(
        "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool
    )
    # FIX: this option previously re-used pc_east_asian_width_doc, which left
    # pc_ambiguous_as_wide_doc defined-but-unused and attached the wrong help
    # text to "display.unicode.ambiguous_as_wide".
    cf.register_option(
        "unicode.ambiguous_as_wide", False, pc_ambiguous_as_wide_doc, validator=is_bool
    )
    cf.register_option(
        "html.table_schema",
        False,
        pc_table_schema_doc,
        validator=is_bool,
        cb=table_schema_cb,
    )
    cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int)
    cf.register_option(
        "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool
    )
    cf.register_option(
        "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int
    )
400
+
401
tc_sim_interactive_doc = """
: boolean
    Whether to simulate interactive mode for purposes of testing
"""

# Test-support option: lets the test suite pretend it runs interactively.
with cf.config_prefix("mode"):
    cf.register_option("sim_interactive", False, tc_sim_interactive_doc)
408
+
409
use_inf_as_na_doc = """
: boolean
    True means treat None, NaN, INF, -INF as NA (old way),
    False means None and NaN are null, but INF, -INF are not NA
    (new way).

    This option is deprecated in pandas 2.1.0 and will be removed in 3.0.
"""

# We don't want to start importing everything at the global context level
# or we'll hit circular deps.


def use_inf_as_na_cb(key) -> None:
    # TODO(3.0): enforcing this deprecation will close GH#52501
    from pandas.core.dtypes.missing import _use_inf_as_na

    _use_inf_as_na(key)


with cf.config_prefix("mode"):
    cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb)

# The option stays registered (so existing code keeps working) but emits a
# FutureWarning on access.
cf.deprecate_option(
    # GH#51684
    "mode.use_inf_as_na",
    "use_inf_as_na option is deprecated and will be removed in a future "
    "version. Convert inf values to NaN before operating instead.",
)
438
+
439
data_manager_doc = """
: string
    Internal data manager type; can be "block" or "array". Defaults to "block",
    unless overridden by the 'PANDAS_DATA_MANAGER' environment variable (needs
    to be set before pandas is imported).
"""


with cf.config_prefix("mode"):
    cf.register_option(
        "data_manager",
        # Get the default from an environment variable, if set, otherwise defaults
        # to "block". This environment variable can be set for testing.
        os.environ.get("PANDAS_DATA_MANAGER", "block"),
        data_manager_doc,
        validator=is_one_of_factory(["block", "array"]),
    )

# Deprecated: only the BlockManager will survive; option kept for transition.
cf.deprecate_option(
    # GH#55043
    "mode.data_manager",
    "data_manager option is deprecated and will be removed in a future "
    "version. Only the BlockManager will be available.",
)
463
+
464
+
465
# TODO better name?
copy_on_write_doc = """
: bool
    Use new copy-view behaviour using Copy-on-Write. Defaults to False,
    unless overridden by the 'PANDAS_COPY_ON_WRITE' environment variable
    (if set to "1" for True, needs to be set before pandas is imported).
"""


# Read the environment override once (previously looked up twice):
# "warn" selects the transition-warning mode, "1" enables Copy-on-Write,
# anything else leaves it disabled.  Can be set for testing.
_cow_env_value = os.environ.get("PANDAS_COPY_ON_WRITE", "0")

with cf.config_prefix("mode"):
    cf.register_option(
        "copy_on_write",
        "warn" if _cow_env_value == "warn" else _cow_env_value == "1",
        copy_on_write_doc,
        validator=is_one_of_factory([True, False, "warn"]),
    )
485
+
486
+
487
# user warnings
chained_assignment = """
: string
    Raise an exception, warn, or no action if trying to use chained assignment,
    The default is warn
"""

with cf.config_prefix("mode"):
    cf.register_option(
        "chained_assignment",
        "warn",
        chained_assignment,
        # None disables the check entirely
        validator=is_one_of_factory([None, "warn", "raise"]),
    )
501
+
502
+
503
string_storage_doc = """
: string
    The default storage for StringDtype. This option is ignored if
    ``future.infer_string`` is set to True.
"""

with cf.config_prefix("mode"):
    cf.register_option(
        "string_storage",
        "python",
        string_storage_doc,
        validator=is_one_of_factory(["python", "pyarrow", "pyarrow_numpy"]),
    )
516
+
517
+
518
# Set up the io.excel specific reader configuration.
reader_engine_doc = """
: string
    The default Excel reader engine for '{ext}' files. Available options:
    auto, {others}.
"""

# Supported reader engines per Excel file extension.
_xls_options = ["xlrd", "calamine"]
_xlsm_options = ["xlrd", "openpyxl", "calamine"]
_xlsx_options = ["xlrd", "openpyxl", "calamine"]
_ods_options = ["odf", "calamine"]
_xlsb_options = ["pyxlsb", "calamine"]


with cf.config_prefix("io.excel.xls"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)),
        validator=is_one_of_factory(_xls_options + ["auto"]),
    )

with cf.config_prefix("io.excel.xlsm"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)),
        validator=is_one_of_factory(_xlsm_options + ["auto"]),
    )


with cf.config_prefix("io.excel.xlsx"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)),
        validator=is_one_of_factory(_xlsx_options + ["auto"]),
    )


with cf.config_prefix("io.excel.ods"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)),
        validator=is_one_of_factory(_ods_options + ["auto"]),
    )

with cf.config_prefix("io.excel.xlsb"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)),
        validator=is_one_of_factory(_xlsb_options + ["auto"]),
    )
573
+
574
# Set up the io.excel specific writer configuration.
writer_engine_doc = """
: string
    The default Excel writer engine for '{ext}' files. Available options:
    auto, {others}.
"""

# Re-bound here for the writer engines; the reader lists above are no longer
# needed at this point in the module.
_xlsm_options = ["openpyxl"]
_xlsx_options = ["openpyxl", "xlsxwriter"]
_ods_options = ["odf"]


with cf.config_prefix("io.excel.xlsm"):
    cf.register_option(
        "writer",
        "auto",
        writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)),
        # NOTE(review): ``validator=str`` coerces but never rejects; presumably
        # is_one_of_factory was intended here -- confirm upstream.
        validator=str,
    )


with cf.config_prefix("io.excel.xlsx"):
    cf.register_option(
        "writer",
        "auto",
        writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)),
        validator=str,
    )


with cf.config_prefix("io.excel.ods"):
    cf.register_option(
        "writer",
        "auto",
        writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)),
        validator=str,
    )
611
+
612
+
613
# Set up the io.parquet specific configuration.
parquet_engine_doc = """
: string
    The default parquet reader/writer engine. Available options:
    'auto', 'pyarrow', 'fastparquet', the default is 'auto'
"""

with cf.config_prefix("io.parquet"):
    cf.register_option(
        "engine",
        "auto",
        parquet_engine_doc,
        validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]),
    )
627
+
628
+
629
# Set up the io.sql specific configuration.
sql_engine_doc = """
: string
    The default sql reader/writer engine. Available options:
    'auto', 'sqlalchemy', the default is 'auto'
"""

with cf.config_prefix("io.sql"):
    cf.register_option(
        "engine",
        "auto",
        sql_engine_doc,
        validator=is_one_of_factory(["auto", "sqlalchemy"]),
    )
643
+
644
# --------
# Plotting
# ---------

plotting_backend_doc = """
: str
    The plotting backend to use. The default value is "matplotlib", the
    backend provided with pandas. Other backends can be specified by
    providing the name of the module that implements the backend.
"""
654
+
655
+
656
def register_plotting_backend_cb(key) -> None:
    """Validate a plotting backend name by resolving its module."""
    if key != "matplotlib":
        # We defer matplotlib validation, since it's the default; any other
        # backend is validated by attempting to resolve it now.
        from pandas.plotting._core import _get_plot_backend

        _get_plot_backend(key)
663
+
664
+
665
with cf.config_prefix("plotting"):
    cf.register_option(
        "backend",
        defval="matplotlib",
        doc=plotting_backend_doc,
        # the callback doubles as the validator: resolving the backend module
        # is the validity check
        validator=register_plotting_backend_cb,
    )
672
+
673
+
674
register_converter_doc = """
: bool or 'auto'.
    Whether to register converters with matplotlib's units registry for
    dates, times, datetimes, and Periods. Toggling to False will remove
    the converters, restoring any converters that pandas overwrote.
"""
680
+
681
+
682
+ def register_converter_cb(key) -> None:
683
+ from pandas.plotting import (
684
+ deregister_matplotlib_converters,
685
+ register_matplotlib_converters,
686
+ )
687
+
688
+ if cf.get_option(key):
689
+ register_matplotlib_converters()
690
+ else:
691
+ deregister_matplotlib_converters()
692
+
693
+
694
+ with cf.config_prefix("plotting.matplotlib"):
695
+ cf.register_option(
696
+ "register_converters",
697
+ "auto",
698
+ register_converter_doc,
699
+ validator=is_one_of_factory(["auto", True, False]),
700
+ cb=register_converter_cb,
701
+ )
702
+
703
# ------
# Styler
# ------
# Options controlling ``Styler`` rendering.  Each ``styler_*`` string below
# is the ``doc`` text for the correspondingly named option registered under
# the "styler" prefix.

styler_sparse_index_doc = """
: bool
    Whether to sparsify the display of a hierarchical index. Setting to False will
    display each explicit level element in a hierarchical key for each row.
"""

styler_sparse_columns_doc = """
: bool
    Whether to sparsify the display of hierarchical columns. Setting to False will
    display each explicit level element in a hierarchical key for each column.
"""

styler_render_repr = """
: str
    Determine which output to use in Jupyter Notebook in {"html", "latex"}.
"""

styler_max_elements = """
: int
    The maximum number of data-cell (<td>) elements that will be rendered before
    trimming will occur over columns, rows or both if needed.
"""

styler_max_rows = """
: int, optional
    The maximum number of rows that will be rendered. May still be reduced to
    satisfy ``max_elements``, which takes precedence.
"""

styler_max_columns = """
: int, optional
    The maximum number of columns that will be rendered. May still be reduced to
    satisfy ``max_elements``, which takes precedence.
"""

styler_precision = """
: int
    The precision for floats and complex numbers.
"""

styler_decimal = """
: str
    The character representation for the decimal separator for floats and complex.
"""

styler_thousands = """
: str, optional
    The character representation for thousands separator for floats, int and complex.
"""

styler_na_rep = """
: str, optional
    The string representation for values identified as missing.
"""

styler_escape = """
: str, optional
    Whether to escape certain characters according to the given context; html or latex.
"""

styler_formatter = """
: str, callable, dict, optional
    A formatter object to be used as default within ``Styler.format``.
"""

styler_multirow_align = """
: {"c", "t", "b"}
    The specifier for vertical alignment of sparsified LaTeX multirows.
"""

# Raw string: "\|r" must reach the docs verbatim.
styler_multicol_align = r"""
: {"r", "c", "l", "naive-l", "naive-r"}
    The specifier for horizontal alignment of sparsified LaTeX multicolumns. Pipe
    decorators can also be added to non-naive values to draw vertical
    rules, e.g. "\|r" will draw a rule on the left side of right aligned merged cells.
"""

styler_hrules = """
: bool
    Whether to add horizontal rules on top and bottom and below the headers.
"""

styler_environment = """
: str
    The environment to replace ``\\begin{table}``. If "longtable" is used results
    in a specific longtable environment format.
"""

styler_encoding = """
: str
    The encoding used for output HTML and LaTeX files.
"""

styler_mathjax = """
: bool
    If False will render special CSS classes to table attributes that indicate Mathjax
    will not be used in Jupyter Notebook.
"""

with cf.config_prefix("styler"):
    cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool)

    cf.register_option(
        "sparse.columns", True, styler_sparse_columns_doc, validator=is_bool
    )

    cf.register_option(
        "render.repr",
        "html",
        styler_render_repr,
        validator=is_one_of_factory(["html", "latex"]),
    )

    cf.register_option(
        "render.max_elements",
        2**18,
        styler_max_elements,
        validator=is_nonnegative_int,
    )

    cf.register_option(
        "render.max_rows",
        None,
        styler_max_rows,
        validator=is_nonnegative_int,
    )

    cf.register_option(
        "render.max_columns",
        None,
        styler_max_columns,
        validator=is_nonnegative_int,
    )

    cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str)

    cf.register_option("format.decimal", ".", styler_decimal, validator=is_str)

    cf.register_option(
        "format.precision", 6, styler_precision, validator=is_nonnegative_int
    )

    cf.register_option(
        "format.thousands",
        None,
        styler_thousands,
        validator=is_instance_factory([type(None), str]),
    )

    cf.register_option(
        "format.na_rep",
        None,
        styler_na_rep,
        validator=is_instance_factory([type(None), str]),
    )

    cf.register_option(
        "format.escape",
        None,
        styler_escape,
        validator=is_one_of_factory([None, "html", "latex", "latex-math"]),
    )

    cf.register_option(
        "format.formatter",
        None,
        styler_formatter,
        validator=is_instance_factory([type(None), dict, Callable, str]),
    )

    cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool)

    # NOTE(review): the validator also accepts "naive", which the doc string
    # above does not mention — confirm whether the doc or validator is stale.
    cf.register_option(
        "latex.multirow_align",
        "c",
        styler_multirow_align,
        validator=is_one_of_factory(["c", "t", "b", "naive"]),
    )

    # Every alignment letter, optionally decorated with "|" rules on either
    # side, plus the two non-decoratable "naive" variants.
    val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"]
    val_mca += ["naive-l", "naive-r"]
    cf.register_option(
        "latex.multicol_align",
        "r",
        styler_multicol_align,
        validator=is_one_of_factory(val_mca),
    )

    cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool)

    cf.register_option(
        "latex.environment",
        None,
        styler_environment,
        validator=is_instance_factory([type(None), str]),
    )
903
+
904
+
905
+ with cf.config_prefix("future"):
906
+ cf.register_option(
907
+ "infer_string",
908
+ False,
909
+ "Whether to infer sequence of str objects as pyarrow string "
910
+ "dtype, which will be the default in pandas 3.0 "
911
+ "(at which point this option will be deprecated).",
912
+ validator=is_one_of_factory([True, False]),
913
+ )
914
+
915
+ cf.register_option(
916
+ "no_silent_downcasting",
917
+ False,
918
+ "Whether to opt-in to the future behavior which will *not* silently "
919
+ "downcast results from Series and DataFrame `where`, `mask`, and `clip` "
920
+ "methods. "
921
+ "Silent downcasting will be removed in pandas 3.0 "
922
+ "(at which point this option will be deprecated).",
923
+ validator=is_one_of_factory([True, False]),
924
+ )
llava_next/lib/python3.10/site-packages/pandas/core/construction.py ADDED
@@ -0,0 +1,824 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Constructor functions intended to be shared by pd.array, Series.__init__,
3
+ and Index.__new__.
4
+
5
+ These should not depend on core.internals.
6
+ """
7
+ from __future__ import annotations
8
+
9
+ from collections.abc import Sequence
10
+ from typing import (
11
+ TYPE_CHECKING,
12
+ Optional,
13
+ Union,
14
+ cast,
15
+ overload,
16
+ )
17
+ import warnings
18
+
19
+ import numpy as np
20
+ from numpy import ma
21
+
22
+ from pandas._config import using_pyarrow_string_dtype
23
+
24
+ from pandas._libs import lib
25
+ from pandas._libs.tslibs import (
26
+ Period,
27
+ get_supported_dtype,
28
+ is_supported_dtype,
29
+ )
30
+ from pandas._typing import (
31
+ AnyArrayLike,
32
+ ArrayLike,
33
+ Dtype,
34
+ DtypeObj,
35
+ T,
36
+ )
37
+ from pandas.util._exceptions import find_stack_level
38
+
39
+ from pandas.core.dtypes.base import ExtensionDtype
40
+ from pandas.core.dtypes.cast import (
41
+ construct_1d_arraylike_from_scalar,
42
+ construct_1d_object_array_from_listlike,
43
+ maybe_cast_to_datetime,
44
+ maybe_cast_to_integer_array,
45
+ maybe_convert_platform,
46
+ maybe_infer_to_datetimelike,
47
+ maybe_promote,
48
+ )
49
+ from pandas.core.dtypes.common import (
50
+ is_list_like,
51
+ is_object_dtype,
52
+ is_string_dtype,
53
+ pandas_dtype,
54
+ )
55
+ from pandas.core.dtypes.dtypes import NumpyEADtype
56
+ from pandas.core.dtypes.generic import (
57
+ ABCDataFrame,
58
+ ABCExtensionArray,
59
+ ABCIndex,
60
+ ABCSeries,
61
+ )
62
+ from pandas.core.dtypes.missing import isna
63
+
64
+ import pandas.core.common as com
65
+
66
+ if TYPE_CHECKING:
67
+ from pandas import (
68
+ Index,
69
+ Series,
70
+ )
71
+ from pandas.core.arrays.base import ExtensionArray
72
+
73
+
74
def array(
    data: Sequence[object] | AnyArrayLike,
    dtype: Dtype | None = None,
    copy: bool = True,
) -> ExtensionArray:
    """
    Create an array.

    Parameters
    ----------
    data : Sequence of objects
        The scalars inside `data` should be instances of the
        scalar type for `dtype`. It's expected that `data`
        represents a 1-dimensional array of data.

        When `data` is an Index or Series, the underlying array
        will be extracted from `data`.

    dtype : str, np.dtype, or ExtensionDtype, optional
        The dtype to use for the array. This may be a NumPy
        dtype or an extension type registered with pandas using
        :meth:`pandas.api.extensions.register_extension_dtype`.

        If not specified, there are two possibilities:

        1. When `data` is a :class:`Series`, :class:`Index`, or
           :class:`ExtensionArray`, the `dtype` will be taken
           from the data.
        2. Otherwise, pandas will attempt to infer the `dtype`
           from the data.

        Note that when `data` is a NumPy array, ``data.dtype`` is
        *not* used for inferring the array type. This is because
        NumPy cannot represent all the types of data that can be
        held in extension arrays.

        Currently, pandas will infer an extension dtype for sequences of

        ============================== =======================================
        Scalar Type                    Array Type
        ============================== =======================================
        :class:`pandas.Interval`       :class:`pandas.arrays.IntervalArray`
        :class:`pandas.Period`         :class:`pandas.arrays.PeriodArray`
        :class:`datetime.datetime`     :class:`pandas.arrays.DatetimeArray`
        :class:`datetime.timedelta`    :class:`pandas.arrays.TimedeltaArray`
        :class:`int`                   :class:`pandas.arrays.IntegerArray`
        :class:`float`                 :class:`pandas.arrays.FloatingArray`
        :class:`str`                   :class:`pandas.arrays.StringArray` or
                                       :class:`pandas.arrays.ArrowStringArray`
        :class:`bool`                  :class:`pandas.arrays.BooleanArray`
        ============================== =======================================

        The ExtensionArray created when the scalar type is :class:`str` is determined by
        ``pd.options.mode.string_storage`` if the dtype is not explicitly given.

        For all other cases, NumPy's usual inference rules will be used.
    copy : bool, default True
        Whether to copy the data, even if not necessary. Depending
        on the type of `data`, creating the new array may require
        copying data, even if ``copy=False``.

    Returns
    -------
    ExtensionArray
        The newly created array.

    Raises
    ------
    ValueError
        When `data` is not 1-dimensional.

    See Also
    --------
    numpy.array : Construct a NumPy array.
    Series : Construct a pandas Series.
    Index : Construct a pandas Index.
    arrays.NumpyExtensionArray : ExtensionArray wrapping a NumPy array.
    Series.array : Extract the array stored within a Series.

    Notes
    -----
    Omitting the `dtype` argument means pandas will attempt to infer the
    best array type from the values in the data. As new array types are
    added by pandas and 3rd party libraries, the "best" array type may
    change. We recommend specifying `dtype` to ensure that

    1. the correct array type for the data is returned
    2. the returned array type doesn't change as new extension types
       are added by pandas and third-party libraries

    Additionally, if the underlying memory representation of the returned
    array matters, we recommend specifying the `dtype` as a concrete object
    rather than a string alias or allowing it to be inferred. For example,
    a future version of pandas or a 3rd-party library may include a
    dedicated ExtensionArray for string data. In this event, the following
    would no longer return a :class:`arrays.NumpyExtensionArray` backed by a
    NumPy array.

    >>> pd.array(['a', 'b'], dtype=str)
    <NumpyExtensionArray>
    ['a', 'b']
    Length: 2, dtype: str32

    This would instead return the new ExtensionArray dedicated for string
    data. If you really need the new array to be backed by a NumPy array,
    specify that in the dtype.

    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
    <NumpyExtensionArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Finally, Pandas has arrays that mostly overlap with NumPy

      * :class:`arrays.DatetimeArray`
      * :class:`arrays.TimedeltaArray`

    When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
    passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
    rather than a ``NumpyExtensionArray``. This is for symmetry with the case of
    timezone-aware data, which NumPy does not natively support.

    >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
    <DatetimeArray>
    ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
    Length: 2, dtype: datetime64[ns]

    >>> pd.array(["1h", "2h"], dtype='timedelta64[ns]')
    <TimedeltaArray>
    ['0 days 01:00:00', '0 days 02:00:00']
    Length: 2, dtype: timedelta64[ns]

    Examples
    --------
    If a dtype is not specified, pandas will infer the best dtype from the values.
    See the description of `dtype` for the types pandas infers for.

    >>> pd.array([1, 2])
    <IntegerArray>
    [1, 2]
    Length: 2, dtype: Int64

    >>> pd.array([1, 2, np.nan])
    <IntegerArray>
    [1, 2, <NA>]
    Length: 3, dtype: Int64

    >>> pd.array([1.1, 2.2])
    <FloatingArray>
    [1.1, 2.2]
    Length: 2, dtype: Float64

    >>> pd.array(["a", None, "c"])
    <StringArray>
    ['a', <NA>, 'c']
    Length: 3, dtype: string

    >>> with pd.option_context("string_storage", "pyarrow"):
    ...     arr = pd.array(["a", None, "c"])
    ...
    >>> arr
    <ArrowStringArray>
    ['a', <NA>, 'c']
    Length: 3, dtype: string

    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
    <PeriodArray>
    ['2000-01-01', '2000-01-01']
    Length: 2, dtype: period[D]

    You can use the string alias for `dtype`

    >>> pd.array(['a', 'b', 'a'], dtype='category')
    ['a', 'b', 'a']
    Categories (2, object): ['a', 'b']

    Or specify the actual dtype

    >>> pd.array(['a', 'b', 'a'],
    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
    ['a', 'b', 'a']
    Categories (3, object): ['a' < 'b' < 'c']

    If pandas does not infer a dedicated extension type a
    :class:`arrays.NumpyExtensionArray` is returned.

    >>> pd.array([1 + 1j, 3 + 2j])
    <NumpyExtensionArray>
    [(1+1j), (3+2j)]
    Length: 2, dtype: complex128

    As mentioned in the "Notes" section, new extension types may be added
    in the future (by pandas or 3rd party libraries), causing the return
    value to no longer be a :class:`arrays.NumpyExtensionArray`. Specify the
    `dtype` as a NumPy dtype if you need to ensure there's no future change in
    behavior.

    >>> pd.array([1, 2], dtype=np.dtype("int32"))
    <NumpyExtensionArray>
    [1, 2]
    Length: 2, dtype: int32

    `data` must be 1-dimensional. A ValueError is raised when the input
    has the wrong dimensionality.

    >>> pd.array(1)
    Traceback (most recent call last):
      ...
    ValueError: Cannot pass scalar '1' to 'pandas.array'.
    """
    # NOTE: imported locally rather than at module level — presumably to
    # avoid an import cycle, since this module must not depend on the
    # array implementations at import time.
    from pandas.core.arrays import (
        BooleanArray,
        DatetimeArray,
        ExtensionArray,
        FloatingArray,
        IntegerArray,
        IntervalArray,
        NumpyExtensionArray,
        PeriodArray,
        TimedeltaArray,
    )
    from pandas.core.arrays.string_ import StringDtype

    # Reject inputs that cannot possibly be 1-dimensional arrays.
    if lib.is_scalar(data):
        msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
        raise ValueError(msg)
    elif isinstance(data, ABCDataFrame):
        raise TypeError("Cannot pass DataFrame to 'pandas.array'")

    if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)):
        # Note: we exclude np.ndarray here, will do type inference on it
        dtype = data.dtype

    data = extract_array(data, extract_numpy=True)

    # this returns None for not-found dtypes.
    if dtype is not None:
        dtype = pandas_dtype(dtype)

    if isinstance(data, ExtensionArray) and (dtype is None or data.dtype == dtype):
        # e.g. TimedeltaArray[s], avoid casting to NumpyExtensionArray
        if copy:
            return data.copy()
        return data

    # An explicit extension dtype wins: delegate construction to its array type.
    if isinstance(dtype, ExtensionDtype):
        cls = dtype.construct_array_type()
        return cls._from_sequence(data, dtype=dtype, copy=copy)

    if dtype is None:
        # No dtype given: infer a dedicated extension type from the values
        # (see the table in the docstring for the mapping).
        inferred_dtype = lib.infer_dtype(data, skipna=True)
        if inferred_dtype == "period":
            period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data)
            return PeriodArray._from_sequence(period_data, copy=copy)

        elif inferred_dtype == "interval":
            return IntervalArray(data, copy=copy)

        elif inferred_dtype.startswith("datetime"):
            # datetime, datetime64
            try:
                return DatetimeArray._from_sequence(data, copy=copy)
            except ValueError:
                # Mixture of timezones, fall back to NumpyExtensionArray
                pass

        elif inferred_dtype.startswith("timedelta"):
            # timedelta, timedelta64
            return TimedeltaArray._from_sequence(data, copy=copy)

        elif inferred_dtype == "string":
            # StringArray/ArrowStringArray depending on pd.options.mode.string_storage
            dtype = StringDtype()
            cls = dtype.construct_array_type()
            return cls._from_sequence(data, dtype=dtype, copy=copy)

        elif inferred_dtype == "integer":
            return IntegerArray._from_sequence(data, copy=copy)
        elif inferred_dtype == "empty" and not hasattr(data, "dtype") and not len(data):
            return FloatingArray._from_sequence(data, copy=copy)
        elif (
            inferred_dtype in ("floating", "mixed-integer-float")
            and getattr(data, "dtype", None) != np.float16
        ):
            # GH#44715 Exclude np.float16 bc FloatingArray does not support it;
            # we will fall back to NumpyExtensionArray.
            return FloatingArray._from_sequence(data, copy=copy)

        elif inferred_dtype == "boolean":
            return BooleanArray._from_sequence(data, dtype="boolean", copy=copy)

    # Pandas overrides NumPy for
    # 1. datetime64[ns,us,ms,s]
    # 2. timedelta64[ns,us,ms,s]
    # so that a DatetimeArray is returned.
    if lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype):
        return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
    if lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype):
        return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)

    elif lib.is_np_dtype(dtype, "mM"):
        # datetime64/timedelta64 with an unsupported resolution: warn now,
        # raise in a future release.
        warnings.warn(
            r"datetime64 and timedelta64 dtype resolutions other than "
            r"'s', 'ms', 'us', and 'ns' are deprecated. "
            r"In future releases passing unsupported resolutions will "
            r"raise an exception.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

    # Fallback: wrap whatever NumPy makes of the data.
    return NumpyExtensionArray._from_sequence(data, dtype=dtype, copy=copy)
385
+
386
+
387
# ``_typ`` sentinel strings carried by pandas Index subclasses and Series;
# used by ``extract_array`` below to recognize them without isinstance
# checks against the concrete classes.
_typs = frozenset(
    {
        "index",
        "rangeindex",
        "multiindex",
        "datetimeindex",
        "timedeltaindex",
        "periodindex",
        "categoricalindex",
        "intervalindex",
        "series",
    }
)
400
+
401
+
402
# Typing-only overloads: Series/Index always unbox to an array-like, while
# any other input may be returned unchanged.
@overload
def extract_array(
    obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ...
) -> ArrayLike:
    ...


@overload
def extract_array(
    obj: T, extract_numpy: bool = ..., extract_range: bool = ...
) -> T | ArrayLike:
    ...
414
+
415
+
416
+ def extract_array(
417
+ obj: T, extract_numpy: bool = False, extract_range: bool = False
418
+ ) -> T | ArrayLike:
419
+ """
420
+ Extract the ndarray or ExtensionArray from a Series or Index.
421
+
422
+ For all other types, `obj` is just returned as is.
423
+
424
+ Parameters
425
+ ----------
426
+ obj : object
427
+ For Series / Index, the underlying ExtensionArray is unboxed.
428
+
429
+ extract_numpy : bool, default False
430
+ Whether to extract the ndarray from a NumpyExtensionArray.
431
+
432
+ extract_range : bool, default False
433
+ If we have a RangeIndex, return range._values if True
434
+ (which is a materialized integer ndarray), otherwise return unchanged.
435
+
436
+ Returns
437
+ -------
438
+ arr : object
439
+
440
+ Examples
441
+ --------
442
+ >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
443
+ ['a', 'b', 'c']
444
+ Categories (3, object): ['a', 'b', 'c']
445
+
446
+ Other objects like lists, arrays, and DataFrames are just passed through.
447
+
448
+ >>> extract_array([1, 2, 3])
449
+ [1, 2, 3]
450
+
451
+ For an ndarray-backed Series / Index the ndarray is returned.
452
+
453
+ >>> extract_array(pd.Series([1, 2, 3]))
454
+ array([1, 2, 3])
455
+
456
+ To extract all the way down to the ndarray, pass ``extract_numpy=True``.
457
+
458
+ >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
459
+ array([1, 2, 3])
460
+ """
461
+ typ = getattr(obj, "_typ", None)
462
+ if typ in _typs:
463
+ # i.e. isinstance(obj, (ABCIndex, ABCSeries))
464
+ if typ == "rangeindex":
465
+ if extract_range:
466
+ # error: "T" has no attribute "_values"
467
+ return obj._values # type: ignore[attr-defined]
468
+ return obj
469
+
470
+ # error: "T" has no attribute "_values"
471
+ return obj._values # type: ignore[attr-defined]
472
+
473
+ elif extract_numpy and typ == "npy_extension":
474
+ # i.e. isinstance(obj, ABCNumpyExtensionArray)
475
+ # error: "T" has no attribute "to_numpy"
476
+ return obj.to_numpy() # type: ignore[attr-defined]
477
+
478
+ return obj
479
+
480
+
481
+ def ensure_wrapped_if_datetimelike(arr):
482
+ """
483
+ Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray.
484
+ """
485
+ if isinstance(arr, np.ndarray):
486
+ if arr.dtype.kind == "M":
487
+ from pandas.core.arrays import DatetimeArray
488
+
489
+ dtype = get_supported_dtype(arr.dtype)
490
+ return DatetimeArray._from_sequence(arr, dtype=dtype)
491
+
492
+ elif arr.dtype.kind == "m":
493
+ from pandas.core.arrays import TimedeltaArray
494
+
495
+ dtype = get_supported_dtype(arr.dtype)
496
+ return TimedeltaArray._from_sequence(arr, dtype=dtype)
497
+
498
+ return arr
499
+
500
+
501
def sanitize_masked_array(data: ma.MaskedArray) -> np.ndarray:
    """
    Convert numpy MaskedArray to ensure mask is softened.

    Masked entries are replaced with the NA fill value of a dtype promoted
    to hold missing values; an unmasked input is returned as a copy.
    """
    mask = ma.getmaskarray(data)
    if not mask.any():
        # Nothing masked — still hand back a defensive copy.
        return data.copy()

    promoted, fill_value = maybe_promote(data.dtype, np.nan)
    promoted = cast(np.dtype, promoted)
    data = ma.asarray(data.astype(promoted, copy=True))
    data.soften_mask()  # set hardmask False if it was True
    data[mask] = fill_value
    return data
515
+
516
+
517
def sanitize_array(
    data,
    index: Index | None,
    dtype: DtypeObj | None = None,
    copy: bool = False,
    *,
    allow_2d: bool = False,
) -> ArrayLike:
    """
    Sanitize input data to an ndarray or ExtensionArray, copy if specified,
    coerce to the dtype if specified.

    Parameters
    ----------
    data : Any
        Anything Series.__init__ accepts: scalar, list-like, ndarray,
        MaskedArray, ExtensionArray, Series/Index, range, generator, ...
    index : Index or None, default None
        Required when ``data`` is a scalar (it fixes the output length);
        otherwise only used to broadcast a length-1 result.
    dtype : np.dtype, ExtensionDtype, or None, default None
        Target dtype; None means infer.
    copy : bool, default False
        Whether to force a copy of the input data.
    allow_2d : bool, default False
        If False, raise if we have a 2D Arraylike.

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    original_dtype = dtype
    if isinstance(data, ma.MaskedArray):
        # Replace masked entries with NA-compatible fill values first.
        data = sanitize_masked_array(data)

    if isinstance(dtype, NumpyEADtype):
        # Avoid ending up with a NumpyExtensionArray
        dtype = dtype.numpy_dtype

    # Remember whether the input was an object-dtype Index with no requested
    # dtype — used below to skip datetimelike inference in pyarrow-string mode.
    object_index = False
    if isinstance(data, ABCIndex) and data.dtype == object and dtype is None:
        object_index = True

    # extract ndarray or ExtensionArray, ensure we have no NumpyExtensionArray
    data = extract_array(data, extract_numpy=True, extract_range=True)

    if isinstance(data, np.ndarray) and data.ndim == 0:
        # 0-d ndarray: treat as a scalar but keep its dtype.
        if dtype is None:
            dtype = data.dtype
        data = lib.item_from_zerodim(data)
    elif isinstance(data, range):
        # GH#16804
        data = range_to_ndarray(data)
        copy = False

    if not is_list_like(data):
        # Scalar path: broadcast to len(index).
        if index is None:
            raise ValueError("index must be specified when data is not list-like")
        if (
            isinstance(data, str)
            and using_pyarrow_string_dtype()
            and original_dtype is None
        ):
            from pandas.core.arrays.string_ import StringDtype

            dtype = StringDtype("pyarrow_numpy")
        data = construct_1d_arraylike_from_scalar(data, len(index), dtype)

        return data

    elif isinstance(data, ABCExtensionArray):
        # it is already ensured above this is not a NumpyExtensionArray
        # Until GH#49309 is fixed this check needs to come before the
        # ExtensionDtype check
        if dtype is not None:
            subarr = data.astype(dtype, copy=copy)
        elif copy:
            subarr = data.copy()
        else:
            subarr = data

    elif isinstance(dtype, ExtensionDtype):
        # create an extension array from its dtype
        _sanitize_non_ordered(data)
        cls = dtype.construct_array_type()
        subarr = cls._from_sequence(data, dtype=dtype, copy=copy)

    # GH#846
    elif isinstance(data, np.ndarray):
        if isinstance(data, np.matrix):
            data = data.A

        if dtype is None:
            subarr = data
            if data.dtype == object:
                subarr = maybe_infer_to_datetimelike(data)
                if (
                    object_index
                    and using_pyarrow_string_dtype()
                    and is_string_dtype(subarr)
                ):
                    # Avoid inference when string option is set
                    subarr = data
            elif data.dtype.kind == "U" and using_pyarrow_string_dtype():
                from pandas.core.arrays.string_ import StringDtype

                dtype = StringDtype(storage="pyarrow_numpy")
                subarr = dtype.construct_array_type()._from_sequence(data, dtype=dtype)

            if subarr is data and copy:
                subarr = subarr.copy()

        else:
            # we will try to copy by-definition here
            subarr = _try_cast(data, dtype, copy)

    elif hasattr(data, "__array__"):
        # e.g. dask array GH#38645
        # Materialize to an ndarray, then re-enter with copy already done.
        if not copy:
            data = np.asarray(data)
        else:
            data = np.array(data, copy=copy)
        return sanitize_array(
            data,
            index=index,
            dtype=dtype,
            copy=False,
            allow_2d=allow_2d,
        )

    else:
        _sanitize_non_ordered(data)
        # materialize e.g. generators, convert e.g. tuples, abc.ValueView
        data = list(data)

        if len(data) == 0 and dtype is None:
            # We default to float64, matching numpy
            subarr = np.array([], dtype=np.float64)

        elif dtype is not None:
            subarr = _try_cast(data, dtype, copy)

        else:
            subarr = maybe_convert_platform(data)
            if subarr.dtype == object:
                subarr = cast(np.ndarray, subarr)
                subarr = maybe_infer_to_datetimelike(subarr)

    subarr = _sanitize_ndim(subarr, data, dtype, index, allow_2d=allow_2d)

    if isinstance(subarr, np.ndarray):
        # at this point we should have dtype be None or subarr.dtype == dtype
        dtype = cast(np.dtype, dtype)
        subarr = _sanitize_str_dtypes(subarr, data, dtype, copy)

    return subarr
667
+
668
+
669
def range_to_ndarray(rng: range) -> np.ndarray:
    """
    Cast a range object to ndarray.

    Prefers int64, then uint64 for non-negative ranges that overflow
    int64, and finally falls back to an object-dtype array of Python ints.
    """
    # GH#30171 perf avoid realizing range as a list in np.array
    try:
        return np.arange(rng.start, rng.stop, rng.step, dtype="int64")
    except OverflowError:
        pass

    # GH#30173 handling for ranges that overflow int64
    fits_uint64 = (rng.start >= 0 and rng.step > 0) or (rng.step < 0 <= rng.stop)
    if fits_uint64:
        try:
            return np.arange(rng.start, rng.stop, rng.step, dtype="uint64")
        except OverflowError:
            pass
    return construct_1d_object_array_from_listlike(list(rng))
686
+
687
+
688
+ def _sanitize_non_ordered(data) -> None:
689
+ """
690
+ Raise only for unordered sets, e.g., not for dict_keys
691
+ """
692
+ if isinstance(data, (set, frozenset)):
693
+ raise TypeError(f"'{type(data).__name__}' type is unordered")
694
+
695
+
696
def _sanitize_ndim(
    result: ArrayLike,
    data,
    dtype: DtypeObj | None,
    index: Index | None,
    *,
    allow_2d: bool = False,
) -> ArrayLike:
    """
    Ensure we have a 1-dimensional result array.

    Parameters
    ----------
    result : np.ndarray or ExtensionArray
        The candidate output from sanitize_array.
    data : Any
        The original (pre-conversion) input; re-used when ``result`` came
        out with the wrong dimensionality.
    dtype : np.dtype, ExtensionDtype, or None
    index : Index or None
        Used to broadcast a length-1 result to the index's length.
    allow_2d : bool, default False
        If True, a 2D ndarray input is passed through unchanged.

    Returns
    -------
    np.ndarray or ExtensionArray

    Raises
    ------
    ValueError
        If ``result`` is 0-dimensional, or 2D where that is not allowed.
    """
    if getattr(result, "ndim", 0) == 0:
        raise ValueError("result should be arraylike with ndim > 0")

    if result.ndim == 1:
        # the result that we want
        result = _maybe_repeat(result, index)

    elif result.ndim > 1:
        if isinstance(data, np.ndarray):
            if allow_2d:
                return result
            raise ValueError(
                f"Data must be 1-dimensional, got ndarray of shape {data.shape} instead"
            )
        # Nested list-likes: collapse to a 1D object array of tuples.
        if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype):
            # i.e. NumpyEADtype("O")

            result = com.asarray_tuplesafe(data, dtype=np.dtype("object"))
            cls = dtype.construct_array_type()
            result = cls._from_sequence(result, dtype=dtype)
        else:
            # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type
            # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[str,
            # dtype[Any], None]"
            result = com.asarray_tuplesafe(data, dtype=dtype)  # type: ignore[arg-type]
    return result
733
+
734
+
735
+ def _sanitize_str_dtypes(
736
+ result: np.ndarray, data, dtype: np.dtype | None, copy: bool
737
+ ) -> np.ndarray:
738
+ """
739
+ Ensure we have a dtype that is supported by pandas.
740
+ """
741
+
742
+ # This is to prevent mixed-type Series getting all casted to
743
+ # NumPy string type, e.g. NaN --> '-1#IND'.
744
+ if issubclass(result.dtype.type, str):
745
+ # GH#16605
746
+ # If not empty convert the data to dtype
747
+ # GH#19853: If data is a scalar, result has already the result
748
+ if not lib.is_scalar(data):
749
+ if not np.all(isna(data)):
750
+ data = np.asarray(data, dtype=dtype)
751
+ if not copy:
752
+ result = np.asarray(data, dtype=object)
753
+ else:
754
+ result = np.array(data, dtype=object, copy=copy)
755
+ return result
756
+
757
+
758
+ def _maybe_repeat(arr: ArrayLike, index: Index | None) -> ArrayLike:
759
+ """
760
+ If we have a length-1 array and an index describing how long we expect
761
+ the result to be, repeat the array.
762
+ """
763
+ if index is not None:
764
+ if 1 == len(arr) != len(index):
765
+ arr = arr.repeat(len(index))
766
+ return arr
767
+
768
+
769
def _try_cast(
    arr: list | np.ndarray,
    dtype: np.dtype,
    copy: bool,
) -> ArrayLike:
    """
    Convert input to numpy ndarray and optionally cast to a given dtype.

    Parameters
    ----------
    arr : ndarray or list
        Excludes: ExtensionArray, Series, Index.
    dtype : np.dtype
        The requested (non-extension) target dtype.
    copy : bool
        If False, don't copy the data if not needed.

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    is_ndarray = isinstance(arr, np.ndarray)

    if dtype == object:
        if not is_ndarray:
            subarr = construct_1d_object_array_from_listlike(arr)
            return subarr
        # Wrap datetimelike first so astype(object) yields Timestamp/Timedelta
        # scalars rather than raw np.datetime64/np.timedelta64 values.
        return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy)

    elif dtype.kind == "U":
        # TODO: test cases with arr.dtype.kind in "mM"
        if is_ndarray:
            arr = cast(np.ndarray, arr)
            shape = arr.shape
            if arr.ndim > 1:
                # ensure_string_array wants 1D input; restore shape below.
                arr = arr.ravel()
        else:
            shape = (len(arr),)
        return lib.ensure_string_array(arr, convert_na_value=False, copy=copy).reshape(
            shape
        )

    elif dtype.kind in "mM":
        return maybe_cast_to_datetime(arr, dtype)

    # GH#15832: Check if we are requesting a numeric dtype and
    # that we can convert the data to the requested dtype.
    elif dtype.kind in "iu":
        # this will raise if we have e.g. floats

        subarr = maybe_cast_to_integer_array(arr, dtype)
    elif not copy:
        subarr = np.asarray(arr, dtype=dtype)
    else:
        subarr = np.array(arr, dtype=dtype, copy=copy)

    return subarr
llava_next/lib/python3.10/site-packages/pandas/core/flags.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING
4
+ import weakref
5
+
6
+ if TYPE_CHECKING:
7
+ from pandas.core.generic import NDFrame
8
+
9
+
10
class Flags:
    """
    Flags that apply to pandas objects.

    Parameters
    ----------
    obj : Series or DataFrame
        The object these flags are associated with.
    allows_duplicate_labels : bool, default True
        Whether to allow duplicate labels in this object. By default,
        duplicate labels are permitted. Setting this to ``False`` will
        cause an :class:`errors.DuplicateLabelError` to be raised when
        `index` (or columns for DataFrame) is not unique, or any
        subsequent operation on introduces duplicates.
        See :ref:`duplicates.disallow` for more.

    .. warning::

        This is an experimental feature. Currently, many methods fail to
        propagate the ``allows_duplicate_labels`` value. In future versions
        it is expected that every method taking or returning one or more
        DataFrame or Series objects will propagate ``allows_duplicate_labels``.

    Examples
    --------
    Attributes can be set in two ways:

    >>> df = pd.DataFrame()
    >>> df.flags
    <Flags(allows_duplicate_labels=True)>
    >>> df.flags.allows_duplicate_labels = False
    >>> df.flags
    <Flags(allows_duplicate_labels=False)>

    >>> df.flags['allows_duplicate_labels'] = True
    >>> df.flags
    <Flags(allows_duplicate_labels=True)>
    """

    # The set of flag names accepted by __getitem__/__setitem__.
    _keys: set[str] = {"allows_duplicate_labels"}

    def __init__(self, obj: NDFrame, *, allows_duplicate_labels: bool) -> None:
        self._allows_duplicate_labels = allows_duplicate_labels
        # Hold the owner weakly so a Flags instance never keeps a
        # DataFrame/Series alive on its own.
        self._obj = weakref.ref(obj)

    @property
    def allows_duplicate_labels(self) -> bool:
        """
        Whether this object allows duplicate labels.

        Setting ``allows_duplicate_labels=False`` ensures that the
        index (and columns of a DataFrame) are unique. Most methods
        that accept and return a Series or DataFrame will propagate
        the value of ``allows_duplicate_labels``.

        See :ref:`duplicates` for more.

        See Also
        --------
        DataFrame.attrs : Set global metadata on this object.
        DataFrame.set_flags : Set global flags on this object.

        Examples
        --------
        >>> df = pd.DataFrame({"A": [1, 2]}, index=['a', 'a'])
        >>> df.flags.allows_duplicate_labels
        True
        >>> df.flags.allows_duplicate_labels = False
        Traceback (most recent call last):
        ...
        pandas.errors.DuplicateLabelError: Index has duplicates.
              positions
        label
        a        [0, 1]
        """
        return self._allows_duplicate_labels

    @allows_duplicate_labels.setter
    def allows_duplicate_labels(self, value: bool) -> None:
        enabled = bool(value)
        obj = self._obj()
        if obj is None:
            raise ValueError("This flag's object has been deleted.")

        if not enabled:
            # Disallowing duplicates: validate every axis right away so the
            # caller gets an immediate DuplicateLabelError rather than a
            # deferred one.
            for ax in obj.axes:
                ax._maybe_check_unique()

        self._allows_duplicate_labels = enabled

    def __getitem__(self, key: str):
        if key in self._keys:
            return getattr(self, key)
        raise KeyError(key)

    def __setitem__(self, key: str, value) -> None:
        if key not in self._keys:
            raise ValueError(f"Unknown flag {key}. Must be one of {self._keys}")
        setattr(self, key, value)

    def __repr__(self) -> str:
        return f"<Flags(allows_duplicate_labels={self.allows_duplicate_labels})>"

    def __eq__(self, other) -> bool:
        return (
            isinstance(other, type(self))
            and self.allows_duplicate_labels == other.allows_duplicate_labels
        )
llava_next/lib/python3.10/site-packages/pandas/core/frame.py ADDED
The diff for this file is too large to render. See raw diff
 
llava_next/lib/python3.10/site-packages/pandas/core/roperator.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Reversed Operations not available in the stdlib operator module.
3
+ Defining these instead of using lambdas allows us to reference them by name.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ import operator
8
+
9
+
10
def radd(left, right):
    """Reflected add: compute ``right + left``."""
    return operator.add(right, left)
12
+
13
+
14
def rsub(left, right):
    """Reflected subtract: compute ``right - left``."""
    return operator.sub(right, left)
16
+
17
+
18
def rmul(left, right):
    """Reflected multiply: compute ``right * left``."""
    return operator.mul(right, left)
20
+
21
+
22
def rdiv(left, right):
    """Reflected (true) divide: compute ``right / left``."""
    return operator.truediv(right, left)
24
+
25
+
26
def rtruediv(left, right):
    """Reflected true divide: compute ``right / left``."""
    return operator.truediv(right, left)
28
+
29
+
30
def rfloordiv(left, right):
    """Reflected floor divide: compute ``right // left``."""
    return operator.floordiv(right, left)
32
+
33
+
34
def rmod(left, right):
    """
    Reflected modulo: compute ``right % left``.

    A ``str`` right operand is rejected explicitly, because ``%`` on a
    string is the printf-style formatting operation rather than modulo,
    which would silently do the wrong thing here.
    """
    if isinstance(right, str):
        raise TypeError(f"{type(left).__name__} cannot perform the operation mod")

    return right % left
43
+
44
+
45
def rdivmod(left, right):
    """Reflected divmod: compute ``divmod(right, left)``."""
    return divmod(right, left)
47
+
48
+
49
def rpow(left, right):
    """Reflected power: compute ``right ** left``."""
    return operator.pow(right, left)
51
+
52
+
53
def rand_(left, right):
    """Reflected bitwise AND: compute ``right & left``."""
    return right & left
55
+
56
+
57
def ror_(left, right):
    """Reflected bitwise OR: compute ``right | left``."""
    return right | left
59
+
60
+
61
def rxor(left, right):
    """Reflected bitwise XOR: compute ``right ^ left``."""
    return right ^ left
llava_next/lib/python3.10/site-packages/pandas/core/series.py ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/transformers/models/canine/__init__.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Map of submodule name -> list of public names it defines.  Consumed by
# `_LazyModule` below so that importing `transformers.models.canine` stays
# cheap: a submodule is only imported when one of its names is first accessed.
_import_structure = {
    "configuration_canine": ["CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

# The modeling submodule needs torch; register its exports only when torch is
# importable so that torch-free installs can still import this package.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]


# Under static type checking the symbols are imported eagerly so checkers and
# IDEs see the real objects; at runtime the module is replaced by a lazy
# proxy instead (see the `else` branch at the bottom).
if TYPE_CHECKING:
    from .configuration_canine import CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )


else:
    import sys

    # Install the lazy proxy: attribute access on this module triggers the
    # actual submodule import according to `_import_structure`.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
parrot/lib/python3.10/site-packages/transformers/models/canine/__pycache__/configuration_canine.cpython-310.pyc ADDED
Binary file (5.63 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/canine/__pycache__/tokenization_canine.cpython-310.pyc ADDED
Binary file (7.81 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert CANINE checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ from transformers import CanineConfig, CanineModel, CanineTokenizer, load_tf_weights_in_canine
21
+ from transformers.utils import logging
22
+
23
+
24
+ logging.set_verbosity_info()
25
+
26
+
27
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, pytorch_dump_path):
    """
    Convert a TensorFlow CANINE checkpoint into a PyTorch model directory.

    A `CanineModel` with the default configuration is instantiated, the TF
    checkpoint weights are copied into it, and the model together with a
    default `CanineTokenizer` is written to `pytorch_dump_path` via
    `save_pretrained`.
    """
    # Build an (untrained) model from the default CANINE configuration.
    canine_config = CanineConfig()
    canine_model = CanineModel(canine_config)
    canine_model.eval()

    print(f"Building PyTorch model from configuration: {canine_config}")

    # Copy the TF checkpoint variables into the PyTorch parameters in place.
    load_tf_weights_in_canine(canine_model, canine_config, tf_checkpoint_path)

    # Persist weights and configuration.
    print(f"Save PyTorch model to {pytorch_dump_path}")
    canine_model.save_pretrained(pytorch_dump_path)

    # Save a default tokenizer alongside the model.
    canine_tokenizer = CanineTokenizer()
    print(f"Save tokenizer files to {pytorch_dump_path}")
    canine_tokenizer.save_pretrained(pytorch_dump_path)
46
+
47
+
48
+ if __name__ == "__main__":
49
+ parser = argparse.ArgumentParser()
50
+ # Required parameters
51
+ parser.add_argument(
52
+ "--tf_checkpoint_path",
53
+ default=None,
54
+ type=str,
55
+ required=True,
56
+ help="Path to the TensorFlow checkpoint. Should end with model.ckpt",
57
+ )
58
+ parser.add_argument(
59
+ "--pytorch_dump_path",
60
+ default=None,
61
+ type=str,
62
+ required=True,
63
+ help="Path to a folder where the PyTorch model will be placed.",
64
+ )
65
+ args = parser.parse_args()
66
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.pytorch_dump_path)
parrot/lib/python3.10/site-packages/transformers/models/canine/modeling_canine.py ADDED
@@ -0,0 +1,1642 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 Google AI The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch CANINE model."""
16
+
17
+
18
+ import copy
19
+ import math
20
+ import os
21
+ from dataclasses import dataclass
22
+ from typing import Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ...activations import ACT2FN
30
+ from ...modeling_outputs import (
31
+ BaseModelOutput,
32
+ ModelOutput,
33
+ MultipleChoiceModelOutput,
34
+ QuestionAnsweringModelOutput,
35
+ SequenceClassifierOutput,
36
+ TokenClassifierOutput,
37
+ )
38
+ from ...modeling_utils import PreTrainedModel
39
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
40
+ from ...utils import (
41
+ add_code_sample_docstrings,
42
+ add_start_docstrings,
43
+ add_start_docstrings_to_model_forward,
44
+ logging,
45
+ replace_return_docstrings,
46
+ )
47
+ from .configuration_canine import CanineConfig
48
+
49
+
50
logger = logging.get_logger(__name__)

# Names interpolated into generated docstrings (the `_*_FOR_DOC` convention);
# presumably consumed by the docstring decorators imported above.
_CHECKPOINT_FOR_DOC = "google/canine-s"
_CONFIG_FOR_DOC = "CanineConfig"


# Support up to 16 hash functions.
_PRIMES = [31, 43, 59, 61, 73, 97, 103, 113, 137, 149, 157, 173, 181, 193, 211, 223]
58
+
59
+
60
@dataclass
class CanineModelOutputWithPooling(ModelOutput):
    """
    Output type of [`CanineModel`]. Based on [`~modeling_outputs.BaseModelOutputWithPooling`], but with slightly
    different `hidden_states` and `attentions`, as these also include the hidden states and attentions of the shallow
    Transformer encoders.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model (i.e. the output of the final
            shallow Transformer encoder).
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Hidden-state of the first token of the sequence (classification token) at the last layer of the deep
            Transformer encoder, further processed by a Linear layer and a Tanh activation function. The Linear layer
            weights are trained from the next sentence prediction (classification) objective during pretraining.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the input to each encoder + one for the output of each layer of each
            encoder) of shape `(batch_size, sequence_length, hidden_size)` and `(batch_size, sequence_length //
            config.downsampling_rate, hidden_size)`. Hidden-states of the model at the output of each layer plus the
            initial input to each Transformer encoder. The hidden states of the shallow encoders have length
            `sequence_length`, but the hidden states of the deep encoder have length `sequence_length` //
            `config.downsampling_rate`.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of the 3 Transformer encoders of shape `(batch_size,
            num_heads, sequence_length, sequence_length)` and `(batch_size, num_heads, sequence_length //
            config.downsampling_rate, sequence_length // config.downsampling_rate)`. Attentions weights after the
            attention softmax, used to compute the weighted average in the self-attention heads.
    """

    # All fields default to None so the ModelOutput can be constructed
    # field-by-field and optional entries can simply be omitted.
    last_hidden_state: torch.FloatTensor = None
    pooler_output: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
93
+
94
+
95
def load_tf_weights_in_canine(model, config, tf_checkpoint_path):
    """
    Load tf checkpoints in a pytorch model.

    Every variable of the TF checkpoint is read, its slash-separated TF name
    is rewritten to match this module's attribute layout, and the weights are
    copied into `model` in place.

    Args:
        model: The PyTorch CANINE model to populate.
        config: Model configuration (not read by this function; kept for
            signature parity with other ``load_tf_weights_in_*`` helpers).
        tf_checkpoint_path: Path to the TensorFlow checkpoint.

    Returns:
        The same `model`, with its parameters overwritten in place.
    """
    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        # also discard the cls weights (which were used for the next sentence prediction pre-training task)
        if any(
            n
            in [
                "adam_v",
                "adam_m",
                "AdamWeightDecayOptimizer",
                "AdamWeightDecayOptimizer_1",
                "global_step",
                "cls",
                "autoregressive_decoder",
                "char_output_weights",
            ]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        # if first scope name starts with "bert", change it to "encoder"
        if name[0] == "bert":
            name[0] = "encoder"
        # remove "embeddings" middle name of HashBucketCodepointEmbedders
        elif name[1] == "embeddings":
            name.remove(name[1])
        # rename segment_embeddings to token_type_embeddings
        elif name[1] == "segment_embeddings":
            name[1] = "token_type_embeddings"
        # rename initial convolutional projection layer
        elif name[1] == "initial_char_encoder":
            name = ["chars_to_molecules"] + name[-2:]
        # rename final convolutional projection layer
        elif name[0] == "final_char_encoder" and name[1] in ["LayerNorm", "conv"]:
            name = ["projection"] + name[1:]
        # Walk the rewritten path segment by segment, descending into `model`.
        pointer = model
        for m_name in name:
            # Segments like "layer_3" index into a module list; split off the
            # numeric suffix — except for "...Embedder_i" names, which are
            # literal attribute names on CanineEmbeddings.
            if (re.fullmatch(r"[A-Za-z]+_\d+", m_name)) and "Embedder" not in m_name:
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    # NOTE(review): this `continue` advances the inner segment
                    # loop, not the outer variable loop, so the remaining
                    # segments are applied to the stale `pointer`. Presumably
                    # benign for the checkpoints seen in practice — confirm.
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name[-10:] in [f"Embedder_{i}" for i in range(8)]:
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # Transpose TF kernels so the shape check below matches the
            # PyTorch weight layout.
            array = np.transpose(array)

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
190
+
191
+
192
class CanineEmbeddings(nn.Module):
    """Construct the character, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()

        self.config = config

        # character embeddings
        # The hidden size is split evenly across `num_hash_functions` shard
        # tables; their outputs are concatenated back to `hidden_size` in
        # `_embed_hash_buckets` below.
        shard_embedding_size = config.hidden_size // config.num_hash_functions
        for i in range(config.num_hash_functions):
            name = f"HashBucketCodepointEmbedder_{i}"
            setattr(self, name, nn.Embedding(config.num_hash_buckets, shard_embedding_size))
        # NOTE(review): sized with `num_hash_buckets` rather than
        # `max_position_embeddings`, while position ids range up to
        # `max_position_embeddings` — this relies on
        # num_hash_buckets >= max_position_embeddings; confirm against the
        # config defaults.
        self.char_position_embeddings = nn.Embedding(config.num_hash_buckets, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

    def _hash_bucket_tensors(self, input_ids, num_hashes: int, num_buckets: int):
        """
        Converts ids to hash bucket ids via multiple hashing.

        Args:
            input_ids: The codepoints or other IDs to be hashed.
            num_hashes: The number of hash functions to use.
            num_buckets: The number of hash buckets (i.e. embeddings in each table).

        Returns:
            A list of tensors, each of which is the hash bucket IDs from one hash function.
        """
        if num_hashes > len(_PRIMES):
            raise ValueError(f"`num_hashes` must be <= {len(_PRIMES)}")

        primes = _PRIMES[:num_hashes]

        result_tensors = []
        for prime in primes:
            # Multiplicative hash with a distinct prime per function; the +1
            # keeps id 0 from mapping to bucket 0 under every prime.
            hashed = ((input_ids + 1) * prime) % num_buckets
            result_tensors.append(hashed)
        return result_tensors

    def _embed_hash_buckets(self, input_ids, embedding_size: int, num_hashes: int, num_buckets: int):
        """Converts IDs (e.g. codepoints) into embeddings via multiple hashing."""
        # Each hash function owns an equal slice of the output embedding.
        if embedding_size % num_hashes != 0:
            raise ValueError(f"Expected `embedding_size` ({embedding_size}) % `num_hashes` ({num_hashes}) == 0")

        hash_bucket_tensors = self._hash_bucket_tensors(input_ids, num_hashes=num_hashes, num_buckets=num_buckets)
        embedding_shards = []
        for i, hash_bucket_ids in enumerate(hash_bucket_tensors):
            # Look up each hash result in its own shard table (set up in
            # __init__ via setattr).
            name = f"HashBucketCodepointEmbedder_{i}"
            shard_embeddings = getattr(self, name)(hash_bucket_ids)
            embedding_shards.append(shard_embeddings)

        # Concatenate the shards along the feature axis back to embedding_size.
        return torch.cat(embedding_shards, dim=-1)

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        """
        Sum hash-based character embeddings with token-type (and, for the
        "absolute" scheme, position) embeddings, then layer-normalize and
        apply dropout.

        Assumes at least one of `input_ids` / `inputs_embeds` is provided
        (the fall-through below dereferences `inputs_embeds` otherwise) —
        confirm with callers.
        """
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            # Default to positions 0..seq_length-1 from the registered buffer.
            position_ids = self.position_ids[:, :seq_length]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self._embed_hash_buckets(
                input_ids, self.config.hidden_size, self.config.num_hash_functions, self.config.num_hash_buckets
            )

        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings

        if self.position_embedding_type == "absolute":
            position_embeddings = self.char_position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
291
+
292
+
293
class CharactersToMolecules(nn.Module):
    """Convert character sequence to initial molecule sequence (i.e. downsample) using strided convolutions."""

    def __init__(self, config):
        super().__init__()

        # kernel_size == stride == downsampling_rate: each output "molecule"
        # summarizes one non-overlapping window of `downsampling_rate` chars.
        self.conv = nn.Conv1d(
            in_channels=config.hidden_size,
            out_channels=config.hidden_size,
            kernel_size=config.downsampling_rate,
            stride=config.downsampling_rate,
        )
        self.activation = ACT2FN[config.hidden_act]

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, char_encoding: torch.Tensor) -> torch.Tensor:
        """
        Downsample a character encoding into molecules.

        Args:
            char_encoding: `[batch, char_seq, hidden_size]` character states.

        Returns:
            `[batch, molecule_seq, hidden_size]` molecule states; position 0
            carries the (un-downsampled) [CLS] encoding.
        """
        # `cls_encoding`: [batch, 1, hidden_size]
        cls_encoding = char_encoding[:, 0:1, :]

        # char_encoding has shape [batch, char_seq, hidden_size]
        # We transpose it to be [batch, hidden_size, char_seq]
        char_encoding = torch.transpose(char_encoding, 1, 2)
        downsampled = self.conv(char_encoding)
        downsampled = torch.transpose(downsampled, 1, 2)
        downsampled = self.activation(downsampled)

        # Truncate the last molecule in order to reserve a position for [CLS].
        # Often, the last position is never used (unless we completely fill the
        # text buffer). This is important in order to maintain alignment on TPUs
        # (i.e. a multiple of 128).
        downsampled_truncated = downsampled[:, 0:-1, :]

        # We also keep [CLS] as a separate sequence position since we always
        # want to reserve a position (and the model capacity that goes along
        # with that) in the deep BERT stack.
        # `result`: [batch, molecule_seq, molecule_dim]
        result = torch.cat([cls_encoding, downsampled_truncated], dim=1)

        result = self.LayerNorm(result)

        return result
337
+
338
+
339
class ConvProjection(nn.Module):
    """
    Project representations from hidden_size*2 back to hidden_size across a window of w = config.upsampling_kernel_size
    characters.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # stride=1 keeps the sequence length; the manual padding in forward()
        # emulates padding="same".
        self.conv = nn.Conv1d(
            in_channels=config.hidden_size * 2,
            out_channels=config.hidden_size,
            kernel_size=config.upsampling_kernel_size,
            stride=1,
        )
        self.activation = ACT2FN[config.hidden_act]
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(
        self,
        inputs: torch.Tensor,
        final_seq_char_positions: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        Project concatenated features back down to `hidden_size` per position.

        Raises:
            NotImplementedError: if `final_seq_char_positions` is provided
                (MLM support is not implemented yet).
        """
        # inputs has shape [batch, mol_seq, molecule_hidden_size+char_hidden_final]
        # we transpose it to be [batch, molecule_hidden_size+char_hidden_final, mol_seq]
        inputs = torch.transpose(inputs, 1, 2)

        # PyTorch < 1.9 does not support padding="same" (which is used in the original implementation),
        # so we pad the tensor manually before passing it to the conv layer
        # based on https://github.com/google-research/big_transfer/blob/49afe42338b62af9fbe18f0258197a33ee578a6b/bit_tf2/models.py#L36-L38
        # Asymmetric split: kernel_size-1 total, floor on the left, rest right.
        pad_total = self.config.upsampling_kernel_size - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg

        pad = nn.ConstantPad1d((pad_beg, pad_end), 0)
        # `result`: shape (batch_size, char_seq_len, hidden_size)
        result = self.conv(pad(inputs))
        result = torch.transpose(result, 1, 2)
        result = self.activation(result)
        result = self.LayerNorm(result)
        result = self.dropout(result)
        final_char_seq = result

        if final_seq_char_positions is not None:
            # Limit transformer query seq and attention mask to these character
            # positions to greatly reduce the compute cost. Typically, this is just
            # done for the MLM training task.
            # TODO add support for MLM
            raise NotImplementedError("CanineForMaskedLM is currently not supported")
        else:
            query_seq = final_char_seq

        return query_seq
395
+
396
+
397
class CanineSelfAttention(nn.Module):
    """BERT-style multi-head (self- or cross-) attention used by the CANINE encoders.

    `forward` takes separate `from_tensor` (queries) and `to_tensor` (keys/values),
    which lets the same module serve both full self-attention and the chunked local
    attention in `CanineAttention`. Supports "absolute" (default), "relative_key"
    and "relative_key_query" position embedding types.
    """

    def __init__(self, config):
        super().__init__()
        # hidden_size must split evenly across heads, unless the config carries a
        # separate `embedding_size` (then the embedding dim is decoupled from hidden_size).
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # Q/K/V projections; each maps hidden_size -> num_heads * head_size.
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            # Embedding over all possible signed distances in [-(max-1), max-1].
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        from_tensor: torch.Tensor,
        to_tensor: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Attend from `from_tensor` to `to_tensor`.

        Returns a tuple of (context_layer,) plus the attention probabilities when
        `output_attentions` is True.
        """
        mixed_query_layer = self.query(from_tensor)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.

        key_layer = self.transpose_for_scores(self.key(to_tensor))
        value_layer = self.transpose_for_scores(self.value(to_tensor))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            # Relative position scoring assumes from/to sequences have the same length
            # (both position id grids are built from `from_tensor`'s seq dimension).
            seq_length = from_tensor.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            # Shift signed distance into [0, 2*max-2] to index the embedding table.
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        # Scale by sqrt(head_size) as in "Attention Is All You Need".
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            if attention_mask.ndim == 3:
                # if attention_mask is 3D, do the following:
                attention_mask = torch.unsqueeze(attention_mask, dim=1)
                # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
                # masked positions, this operation will create a tensor which is 0.0 for
                # positions we want to attend and the dtype's smallest value for masked positions.
                attention_mask = (1.0 - attention_mask.float()) * torch.finfo(attention_scores.dtype).min
            # Apply the attention mask (precomputed for all layers in CanineModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # (batch, num_heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
495
+
496
+
497
class CanineSelfOutput(nn.Module):
    """Post-attention projection: dense -> dropout -> residual add -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        width = config.hidden_size
        self.dense = nn.Linear(width, width)
        self.LayerNorm = nn.LayerNorm(width, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(
        self, hidden_states: Tuple[torch.FloatTensor], input_tensor: torch.FloatTensor
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        # Project the attention output, regularize, then normalize the residual sum.
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
511
+
512
+
513
class CanineAttention(nn.Module):
    """
    Additional arguments related to local attention:

        - **local** (`bool`, *optional*, defaults to `False`) -- Whether to apply local attention.
        - **always_attend_to_first_position** (`bool`, *optional*, defaults to `False`) -- Should all blocks be able to
          attend
        to the `to_tensor`'s first position (e.g. a [CLS] position)? - **first_position_attends_to_all** (`bool`,
        *optional*, defaults to `False`) -- Should the *from_tensor*'s first position be able to attend to all
        positions within the *from_tensor*? - **attend_from_chunk_width** (`int`, *optional*, defaults to 128) -- The
        width of each block-wise chunk in `from_tensor`. - **attend_from_chunk_stride** (`int`, *optional*, defaults to
        128) -- The number of elements to skip when moving to the next block in `from_tensor`. -
        **attend_to_chunk_width** (`int`, *optional*, defaults to 128) -- The width of each block-wise chunk in
        *to_tensor*. - **attend_to_chunk_stride** (`int`, *optional*, defaults to 128) -- The number of elements to
        skip when moving to the next block in `to_tensor`.
    """

    def __init__(
        self,
        config,
        local=False,
        always_attend_to_first_position: bool = False,
        first_position_attends_to_all: bool = False,
        attend_from_chunk_width: int = 128,
        attend_from_chunk_stride: int = 128,
        attend_to_chunk_width: int = 128,
        attend_to_chunk_stride: int = 128,
    ):
        super().__init__()
        self.self = CanineSelfAttention(config)
        self.output = CanineSelfOutput(config)
        self.pruned_heads = set()

        # additional arguments related to local attention
        self.local = local
        # A stride larger than the chunk width would leave gaps between chunks,
        # i.e. some positions would never be attended from/to.
        if attend_from_chunk_width < attend_from_chunk_stride:
            raise ValueError(
                "`attend_from_chunk_width` < `attend_from_chunk_stride` would cause sequence positions to get skipped."
            )
        if attend_to_chunk_width < attend_to_chunk_stride:
            raise ValueError(
                # Fix: the original message was missing the space before "would".
                "`attend_to_chunk_width` < `attend_to_chunk_stride` would cause sequence positions to get skipped."
            )
        self.always_attend_to_first_position = always_attend_to_first_position
        self.first_position_attends_to_all = first_position_attends_to_all
        self.attend_from_chunk_width = attend_from_chunk_width
        self.attend_from_chunk_stride = attend_from_chunk_stride
        self.attend_to_chunk_width = attend_to_chunk_width
        self.attend_to_chunk_stride = attend_to_chunk_stride

    def prune_heads(self, heads):
        """Remove the given attention heads from Q/K/V and the output projection."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: Tuple[torch.FloatTensor],
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
        """Run full self-attention, or chunked (block-wise) local attention when `self.local`.

        Returns `(attention_output,)` plus attention probabilities (one tensor, or one
        per chunk in local mode) when `output_attentions` is True.
        """
        if not self.local:
            self_outputs = self.self(hidden_states, hidden_states, attention_mask, head_mask, output_attentions)
            attention_output = self_outputs[0]
        else:
            from_seq_length = to_seq_length = hidden_states.shape[1]
            from_tensor = to_tensor = hidden_states

            # Create chunks (windows) that we will attend *from* and then concatenate them.
            from_chunks = []
            if self.first_position_attends_to_all:
                from_chunks.append((0, 1))
                # We must skip this first position so that our output sequence is the
                # correct length (this matters in the *from* sequence only).
                from_start = 1
            else:
                from_start = 0
            for chunk_start in range(from_start, from_seq_length, self.attend_from_chunk_stride):
                chunk_end = min(from_seq_length, chunk_start + self.attend_from_chunk_width)
                from_chunks.append((chunk_start, chunk_end))

            # Determine the chunks (windows) that will attend *to*.
            to_chunks = []
            if self.first_position_attends_to_all:
                to_chunks.append((0, to_seq_length))
            for chunk_start in range(0, to_seq_length, self.attend_to_chunk_stride):
                chunk_end = min(to_seq_length, chunk_start + self.attend_to_chunk_width)
                to_chunks.append((chunk_start, chunk_end))

            if len(from_chunks) != len(to_chunks):
                raise ValueError(
                    # Bug fix: the second placeholder previously printed `from_chunks` twice.
                    f"Expected to have same number of `from_chunks` ({from_chunks}) and "
                    f"`to_chunks` ({to_chunks}). Check strides."
                )

            # next, compute attention scores for each pair of windows and concatenate
            attention_output_chunks = []
            attention_probs_chunks = []
            for (from_start, from_end), (to_start, to_end) in zip(from_chunks, to_chunks):
                from_tensor_chunk = from_tensor[:, from_start:from_end, :]
                to_tensor_chunk = to_tensor[:, to_start:to_end, :]
                # `attention_mask`: <float>[batch_size, from_seq, to_seq]
                # `attention_mask_chunk`: <float>[batch_size, from_seq_chunk, to_seq_chunk]
                attention_mask_chunk = attention_mask[:, from_start:from_end, to_start:to_end]
                if self.always_attend_to_first_position:
                    # Prepend the [CLS] position (and its mask column) to every chunk.
                    cls_attention_mask = attention_mask[:, from_start:from_end, 0:1]
                    attention_mask_chunk = torch.cat([cls_attention_mask, attention_mask_chunk], dim=2)

                    cls_position = to_tensor[:, 0:1, :]
                    to_tensor_chunk = torch.cat([cls_position, to_tensor_chunk], dim=1)

                attention_outputs_chunk = self.self(
                    from_tensor_chunk, to_tensor_chunk, attention_mask_chunk, head_mask, output_attentions
                )
                attention_output_chunks.append(attention_outputs_chunk[0])
                if output_attentions:
                    attention_probs_chunks.append(attention_outputs_chunk[1])

            attention_output = torch.cat(attention_output_chunks, dim=1)

        # Residual projection + LayerNorm over the (concatenated) attention output.
        attention_output = self.output(attention_output, hidden_states)
        outputs = (attention_output,)
        if not self.local:
            outputs = outputs + self_outputs[1:]  # add attentions if we output them
        else:
            outputs = outputs + tuple(attention_probs_chunks)  # add attentions if we output them
        return outputs
654
+
655
+
656
class CanineIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size, then the activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be a string key into ACT2FN or a callable.
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
669
+
670
+
671
class CanineOutput(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size, dropout, residual + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: Tuple[torch.FloatTensor], input_tensor: torch.FloatTensor) -> torch.FloatTensor:
        # Contract back to hidden_size, regularize, then normalize the residual sum.
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
683
+
684
+
685
class CanineLayer(nn.Module):
    """One Transformer layer: (optionally local) self-attention followed by a feed-forward block."""

    def __init__(
        self,
        config,
        local,
        always_attend_to_first_position,
        first_position_attends_to_all,
        attend_from_chunk_width,
        attend_from_chunk_stride,
        attend_to_chunk_width,
        attend_to_chunk_stride,
    ):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Chunk the feed-forward along the sequence axis.
        self.seq_len_dim = 1
        self.attention = CanineAttention(
            config,
            local,
            always_attend_to_first_position,
            first_position_attends_to_all,
            attend_from_chunk_width,
            attend_from_chunk_stride,
            attend_to_chunk_width,
            attend_to_chunk_stride,
        )
        self.intermediate = CanineIntermediate(config)
        self.output = CanineOutput(config)

    def forward(
        self,
        hidden_states: Tuple[torch.FloatTensor],
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
        attn_results = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
        )
        # Feed-forward runs in sequence-dim chunks to trade compute for peak memory.
        ffn_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_results[0]
        )
        # Layer output first, then any attention probabilities that were requested.
        return (ffn_output,) + attn_results[1:]

    def feed_forward_chunk(self, attention_output):
        # Expand -> activate -> contract; the residual add + LayerNorm happen in self.output.
        return self.output(self.intermediate(attention_output), attention_output)
741
+
742
+
743
class CanineEncoder(nn.Module):
    """Stack of `config.num_hidden_layers` CanineLayers.

    The local-attention arguments are forwarded unchanged to every layer; with the
    defaults this is a plain (global-attention) Transformer encoder. Supports
    gradient checkpointing via `self._gradient_checkpointing_func`, which is
    installed externally when checkpointing is enabled (not defined in this class).
    """

    def __init__(
        self,
        config,
        local=False,
        always_attend_to_first_position=False,
        first_position_attends_to_all=False,
        attend_from_chunk_width=128,
        attend_from_chunk_stride=128,
        attend_to_chunk_width=128,
        attend_to_chunk_stride=128,
    ):
        super().__init__()
        self.config = config
        # All layers share the same local-attention configuration.
        self.layer = nn.ModuleList(
            [
                CanineLayer(
                    config,
                    local,
                    always_attend_to_first_position,
                    first_position_attends_to_all,
                    attend_from_chunk_width,
                    attend_from_chunk_stride,
                    attend_to_chunk_width,
                    attend_to_chunk_stride,
                )
                for _ in range(config.num_hidden_layers)
            ]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: Tuple[torch.FloatTensor],
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, BaseModelOutput]:
        """Run all layers; optionally collect per-layer hidden states and attentions.

        Returns a `BaseModelOutput` (or a tuple of the non-None values when
        `return_dict` is False).
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the *input* to each layer (the final state is appended after the loop).
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                # Recompute activations in the backward pass instead of storing them.
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
817
+
818
+
819
class CaninePooler(nn.Module):
    """Pools a sequence into one vector: first-token hidden state through a tanh dense layer."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: Tuple[torch.FloatTensor]) -> torch.FloatTensor:
        # "Pooling" here is simply selecting the first token's hidden state.
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
832
+
833
+
834
class CaninePredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the LM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # `hidden_act` may be a string key into ACT2FN or a callable.
        act = config.hidden_act
        self.transform_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: Tuple[torch.FloatTensor]) -> torch.FloatTensor:
        activated = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(activated)
849
+
850
+
851
class CanineLMPredictionHead(nn.Module):
    """LM head: transform then project to vocab logits, with a standalone output bias."""

    def __init__(self, config):
        super().__init__()
        self.transform = CaninePredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states: Tuple[torch.FloatTensor]) -> torch.FloatTensor:
        return self.decoder(self.transform(hidden_states))
869
+
870
+
871
class CanineOnlyMLMHead(nn.Module):
    """Thin wrapper exposing the LM prediction head for masked-LM scoring."""

    def __init__(self, config):
        super().__init__()
        self.predictions = CanineLMPredictionHead(config)

    def forward(
        self,
        sequence_output: Tuple[torch.Tensor],
    ) -> Tuple[torch.Tensor]:
        # Delegate directly to the prediction head.
        return self.predictions(sequence_output)
882
+
883
+
884
class CaninePreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CanineConfig
    load_tf_weights = load_tf_weights_in_canine
    base_model_prefix = "canine"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # Identity-like start: zero shift, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
910
+
911
+
912
# Docstring template injected onto CANINE model classes via `add_start_docstrings`.
CANINE_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`CanineConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
922
+
923
# Docstring template for `forward`, formatted with the input shape and injected via
# `add_start_docstrings_to_model_forward`.
CANINE_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
971
+
972
+
973
+ @add_start_docstrings(
974
+ "The bare CANINE Model transformer outputting raw hidden-states without any specific head on top.",
975
+ CANINE_START_DOCSTRING,
976
+ )
977
+ class CanineModel(CaninePreTrainedModel):
978
    def __init__(self, config, add_pooling_layer=True):
        """Build the full CANINE pipeline: char embeddings -> shallow local encoder ->
        downsampling -> deep encoder -> upsampling projection -> shallow final encoder.
        """
        super().__init__(config)
        self.config = config
        # The shallow encoders use a single layer; the deep encoder uses the full config.
        shallow_config = copy.deepcopy(config)
        shallow_config.num_hidden_layers = 1

        self.char_embeddings = CanineEmbeddings(config)
        # shallow/low-dim transformer encoder to get an initial character encoding;
        # local attention windows are all sized by `local_transformer_stride`.
        self.initial_char_encoder = CanineEncoder(
            shallow_config,
            local=True,
            always_attend_to_first_position=False,
            first_position_attends_to_all=False,
            attend_from_chunk_width=config.local_transformer_stride,
            attend_from_chunk_stride=config.local_transformer_stride,
            attend_to_chunk_width=config.local_transformer_stride,
            attend_to_chunk_stride=config.local_transformer_stride,
        )
        # Downsamples characters to "molecules" before the deep stack.
        self.chars_to_molecules = CharactersToMolecules(config)
        # deep transformer encoder
        self.encoder = CanineEncoder(config)
        # Upsamples molecule representations back to character length.
        self.projection = ConvProjection(config)
        # shallow/low-dim transformer encoder to get a final character encoding
        self.final_char_encoder = CanineEncoder(shallow_config)

        self.pooler = CaninePooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
1007
+
1008
+ def _prune_heads(self, heads_to_prune):
1009
+ """
1010
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1011
+ class PreTrainedModel
1012
+ """
1013
+ for layer, heads in heads_to_prune.items():
1014
+ self.encoder.layer[layer].attention.prune_heads(heads)
1015
+
1016
+ def _create_3d_attention_mask_from_input_mask(self, from_tensor, to_mask):
1017
+ """
1018
+ Create 3D attention mask from a 2D tensor mask.
1019
+
1020
+ Args:
1021
+ from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
1022
+ to_mask: int32 Tensor of shape [batch_size, to_seq_length].
1023
+
1024
+ Returns:
1025
+ float Tensor of shape [batch_size, from_seq_length, to_seq_length].
1026
+ """
1027
+ batch_size, from_seq_length = from_tensor.shape[0], from_tensor.shape[1]
1028
+
1029
+ to_seq_length = to_mask.shape[1]
1030
+
1031
+ to_mask = torch.reshape(to_mask, (batch_size, 1, to_seq_length)).float()
1032
+
1033
+ # We don't assume that `from_tensor` is a mask (although it could be). We
1034
+ # don't actually care if we attend *from* padding tokens (only *to* padding)
1035
+ # tokens so we create a tensor of all ones.
1036
+ broadcast_ones = torch.ones(size=(batch_size, from_seq_length, 1), dtype=torch.float32, device=to_mask.device)
1037
+
1038
+ # Here we broadcast along two dimensions to create the mask.
1039
+ mask = broadcast_ones * to_mask
1040
+
1041
+ return mask
1042
+
1043
+ def _downsample_attention_mask(self, char_attention_mask: torch.Tensor, downsampling_rate: int):
1044
+ """Downsample 2D character attention mask to 2D molecule attention mask using MaxPool1d layer."""
1045
+
1046
+ # first, make char_attention_mask 3D by adding a channel dim
1047
+ batch_size, char_seq_len = char_attention_mask.shape
1048
+ poolable_char_mask = torch.reshape(char_attention_mask, (batch_size, 1, char_seq_len))
1049
+
1050
+ # next, apply MaxPool1d to get pooled_molecule_mask of shape (batch_size, 1, mol_seq_len)
1051
+ pooled_molecule_mask = torch.nn.MaxPool1d(kernel_size=downsampling_rate, stride=downsampling_rate)(
1052
+ poolable_char_mask.float()
1053
+ )
1054
+
1055
+ # finally, squeeze to get tensor of shape (batch_size, mol_seq_len)
1056
+ molecule_attention_mask = torch.squeeze(pooled_molecule_mask, dim=-1)
1057
+
1058
+ return molecule_attention_mask
1059
+
1060
    def _repeat_molecules(self, molecules: torch.Tensor, char_seq_length: torch.Tensor) -> torch.Tensor:
        """Repeats molecules to make them the same length as the char sequence.

        Each molecule (except the extra leading CLS molecule, which is dropped) is
        repeated `downsampling_rate` times along the sequence dimension; the last
        molecule is then repeated extra times to cover any remainder so the output
        sequence length matches `char_seq_length`.
        """

        rate = self.config.downsampling_rate

        # Drop the extra CLS molecule at position 0 before repeating.
        molecules_without_extra_cls = molecules[:, 1:, :]
        # `repeated`: [batch_size, almost_char_seq_len, molecule_hidden_size]
        repeated = torch.repeat_interleave(molecules_without_extra_cls, repeats=rate, dim=-2)

        # So far, we've repeated the elements sufficient for any `char_seq_length`
        # that's a multiple of `downsampling_rate`. Now we account for the last
        # n elements (n < `downsampling_rate`), i.e. the remainder of floor
        # division. We do this by repeating the last molecule a few extra times.
        last_molecule = molecules[:, -1:, :]
        # remainder_length = char_seq_length mod rate (as a Python int).
        remainder_length = torch.fmod(torch.tensor(char_seq_length), torch.tensor(rate)).item()
        remainder_repeated = torch.repeat_interleave(
            last_molecule,
            # +1 molecule to compensate for truncation.
            repeats=remainder_length + rate,
            dim=-2,
        )

        # `repeated`: [batch_size, char_seq_len, molecule_hidden_size]
        return torch.cat([repeated, remainder_repeated], dim=-2)
1084
+
1085
+ @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1086
+ @add_code_sample_docstrings(
1087
+ checkpoint=_CHECKPOINT_FOR_DOC,
1088
+ output_type=CanineModelOutputWithPooling,
1089
+ config_class=_CONFIG_FOR_DOC,
1090
+ )
1091
+ def forward(
1092
+ self,
1093
+ input_ids: Optional[torch.LongTensor] = None,
1094
+ attention_mask: Optional[torch.FloatTensor] = None,
1095
+ token_type_ids: Optional[torch.LongTensor] = None,
1096
+ position_ids: Optional[torch.LongTensor] = None,
1097
+ head_mask: Optional[torch.FloatTensor] = None,
1098
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1099
+ output_attentions: Optional[bool] = None,
1100
+ output_hidden_states: Optional[bool] = None,
1101
+ return_dict: Optional[bool] = None,
1102
+ ) -> Union[Tuple, CanineModelOutputWithPooling]:
1103
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1104
+ output_hidden_states = (
1105
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1106
+ )
1107
+ all_hidden_states = () if output_hidden_states else None
1108
+ all_self_attentions = () if output_attentions else None
1109
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1110
+
1111
+ if input_ids is not None and inputs_embeds is not None:
1112
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1113
+ elif input_ids is not None:
1114
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1115
+ input_shape = input_ids.size()
1116
+ elif inputs_embeds is not None:
1117
+ input_shape = inputs_embeds.size()[:-1]
1118
+ else:
1119
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1120
+
1121
+ batch_size, seq_length = input_shape
1122
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1123
+
1124
+ if attention_mask is None:
1125
+ attention_mask = torch.ones(((batch_size, seq_length)), device=device)
1126
+ if token_type_ids is None:
1127
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
1128
+
1129
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1130
+ # ourselves in which case we just need to make it broadcastable to all heads.
1131
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
1132
+ molecule_attention_mask = self._downsample_attention_mask(
1133
+ attention_mask, downsampling_rate=self.config.downsampling_rate
1134
+ )
1135
+ extended_molecule_attention_mask: torch.Tensor = self.get_extended_attention_mask(
1136
+ molecule_attention_mask, (batch_size, molecule_attention_mask.shape[-1])
1137
+ )
1138
+
1139
+ # Prepare head mask if needed
1140
+ # 1.0 in head_mask indicate we keep the head
1141
+ # attention_probs has shape bsz x n_heads x N x N
1142
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1143
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1144
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1145
+
1146
+ # `input_char_embeddings`: shape (batch_size, char_seq, char_dim)
1147
+ input_char_embeddings = self.char_embeddings(
1148
+ input_ids=input_ids,
1149
+ position_ids=position_ids,
1150
+ token_type_ids=token_type_ids,
1151
+ inputs_embeds=inputs_embeds,
1152
+ )
1153
+
1154
+ # Contextualize character embeddings using shallow Transformer.
1155
+ # We use a 3D attention mask for the local attention.
1156
+ # `input_char_encoding`: shape (batch_size, char_seq_len, char_dim)
1157
+ char_attention_mask = self._create_3d_attention_mask_from_input_mask(
1158
+ input_ids if input_ids is not None else inputs_embeds, attention_mask
1159
+ )
1160
+ init_chars_encoder_outputs = self.initial_char_encoder(
1161
+ input_char_embeddings,
1162
+ attention_mask=char_attention_mask,
1163
+ output_attentions=output_attentions,
1164
+ output_hidden_states=output_hidden_states,
1165
+ )
1166
+ input_char_encoding = init_chars_encoder_outputs.last_hidden_state
1167
+
1168
+ # Downsample chars to molecules.
1169
+ # The following lines have dimensions: [batch, molecule_seq, molecule_dim].
1170
+ # In this transformation, we change the dimensionality from `char_dim` to
1171
+ # `molecule_dim`, but do *NOT* add a resnet connection. Instead, we rely on
1172
+ # the resnet connections (a) from the final char transformer stack back into
1173
+ # the original char transformer stack and (b) the resnet connections from
1174
+ # the final char transformer stack back into the deep BERT stack of
1175
+ # molecules.
1176
+ #
1177
+ # Empirically, it is critical to use a powerful enough transformation here:
1178
+ # mean pooling causes training to diverge with huge gradient norms in this
1179
+ # region of the model; using a convolution here resolves this issue. From
1180
+ # this, it seems that molecules and characters require a very different
1181
+ # feature space; intuitively, this makes sense.
1182
+ init_molecule_encoding = self.chars_to_molecules(input_char_encoding)
1183
+
1184
+ # Deep BERT encoder
1185
+ # `molecule_sequence_output`: shape (batch_size, mol_seq_len, mol_dim)
1186
+ encoder_outputs = self.encoder(
1187
+ init_molecule_encoding,
1188
+ attention_mask=extended_molecule_attention_mask,
1189
+ head_mask=head_mask,
1190
+ output_attentions=output_attentions,
1191
+ output_hidden_states=output_hidden_states,
1192
+ return_dict=return_dict,
1193
+ )
1194
+ molecule_sequence_output = encoder_outputs[0]
1195
+ pooled_output = self.pooler(molecule_sequence_output) if self.pooler is not None else None
1196
+
1197
+ # Upsample molecules back to characters.
1198
+ # `repeated_molecules`: shape (batch_size, char_seq_len, mol_hidden_size)
1199
+ repeated_molecules = self._repeat_molecules(molecule_sequence_output, char_seq_length=input_shape[-1])
1200
+
1201
+ # Concatenate representations (contextualized char embeddings and repeated molecules):
1202
+ # `concat`: shape [batch_size, char_seq_len, molecule_hidden_size+char_hidden_final]
1203
+ concat = torch.cat([input_char_encoding, repeated_molecules], dim=-1)
1204
+
1205
+ # Project representation dimension back to hidden_size
1206
+ # `sequence_output`: shape (batch_size, char_seq_len, hidden_size])
1207
+ sequence_output = self.projection(concat)
1208
+
1209
+ # Apply final shallow Transformer
1210
+ # `sequence_output`: shape (batch_size, char_seq_len, hidden_size])
1211
+ final_chars_encoder_outputs = self.final_char_encoder(
1212
+ sequence_output,
1213
+ attention_mask=extended_attention_mask,
1214
+ output_attentions=output_attentions,
1215
+ output_hidden_states=output_hidden_states,
1216
+ )
1217
+ sequence_output = final_chars_encoder_outputs.last_hidden_state
1218
+
1219
+ if output_hidden_states:
1220
+ deep_encoder_hidden_states = encoder_outputs.hidden_states if return_dict else encoder_outputs[1]
1221
+ all_hidden_states = (
1222
+ all_hidden_states
1223
+ + init_chars_encoder_outputs.hidden_states
1224
+ + deep_encoder_hidden_states
1225
+ + final_chars_encoder_outputs.hidden_states
1226
+ )
1227
+
1228
+ if output_attentions:
1229
+ deep_encoder_self_attentions = encoder_outputs.attentions if return_dict else encoder_outputs[-1]
1230
+ all_self_attentions = (
1231
+ all_self_attentions
1232
+ + init_chars_encoder_outputs.attentions
1233
+ + deep_encoder_self_attentions
1234
+ + final_chars_encoder_outputs.attentions
1235
+ )
1236
+
1237
+ if not return_dict:
1238
+ output = (sequence_output, pooled_output)
1239
+ output += tuple(v for v in [all_hidden_states, all_self_attentions] if v is not None)
1240
+ return output
1241
+
1242
+ return CanineModelOutputWithPooling(
1243
+ last_hidden_state=sequence_output,
1244
+ pooler_output=pooled_output,
1245
+ hidden_states=all_hidden_states,
1246
+ attentions=all_self_attentions,
1247
+ )
1248
+
1249
+
1250
@add_start_docstrings(
    """
    CANINE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    CANINE_START_DOCSTRING,
)
class CanineForSequenceClassification(CaninePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # num_labels == 1 selects regression; > 1 selects classification (see forward()).
        self.num_labels = config.num_labels

        self.canine = CanineModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Classification head applied to the pooled ([CLS]-style) output.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.canine(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Classify from the pooled output (index 1 of the model outputs).
        logits = self.classifier(self.dropout(outputs[1]))

        loss = None
        if labels is not None:
            # Infer the problem type once and cache it on the config, mirroring
            # the standard transformers convention.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and labels.dtype in (torch.long, torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            problem_type = self.config.problem_type
            if problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif problem_type == "single_label_classification":
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            elif problem_type == "multi_label_classification":
                loss = BCEWithLogitsLoss()(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return output if loss is None else (loss,) + output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1345
+
1346
+
1347
@add_start_docstrings(
    """
    CANINE Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    CANINE_START_DOCSTRING,
)
class CanineForMultipleChoice(CaninePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.canine = CanineModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One scalar score per choice; the scores are compared across choices.
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = (input_ids if input_ids is not None else inputs_embeds).shape[1]

        # Fold the choice axis into the batch axis: (batch, choices, seq) -> (batch * choices, seq).
        flat_input_ids = None if input_ids is None else input_ids.view(-1, input_ids.size(-1))
        flat_attention_mask = None if attention_mask is None else attention_mask.view(-1, attention_mask.size(-1))
        flat_token_type_ids = None if token_type_ids is None else token_type_ids.view(-1, token_type_ids.size(-1))
        flat_position_ids = None if position_ids is None else position_ids.view(-1, position_ids.size(-1))
        flat_inputs_embeds = (
            None
            if inputs_embeds is None
            else inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
        )

        outputs = self.canine(
            flat_input_ids,
            attention_mask=flat_attention_mask,
            token_type_ids=flat_token_type_ids,
            position_ids=flat_position_ids,
            head_mask=head_mask,
            inputs_embeds=flat_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Score each (example, choice) pair from the pooled output, then restore the choice axis.
        pooled_output = self.dropout(outputs[1])
        reshaped_logits = self.classifier(pooled_output).view(-1, num_choices)

        loss = None
        if labels is not None:
            loss = CrossEntropyLoss()(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return output if loss is None else (loss,) + output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1436
+
1437
+
1438
@add_start_docstrings(
    """
    CANINE Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    CANINE_START_DOCSTRING,
)
class CanineForTokenClassification(CaninePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # Number of per-character classes predicted by the head.
        self.num_labels = config.num_labels

        self.canine = CanineModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Classifies every position of the final character-level hidden states.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CanineForTokenClassification
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("google/canine-s")
        >>> model = CanineForTokenClassification.from_pretrained("google/canine-s")

        >>> inputs = tokenizer(
        ...     "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt"
        ... )

        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits

        >>> predicted_token_class_ids = logits.argmax(-1)

        >>> # Note that tokens are classified rather then input words which means that
        >>> # there might be more predicted token classes than words.
        >>> # Multiple token classes might account for the same word
        >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]]
        >>> predicted_tokens_classes  # doctest: +SKIP
        ```

        ```python
        >>> labels = predicted_token_class_ids
        >>> loss = model(**inputs, labels=labels).loss
        >>> round(loss.item(), 2)  # doctest: +SKIP
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.canine(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Index 0 is the sequence of per-character hidden states (last_hidden_state).
        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # Flatten (batch, seq, num_labels) -> (batch*seq, num_labels) for the loss.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            # Tuple output: logits first, then any hidden states / attentions from the base model.
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1542
+
1543
+
1544
@add_start_docstrings(
    """
    CANINE Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    CANINE_START_DOCSTRING,
)
class CanineForQuestionAnswering(CaninePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # The forward pass splits the head's output into (start, end) logits,
        # so num_labels is expected to be 2 here.
        self.num_labels = config.num_labels

        self.canine = CanineModel(config)
        # Maps each character's hidden state to span start/end scores.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint="Splend1dchan/canine-c-squad",
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output="'nice puppet'",
        expected_loss=8.81,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.canine(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Index 0 is the sequence of per-character hidden states (last_hidden_state).
        sequence_output = outputs[0]

        # Project to 2 scores per position, then separate start and end logits.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # (out-of-range targets are clamped to seq_len, which the loss then ignores)
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Average the two span losses.
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            # Tuple output: start/end logits first, then any extra base-model outputs.
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
parrot/lib/python3.10/site-packages/transformers/models/canine/tokenization_canine.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright Google AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for CANINE."""
16
+
17
+ from typing import Dict, List, Optional
18
+
19
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ # Unicode defines 1,114,112 total “codepoints”
27
+ UNICODE_VOCAB_SIZE = 1114112
28
+
29
+ # Below: Constants defining canonical codepoints for special, pseudo-characters.
30
+ # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
31
+ PAD = 0
32
+ CLS = 0xE000
33
+ SEP = 0xE001
34
+ BOS = 0xE002
35
+ MASK = 0xE003
36
+ RESERVED = 0xE004
37
+
38
+ # Maps special codepoints to human-readable names.
39
+ SPECIAL_CODEPOINTS: Dict[int, str] = {
40
+ # Special symbols are represented using codepoints values that are valid,
41
+ # but designated as "Private Use", meaning that they will never be assigned
42
+ # characters by the Unicode Consortium, and are thus safe for use here.
43
+ #
44
+ # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
45
+ # excluded and should fail with a hard error.
46
+ CLS: "[CLS]",
47
+ SEP: "[SEP]",
48
+ BOS: "[BOS]",
49
+ MASK: "[MASK]",
50
+ PAD: "[PAD]",
51
+ RESERVED: "[RESERVED]",
52
+ }
53
+
54
+ # Maps special codepoint human-readable names to their codepoint values.
55
+ SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
56
+
57
+
58
class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of characters, and then
    converts each character into its Unicode code point.

    [`CanineTokenizer`] inherits from [`PreTrainedTokenizer`].

    Refer to superclass [`PreTrainedTokenizer`] for usage examples and documentation concerning parameters.

    Args:
        model_max_length (`int`, *optional*, defaults to 2048):
            The maximum sentence length the model accepts.
    """

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        # Normalize plain-string special tokens into AddedToken objects.
        def _as_added_token(token, lstrip=False):
            return AddedToken(token, lstrip=lstrip, rstrip=False) if isinstance(token, str) else token

        bos_token = _as_added_token(bos_token)
        eos_token = _as_added_token(eos_token)
        sep_token = _as_added_token(sep_token)
        cls_token = _as_added_token(cls_token)
        pad_token = _as_added_token(pad_token)

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = _as_added_token(mask_token, lstrip=True)

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {
            name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()
        }

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

    @property
    def vocab_size(self) -> int:
        # One "token" per Unicode codepoint.
        return self._unicode_vocab_size

    def get_vocab(self):
        vocab = {chr(codepoint): codepoint for codepoint in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. perform character splitting)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a Unicode character) in an id (i.e. its integer Unicode code point value)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """
        Converts a Unicode code point (integer) in a token (str). In case it's a special code point, convert to
        human-readable format.
        """
        try:
            return SPECIAL_CODEPOINTS[index] if index in SPECIAL_CODEPOINTS else chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A CANINE sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # 1 marks [CLS]/[SEP] positions, 0 marks ordinary characters.
        mask = [1] + [0] * len(token_ids_0) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A CANINE
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        # Segment 0 covers [CLS] + first sequence + [SEP]; segment 1 covers the
        # optional second sequence + its trailing [SEP].
        segment_ids = [0] * (len(token_ids_0) + 2)
        if token_ids_1 is not None:
            segment_ids += [1] * (len(token_ids_1) + 1)
        return segment_ids

    # CanineTokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
parrot/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/modeling_clipseg.cpython-310.pyc ADDED
Binary file (43.8 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/processing_clipseg.cpython-310.pyc ADDED
Binary file (6.6 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/clipseg/processing_clipseg.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for CLIPSeg
17
+ """
18
+
19
+ import warnings
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding
23
+
24
+
25
class CLIPSegProcessor(ProcessorMixin):
    r"""
    Constructs a CLIPSeg processor which wraps a CLIPSeg image processor and a CLIP tokenizer into a single processor.

    [`CLIPSegProcessor`] offers all the functionalities of [`ViTImageProcessor`] and [`CLIPTokenizerFast`]. See the
    [`~CLIPSegProcessor.__call__`] and [`~CLIPSegProcessor.decode`] for more information.

    Args:
        image_processor ([`ViTImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`CLIPTokenizerFast`], *optional*):
            The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the legacy name for `image_processor`; honour it with a
        # deprecation warning when the caller still uses it.
        legacy_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            legacy_extractor = kwargs.pop("feature_extractor")

        if image_processor is None:
            image_processor = legacy_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        """
        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        ViTImageProcessor's [`~ViTImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of
        the above two methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            visual_prompt (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The visual prompt image or batch of images to be prepared. Each visual prompt image can be a PIL image,
                NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape
                (C, H, W), where C is a number of channels, H and W are image height and width.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        # Validate the input combination up front with guard clauses.
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        # Run the sub-processors in the same order as before: text, visual prompt, images.
        encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if text is not None else None
        prompt_features = (
            self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
            if visual_prompt is not None
            else None
        )
        image_features = (
            self.image_processor(images, return_tensors=return_tensors, **kwargs) if images is not None else None
        )

        # Combine the pieces into the same output shapes as before.
        if visual_prompt is not None and images is not None:
            return {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
        if text is not None:
            if images is not None:
                encoding["pixel_values"] = image_features.pixel_values
            return encoding
        if visual_prompt is not None:
            return {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
        # Only `images` was provided.
        return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
parrot/lib/python3.10/site-packages/transformers/models/codegen/__init__.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Base import structure: the configuration and the slow tokenizer have no optional
# dependencies, so they are always exposed.
_import_structure = {
    "configuration_codegen": ["CodeGenConfig", "CodeGenOnnxConfig"],
    "tokenization_codegen": ["CodeGenTokenizer"],
}

# The fast tokenizer is only exposed when the `tokenizers` library is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_codegen_fast"] = ["CodeGenTokenizerFast"]

# The modeling code is only exposed when PyTorch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_codegen"] = [
        "CodeGenForCausalLM",
        "CodeGenModel",
        "CodeGenPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see direct imports (mirroring `_import_structure`),
    # guarded by the same optional-dependency checks as above.
    from .configuration_codegen import CodeGenConfig, CodeGenOnnxConfig
    from .tokenization_codegen import CodeGenTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_codegen_fast import CodeGenTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_codegen import (
            CodeGenForCausalLM,
            CodeGenModel,
            CodeGenPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules are only
    # imported when one of their attributes is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
parrot/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/configuration_codegen.cpython-310.pyc ADDED
Binary file (8.26 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/modeling_codegen.cpython-310.pyc ADDED
Binary file (20.6 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen_fast.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/codegen/configuration_codegen.py ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ CodeGen model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Any, List, Mapping, Optional
18
+
19
+ from ... import PreTrainedTokenizer, TensorType, is_torch_available
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfigWithPast, PatchingSpec
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
class CodeGenConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CodeGenModel`]. It is used to instantiate a
    CodeGen model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the CodeGen
    [Salesforce/codegen-2B-mono](https://huggingface.co/Salesforce/codegen-2B-mono) architecture. Configuration objects
    inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from
    [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50400):
            Vocabulary size of the CodeGen model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`CodeGenModel`].
        n_positions (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_ctx (`int`, *optional*, defaults to 2048):
            This attribute is used in `CodeGenModel.__init__` without any real effect.
        n_embd (`int`, *optional*, defaults to 4096):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        rotary_dim (`int`, *optional*, defaults to 64):
            Number of dimensions in the embedding that Rotary Position Embedding is applied to.
        n_inner (`int`, *optional*):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`int`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        bos_token_id (`int`, *optional*, defaults to 50256):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 50256):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
            model has a output word embedding layer.

    Example:

    ```python
    >>> from transformers import CodeGenConfig, CodeGenModel

    >>> # Initializing a CodeGen 6B configuration
    >>> configuration = CodeGenConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = CodeGenModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "codegen"
    # Map the canonical HF attribute names onto the GPT-style names used here.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size: int = 50400,
        n_positions: int = 2048,
        n_ctx: int = 2048,
        n_embd: int = 4096,
        n_layer: int = 28,
        n_head: int = 16,
        rotary_dim: int = 64,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.0,
        embd_pdrop: float = 0.0,
        attn_pdrop: float = 0.0,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        use_cache: bool = True,
        bos_token_id: int = 50256,
        eos_token_id: int = 50256,
        tie_word_embeddings: bool = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        # The special-token ids and embedding tying are forwarded so that
        # PretrainedConfig stores them in its canonical attributes as well.
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
145
+
146
+
147
+ # Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
148
+ class CodeGenOnnxConfig(OnnxConfigWithPast):
149
+ def __init__(
150
+ self,
151
+ config: PretrainedConfig,
152
+ task: str = "default",
153
+ patching_specs: List[PatchingSpec] = None,
154
+ use_past: bool = False,
155
+ ):
156
+ super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
157
+ if not getattr(self._config, "pad_token_id", None):
158
+ # TODO: how to do that better?
159
+ self._config.pad_token_id = 0
160
+
161
+ @property
162
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
163
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
164
+ if self.use_past:
165
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
166
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
167
+ else:
168
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
169
+
170
+ return common_inputs
171
+
172
+ @property
173
+ def num_layers(self) -> int:
174
+ return self._config.n_layer
175
+
176
+ @property
177
+ def num_attention_heads(self) -> int:
178
+ return self._config.n_head
179
+
180
+ def generate_dummy_inputs(
181
+ self,
182
+ tokenizer: PreTrainedTokenizer,
183
+ batch_size: int = -1,
184
+ seq_length: int = -1,
185
+ is_pair: bool = False,
186
+ framework: Optional[TensorType] = None,
187
+ ) -> Mapping[str, Any]:
188
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
189
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
190
+ )
191
+
192
+ # We need to order the input in the way they appears in the forward()
193
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
194
+
195
+ # Need to add the past_keys
196
+ if self.use_past:
197
+ if not is_torch_available():
198
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
199
+ else:
200
+ import torch
201
+
202
+ batch, seqlen = common_inputs["input_ids"].shape
203
+ # Not using the same length for past_key_values
204
+ past_key_values_length = seqlen + 2
205
+ past_shape = (
206
+ batch,
207
+ self.num_attention_heads,
208
+ past_key_values_length,
209
+ self._config.hidden_size // self.num_attention_heads,
210
+ )
211
+ ordered_inputs["past_key_values"] = [
212
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
213
+ ]
214
+
215
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
216
+ if self.use_past:
217
+ mask_dtype = ordered_inputs["attention_mask"].dtype
218
+ ordered_inputs["attention_mask"] = torch.cat(
219
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
220
+ )
221
+
222
+ return ordered_inputs
223
+
224
+ @property
225
+ def default_onnx_opset(self) -> int:
226
+ return 13
parrot/lib/python3.10/site-packages/transformers/models/codegen/modeling_codegen.py ADDED
@@ -0,0 +1,724 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch CodeGen model."""
16
+
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.utils.checkpoint
21
+ from torch import nn
22
+ from torch.nn import CrossEntropyLoss
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
26
+ from ...modeling_utils import PreTrainedModel
27
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
28
+ from .configuration_codegen import CodeGenConfig
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+ _CHECKPOINT_FOR_DOC = "Salesforce/codegen-2B-mono"
34
+ _CONFIG_FOR_DOC = "CodeGenConfig"
35
+
36
+
37
def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
    """
    Build a `(num_pos, dim)` sinusoidal table for rotary position embeddings.

    Column layout is `[sin(pos * f_0) ... sin(pos * f_{dim/2-1}), cos(pos * f_0) ... cos(pos * f_{dim/2-1})]`
    with frequencies `f_k = 10000^(-2k/dim)`.
    """
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
    # Outer product position x frequency gives the phase angle for every (pos, channel) pair.
    angles = torch.outer(torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()
    return torch.cat((torch.sin(angles), torch.cos(angles)), dim=1)
42
+
43
+
44
# Helper for rotary embeddings: rotate each adjacent channel pair by 90 degrees.
def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
    """Map each last-dim pair `(x1, x2)` of a rank-4 tensor to `(-x2, x1)`."""
    even_channels = x[:, :, :, ::2]
    odd_channels = x[:, :, :, 1::2]
    rotated = torch.stack((-odd_channels, even_channels), dim=-1)
    # Re-interleave the pairs; in einsum notation: rearrange(x, '... d j -> ... (d j)')
    return rotated.flatten(-2)
50
+
51
+
52
# Apply rotary position embeddings given per-position sin/cos tables.
def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
    """Rotate `tensor` by the angles encoded in `sin`/`cos` (one entry per channel pair)."""
    # Each sin/cos entry covers a pair of channels, so duplicate every value along
    # the last axis (and insert a broadcast head axis) before combining.
    sin_expanded = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
    cos_expanded = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
    return tensor * cos_expanded + rotate_every_two(tensor) * sin_expanded
57
+
58
+
59
class CodeGenAttention(nn.Module):
    """Multi-head causal self-attention with rotary position embeddings, as used by CodeGen."""

    def __init__(self, config):
        super().__init__()

        max_positions = config.max_position_embeddings
        # Lower-triangular boolean mask enforcing causality in `_attn`. Non-persistent:
        # it moves with the module (device/dtype) but is not stored in checkpoints.
        self.register_buffer(
            "causal_mask",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
                1, 1, max_positions, max_positions
            ),
            persistent=False,
        )

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

        self.embed_dim = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_attention_heads
        if self.head_dim * self.num_attention_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
                f" `num_attention_heads`: {self.num_attention_heads})."
            )
        # sqrt(head_dim), pre-computed in the default dtype; attention logits are divided by it.
        self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
        # Single fused projection producing query, key and value (hence 3 * embed_dim outputs).
        self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)

        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.rotary_dim = config.rotary_dim
        # Rotary table width: when rotary_dim is set, only that many channels are rotated.
        pos_embd_dim = self.rotary_dim or self.embed_dim
        self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)

    def _split_heads(self, x, n_head, dim_head, mp_num):
        # Split the last axis into per-head slices. Heads are first grouped into
        # `mp_num` partitions, then flattened back — presumably mirroring the
        # tensor-parallel layout of the original checkpoint (see the mp_num TODO
        # in `forward`); verify against the upstream CodeGen repo.
        reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
        reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
        return reshaped

    def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into n_ctx
        """
        # Accepts either a 5-D (partitioned) or 4-D (batch, heads, seq, head_dim) layout.
        if len(tensor.shape) == 5:
            tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
        elif len(tensor.shape) == 4:
            tensor = tensor.permute(0, 2, 1, 3).contiguous()
        else:
            raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
        new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
        return tensor.view(new_shape)

    def _attn(
        self,
        query,
        key,
        value,
        attention_mask=None,
        head_mask=None,
    ):
        # compute causal mask from causal mask buffer
        query_length, key_length = query.size(-2), key.size(-2)
        causal_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length]

        # Keep the attention weights computation in fp32 to avoid overflow issues
        query = query.to(torch.float32)
        key = key.to(torch.float32)

        attn_weights = torch.matmul(query, key.transpose(-1, -2))

        attn_weights = attn_weights / self.scale_attn
        mask_value = torch.finfo(attn_weights.dtype).min
        # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
        # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
        mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
        # Masked (future) positions get the most negative finite value so softmax zeroes them.
        attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.Softmax(dim=-1)(attn_weights)
        # Cast back from fp32 to the value dtype before the weighted sum.
        attn_weights = attn_weights.to(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[
        Tuple[torch.Tensor, Tuple[torch.Tensor]],
        Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
    ]:
        # Fused QKV projection, then split into query/value/key (note the q, v, k order).
        qkv = self.qkv_proj(hidden_states)
        # TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic
        mp_num = 4
        qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))

        local_dim = self.head_dim * self.num_attention_heads // mp_num
        query, value, key = torch.split(qkv_split, local_dim, dim=-1)
        query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
        key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)

        value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
        value = value.permute(0, 2, 1, 3)

        # Lazily move the sinusoidal table to the right device and cache the moved copy.
        embed_positions = self.embed_positions
        if embed_positions.device != position_ids.device:
            embed_positions = embed_positions.to(position_ids.device)
            self.embed_positions = embed_positions

        sincos = embed_positions[position_ids]
        sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)

        if self.rotary_dim is not None:
            # Rotate only the first `rotary_dim` channels; pass the rest through unchanged.
            k_rot = key[:, :, :, : self.rotary_dim]
            k_pass = key[:, :, :, self.rotary_dim :]

            q_rot = query[:, :, :, : self.rotary_dim]
            q_pass = query[:, :, :, self.rotary_dim :]

            k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
            q_rot = apply_rotary_pos_emb(q_rot, sin, cos)

            key = torch.cat([k_rot, k_pass], dim=-1)
            query = torch.cat([q_rot, q_pass], dim=-1)
        else:
            key = apply_rotary_pos_emb(key, sin, cos)
            query = apply_rotary_pos_emb(query, sin, cos)

        key = key.permute(0, 2, 1, 3)
        query = query.permute(0, 2, 1, 3)

        if layer_past is not None:
            # Prepend cached keys/values from previous decoding steps along the sequence axis.
            past_key = layer_past[0]
            past_value = layer_past[1]
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            # Note that this cast is quite ugly, but is not implemented before ROPE as k_rot in the original codebase is always in fp32.
            # Reference: https://github.com/salesforce/CodeGen/blob/f210c3bb1216c975ad858cd4132c0fdeabf4bfc2/codegen1/jaxformer/hf/codegen/modeling_codegen.py#L38
            present = (key.to(hidden_states.dtype), value)
        else:
            present = None

        # compute self-attention: V x Softmax(QK^T)
        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
        attn_output = self.out_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # a, present, (attentions)
228
+
229
+
230
# Copied from transformers.models.gptj.modeling_gptj.GPTJMLP with GPTJ->CodeGen
class CodeGenMLP(nn.Module):
    """Position-wise feed-forward sub-layer of a CodeGen block.

    Projects up to `intermediate_size`, applies the configured activation,
    projects back down to the embedding width, then applies residual dropout.
    """

    def __init__(self, intermediate_size, config):  # in MLP: intermediate_size= 4 * embed_dim
        super().__init__()
        embed_dim = config.n_embd

        self.fc_in = nn.Linear(embed_dim, intermediate_size)
        self.fc_out = nn.Linear(intermediate_size, embed_dim)

        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
        # up-project -> activation -> down-project -> dropout
        projected = self.act(self.fc_in(hidden_states))
        return self.dropout(self.fc_out(projected))
248
+
249
+
250
# Copied from transformers.models.gptj.modeling_gptj.GPTJBlock with GPTJ->CodeGen
class CodeGenBlock(nn.Module):
    """One CodeGen transformer layer.

    Uses the GPT-J "parallel" formulation: a single LayerNorm feeds both the
    attention and the MLP, and their outputs are summed with the residual.
    """

    # Ignore copy
    def __init__(self, config):
        super().__init__()
        inner_dim = 4 * config.n_embd if config.n_inner is None else config.n_inner
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = CodeGenAttention(config)
        self.mlp = CodeGenMLP(inner_dim, config)

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        residual = hidden_states
        normed = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states=normed,
            layer_past=layer_past,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        extras = attn_outputs[1:]

        # Parallel residual: attention and MLP both consume the *normed* input.
        hidden_states = attn_output + self.mlp(normed) + residual

        # With caching the `present` entry (extras[0]) is kept; without it,
        # only the optional attentions survive.
        if use_cache:
            return (hidden_states,) + extras
        return (hidden_states,) + extras[1:]
293
+
294
+
295
class CodeGenPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CodeGenConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    _no_split_modules = ["CodeGenBlock"]
    _skip_keys_device_placement = "past_key_values"

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
        # cf https://github.com/pytorch/pytorch/pull/5617
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
325
+
326
+
327
+ CODEGEN_START_DOCSTRING = r"""
328
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
329
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
330
+ behavior.
331
+
332
+ Parameters:
333
+ config ([`CodeGenConfig`]): Model configuration class with all the parameters of the model.
334
+ Initializing with a config file does not load the weights associated with the model, only the
335
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
336
+ """
337
+
338
+ CODEGEN_INPUTS_DOCSTRING = r"""
339
+ Args:
340
+ input_ids (`torch.LongTensor` of shape `({0})`):
341
+ Indices of input sequence tokens in the vocabulary.
342
+
343
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
344
+ [`PreTrainedTokenizer.__call__`] for details.
345
+
346
+ [What are input IDs?](../glossary#input-ids)
347
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
348
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
349
+
350
+ - 1 for tokens that are **not masked**,
351
+ - 0 for tokens that are **masked**.
352
+
353
+ [What are attention masks?](../glossary#attention-mask)
354
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
355
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
356
+ 1]`:
357
+
358
+ - 0 corresponds to a *sentence A* token,
359
+ - 1 corresponds to a *sentence B* token.
360
+
361
+ [What are token type IDs?](../glossary#token-type-ids)
362
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
363
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
364
+ config.n_positions - 1]`.
365
+
366
+ [What are position IDs?](../glossary#position-ids)
367
+ head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
368
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
369
+
370
+ - 1 indicates the head is **not masked**,
371
+ - 0 indicates the head is **masked**.
372
+
373
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
374
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
375
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
376
+ model's internal embedding lookup matrix.
377
+ output_attentions (`bool`, *optional*):
378
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
379
+ tensors for more detail.
380
+ output_hidden_states (`bool`, *optional*):
381
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
382
+ more detail.
383
+ return_dict (`bool`, *optional*):
384
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
385
+ """
386
+
387
+
388
@add_start_docstrings(
    "The bare CodeGen Model transformer outputting raw hidden-states without any specific head on top.",
    CODEGEN_START_DOCSTRING,
)
class CodeGenModel(CodeGenPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.n_embd
        self.vocab_size = config.vocab_size
        # Token embedding table; also reused for token-type embeddings in forward().
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([CodeGenBlock(config) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
        # Cap the rotary dimension so it never exceeds n_ctx // num_attention_heads.
        self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)

        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    @add_start_docstrings_to_model_forward(CODEGEN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        # Fall back to the config defaults for any unset output/caching flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])

        # With no cache, give every layer a None past; otherwise read the cached
        # prefix length from the first layer's key tensor.
        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)

        if position_ids is None:
            # Positions continue after the cached prefix when a past is supplied.
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)

        # Attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and the dtype's smallest value for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x num_attention_heads x N x N
        # head_mask has shape n_layer x batch x num_attention_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)

        hidden_states = inputs_embeds

        # Token-type embeddings reuse the word embedding table (no separate table).
        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                # Checkpointing recomputes activations, which is incompatible with
                # returning a KV cache; caching is disabled for this pass.
                logger.warning_once(
                    "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                    "`use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            # Hidden states are collected *before* each block (plus once after the
            # final LayerNorm below).
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                # layer_past is passed as None under checkpointing (cache disabled).
                outputs = self._gradient_checkpointing_func(
                    block.__call__,
                    hidden_states,
                    None,
                    attention_mask,
                    position_ids,
                    head_mask[i],
                    use_cache,
                    output_attentions,
                )
            else:
                outputs = block(
                    hidden_states=hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    head_mask=head_mask[i],
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                # Attention weights sit at index 2 when a `present` was returned.
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)

        hidden_states = self.ln_f(hidden_states)

        # Restore the original (possibly reshaped) batch dimensions.
        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
569
+
570
+
571
@add_start_docstrings(
    """
    The CodeGen Model transformer with a language modeling head on top.
    """,
    CODEGEN_START_DOCSTRING,
)
class CodeGenForCausalLM(CodeGenPreTrainedModel):
    # lm_head weights may be tied to the input embedding table.
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = CodeGenModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, inputs_embeds=None, past_key_values=None, **kwargs):
        """Assemble the model kwargs for one decoding step (cache-aware slicing of ids/masks)."""
        token_type_ids = kwargs.get("token_type_ids", None)
        # Omit tokens covered by past_key_values
        if past_key_values:
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only final ID
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -input_ids.shape[1] :]

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            # Padding positions get a dummy position of 1 (they are masked anyway).
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids.contiguous()}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "position_ids": position_ids,
                "attention_mask": attention_mask,
                "token_type_ids": token_type_ids,
            }
        )
        return model_inputs

    @add_start_docstrings_to_model_forward(CODEGEN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        # make sure sampling in fp16 works correctly and
        # compute loss in fp32 to match with mesh-tf version
        # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
        lm_logits = self.lm_head(hidden_states).to(torch.float32)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(lm_logits.device)
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

            # Cast the fp32 loss back to the model's working dtype.
            loss = loss.to(hidden_states.dtype)

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
        [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        # beam_idx is moved to each tensor's device to support model parallelism.
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past_key_values
        )
parrot/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen.py ADDED
@@ -0,0 +1,417 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for CodeGen"""
16
+
17
+
18
+ import json
19
+ import os
20
+ from functools import lru_cache
21
+ from typing import TYPE_CHECKING, List, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import regex as re
25
+
26
+ from ...utils import is_tf_available, is_torch_available, logging, to_py_obj
27
+
28
+
29
+ if TYPE_CHECKING:
30
+ if is_torch_available():
31
+ import torch
32
+ if is_tf_available():
33
+ import tensorflow as tf
34
+
35
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ VOCAB_FILES_NAMES = {
41
+ "vocab_file": "vocab.json",
42
+ "merges_file": "merges.txt",
43
+ }
44
+
45
+
46
@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
    characters the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
    tables between utf-8 bytes and unicode strings.
    """
    # Printable byte values keep their own code point.
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    byte_values = list(printable)
    char_codes = list(printable)
    # Remaining bytes are remapped to code points >= 256 so every mapping
    # target is a printable, distinct character.
    offset = 0
    for byte in range(2**8):
        if byte not in printable:
            byte_values.append(byte)
            char_codes.append(2**8 + offset)
            offset += 1
    return {b: chr(c) for b, c in zip(byte_values, char_codes)}
69
+
70
+
71
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).

    Unlike the previous hand-rolled loop, this handles empty or single-symbol
    words gracefully (returns an empty set) instead of raising IndexError on
    `word[0]` for empty input.
    """
    # zip of the word with itself shifted by one yields all adjacent pairs.
    return set(zip(word, word[1:]))
83
+
84
+
85
+ class CodeGenTokenizer(PreTrainedTokenizer):
86
+ """
87
+ Construct a CodeGen tokenizer. Based on byte-level Byte-Pair-Encoding.
88
+
89
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
90
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
91
+
92
+ ```python
93
+ >>> from transformers import CodeGenTokenizer
94
+
95
+ >>> tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
96
+ >>> tokenizer("Hello world")["input_ids"]
97
+ [15496, 995]
98
+
99
+ >>> tokenizer(" Hello world")["input_ids"]
100
+ [18435, 995]
101
+ ```
102
+
103
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
104
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
105
+
106
+ <Tip>
107
+
108
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
109
+
110
+ </Tip>
111
+
112
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
113
+ this superclass for more information regarding those methods.
114
+
115
+ Args:
116
+ vocab_file (`str`):
117
+ Path to the vocabulary file.
118
+ merges_file (`str`):
119
+ Path to the merges file.
120
+ errors (`str`, *optional*, defaults to `"replace"`):
121
+ Paradigm to follow when decoding bytes to UTF-8. See
122
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
123
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
124
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
125
+ token instead.
126
+ bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
127
+ The beginning of sequence token.
128
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
129
+ The end of sequence token.
130
+ pad_token (`str`, *optional*):
131
+ The token used for padding, for example when batching sequences of different lengths.
132
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
133
+ Whether or not to add an initial space to the input. This allows to treat the leading word just as any
134
+ other word. (CodeGen tokenizer detect beginning of words by the preceding space).
135
+ add_bos_token (`bool`, *optional*, defaults to `False`):
136
+ Whether to add a beginning of sequence token at the start of sequences.
137
+ return_token_type_ids (`bool`, *optional*, defaults to `False`):
138
+ Whether to return token type IDs.
139
+ """
140
+
141
+ vocab_files_names = VOCAB_FILES_NAMES
142
+ model_input_names = ["input_ids", "attention_mask"]
143
+
144
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        pad_token=None,
        add_prefix_space=False,
        add_bos_token=False,
        return_token_type_ids=False,
        **kwargs,
    ):
        """Load the BPE vocab and merges files and configure special tokens.

        Note: attribute setup happens *before* the `super().__init__` call at
        the end, because the base tokenizer's constructor may invoke methods
        (e.g. tokenization of special tokens) that need `self.encoder` etc.
        """
        # Normalize plain-string special tokens to AddedToken instances marked special.
        bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
        self.add_bos_token = add_bos_token
        self.return_token_type_ids = return_token_type_ids
        if self.return_token_type_ids:
            # Opt token_type_ids into the default model inputs.
            self.model_input_names.append("token_type_ids")

        # token -> id mapping and its inverse.
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        # byte <-> printable-unicode-char mappings for byte-level BPE.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # merges.txt: first line is a version header, last line may be empty.
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        # Earlier merges have lower rank (= higher priority).
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
        super().__init__(
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            add_bos_token=add_bos_token,
            return_token_type_ids=return_token_type_ids,
            **kwargs,
        )
193
+
194
    @property
    def vocab_size(self):
        """Size of the base vocabulary (excludes tokens added after loading)."""
        return len(self.encoder)
197
+
198
+ def get_vocab(self):
199
+ return dict(self.encoder, **self.added_tokens_encoder)
200
+
201
    def bpe(self, token):
        """Apply byte-pair-encoding merges to a single pre-tokenized token.

        Returns the merged symbols joined by single spaces; results are
        memoized in ``self.cache``.
        """
        if token in self.cache:
            return self.cache[token]
        # Start from individual characters (byte-mapped unicode chars).
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            # Single-character token: nothing to merge.
            return token

        while True:
            # Pick the adjacent pair with the best (lowest) merge rank;
            # unknown pairs rank as +inf and are never chosen over known ones.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                # No mergeable pair remains.
                break
            first, second = bigram
            new_word = []
            i = 0
            # Rebuild the word, replacing every occurrence of (first, second)
            # with the merged symbol first+second.
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    # No further occurrence of `first`: copy the rest verbatim.
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                # Fully merged; nothing further to do.
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
242
+
243
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
244
+ if self.add_bos_token:
245
+ bos_token_ids = [self.bos_token_id]
246
+ else:
247
+ bos_token_ids = []
248
+
249
+ output = bos_token_ids + token_ids_0
250
+
251
+ if token_ids_1 is None:
252
+ return output
253
+
254
+ return output + bos_token_ids + token_ids_1
255
+
256
+ def _tokenize(self, text):
257
+ """Tokenize a string."""
258
+ bpe_tokens = []
259
+ for token in re.findall(self.pat, text):
260
+ token = "".join(
261
+ self.byte_encoder[b] for b in token.encode("utf-8")
262
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
263
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
264
+ return bpe_tokens
265
+
266
+ def _convert_token_to_id(self, token):
267
+ """Converts a token (str) in an id using the vocab."""
268
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
269
+
270
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        # Returns None for ids absent from the vocabulary (no unk fallback here).
        return self.decoder.get(index)
273
+
274
+ def convert_tokens_to_string(self, tokens):
275
+ """Converts a sequence of tokens (string) in a single string."""
276
+ text = "".join(tokens)
277
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
278
+ return text
279
+
280
def create_token_type_ids_from_sequences(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Create a mask from the two sequences passed to be used in a sequence-pair classification task. A sequence
    pair mask has the following format:

    ```
    0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
    | first sequence    | second sequence |
    ```

    If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

    Args:
        token_ids_0 (`List[int]`):
            List of IDs.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
    """
    sep = [self.sep_token_id] if self.sep_token_id is not None else []
    # Bug fix: the presence of a CLS token must be gated on cls_token_id, not
    # sep_token_id (copy-paste typo); otherwise a configured CLS token is
    # dropped from the mask length whenever SEP is unset.
    cls = [self.cls_token_id] if self.cls_token_id is not None else []
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
308
+
309
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
    """Write the vocabulary json and the BPE merges file into *save_directory*.

    Returns the two file paths, or ``None`` (with a logged error) when the
    target is not an existing directory.
    """
    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory")
        return
    prefix = filename_prefix + "-" if filename_prefix else ""
    vocab_file = os.path.join(save_directory, prefix + VOCAB_FILES_NAMES["vocab_file"])
    merge_file = os.path.join(save_directory, prefix + VOCAB_FILES_NAMES["merges_file"])

    with open(vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

    expected_rank = 0
    with open(merge_file, "w", encoding="utf-8") as writer:
        writer.write("#version: 0.2\n")
        # Merges are written in rank order; a gap in the ranks indicates a
        # corrupted tokenizer, which we report but still serialize.
        for merge_pair, rank in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
            if expected_rank != rank:
                logger.warning(
                    f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                    " Please check that the tokenizer is not corrupted!"
                )
                expected_rank = rank
            writer.write(" ".join(merge_pair) + "\n")
            expected_rank += 1

    return vocab_file, merge_file
337
+
338
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
    """Optionally prepend a space so the first word tokenizes like a mid-text word."""
    prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
    if prefix_space or is_split_into_words:
        text = " " + text
    return (text, kwargs)
343
+
344
def decode(
    self,
    token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
    skip_special_tokens: bool = False,
    # Fixed annotation: the default is None, so the declared type must be Optional[bool].
    clean_up_tokenization_spaces: Optional[bool] = None,
    truncate_before_pattern: Optional[List[str]] = None,
    **kwargs,
) -> str:
    """
    Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special
    tokens and clean up tokenization spaces.

    Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.

    Args:
        token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
            List of tokenized input ids. Can be obtained using the `__call__` method.
        skip_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to remove special tokens in the decoding.
        clean_up_tokenization_spaces (`bool`, *optional*):
            Whether or not to clean up the tokenization spaces. If `None`, will default to
            `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
        truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
            A list of regular expression strings that will be used to truncate the returned string. This can be
            used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
            of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
        kwargs (additional keyword arguments, *optional*):
            Will be passed to the underlying model specific decode method.

    Returns:
        `str`: The decoded sentence.
    """

    # Normalize tensors/arrays to plain Python ints/lists before decoding.
    token_ids = to_py_obj(token_ids)

    decoded_text = super()._decode(
        token_ids=token_ids,
        skip_special_tokens=skip_special_tokens,
        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        **kwargs,
    )

    # Optionally cut the decoded text at the first occurrence of any stop pattern.
    if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
        decoded_text = self.truncate(decoded_text, truncate_before_pattern)

    return decoded_text
390
+
391
def truncate(self, completion, truncate_before_pattern):
    """Cut *completion* at the earliest stop signal.

    The text is first limited to one top-level ``print`` statement and one
    top-level ``def``; then it is cut before the earliest match of any
    pattern in *truncate_before_pattern* (compiled with re.MULTILINE).
    """

    def _first_match(text, compiled, offset):
        hit = compiled.search(text, offset)
        return -1 if hit is None else hit.start()

    stop_res = [re.compile(p, re.MULTILINE) for p in truncate_before_pattern]

    # Keep everything up to (but not including) a second top-level print.
    print_hits = list(re.finditer("^print", completion, re.MULTILINE))
    if len(print_hits) > 1:
        completion = completion[: print_hits[1].start()]

    # Likewise, keep at most one top-level function definition.
    def_hits = list(re.finditer("^def", completion, re.MULTILINE))
    if len(def_hits) > 1:
        completion = completion[: def_hits[1].start()]

    positions = [_first_match(completion, rx, 0) for rx in stop_res]
    positions = [pos for pos in positions if pos != -1]

    if positions:
        return completion[: min(positions)]
    return completion