Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full list.
- .gitattributes +3 -0
- mgm/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/_config.cpython-310.pyc +3 -0
- mgm/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/core.cpython-310.pyc +3 -0
- mgm/lib/python3.10/site-packages/pandas/core/_numba/__init__.py +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/__init__.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/executor.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/extensions.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/_numba/executor.py +239 -0
- mgm/lib/python3.10/site-packages/pandas/core/_numba/extensions.py +584 -0
- mgm/lib/python3.10/site-packages/pandas/core/_numba/kernels/__init__.py +27 -0
- mgm/lib/python3.10/site-packages/pandas/core/_numba/kernels/mean_.py +196 -0
- mgm/lib/python3.10/site-packages/pandas/core/_numba/kernels/min_max_.py +125 -0
- mgm/lib/python3.10/site-packages/pandas/core/_numba/kernels/var_.py +245 -0
- mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/__init__.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/api.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/array_manager.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/base.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/blocks.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/concat.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/construction.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/managers.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/ops.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/internals/concat.py +598 -0
- mgm/lib/python3.10/site-packages/pandas/core/internals/construction.py +1072 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/__init__.py +93 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/__init__.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/dispatch.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/docstrings.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/invalid.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/array_ops.py +604 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/common.py +146 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/dispatch.py +30 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/docstrings.py +772 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/invalid.py +62 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/mask_ops.py +189 -0
- mgm/lib/python3.10/site-packages/pandas/core/ops/missing.py +176 -0
- mgm/lib/python3.10/site-packages/pandas/core/reshape/__init__.py +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/__init__.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/api.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/concat.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/encoding.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/melt.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/merge.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/pivot.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/reshape.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/tile.cpython-310.pyc +0 -0
.gitattributes
CHANGED
@@ -1099,3 +1099,6 @@ mgm/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 filter=lfs diff=lfs merge=lfs -text
 openflamingo/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_infer.so.8 filter=lfs diff=lfs merge=lfs -text
 mgm/bin/python3 filter=lfs diff=lfs merge=lfs -text
 mgm/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/channels.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+openflamingo/lib/python3.10/site-packages/torch/lib/libcudnn_cnn_train.so.8 filter=lfs diff=lfs merge=lfs -text
+mgm/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+mgm/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/_config.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text

mgm/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/_config.cpython-310.pyc
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ac7b5e500e4847a1bf68095e41d1104371b724bf1a9d937ba68667cf4d32c79
size 297846

mgm/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/core.cpython-310.pyc
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:55d13106ef62cb8b4381977e666d675d0105580781a1779f4a09f0ccb71d38fe
size 1424144

mgm/lib/python3.10/site-packages/pandas/core/_numba/__init__.py
ADDED
File without changes

mgm/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (167 Bytes)

mgm/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/executor.cpython-310.pyc
ADDED
Binary file (5.08 kB)

mgm/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/extensions.cpython-310.pyc
ADDED
Binary file (15.9 kB)

mgm/lib/python3.10/site-packages/pandas/core/_numba/executor.py
ADDED
@@ -0,0 +1,239 @@
from __future__ import annotations

import functools
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
)

if TYPE_CHECKING:
    from pandas._typing import Scalar

import numpy as np

from pandas.compat._optional import import_optional_dependency


@functools.cache
def generate_apply_looper(func, nopython=True, nogil=True, parallel=False):
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")
    nb_compat_func = numba.extending.register_jitable(func)

    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def nb_looper(values, axis):
        # Operate on the first row/col in order to get
        # the output shape
        if axis == 0:
            first_elem = values[:, 0]
            dim0 = values.shape[1]
        else:
            first_elem = values[0]
            dim0 = values.shape[0]
        res0 = nb_compat_func(first_elem)
        # Use np.asarray to get shape for
        # https://github.com/numba/numba/issues/4202#issuecomment-1185981507
        buf_shape = (dim0,) + np.atleast_1d(np.asarray(res0)).shape
        if axis == 0:
            buf_shape = buf_shape[::-1]
        buff = np.empty(buf_shape)

        if axis == 1:
            buff[0] = res0
            for i in numba.prange(1, values.shape[0]):
                buff[i] = nb_compat_func(values[i])
        else:
            buff[:, 0] = res0
            for j in numba.prange(1, values.shape[1]):
                buff[:, j] = nb_compat_func(values[:, j])
        return buff

    return nb_looper


@functools.cache
def make_looper(func, result_dtype, is_grouped_kernel, nopython, nogil, parallel):
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")

    if is_grouped_kernel:

        @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
        def column_looper(
            values: np.ndarray,
            labels: np.ndarray,
            ngroups: int,
            min_periods: int,
            *args,
        ):
            result = np.empty((values.shape[0], ngroups), dtype=result_dtype)
            na_positions = {}
            for i in numba.prange(values.shape[0]):
                output, na_pos = func(
                    values[i], result_dtype, labels, ngroups, min_periods, *args
                )
                result[i] = output
                if len(na_pos) > 0:
                    na_positions[i] = np.array(na_pos)
            return result, na_positions

    else:

        @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
        def column_looper(
            values: np.ndarray,
            start: np.ndarray,
            end: np.ndarray,
            min_periods: int,
            *args,
        ):
            result = np.empty((values.shape[0], len(start)), dtype=result_dtype)
            na_positions = {}
            for i in numba.prange(values.shape[0]):
                output, na_pos = func(
                    values[i], result_dtype, start, end, min_periods, *args
                )
                result[i] = output
                if len(na_pos) > 0:
                    na_positions[i] = np.array(na_pos)
            return result, na_positions

    return column_looper


default_dtype_mapping: dict[np.dtype, Any] = {
    np.dtype("int8"): np.int64,
    np.dtype("int16"): np.int64,
    np.dtype("int32"): np.int64,
    np.dtype("int64"): np.int64,
    np.dtype("uint8"): np.uint64,
    np.dtype("uint16"): np.uint64,
    np.dtype("uint32"): np.uint64,
    np.dtype("uint64"): np.uint64,
    np.dtype("float32"): np.float64,
    np.dtype("float64"): np.float64,
    np.dtype("complex64"): np.complex128,
    np.dtype("complex128"): np.complex128,
}


# TODO: Preserve complex dtypes

float_dtype_mapping: dict[np.dtype, Any] = {
    np.dtype("int8"): np.float64,
    np.dtype("int16"): np.float64,
    np.dtype("int32"): np.float64,
    np.dtype("int64"): np.float64,
    np.dtype("uint8"): np.float64,
    np.dtype("uint16"): np.float64,
    np.dtype("uint32"): np.float64,
    np.dtype("uint64"): np.float64,
    np.dtype("float32"): np.float64,
    np.dtype("float64"): np.float64,
    np.dtype("complex64"): np.float64,
    np.dtype("complex128"): np.float64,
}

identity_dtype_mapping: dict[np.dtype, Any] = {
    np.dtype("int8"): np.int8,
    np.dtype("int16"): np.int16,
    np.dtype("int32"): np.int32,
    np.dtype("int64"): np.int64,
    np.dtype("uint8"): np.uint8,
    np.dtype("uint16"): np.uint16,
    np.dtype("uint32"): np.uint32,
    np.dtype("uint64"): np.uint64,
    np.dtype("float32"): np.float32,
    np.dtype("float64"): np.float64,
    np.dtype("complex64"): np.complex64,
    np.dtype("complex128"): np.complex128,
}


def generate_shared_aggregator(
    func: Callable[..., Scalar],
    dtype_mapping: dict[np.dtype, np.dtype],
    is_grouped_kernel: bool,
    nopython: bool,
    nogil: bool,
    parallel: bool,
):
    """
    Generate a Numba function that loops over the columns 2D object and applies
    a 1D numba kernel over each column.

    Parameters
    ----------
    func : function
        aggregation function to be applied to each column
    dtype_mapping: dict or None
        If not None, maps a dtype to a result dtype.
        Otherwise, will fall back to default mapping.
    is_grouped_kernel: bool, default False
        Whether func operates using the group labels (True)
        or using starts/ends arrays

        If true, you also need to pass the number of groups to this function
    nopython : bool
        nopython to be passed into numba.jit
    nogil : bool
        nogil to be passed into numba.jit
    parallel : bool
        parallel to be passed into numba.jit

    Returns
    -------
    Numba function
    """

    # A wrapper around the looper function,
    # to dispatch based on dtype since numba is unable to do that in nopython mode

    # It also post-processes the values by inserting nans where number of observations
    # is less than min_periods
    # Cannot do this in numba nopython mode
    # (you'll run into type-unification error when you cast int -> float)
    def looper_wrapper(
        values,
        start=None,
        end=None,
        labels=None,
        ngroups=None,
        min_periods: int = 0,
        **kwargs,
    ):
        result_dtype = dtype_mapping[values.dtype]
        column_looper = make_looper(
            func, result_dtype, is_grouped_kernel, nopython, nogil, parallel
        )
        # Need to unpack kwargs since numba only supports *args
        if is_grouped_kernel:
            result, na_positions = column_looper(
                values, labels, ngroups, min_periods, *kwargs.values()
            )
        else:
            result, na_positions = column_looper(
                values, start, end, min_periods, *kwargs.values()
            )
        if result.dtype.kind == "i":
            # Look if na_positions is not empty
            # If so, convert the whole block
            # This is OK since int dtype cannot hold nan,
            # so if min_periods not satisfied for 1 col, it is not satisfied for
            # all columns at that index
            for na_pos in na_positions.values():
                if len(na_pos) > 0:
                    result = result.astype("float64")
                    break
            # TODO: Optimize this
            for i, na_pos in na_positions.items():
                if len(na_pos) > 0:
                    result[i, na_pos] = np.nan
        return result

    return looper_wrapper
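
The file above is pandas' internal glue for its numba engine; end users reach it through the engine="numba" keyword on window and groupby reductions. A minimal sketch of that public entry point (assuming numba is importable in this environment, which the vendored tree implies):

import numpy as np
import pandas as pd

s = pd.Series(np.arange(6, dtype="float64"))
# The first call JIT-compiles a column looper via make_looper (cached by
# functools.cache); later calls with the same dtype/options reuse it.
print(s.rolling(window=3, min_periods=1).mean(engine="numba"))
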
mgm/lib/python3.10/site-packages/pandas/core/_numba/extensions.py
ADDED
@@ -0,0 +1,584 @@
# Disable type checking for this module since numba's internals
# are not typed, and we use numba's internals via its extension API
# mypy: ignore-errors
"""
Utility classes/functions to let numba recognize
pandas Index/Series/DataFrame

Mostly vendored from https://github.com/numba/numba/blob/main/numba/tests/pdlike_usecase.py
"""

from __future__ import annotations

from contextlib import contextmanager
import operator

import numba
from numba import types
from numba.core import cgutils
from numba.core.datamodel import models
from numba.core.extending import (
    NativeValue,
    box,
    lower_builtin,
    make_attribute_wrapper,
    overload,
    overload_attribute,
    overload_method,
    register_model,
    type_callable,
    typeof_impl,
    unbox,
)
from numba.core.imputils import impl_ret_borrowed
import numpy as np

from pandas._libs import lib

from pandas.core.indexes.base import Index
from pandas.core.indexing import _iLocIndexer
from pandas.core.internals import SingleBlockManager
from pandas.core.series import Series


# Helper function to hack around fact that Index casts numpy string dtype to object
#
# Idea is to set an attribute on a Index called _numba_data
# that is the original data, or the object data casted to numpy string dtype,
# with a context manager that is unset afterwards
@contextmanager
def set_numba_data(index: Index):
    numba_data = index._data
    if numba_data.dtype == object:
        if not lib.is_string_array(numba_data):
            raise ValueError(
                "The numba engine only supports using string or numeric column names"
            )
        numba_data = numba_data.astype("U")
    try:
        index._numba_data = numba_data
        yield index
    finally:
        del index._numba_data


# TODO: Range index support
# (this currently lowers OK, but does not round-trip)
class IndexType(types.Type):
    """
    The type class for Index objects.
    """

    def __init__(self, dtype, layout, pyclass: any) -> None:
        self.pyclass = pyclass
        name = f"index({dtype}, {layout})"
        self.dtype = dtype
        self.layout = layout
        super().__init__(name)

    @property
    def key(self):
        return self.pyclass, self.dtype, self.layout

    @property
    def as_array(self):
        return types.Array(self.dtype, 1, self.layout)

    def copy(self, dtype=None, ndim: int = 1, layout=None):
        assert ndim == 1
        if dtype is None:
            dtype = self.dtype
        layout = layout or self.layout
        return type(self)(dtype, layout, self.pyclass)


class SeriesType(types.Type):
    """
    The type class for Series objects.
    """

    def __init__(self, dtype, index, namety) -> None:
        assert isinstance(index, IndexType)
        self.dtype = dtype
        self.index = index
        self.values = types.Array(self.dtype, 1, "C")
        self.namety = namety
        name = f"series({dtype}, {index}, {namety})"
        super().__init__(name)

    @property
    def key(self):
        return self.dtype, self.index, self.namety

    @property
    def as_array(self):
        return self.values

    def copy(self, dtype=None, ndim: int = 1, layout: str = "C"):
        assert ndim == 1
        assert layout == "C"
        if dtype is None:
            dtype = self.dtype
        return type(self)(dtype, self.index, self.namety)


@typeof_impl.register(Index)
def typeof_index(val, c):
    """
    This will assume that only strings are in object dtype
    index.
    (you should check this before this gets lowered down to numba)
    """
    # arrty = typeof_impl(val._data, c)
    arrty = typeof_impl(val._numba_data, c)
    assert arrty.ndim == 1
    return IndexType(arrty.dtype, arrty.layout, type(val))


@typeof_impl.register(Series)
def typeof_series(val, c):
    index = typeof_impl(val.index, c)
    arrty = typeof_impl(val.values, c)
    namety = typeof_impl(val.name, c)
    assert arrty.ndim == 1
    assert arrty.layout == "C"
    return SeriesType(arrty.dtype, index, namety)


@type_callable(Series)
def type_series_constructor(context):
    def typer(data, index, name=None):
        if isinstance(index, IndexType) and isinstance(data, types.Array):
            assert data.ndim == 1
            if name is None:
                name = types.intp
            return SeriesType(data.dtype, index, name)

    return typer


@type_callable(Index)
def type_index_constructor(context):
    def typer(data, hashmap=None):
        if isinstance(data, types.Array):
            assert data.layout == "C"
            assert data.ndim == 1
            assert hashmap is None or isinstance(hashmap, types.DictType)
            return IndexType(data.dtype, layout=data.layout, pyclass=Index)

    return typer


# Backend extensions for Index and Series and Frame
@register_model(IndexType)
class IndexModel(models.StructModel):
    def __init__(self, dmm, fe_type) -> None:
        # We don't want the numpy string scalar type in our hashmap
        members = [
            ("data", fe_type.as_array),
            # This is an attempt to emulate our hashtable code with a numba
            # typed dict
            # It maps from values in the index to their integer positions in the array
            ("hashmap", types.DictType(fe_type.dtype, types.intp)),
            # Pointer to the Index object this was created from, or that it
            # boxes to
            # https://numba.discourse.group/t/qst-how-to-cache-the-boxing-of-an-object/2128/2?u=lithomas1
            ("parent", types.pyobject),
        ]
        models.StructModel.__init__(self, dmm, fe_type, members)


@register_model(SeriesType)
class SeriesModel(models.StructModel):
    def __init__(self, dmm, fe_type) -> None:
        members = [
            ("index", fe_type.index),
            ("values", fe_type.as_array),
            ("name", fe_type.namety),
        ]
        models.StructModel.__init__(self, dmm, fe_type, members)


make_attribute_wrapper(IndexType, "data", "_data")
make_attribute_wrapper(IndexType, "hashmap", "hashmap")

make_attribute_wrapper(SeriesType, "index", "index")
make_attribute_wrapper(SeriesType, "values", "values")
make_attribute_wrapper(SeriesType, "name", "name")


@lower_builtin(Series, types.Array, IndexType)
def pdseries_constructor(context, builder, sig, args):
    data, index = args
    series = cgutils.create_struct_proxy(sig.return_type)(context, builder)
    series.index = index
    series.values = data
    series.name = context.get_constant(types.intp, 0)
    return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue())


@lower_builtin(Series, types.Array, IndexType, types.intp)
@lower_builtin(Series, types.Array, IndexType, types.float64)
@lower_builtin(Series, types.Array, IndexType, types.unicode_type)
def pdseries_constructor_with_name(context, builder, sig, args):
    data, index, name = args
    series = cgutils.create_struct_proxy(sig.return_type)(context, builder)
    series.index = index
    series.values = data
    series.name = name
    return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue())


@lower_builtin(Index, types.Array, types.DictType, types.pyobject)
def index_constructor_2arg(context, builder, sig, args):
    (data, hashmap, parent) = args
    index = cgutils.create_struct_proxy(sig.return_type)(context, builder)

    index.data = data
    index.hashmap = hashmap
    index.parent = parent
    return impl_ret_borrowed(context, builder, sig.return_type, index._getvalue())


@lower_builtin(Index, types.Array, types.DictType)
def index_constructor_2arg_parent(context, builder, sig, args):
    # Basically same as index_constructor_1arg, but also lets you specify the
    # parent object
    (data, hashmap) = args
    index = cgutils.create_struct_proxy(sig.return_type)(context, builder)

    index.data = data
    index.hashmap = hashmap
    return impl_ret_borrowed(context, builder, sig.return_type, index._getvalue())


@lower_builtin(Index, types.Array)
def index_constructor_1arg(context, builder, sig, args):
    from numba.typed import Dict

    key_type = sig.return_type.dtype
    value_type = types.intp

    def index_impl(data):
        return Index(data, Dict.empty(key_type, value_type))

    return context.compile_internal(builder, index_impl, sig, args)


# Helper to convert the unicodecharseq (numpy string scalar) into a unicode_type
# (regular string)
def maybe_cast_str(x):
    # Dummy function that numba can overload
    pass


@overload(maybe_cast_str)
def maybe_cast_str_impl(x):
    """Converts numba UnicodeCharSeq (numpy string scalar) -> unicode type (string).
    Is a no-op for other types."""
    if isinstance(x, types.UnicodeCharSeq):
        return lambda x: str(x)
    else:
        return lambda x: x


@unbox(IndexType)
def unbox_index(typ, obj, c):
    """
    Convert a Index object to a native structure.

    Note: Object dtype is not allowed here
    """
    data_obj = c.pyapi.object_getattr_string(obj, "_numba_data")
    index = cgutils.create_struct_proxy(typ)(c.context, c.builder)
    # If we see an object array, assume its been validated as only containing strings
    # We still need to do the conversion though
    index.data = c.unbox(typ.as_array, data_obj).value
    typed_dict_obj = c.pyapi.unserialize(c.pyapi.serialize_object(numba.typed.Dict))
    # Create an empty typed dict in numba for the hashmap for indexing
    # equiv of numba.typed.Dict.empty(typ.dtype, types.intp)
    arr_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.dtype))
    intp_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(types.intp))
    hashmap_obj = c.pyapi.call_method(
        typed_dict_obj, "empty", (arr_type_obj, intp_type_obj)
    )
    index.hashmap = c.unbox(types.DictType(typ.dtype, types.intp), hashmap_obj).value
    # Set the parent for speedy boxing.
    index.parent = obj

    # Decrefs
    c.pyapi.decref(data_obj)
    c.pyapi.decref(arr_type_obj)
    c.pyapi.decref(intp_type_obj)
    c.pyapi.decref(typed_dict_obj)

    return NativeValue(index._getvalue())


@unbox(SeriesType)
def unbox_series(typ, obj, c):
    """
    Convert a Series object to a native structure.
    """
    index_obj = c.pyapi.object_getattr_string(obj, "index")
    values_obj = c.pyapi.object_getattr_string(obj, "values")
    name_obj = c.pyapi.object_getattr_string(obj, "name")

    series = cgutils.create_struct_proxy(typ)(c.context, c.builder)
    series.index = c.unbox(typ.index, index_obj).value
    series.values = c.unbox(typ.values, values_obj).value
    series.name = c.unbox(typ.namety, name_obj).value

    # Decrefs
    c.pyapi.decref(index_obj)
    c.pyapi.decref(values_obj)
    c.pyapi.decref(name_obj)

    return NativeValue(series._getvalue())


@box(IndexType)
def box_index(typ, val, c):
    """
    Convert a native index structure to a Index object.

    If our native index is of a numpy string dtype, we'll cast it to
    object.
    """
    # First build a Numpy array object, then wrap it in a Index
    index = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)

    res = cgutils.alloca_once_value(c.builder, index.parent)

    # Does parent exist?
    # (it means already boxed once, or Index same as original df.index or df.columns)
    # xref https://github.com/numba/numba/blob/596e8a55334cc46854e3192766e643767bd7c934/numba/core/boxing.py#L593C17-L593C17
    with c.builder.if_else(cgutils.is_not_null(c.builder, index.parent)) as (
        has_parent,
        otherwise,
    ):
        with has_parent:
            c.pyapi.incref(index.parent)
        with otherwise:
            # TODO: preserve the original class for the index
            # Also need preserve the name of the Index
            # class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.pyclass))
            class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Index))
            array_obj = c.box(typ.as_array, index.data)
            if isinstance(typ.dtype, types.UnicodeCharSeq):
                # We converted to numpy string dtype, convert back
                # to object since _simple_new won't do that for uss
                object_str_obj = c.pyapi.unserialize(c.pyapi.serialize_object("object"))
                array_obj = c.pyapi.call_method(array_obj, "astype", (object_str_obj,))
                c.pyapi.decref(object_str_obj)
            # this is basically Index._simple_new(array_obj, name_obj) in python
            index_obj = c.pyapi.call_method(class_obj, "_simple_new", (array_obj,))
            index.parent = index_obj
            c.builder.store(index_obj, res)

            # Decrefs
            c.pyapi.decref(class_obj)
            c.pyapi.decref(array_obj)
    return c.builder.load(res)


@box(SeriesType)
def box_series(typ, val, c):
    """
    Convert a native series structure to a Series object.
    """
    series = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)
    series_const_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Series._from_mgr))
    mgr_const_obj = c.pyapi.unserialize(
        c.pyapi.serialize_object(SingleBlockManager.from_array)
    )
    index_obj = c.box(typ.index, series.index)
    array_obj = c.box(typ.as_array, series.values)
    name_obj = c.box(typ.namety, series.name)
    # This is basically equivalent of
    # pd.Series(data=array_obj, index=index_obj)
    # To improve perf, we will construct the Series from a manager
    # object to avoid checks.
    # We'll also set the name attribute manually to avoid validation
    mgr_obj = c.pyapi.call_function_objargs(
        mgr_const_obj,
        (
            array_obj,
            index_obj,
        ),
    )
    mgr_axes_obj = c.pyapi.object_getattr_string(mgr_obj, "axes")
    # Series._constructor_from_mgr(mgr, axes)
    series_obj = c.pyapi.call_function_objargs(
        series_const_obj, (mgr_obj, mgr_axes_obj)
    )
    c.pyapi.object_setattr_string(series_obj, "_name", name_obj)

    # Decrefs
    c.pyapi.decref(series_const_obj)
    c.pyapi.decref(mgr_axes_obj)
    c.pyapi.decref(mgr_obj)
    c.pyapi.decref(mgr_const_obj)
    c.pyapi.decref(index_obj)
    c.pyapi.decref(array_obj)
    c.pyapi.decref(name_obj)

    return series_obj


# Add common series reductions (e.g. mean, sum),
# and also add common binops (e.g. add, sub, mul, div)
def generate_series_reduction(ser_reduction, ser_method):
    @overload_method(SeriesType, ser_reduction)
    def series_reduction(series):
        def series_reduction_impl(series):
            return ser_method(series.values)

        return series_reduction_impl

    return series_reduction


def generate_series_binop(binop):
    @overload(binop)
    def series_binop(series1, value):
        if isinstance(series1, SeriesType):
            if isinstance(value, SeriesType):

                def series_binop_impl(series1, series2):
                    # TODO: Check index matching?
                    return Series(
                        binop(series1.values, series2.values),
                        series1.index,
                        series1.name,
                    )

                return series_binop_impl
            else:

                def series_binop_impl(series1, value):
                    return Series(
                        binop(series1.values, value), series1.index, series1.name
                    )

                return series_binop_impl

    return series_binop


series_reductions = [
    ("sum", np.sum),
    ("mean", np.mean),
    # Disabled due to discrepancies between numba std. dev
    # and pandas std. dev (no way to specify dof)
    # ("std", np.std),
    # ("var", np.var),
    ("min", np.min),
    ("max", np.max),
]
for reduction, reduction_method in series_reductions:
    generate_series_reduction(reduction, reduction_method)

series_binops = [operator.add, operator.sub, operator.mul, operator.truediv]

for ser_binop in series_binops:
    generate_series_binop(ser_binop)


# get_loc on Index
@overload_method(IndexType, "get_loc")
def index_get_loc(index, item):
    def index_get_loc_impl(index, item):
        # Initialize the hash table if not initialized
        if len(index.hashmap) == 0:
            for i, val in enumerate(index._data):
                index.hashmap[val] = i
        return index.hashmap[item]

    return index_get_loc_impl


# Indexing for Series/Index
@overload(operator.getitem)
def series_indexing(series, item):
    if isinstance(series, SeriesType):

        def series_getitem(series, item):
            loc = series.index.get_loc(item)
            return series.iloc[loc]

        return series_getitem


@overload(operator.getitem)
def index_indexing(index, idx):
    if isinstance(index, IndexType):

        def index_getitem(index, idx):
            return index._data[idx]

        return index_getitem


class IlocType(types.Type):
    def __init__(self, obj_type) -> None:
        self.obj_type = obj_type
        name = f"iLocIndexer({obj_type})"
        super().__init__(name=name)

    @property
    def key(self):
        return self.obj_type


@typeof_impl.register(_iLocIndexer)
def typeof_iloc(val, c):
    objtype = typeof_impl(val.obj, c)
    return IlocType(objtype)


@type_callable(_iLocIndexer)
def type_iloc_constructor(context):
    def typer(obj):
        if isinstance(obj, SeriesType):
            return IlocType(obj)

    return typer


@lower_builtin(_iLocIndexer, SeriesType)
def iloc_constructor(context, builder, sig, args):
    (obj,) = args
    iloc_indexer = cgutils.create_struct_proxy(sig.return_type)(context, builder)
    iloc_indexer.obj = obj
    return impl_ret_borrowed(
        context, builder, sig.return_type, iloc_indexer._getvalue()
    )


@register_model(IlocType)
class ILocModel(models.StructModel):
    def __init__(self, dmm, fe_type) -> None:
        members = [("obj", fe_type.obj_type)]
        models.StructModel.__init__(self, dmm, fe_type, members)


make_attribute_wrapper(IlocType, "obj", "obj")


@overload_attribute(SeriesType, "iloc")
def series_iloc(series):
    def get(series):
        return _iLocIndexer(series)

    return get


@overload(operator.getitem)
def iloc_getitem(iloc_indexer, i):
    if isinstance(iloc_indexer, IlocType):

        def getitem_impl(iloc_indexer, i):
            return iloc_indexer.obj.values[i]

        return getitem_impl
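
These typeof/unbox/box registrations are what let a jitted user function receive a real Series instead of a bare ndarray. A hedged sketch of the user-facing path, assuming this vendored pandas is a 2.2-style build where DataFrame.apply accepts engine="numba" (the exact keyword support depends on the pandas version):

import pandas as pd

def row_total(row):
    # row is unboxed into the native SeriesType defined above; .sum()
    # resolves to the np.sum reduction registered via overload_method.
    return row.sum()

df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
# raw=False keeps each row as a Series inside nopython code.
print(df.apply(row_total, axis=1, engine="numba", raw=False))
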
mgm/lib/python3.10/site-packages/pandas/core/_numba/kernels/__init__.py
ADDED
@@ -0,0 +1,27 @@
from pandas.core._numba.kernels.mean_ import (
    grouped_mean,
    sliding_mean,
)
from pandas.core._numba.kernels.min_max_ import (
    grouped_min_max,
    sliding_min_max,
)
from pandas.core._numba.kernels.sum_ import (
    grouped_sum,
    sliding_sum,
)
from pandas.core._numba.kernels.var_ import (
    grouped_var,
    sliding_var,
)

__all__ = [
    "sliding_mean",
    "grouped_mean",
    "sliding_sum",
    "grouped_sum",
    "sliding_var",
    "grouped_var",
    "sliding_min_max",
    "grouped_min_max",
]
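
Because every kernel exported here is an ordinary numba.jit function, it can in principle be exercised directly with NumPy arrays, which is handy when debugging. An untested sketch; the argument layout is taken from the kernel signatures, and start/end are the per-window bounds that pandas' window indexers normally compute:

import numpy as np
from pandas.core._numba.kernels import sliding_mean

values = np.array([1.0, 2.0, np.nan, 4.0])
# Bounds for a trailing window of size 2 over 4 points.
start = np.array([0, 0, 1, 2], dtype=np.int64)
end = np.array([1, 2, 3, 4], dtype=np.int64)
out, na_pos = sliding_mean(values, np.dtype("float64"), start, end, 1)
print(out)  # expected: [1.0, 1.5, 2.0, 4.0] with min_periods=1
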
mgm/lib/python3.10/site-packages/pandas/core/_numba/kernels/mean_.py
ADDED
@@ -0,0 +1,196 @@
"""
Numba 1D mean kernels that can be shared by
* Dataframe / Series
* groupby
* rolling / expanding

Mirrors pandas/_libs/window/aggregation.pyx
"""
from __future__ import annotations

from typing import TYPE_CHECKING

import numba
import numpy as np

from pandas.core._numba.kernels.shared import is_monotonic_increasing
from pandas.core._numba.kernels.sum_ import grouped_kahan_sum

if TYPE_CHECKING:
    from pandas._typing import npt


@numba.jit(nopython=True, nogil=True, parallel=False)
def add_mean(
    val: float,
    nobs: int,
    sum_x: float,
    neg_ct: int,
    compensation: float,
    num_consecutive_same_value: int,
    prev_value: float,
) -> tuple[int, float, int, float, int, float]:
    if not np.isnan(val):
        nobs += 1
        y = val - compensation
        t = sum_x + y
        compensation = t - sum_x - y
        sum_x = t
        if val < 0:
            neg_ct += 1

        if val == prev_value:
            num_consecutive_same_value += 1
        else:
            num_consecutive_same_value = 1
        prev_value = val

    return nobs, sum_x, neg_ct, compensation, num_consecutive_same_value, prev_value


@numba.jit(nopython=True, nogil=True, parallel=False)
def remove_mean(
    val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float
) -> tuple[int, float, int, float]:
    if not np.isnan(val):
        nobs -= 1
        y = -val - compensation
        t = sum_x + y
        compensation = t - sum_x - y
        sum_x = t
        if val < 0:
            neg_ct -= 1
    return nobs, sum_x, neg_ct, compensation


@numba.jit(nopython=True, nogil=True, parallel=False)
def sliding_mean(
    values: np.ndarray,
    result_dtype: np.dtype,
    start: np.ndarray,
    end: np.ndarray,
    min_periods: int,
) -> tuple[np.ndarray, list[int]]:
    N = len(start)
    nobs = 0
    sum_x = 0.0
    neg_ct = 0
    compensation_add = 0.0
    compensation_remove = 0.0

    is_monotonic_increasing_bounds = is_monotonic_increasing(
        start
    ) and is_monotonic_increasing(end)

    output = np.empty(N, dtype=result_dtype)

    for i in range(N):
        s = start[i]
        e = end[i]
        if i == 0 or not is_monotonic_increasing_bounds:
            prev_value = values[s]
            num_consecutive_same_value = 0

            for j in range(s, e):
                val = values[j]
                (
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,
                ) = add_mean(
                    val,
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,  # pyright: ignore[reportGeneralTypeIssues]
                )
        else:
            for j in range(start[i - 1], s):
                val = values[j]
                nobs, sum_x, neg_ct, compensation_remove = remove_mean(
                    val, nobs, sum_x, neg_ct, compensation_remove
                )

            for j in range(end[i - 1], e):
                val = values[j]
                (
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,
                ) = add_mean(
                    val,
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,  # pyright: ignore[reportGeneralTypeIssues]
                )

        if nobs >= min_periods and nobs > 0:
            result = sum_x / nobs
            if num_consecutive_same_value >= nobs:
                result = prev_value
            elif neg_ct == 0 and result < 0:
                result = 0
            elif neg_ct == nobs and result > 0:
                result = 0
        else:
            result = np.nan

        output[i] = result

        if not is_monotonic_increasing_bounds:
            nobs = 0
            sum_x = 0.0
            neg_ct = 0
            compensation_remove = 0.0

    # na_position is empty list since float64 can already hold nans
    # Do list comprehension, since numba cannot figure out that na_pos is
    # empty list of ints on its own
    na_pos = [0 for i in range(0)]
    return output, na_pos


@numba.jit(nopython=True, nogil=True, parallel=False)
def grouped_mean(
    values: np.ndarray,
    result_dtype: np.dtype,
    labels: npt.NDArray[np.intp],
    ngroups: int,
    min_periods: int,
) -> tuple[np.ndarray, list[int]]:
    output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum(
        values, result_dtype, labels, ngroups
    )

    # Post-processing, replace sums that don't satisfy min_periods
    for lab in range(ngroups):
        nobs = nobs_arr[lab]
        num_consecutive_same_value = consecutive_counts[lab]
        prev_value = prev_vals[lab]
        sum_x = output[lab]
        if nobs >= min_periods:
            if num_consecutive_same_value >= nobs:
                result = prev_value * nobs
            else:
                result = sum_x
        else:
            result = np.nan
        result /= nobs
        output[lab] = result

    # na_position is empty list since float64 can already hold nans
    # Do list comprehension, since numba cannot figure out that na_pos is
    # empty list of ints on its own
    na_pos = [0 for i in range(0)]
    return output, na_pos
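
The compensation value threaded through add_mean/remove_mean is Kahan (compensated) summation, which keeps rounding error from accumulating as values enter and leave long sliding windows. An illustrative stand-alone version of the same recurrence:

def kahan_sum(xs):
    total = 0.0
    comp = 0.0  # running estimate of the low-order bits lost so far
    for x in xs:
        y = x - comp
        t = total + y
        comp = t - total - y  # the same compensation update as in add_mean
        total = t
    return total

vals = [0.1] * 1_000_000
# The compensated sum stays close to the true value; a naive sum drifts.
print(kahan_sum(vals), sum(vals))
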
mgm/lib/python3.10/site-packages/pandas/core/_numba/kernels/min_max_.py
ADDED
@@ -0,0 +1,125 @@
"""
Numba 1D min/max kernels that can be shared by
* Dataframe / Series
* groupby
* rolling / expanding

Mirrors pandas/_libs/window/aggregation.pyx
"""
from __future__ import annotations

from typing import TYPE_CHECKING

import numba
import numpy as np

if TYPE_CHECKING:
    from pandas._typing import npt


@numba.jit(nopython=True, nogil=True, parallel=False)
def sliding_min_max(
    values: np.ndarray,
    result_dtype: np.dtype,
    start: np.ndarray,
    end: np.ndarray,
    min_periods: int,
    is_max: bool,
) -> tuple[np.ndarray, list[int]]:
    N = len(start)
    nobs = 0
    output = np.empty(N, dtype=result_dtype)
    na_pos = []
    # Use deque once numba supports it
    # https://github.com/numba/numba/issues/7417
    Q: list = []
    W: list = []
    for i in range(N):
        curr_win_size = end[i] - start[i]
        if i == 0:
            st = start[i]
        else:
            st = end[i - 1]

        for k in range(st, end[i]):
            ai = values[k]
            if not np.isnan(ai):
                nobs += 1
            elif is_max:
                ai = -np.inf
            else:
                ai = np.inf
            # Discard previous entries if we find new min or max
            if is_max:
                while Q and ((ai >= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]):
                    Q.pop()
            else:
                while Q and ((ai <= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]):
                    Q.pop()
            Q.append(k)
            W.append(k)

        # Discard entries outside and left of current window
        while Q and Q[0] <= start[i] - 1:
            Q.pop(0)
        while W and W[0] <= start[i] - 1:
            if not np.isnan(values[W[0]]):
                nobs -= 1
            W.pop(0)

        # Save output based on index in input value array
        if Q and curr_win_size > 0 and nobs >= min_periods:
            output[i] = values[Q[0]]
        else:
            if values.dtype.kind != "i":
                output[i] = np.nan
            else:
                na_pos.append(i)

    return output, na_pos


@numba.jit(nopython=True, nogil=True, parallel=False)
def grouped_min_max(
    values: np.ndarray,
    result_dtype: np.dtype,
    labels: npt.NDArray[np.intp],
    ngroups: int,
    min_periods: int,
    is_max: bool,
) -> tuple[np.ndarray, list[int]]:
    N = len(labels)
    nobs = np.zeros(ngroups, dtype=np.int64)
    na_pos = []
    output = np.empty(ngroups, dtype=result_dtype)

    for i in range(N):
        lab = labels[i]
        val = values[i]
        if lab < 0:
            continue

        if values.dtype.kind == "i" or not np.isnan(val):
            nobs[lab] += 1
        else:
            # NaN value cannot be a min/max value
            continue

        if nobs[lab] == 1:
            # First element in group, set output equal to this
            output[lab] = val
            continue

        if is_max:
            if val > output[lab]:
                output[lab] = val
        else:
            if val < output[lab]:
                output[lab] = val

    # Set labels that don't satisfy min_periods as np.nan
    for lab, count in enumerate(nobs):
        if count < min_periods:
            na_pos.append(lab)

    return output, na_pos
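
sliding_min_max is the classic monotonic-queue algorithm: Q keeps candidate indices whose values are in decreasing order (for max), so the window extremum always sits at the front. An illustrative plain-Python rendering of the same idea, using the deque the numba code cannot yet use:

from collections import deque

def sliding_max(values, window):
    q, out = deque(), []
    for i, v in enumerate(values):
        # Drop candidates dominated by the incoming value.
        while q and values[q[-1]] <= v:
            q.pop()
        q.append(i)
        # Drop the front if it has fallen out of the window.
        if q[0] <= i - window:
            q.popleft()
        if i >= window - 1:
            out.append(values[q[0]])
    return out

print(sliding_max([3, 1, 4, 1, 5, 9, 2, 6], 3))  # [4, 4, 5, 9, 9, 9]
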
mgm/lib/python3.10/site-packages/pandas/core/_numba/kernels/var_.py
ADDED
@@ -0,0 +1,245 @@
+"""
+Numba 1D var kernels that can be shared by
+* Dataframe / Series
+* groupby
+* rolling / expanding
+
+Mirrors pandas/_libs/window/aggregation.pyx
+"""
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numba
+import numpy as np
+
+if TYPE_CHECKING:
+    from pandas._typing import npt
+
+from pandas.core._numba.kernels.shared import is_monotonic_increasing
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def add_var(
+    val: float,
+    nobs: int,
+    mean_x: float,
+    ssqdm_x: float,
+    compensation: float,
+    num_consecutive_same_value: int,
+    prev_value: float,
+) -> tuple[int, float, float, float, int, float]:
+    if not np.isnan(val):
+        if val == prev_value:
+            num_consecutive_same_value += 1
+        else:
+            num_consecutive_same_value = 1
+        prev_value = val
+
+        nobs += 1
+        prev_mean = mean_x - compensation
+        y = val - compensation
+        t = y - mean_x
+        compensation = t + mean_x - y
+        delta = t
+        if nobs:
+            mean_x += delta / nobs
+        else:
+            mean_x = 0
+        ssqdm_x += (val - prev_mean) * (val - mean_x)
+    return nobs, mean_x, ssqdm_x, compensation, num_consecutive_same_value, prev_value
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def remove_var(
+    val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float
+) -> tuple[int, float, float, float]:
+    if not np.isnan(val):
+        nobs -= 1
+        if nobs:
+            prev_mean = mean_x - compensation
+            y = val - compensation
+            t = y - mean_x
+            compensation = t + mean_x - y
+            delta = t
+            mean_x -= delta / nobs
+            ssqdm_x -= (val - prev_mean) * (val - mean_x)
+        else:
+            mean_x = 0
+            ssqdm_x = 0
+    return nobs, mean_x, ssqdm_x, compensation
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def sliding_var(
+    values: np.ndarray,
+    result_dtype: np.dtype,
+    start: np.ndarray,
+    end: np.ndarray,
+    min_periods: int,
+    ddof: int = 1,
+) -> tuple[np.ndarray, list[int]]:
+    N = len(start)
+    nobs = 0
+    mean_x = 0.0
+    ssqdm_x = 0.0
+    compensation_add = 0.0
+    compensation_remove = 0.0
+
+    min_periods = max(min_periods, 1)
+    is_monotonic_increasing_bounds = is_monotonic_increasing(
+        start
+    ) and is_monotonic_increasing(end)
+
+    output = np.empty(N, dtype=result_dtype)
+
+    for i in range(N):
+        s = start[i]
+        e = end[i]
+        if i == 0 or not is_monotonic_increasing_bounds:
+            prev_value = values[s]
+            num_consecutive_same_value = 0
+
+            for j in range(s, e):
+                val = values[j]
+                (
+                    nobs,
+                    mean_x,
+                    ssqdm_x,
+                    compensation_add,
+                    num_consecutive_same_value,
+                    prev_value,
+                ) = add_var(
+                    val,
+                    nobs,
+                    mean_x,
+                    ssqdm_x,
+                    compensation_add,
+                    num_consecutive_same_value,
+                    prev_value,
+                )
+        else:
+            for j in range(start[i - 1], s):
+                val = values[j]
+                nobs, mean_x, ssqdm_x, compensation_remove = remove_var(
+                    val, nobs, mean_x, ssqdm_x, compensation_remove
+                )
+
+            for j in range(end[i - 1], e):
+                val = values[j]
+                (
+                    nobs,
+                    mean_x,
+                    ssqdm_x,
+                    compensation_add,
+                    num_consecutive_same_value,
+                    prev_value,
+                ) = add_var(
+                    val,
+                    nobs,
+                    mean_x,
+                    ssqdm_x,
+                    compensation_add,
+                    num_consecutive_same_value,
+                    prev_value,
+                )
+
+        if nobs >= min_periods and nobs > ddof:
+            if nobs == 1 or num_consecutive_same_value >= nobs:
+                result = 0.0
+            else:
+                result = ssqdm_x / (nobs - ddof)
+        else:
+            result = np.nan
+
+        output[i] = result
+
+        if not is_monotonic_increasing_bounds:
+            nobs = 0
+            mean_x = 0.0
+            ssqdm_x = 0.0
+            compensation_remove = 0.0
+
+    # na_position is empty list since float64 can already hold nans
+    # Do list comprehension, since numba cannot figure out that na_pos is
+    # empty list of ints on its own
+    na_pos = [0 for i in range(0)]
+    return output, na_pos
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def grouped_var(
+    values: np.ndarray,
+    result_dtype: np.dtype,
+    labels: npt.NDArray[np.intp],
+    ngroups: int,
+    min_periods: int,
+    ddof: int = 1,
+) -> tuple[np.ndarray, list[int]]:
+    N = len(labels)
+
+    nobs_arr = np.zeros(ngroups, dtype=np.int64)
+    comp_arr = np.zeros(ngroups, dtype=values.dtype)
+    consecutive_counts = np.zeros(ngroups, dtype=np.int64)
+    prev_vals = np.zeros(ngroups, dtype=values.dtype)
+    output = np.zeros(ngroups, dtype=result_dtype)
+    means = np.zeros(ngroups, dtype=result_dtype)
+
+    for i in range(N):
+        lab = labels[i]
+        val = values[i]
+
+        if lab < 0:
+            continue
+
+        mean_x = means[lab]
+        ssqdm_x = output[lab]
+        nobs = nobs_arr[lab]
+        compensation_add = comp_arr[lab]
+        num_consecutive_same_value = consecutive_counts[lab]
+        prev_value = prev_vals[lab]
+
+        (
+            nobs,
+            mean_x,
+            ssqdm_x,
+            compensation_add,
+            num_consecutive_same_value,
+            prev_value,
+        ) = add_var(
+            val,
+            nobs,
+            mean_x,
+            ssqdm_x,
+            compensation_add,
+            num_consecutive_same_value,
+            prev_value,
+        )
+
+        output[lab] = ssqdm_x
+        means[lab] = mean_x
+        consecutive_counts[lab] = num_consecutive_same_value
+        prev_vals[lab] = prev_value
+        comp_arr[lab] = compensation_add
+        nobs_arr[lab] = nobs
+
+    # Post-processing, replace vars that don't satisfy min_periods
+    for lab in range(ngroups):
+        nobs = nobs_arr[lab]
+        num_consecutive_same_value = consecutive_counts[lab]
+        ssqdm_x = output[lab]
+        if nobs >= min_periods and nobs > ddof:
+            if nobs == 1 or num_consecutive_same_value >= nobs:
+                result = 0.0
+            else:
+                result = ssqdm_x / (nobs - ddof)
+        else:
+            result = np.nan
+        output[lab] = result
+
+    # Second pass to get the std.dev
+    # na_position is empty list since float64 can already hold nans
+    # Do list comprehension, since numba cannot figure out that na_pos is
+    # empty list of ints on its own
+    na_pos = [0 for i in range(0)]
+    return output, na_pos
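
The add_var/remove_var pair above is a streaming (Welford-style) variance update with Kahan compensation, which is what lets sliding_var add and remove one observation at a time instead of rescanning each window. A rough, non-numba sketch checking the core update identity against numpy (function and variable names here are illustrative, and the compensation terms are dropped for brevity):

    import numpy as np

    def add(val, nobs, mean, ssqd):
        # One Welford step: update count, running mean, and the running
        # sum of squared deviations from the mean.
        nobs += 1
        prev_mean = mean
        mean += (val - mean) / nobs
        ssqd += (val - prev_mean) * (val - mean)
        return nobs, mean, ssqd

    data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
    nobs, mean, ssqd = 0, 0.0, 0.0
    for v in data:
        nobs, mean, ssqd = add(v, nobs, mean, ssqd)

    print(ssqd / (nobs - 1))     # sample variance, ddof=1
    print(np.var(data, ddof=1))  # same value from numpy

remove_var applies the same identity in reverse, and the num_consecutive_same_value counter lets the kernel return exactly 0.0 for constant windows, where the floating-point running sums could otherwise leave a tiny nonzero residue.
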
mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.53 kB)

mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/api.cpython-310.pyc
ADDED
Binary file (3.15 kB)

mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/array_manager.cpython-310.pyc
ADDED
Binary file (39.1 kB)

mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/base.cpython-310.pyc
ADDED
Binary file (10.7 kB)

mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/blocks.cpython-310.pyc
ADDED
Binary file (59.5 kB)

mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/concat.cpython-310.pyc
ADDED
Binary file (14.5 kB)

mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/construction.cpython-310.pyc
ADDED
Binary file (23.4 kB)

mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/managers.cpython-310.pyc
ADDED
Binary file (60.2 kB)

mgm/lib/python3.10/site-packages/pandas/core/internals/__pycache__/ops.cpython-310.pyc
ADDED
Binary file (3.17 kB)
mgm/lib/python3.10/site-packages/pandas/core/internals/concat.py
ADDED
@@ -0,0 +1,598 @@
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    cast,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import (
+    NaT,
+    algos as libalgos,
+    internals as libinternals,
+    lib,
+)
+from pandas._libs.missing import NA
+from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.cast import (
+    ensure_dtype_can_hold_na,
+    find_common_type,
+)
+from pandas.core.dtypes.common import (
+    is_1d_only_ea_dtype,
+    is_scalar,
+    needs_i8_conversion,
+)
+from pandas.core.dtypes.concat import concat_compat
+from pandas.core.dtypes.dtypes import (
+    ExtensionDtype,
+    SparseDtype,
+)
+from pandas.core.dtypes.missing import (
+    is_valid_na_for_dtype,
+    isna,
+    isna_all,
+)
+
+from pandas.core.construction import ensure_wrapped_if_datetimelike
+from pandas.core.internals.array_manager import ArrayManager
+from pandas.core.internals.blocks import (
+    ensure_block_shape,
+    new_block_2d,
+)
+from pandas.core.internals.managers import (
+    BlockManager,
+    make_na_array,
+)
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+    from pandas._typing import (
+        ArrayLike,
+        AxisInt,
+        DtypeObj,
+        Manager2D,
+        Shape,
+    )
+
+    from pandas import Index
+    from pandas.core.internals.blocks import (
+        Block,
+        BlockPlacement,
+    )
+
+
+def _concatenate_array_managers(
+    mgrs: list[ArrayManager], axes: list[Index], concat_axis: AxisInt
+) -> Manager2D:
+    """
+    Concatenate array managers into one.
+
+    Parameters
+    ----------
+    mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples
+    axes : list of Index
+    concat_axis : int
+
+    Returns
+    -------
+    ArrayManager
+    """
+    if concat_axis == 1:
+        return mgrs[0].concat_vertical(mgrs, axes)
+    else:
+        # concatting along the columns -> combine reindexed arrays in a single manager
+        assert concat_axis == 0
+        return mgrs[0].concat_horizontal(mgrs, axes)
+
+
+def concatenate_managers(
+    mgrs_indexers, axes: list[Index], concat_axis: AxisInt, copy: bool
+) -> Manager2D:
+    """
+    Concatenate block managers into one.
+
+    Parameters
+    ----------
+    mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
+    axes : list of Index
+    concat_axis : int
+    copy : bool
+
+    Returns
+    -------
+    BlockManager
+    """
+
+    needs_copy = copy and concat_axis == 0
+
+    # TODO(ArrayManager) this assumes that all managers are of the same type
+    if isinstance(mgrs_indexers[0][0], ArrayManager):
+        mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy)
+        # error: Argument 1 to "_concatenate_array_managers" has incompatible
+        # type "List[BlockManager]"; expected "List[Union[ArrayManager,
+        # SingleArrayManager, BlockManager, SingleBlockManager]]"
+        return _concatenate_array_managers(
+            mgrs, axes, concat_axis  # type: ignore[arg-type]
+        )
+
+    # Assertions disabled for performance
+    # for tup in mgrs_indexers:
+    #     # caller is responsible for ensuring this
+    #     indexers = tup[1]
+    #     assert concat_axis not in indexers
+
+    if concat_axis == 0:
+        mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy)
+        return mgrs[0].concat_horizontal(mgrs, axes)
+
+    if len(mgrs_indexers) > 0 and mgrs_indexers[0][0].nblocks > 0:
+        first_dtype = mgrs_indexers[0][0].blocks[0].dtype
+        if first_dtype in [np.float64, np.float32]:
+            # TODO: support more dtypes here. This will be simpler once
+            # JoinUnit.is_na behavior is deprecated.
+            if (
+                all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in mgrs_indexers)
+                and len(mgrs_indexers) > 1
+            ):
+                # Fastpath!
+                # Length restriction is just to avoid having to worry about 'copy'
+                shape = tuple(len(x) for x in axes)
+                nb = _concat_homogeneous_fastpath(mgrs_indexers, shape, first_dtype)
+                return BlockManager((nb,), axes)
+
+    mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy)
+
+    if len(mgrs) == 1:
+        mgr = mgrs[0]
+        out = mgr.copy(deep=False)
+        out.axes = axes
+        return out
+
+    concat_plan = _get_combined_plan(mgrs)
+
+    blocks = []
+    values: ArrayLike
+
+    for placement, join_units in concat_plan:
+        unit = join_units[0]
+        blk = unit.block
+
+        if _is_uniform_join_units(join_units):
+            vals = [ju.block.values for ju in join_units]
+
+            if not blk.is_extension:
+                # _is_uniform_join_units ensures a single dtype, so
+                # we can use np.concatenate, which is more performant
+                # than concat_compat
+                # error: Argument 1 to "concatenate" has incompatible type
+                # "List[Union[ndarray[Any, Any], ExtensionArray]]";
+                # expected "Union[_SupportsArray[dtype[Any]],
+                # _NestedSequence[_SupportsArray[dtype[Any]]]]"
+                values = np.concatenate(vals, axis=1)  # type: ignore[arg-type]
+            elif is_1d_only_ea_dtype(blk.dtype):
+                # TODO(EA2D): special-casing not needed with 2D EAs
+                values = concat_compat(vals, axis=0, ea_compat_axis=True)
+                values = ensure_block_shape(values, ndim=2)
+            else:
+                values = concat_compat(vals, axis=1)
+
+            values = ensure_wrapped_if_datetimelike(values)
+
+            fastpath = blk.values.dtype == values.dtype
+        else:
+            values = _concatenate_join_units(join_units, copy=copy)
+            fastpath = False
+
+        if fastpath:
+            b = blk.make_block_same_class(values, placement=placement)
+        else:
+            b = new_block_2d(values, placement=placement)
+
+        blocks.append(b)
+
+    return BlockManager(tuple(blocks), axes)
+
+
+def _maybe_reindex_columns_na_proxy(
+    axes: list[Index],
+    mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]],
+    needs_copy: bool,
+) -> list[BlockManager]:
+    """
+    Reindex along columns so that all of the BlockManagers being concatenated
+    have matching columns.
+
+    Columns added in this reindexing have dtype=np.void, indicating they
+    should be ignored when choosing a column's final dtype.
+    """
+    new_mgrs = []
+
+    for mgr, indexers in mgrs_indexers:
+        # For axis=0 (i.e. columns) we use_na_proxy and only_slice, so this
+        # is a cheap reindexing.
+        for i, indexer in indexers.items():
+            mgr = mgr.reindex_indexer(
+                axes[i],
+                indexers[i],
+                axis=i,
+                copy=False,
+                only_slice=True,  # only relevant for i==0
+                allow_dups=True,
+                use_na_proxy=True,  # only relevant for i==0
+            )
+        if needs_copy and not indexers:
+            mgr = mgr.copy()
+
+        new_mgrs.append(mgr)
+    return new_mgrs
+
+
+def _is_homogeneous_mgr(mgr: BlockManager, first_dtype: DtypeObj) -> bool:
+    """
+    Check if this Manager can be treated as a single ndarray.
+    """
+    if mgr.nblocks != 1:
+        return False
+    blk = mgr.blocks[0]
+    if not (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1):
+        return False
+
+    return blk.dtype == first_dtype
+
+
+def _concat_homogeneous_fastpath(
+    mgrs_indexers, shape: Shape, first_dtype: np.dtype
+) -> Block:
+    """
+    With single-Block managers with homogeneous dtypes (that can already hold nan),
+    we avoid [...]
+    """
+    # assumes
+    #  all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in mgrs_indexers)
+
+    if all(not indexers for _, indexers in mgrs_indexers):
+        # https://github.com/pandas-dev/pandas/pull/52685#issuecomment-1523287739
+        arrs = [mgr.blocks[0].values.T for mgr, _ in mgrs_indexers]
+        arr = np.concatenate(arrs).T
+        bp = libinternals.BlockPlacement(slice(shape[0]))
+        nb = new_block_2d(arr, bp)
+        return nb
+
+    arr = np.empty(shape, dtype=first_dtype)
+
+    if first_dtype == np.float64:
+        take_func = libalgos.take_2d_axis0_float64_float64
+    else:
+        take_func = libalgos.take_2d_axis0_float32_float32
+
+    start = 0
+    for mgr, indexers in mgrs_indexers:
+        mgr_len = mgr.shape[1]
+        end = start + mgr_len
+
+        if 0 in indexers:
+            take_func(
+                mgr.blocks[0].values,
+                indexers[0],
+                arr[:, start:end],
+            )
+        else:
+            # No reindexing necessary, we can copy values directly
+            arr[:, start:end] = mgr.blocks[0].values
+
+        start += mgr_len
+
+    bp = libinternals.BlockPlacement(slice(shape[0]))
+    nb = new_block_2d(arr, bp)
+    return nb
+
+
+def _get_combined_plan(
+    mgrs: list[BlockManager],
+) -> list[tuple[BlockPlacement, list[JoinUnit]]]:
+    plan = []
+
+    max_len = mgrs[0].shape[0]
+
+    blknos_list = [mgr.blknos for mgr in mgrs]
+    pairs = libinternals.get_concat_blkno_indexers(blknos_list)
+    for ind, (blknos, bp) in enumerate(pairs):
+        # assert bp.is_slice_like
+        # assert len(bp) > 0
+
+        units_for_bp = []
+        for k, mgr in enumerate(mgrs):
+            blkno = blknos[k]
+
+            nb = _get_block_for_concat_plan(mgr, bp, blkno, max_len=max_len)
+            unit = JoinUnit(nb)
+            units_for_bp.append(unit)
+
+        plan.append((bp, units_for_bp))
+
+    return plan
+
+
+def _get_block_for_concat_plan(
+    mgr: BlockManager, bp: BlockPlacement, blkno: int, *, max_len: int
+) -> Block:
+    blk = mgr.blocks[blkno]
+    # Assertions disabled for performance:
+    #  assert bp.is_slice_like
+    #  assert blkno != -1
+    #  assert (mgr.blknos[bp] == blkno).all()
+
+    if len(bp) == len(blk.mgr_locs) and (
+        blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1
+    ):
+        nb = blk
+    else:
+        ax0_blk_indexer = mgr.blklocs[bp.indexer]
+
+        slc = lib.maybe_indices_to_slice(ax0_blk_indexer, max_len)
+        # TODO: in all extant test cases 2023-04-08 we have a slice here.
+        #  Will this always be the case?
+        if isinstance(slc, slice):
+            nb = blk.slice_block_columns(slc)
+        else:
+            nb = blk.take_block_columns(slc)
+
+    # assert nb.shape == (len(bp), mgr.shape[1])
+    return nb
+
+
+class JoinUnit:
+    def __init__(self, block: Block) -> None:
+        self.block = block
+
+    def __repr__(self) -> str:
+        return f"{type(self).__name__}({repr(self.block)})"
+
+    def _is_valid_na_for(self, dtype: DtypeObj) -> bool:
+        """
+        Check that we are all-NA of a type/dtype that is compatible with this dtype.
+        Augments `self.is_na` with an additional check of the type of NA values.
+        """
+        if not self.is_na:
+            return False
+
+        blk = self.block
+        if blk.dtype.kind == "V":
+            return True
+
+        if blk.dtype == object:
+            values = blk.values
+            return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))
+
+        na_value = blk.fill_value
+        if na_value is NaT and blk.dtype != dtype:
+            # e.g. we are dt64 and other is td64
+            # fill_values match but we should not cast blk.values to dtype
+            # TODO: this will need updating if we ever have non-nano dt64/td64
+            return False
+
+        if na_value is NA and needs_i8_conversion(dtype):
+            # FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat
+            # e.g. blk.dtype == "Int64" and dtype is td64, we dont want
+            # to consider these as matching
+            return False
+
+        # TODO: better to use can_hold_element?
+        return is_valid_na_for_dtype(na_value, dtype)
+
+    @cache_readonly
+    def is_na(self) -> bool:
+        blk = self.block
+        if blk.dtype.kind == "V":
+            return True
+
+        if not blk._can_hold_na:
+            return False
+
+        values = blk.values
+        if values.size == 0:
+            # GH#39122 this case will return False once deprecation is enforced
+            return True
+
+        if isinstance(values.dtype, SparseDtype):
+            return False
+
+        if values.ndim == 1:
+            # TODO(EA2D): no need for special case with 2D EAs
+            val = values[0]
+            if not is_scalar(val) or not isna(val):
+                # ideally isna_all would do this short-circuiting
+                return False
+            return isna_all(values)
+        else:
+            val = values[0][0]
+            if not is_scalar(val) or not isna(val):
+                # ideally isna_all would do this short-circuiting
+                return False
+            return all(isna_all(row) for row in values)
+
+    @cache_readonly
+    def is_na_after_size_and_isna_all_deprecation(self) -> bool:
+        """
+        Will self.is_na be True after values.size == 0 deprecation and isna_all
+        deprecation are enforced?
+        """
+        blk = self.block
+        if blk.dtype.kind == "V":
+            return True
+        return False
+
+    def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
+        values: ArrayLike
+
+        if upcasted_na is None and self.block.dtype.kind != "V":
+            # No upcasting is necessary
+            return self.block.values
+        else:
+            fill_value = upcasted_na
+
+            if self._is_valid_na_for(empty_dtype):
+                # note: always holds when self.block.dtype.kind == "V"
+                blk_dtype = self.block.dtype
+
+                if blk_dtype == np.dtype("object"):
+                    # we want to avoid filling with np.nan if we are
+                    # using None; we already know that we are all
+                    # nulls
+                    values = cast(np.ndarray, self.block.values)
+                    if values.size and values[0, 0] is None:
+                        fill_value = None
+
+                return make_na_array(empty_dtype, self.block.shape, fill_value)
+
+            return self.block.values
+
+
+def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike:
+    """
+    Concatenate values from several join units along axis=1.
+    """
+    empty_dtype, empty_dtype_future = _get_empty_dtype(join_units)
+
+    has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)
+    upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks)
+
+    to_concat = [
+        ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)
+        for ju in join_units
+    ]
+
+    if any(is_1d_only_ea_dtype(t.dtype) for t in to_concat):
+        # TODO(EA2D): special case not needed if all EAs used HybridBlocks
+
+        # error: No overload variant of "__getitem__" of "ExtensionArray" matches
+        # argument type "Tuple[int, slice]"
+        to_concat = [
+            t
+            if is_1d_only_ea_dtype(t.dtype)
+            else t[0, :]  # type: ignore[call-overload]
+            for t in to_concat
+        ]
+        concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True)
+        concat_values = ensure_block_shape(concat_values, 2)
+
+    else:
+        concat_values = concat_compat(to_concat, axis=1)
+
+    if empty_dtype != empty_dtype_future:
+        if empty_dtype == concat_values.dtype:
+            # GH#39122, GH#40893
+            warnings.warn(
+                "The behavior of DataFrame concatenation with empty or all-NA "
+                "entries is deprecated. In a future version, this will no longer "
+                "exclude empty or all-NA columns when determining the result dtypes. "
+                "To retain the old behavior, exclude the relevant entries before "
+                "the concat operation.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+    return concat_values
+
+
+def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):
+    """
+    Find the NA value to go with this dtype.
+    """
+    if isinstance(dtype, ExtensionDtype):
+        return dtype.na_value
+    elif dtype.kind in "mM":
+        return dtype.type("NaT")
+    elif dtype.kind in "fc":
+        return dtype.type("NaN")
+    elif dtype.kind == "b":
+        # different from missing.na_value_for_dtype
+        return None
+    elif dtype.kind in "iu":
+        if not has_none_blocks:
+            # different from missing.na_value_for_dtype
+            return None
+        return np.nan
+    elif dtype.kind == "O":
+        return np.nan
+    raise NotImplementedError
+
+
+def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> tuple[DtypeObj, DtypeObj]:
+    """
+    Return dtype and N/A values to use when concatenating specified units.
+
+    Returned N/A value may be None which means there was no casting involved.
+
+    Returns
+    -------
+    dtype
+    """
+    if lib.dtypes_all_equal([ju.block.dtype for ju in join_units]):
+        empty_dtype = join_units[0].block.dtype
+        return empty_dtype, empty_dtype
+
+    has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)
+
+    dtypes = [unit.block.dtype for unit in join_units if not unit.is_na]
+    if not len(dtypes):
+        dtypes = [
+            unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V"
+        ]
+
+    dtype = find_common_type(dtypes)
+    if has_none_blocks:
+        dtype = ensure_dtype_can_hold_na(dtype)
+
+    dtype_future = dtype
+    if len(dtypes) != len(join_units):
+        dtypes_future = [
+            unit.block.dtype
+            for unit in join_units
+            if not unit.is_na_after_size_and_isna_all_deprecation
+        ]
+        if not len(dtypes_future):
+            dtypes_future = [
+                unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V"
+            ]
+
+        if len(dtypes) != len(dtypes_future):
+            dtype_future = find_common_type(dtypes_future)
+            if has_none_blocks:
+                dtype_future = ensure_dtype_can_hold_na(dtype_future)
+
+    return dtype, dtype_future
+
+
+def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool:
+    """
+    Check if the join units consist of blocks of uniform type that can
+    be concatenated using Block.concat_same_type instead of the generic
+    _concatenate_join_units (which uses `concat_compat`).
+
+    """
+    first = join_units[0].block
+    if first.dtype.kind == "V":
+        return False
+    return (
+        # exclude cases where a) ju.block is None or b) we have e.g. Int64+int64
+        all(type(ju.block) is type(first) for ju in join_units)
+        and
+        # e.g. DatetimeLikeBlock can be dt64 or td64, but these are not uniform
+        all(
+            ju.block.dtype == first.dtype
+            # GH#42092 we only want the dtype_equal check for non-numeric blocks
+            #  (for now, may change but that would need a deprecation)
+            or ju.block.dtype.kind in "iub"
+            for ju in join_units
+        )
+        and
+        # no blocks that would get missing values (can lead to type upcasts)
+        # unless we're an extension dtype.
+        all(not ju.is_na or ju.block.is_extension for ju in join_units)
    )
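
The FutureWarning path in _concatenate_join_units is the internals side of a user-visible pd.concat deprecation: all-NA (or empty) blocks are currently excluded when the result dtype is chosen, and the dtype_future computed by _get_empty_dtype is what a later version will return instead. A small illustration of a case that exercises it; whether the warning actually fires depends on the pandas version in use, and this sketch assumes the 2.1-era behavior implemented above:

    import numpy as np
    import pandas as pd

    left = pd.DataFrame({"x": pd.to_datetime(["2023-01-01", "2023-01-02"])})
    right = pd.DataFrame({"x": [np.nan, np.nan]})  # all-NA float64 column

    # Under the behavior above, the all-NA block is ignored when choosing the
    # dtype, so "x" stays datetime64[ns] and the NaNs become NaT; once the
    # deprecation is enforced, the common dtype of datetime64 and float64
    # (object) is slated to win instead -- hence the FutureWarning.
    out = pd.concat([left, right], ignore_index=True)
    print(out.dtypes)
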
mgm/lib/python3.10/site-packages/pandas/core/internals/construction.py
ADDED
@@ -0,0 +1,1072 @@
| 1 |
+
"""
|
| 2 |
+
Functions for preparing various inputs passed to the DataFrame or Series
|
| 3 |
+
constructors before passing them to a BlockManager.
|
| 4 |
+
"""
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
from collections import abc
|
| 8 |
+
from typing import (
|
| 9 |
+
TYPE_CHECKING,
|
| 10 |
+
Any,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
from numpy import ma
|
| 15 |
+
|
| 16 |
+
from pandas._config import using_pyarrow_string_dtype
|
| 17 |
+
|
| 18 |
+
from pandas._libs import lib
|
| 19 |
+
|
| 20 |
+
from pandas.core.dtypes.astype import astype_is_view
|
| 21 |
+
from pandas.core.dtypes.cast import (
|
| 22 |
+
construct_1d_arraylike_from_scalar,
|
| 23 |
+
dict_compat,
|
| 24 |
+
maybe_cast_to_datetime,
|
| 25 |
+
maybe_convert_platform,
|
| 26 |
+
maybe_infer_to_datetimelike,
|
| 27 |
+
)
|
| 28 |
+
from pandas.core.dtypes.common import (
|
| 29 |
+
is_1d_only_ea_dtype,
|
| 30 |
+
is_integer_dtype,
|
| 31 |
+
is_list_like,
|
| 32 |
+
is_named_tuple,
|
| 33 |
+
is_object_dtype,
|
| 34 |
+
)
|
| 35 |
+
from pandas.core.dtypes.dtypes import ExtensionDtype
|
| 36 |
+
from pandas.core.dtypes.generic import (
|
| 37 |
+
ABCDataFrame,
|
| 38 |
+
ABCSeries,
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
from pandas.core import (
|
| 42 |
+
algorithms,
|
| 43 |
+
common as com,
|
| 44 |
+
)
|
| 45 |
+
from pandas.core.arrays import ExtensionArray
|
| 46 |
+
from pandas.core.arrays.string_ import StringDtype
|
| 47 |
+
from pandas.core.construction import (
|
| 48 |
+
array as pd_array,
|
| 49 |
+
ensure_wrapped_if_datetimelike,
|
| 50 |
+
extract_array,
|
| 51 |
+
range_to_ndarray,
|
| 52 |
+
sanitize_array,
|
| 53 |
+
)
|
| 54 |
+
from pandas.core.indexes.api import (
|
| 55 |
+
DatetimeIndex,
|
| 56 |
+
Index,
|
| 57 |
+
TimedeltaIndex,
|
| 58 |
+
default_index,
|
| 59 |
+
ensure_index,
|
| 60 |
+
get_objs_combined_axis,
|
| 61 |
+
union_indexes,
|
| 62 |
+
)
|
| 63 |
+
from pandas.core.internals.array_manager import (
|
| 64 |
+
ArrayManager,
|
| 65 |
+
SingleArrayManager,
|
| 66 |
+
)
|
| 67 |
+
from pandas.core.internals.blocks import (
|
| 68 |
+
BlockPlacement,
|
| 69 |
+
ensure_block_shape,
|
| 70 |
+
new_block,
|
| 71 |
+
new_block_2d,
|
| 72 |
+
)
|
| 73 |
+
from pandas.core.internals.managers import (
|
| 74 |
+
BlockManager,
|
| 75 |
+
SingleBlockManager,
|
| 76 |
+
create_block_manager_from_blocks,
|
| 77 |
+
create_block_manager_from_column_arrays,
|
| 78 |
+
)
|
| 79 |
+
|
| 80 |
+
if TYPE_CHECKING:
|
| 81 |
+
from collections.abc import (
|
| 82 |
+
Hashable,
|
| 83 |
+
Sequence,
|
| 84 |
+
)
|
| 85 |
+
|
| 86 |
+
from pandas._typing import (
|
| 87 |
+
ArrayLike,
|
| 88 |
+
DtypeObj,
|
| 89 |
+
Manager,
|
| 90 |
+
npt,
|
| 91 |
+
)
|
| 92 |
+
# ---------------------------------------------------------------------
|
| 93 |
+
# BlockManager Interface
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def arrays_to_mgr(
|
| 97 |
+
arrays,
|
| 98 |
+
columns: Index,
|
| 99 |
+
index,
|
| 100 |
+
*,
|
| 101 |
+
dtype: DtypeObj | None = None,
|
| 102 |
+
verify_integrity: bool = True,
|
| 103 |
+
typ: str | None = None,
|
| 104 |
+
consolidate: bool = True,
|
| 105 |
+
) -> Manager:
|
| 106 |
+
"""
|
| 107 |
+
Segregate Series based on type and coerce into matrices.
|
| 108 |
+
|
| 109 |
+
Needs to handle a lot of exceptional cases.
|
| 110 |
+
"""
|
| 111 |
+
if verify_integrity:
|
| 112 |
+
# figure out the index, if necessary
|
| 113 |
+
if index is None:
|
| 114 |
+
index = _extract_index(arrays)
|
| 115 |
+
else:
|
| 116 |
+
index = ensure_index(index)
|
| 117 |
+
|
| 118 |
+
# don't force copy because getting jammed in an ndarray anyway
|
| 119 |
+
arrays, refs = _homogenize(arrays, index, dtype)
|
| 120 |
+
# _homogenize ensures
|
| 121 |
+
# - all(len(x) == len(index) for x in arrays)
|
| 122 |
+
# - all(x.ndim == 1 for x in arrays)
|
| 123 |
+
# - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays)
|
| 124 |
+
# - all(type(x) is not NumpyExtensionArray for x in arrays)
|
| 125 |
+
|
| 126 |
+
else:
|
| 127 |
+
index = ensure_index(index)
|
| 128 |
+
arrays = [extract_array(x, extract_numpy=True) for x in arrays]
|
| 129 |
+
# with _from_arrays, the passed arrays should never be Series objects
|
| 130 |
+
refs = [None] * len(arrays)
|
| 131 |
+
|
| 132 |
+
# Reached via DataFrame._from_arrays; we do minimal validation here
|
| 133 |
+
for arr in arrays:
|
| 134 |
+
if (
|
| 135 |
+
not isinstance(arr, (np.ndarray, ExtensionArray))
|
| 136 |
+
or arr.ndim != 1
|
| 137 |
+
or len(arr) != len(index)
|
| 138 |
+
):
|
| 139 |
+
raise ValueError(
|
| 140 |
+
"Arrays must be 1-dimensional np.ndarray or ExtensionArray "
|
| 141 |
+
"with length matching len(index)"
|
| 142 |
+
)
|
| 143 |
+
|
| 144 |
+
columns = ensure_index(columns)
|
| 145 |
+
if len(columns) != len(arrays):
|
| 146 |
+
raise ValueError("len(arrays) must match len(columns)")
|
| 147 |
+
|
| 148 |
+
# from BlockManager perspective
|
| 149 |
+
axes = [columns, index]
|
| 150 |
+
|
| 151 |
+
if typ == "block":
|
| 152 |
+
return create_block_manager_from_column_arrays(
|
| 153 |
+
arrays, axes, consolidate=consolidate, refs=refs
|
| 154 |
+
)
|
| 155 |
+
elif typ == "array":
|
| 156 |
+
return ArrayManager(arrays, [index, columns])
|
| 157 |
+
else:
|
| 158 |
+
raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def rec_array_to_mgr(
|
| 162 |
+
data: np.rec.recarray | np.ndarray,
|
| 163 |
+
index,
|
| 164 |
+
columns,
|
| 165 |
+
dtype: DtypeObj | None,
|
| 166 |
+
copy: bool,
|
| 167 |
+
typ: str,
|
| 168 |
+
) -> Manager:
|
| 169 |
+
"""
|
| 170 |
+
Extract from a masked rec array and create the manager.
|
| 171 |
+
"""
|
| 172 |
+
# essentially process a record array then fill it
|
| 173 |
+
fdata = ma.getdata(data)
|
| 174 |
+
if index is None:
|
| 175 |
+
index = default_index(len(fdata))
|
| 176 |
+
else:
|
| 177 |
+
index = ensure_index(index)
|
| 178 |
+
|
| 179 |
+
if columns is not None:
|
| 180 |
+
columns = ensure_index(columns)
|
| 181 |
+
arrays, arr_columns = to_arrays(fdata, columns)
|
| 182 |
+
|
| 183 |
+
# create the manager
|
| 184 |
+
|
| 185 |
+
arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index))
|
| 186 |
+
if columns is None:
|
| 187 |
+
columns = arr_columns
|
| 188 |
+
|
| 189 |
+
mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ)
|
| 190 |
+
|
| 191 |
+
if copy:
|
| 192 |
+
mgr = mgr.copy()
|
| 193 |
+
return mgr
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def mgr_to_mgr(mgr, typ: str, copy: bool = True) -> Manager:
|
| 197 |
+
"""
|
| 198 |
+
Convert to specific type of Manager. Does not copy if the type is already
|
| 199 |
+
correct. Does not guarantee a copy otherwise. `copy` keyword only controls
|
| 200 |
+
whether conversion from Block->ArrayManager copies the 1D arrays.
|
| 201 |
+
"""
|
| 202 |
+
new_mgr: Manager
|
| 203 |
+
|
| 204 |
+
if typ == "block":
|
| 205 |
+
if isinstance(mgr, BlockManager):
|
| 206 |
+
new_mgr = mgr
|
| 207 |
+
else:
|
| 208 |
+
if mgr.ndim == 2:
|
| 209 |
+
new_mgr = arrays_to_mgr(
|
| 210 |
+
mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block"
|
| 211 |
+
)
|
| 212 |
+
else:
|
| 213 |
+
new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index)
|
| 214 |
+
elif typ == "array":
|
| 215 |
+
if isinstance(mgr, ArrayManager):
|
| 216 |
+
new_mgr = mgr
|
| 217 |
+
else:
|
| 218 |
+
if mgr.ndim == 2:
|
| 219 |
+
arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))]
|
| 220 |
+
if copy:
|
| 221 |
+
arrays = [arr.copy() for arr in arrays]
|
| 222 |
+
new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]])
|
| 223 |
+
else:
|
| 224 |
+
array = mgr.internal_values()
|
| 225 |
+
if copy:
|
| 226 |
+
array = array.copy()
|
| 227 |
+
new_mgr = SingleArrayManager([array], [mgr.index])
|
| 228 |
+
else:
|
| 229 |
+
raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
|
| 230 |
+
return new_mgr
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
# ---------------------------------------------------------------------
|
| 234 |
+
# DataFrame Constructor Interface
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def ndarray_to_mgr(
|
| 238 |
+
values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str
|
| 239 |
+
) -> Manager:
|
| 240 |
+
# used in DataFrame.__init__
|
| 241 |
+
# input must be a ndarray, list, Series, Index, ExtensionArray
|
| 242 |
+
|
| 243 |
+
if isinstance(values, ABCSeries):
|
| 244 |
+
if columns is None:
|
| 245 |
+
if values.name is not None:
|
| 246 |
+
columns = Index([values.name])
|
| 247 |
+
if index is None:
|
| 248 |
+
index = values.index
|
| 249 |
+
else:
|
| 250 |
+
values = values.reindex(index)
|
| 251 |
+
|
| 252 |
+
# zero len case (GH #2234)
|
| 253 |
+
if not len(values) and columns is not None and len(columns):
|
| 254 |
+
values = np.empty((0, 1), dtype=object)
|
| 255 |
+
|
| 256 |
+
# if the array preparation does a copy -> avoid this for ArrayManager,
|
| 257 |
+
# since the copy is done on conversion to 1D arrays
|
| 258 |
+
copy_on_sanitize = False if typ == "array" else copy
|
| 259 |
+
|
| 260 |
+
vdtype = getattr(values, "dtype", None)
|
| 261 |
+
refs = None
|
| 262 |
+
if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype):
|
| 263 |
+
# GH#19157
|
| 264 |
+
|
| 265 |
+
if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1:
|
| 266 |
+
# GH#12513 a EA dtype passed with a 2D array, split into
|
| 267 |
+
# multiple EAs that view the values
|
| 268 |
+
# error: No overload variant of "__getitem__" of "ExtensionArray"
|
| 269 |
+
# matches argument type "Tuple[slice, int]"
|
| 270 |
+
values = [
|
| 271 |
+
values[:, n] # type: ignore[call-overload]
|
| 272 |
+
for n in range(values.shape[1])
|
| 273 |
+
]
|
| 274 |
+
else:
|
| 275 |
+
values = [values]
|
| 276 |
+
|
| 277 |
+
if columns is None:
|
| 278 |
+
columns = Index(range(len(values)))
|
| 279 |
+
else:
|
| 280 |
+
columns = ensure_index(columns)
|
| 281 |
+
|
| 282 |
+
        return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ)

    elif isinstance(vdtype, ExtensionDtype):
        # i.e. Datetime64TZ, PeriodDtype; cases with is_1d_only_ea_dtype(vdtype)
        #  are already caught above
        values = extract_array(values, extract_numpy=True)
        if copy:
            values = values.copy()
        if values.ndim == 1:
            values = values.reshape(-1, 1)

    elif isinstance(values, (ABCSeries, Index)):
        if not copy_on_sanitize and (
            dtype is None or astype_is_view(values.dtype, dtype)
        ):
            refs = values._references

        if copy_on_sanitize:
            values = values._values.copy()
        else:
            values = values._values

        values = _ensure_2d(values)

    elif isinstance(values, (np.ndarray, ExtensionArray)):
        # drop subclass info
        _copy = (
            copy_on_sanitize
            if (dtype is None or astype_is_view(values.dtype, dtype))
            else False
        )
        values = np.array(values, copy=_copy)
        values = _ensure_2d(values)

    else:
        # by definition an array here
        # the dtypes will be coerced to a single dtype
        values = _prep_ndarraylike(values, copy=copy_on_sanitize)

    if dtype is not None and values.dtype != dtype:
        # GH#40110 see similar check inside sanitize_array
        values = sanitize_array(
            values,
            None,
            dtype=dtype,
            copy=copy_on_sanitize,
            allow_2d=True,
        )

    # _prep_ndarraylike ensures that values.ndim == 2 at this point
    index, columns = _get_axes(
        values.shape[0], values.shape[1], index=index, columns=columns
    )

    _check_values_indices_shape_match(values, index, columns)

    if typ == "array":
        if issubclass(values.dtype.type, str):
            values = np.array(values, dtype=object)

        if dtype is None and is_object_dtype(values.dtype):
            arrays = [
                ensure_wrapped_if_datetimelike(
                    maybe_infer_to_datetimelike(values[:, i])
                )
                for i in range(values.shape[1])
            ]
        else:
            if lib.is_np_dtype(values.dtype, "mM"):
                values = ensure_wrapped_if_datetimelike(values)
            arrays = [values[:, i] for i in range(values.shape[1])]

        if copy:
            arrays = [arr.copy() for arr in arrays]

        return ArrayManager(arrays, [index, columns], verify_integrity=False)

    values = values.T

    # if we don't have a dtype specified, then try to convert objects
    # on the entire block; this is to convert if we have datetimelike's
    # embedded in an object type
    if dtype is None and is_object_dtype(values.dtype):
        obj_columns = list(values)
        maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns]
        # don't convert (and copy) the objects if no type inference occurs
        if any(x is not y for x, y in zip(obj_columns, maybe_datetime)):
            dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime]
            block_values = [
                new_block_2d(dvals_list[n], placement=BlockPlacement(n))
                for n in range(len(dvals_list))
            ]
        else:
            bp = BlockPlacement(slice(len(columns)))
            nb = new_block_2d(values, placement=bp, refs=refs)
            block_values = [nb]
    elif dtype is None and values.dtype.kind == "U" and using_pyarrow_string_dtype():
        dtype = StringDtype(storage="pyarrow_numpy")

        obj_columns = list(values)
        block_values = [
            new_block(
                dtype.construct_array_type()._from_sequence(data, dtype=dtype),
                BlockPlacement(slice(i, i + 1)),
                ndim=2,
            )
            for i, data in enumerate(obj_columns)
        ]

    else:
        bp = BlockPlacement(slice(len(columns)))
        nb = new_block_2d(values, placement=bp, refs=refs)
        block_values = [nb]

    if len(columns) == 0:
        # TODO: check len(values) == 0?
        block_values = []

    return create_block_manager_from_blocks(
        block_values, [columns, index], verify_integrity=False
    )
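
An editorial aside (not part of the committed file): the `maybe_infer_to_datetimelike` branch above is what makes the public constructor turn object arrays of datetime objects into datetime64 columns, column by column. A minimal sketch, assuming `import numpy as np` and `import pandas as pd`:

>>> from datetime import datetime
>>> arr = np.array([[datetime(2021, 1, 1)], [datetime(2021, 1, 2)]], dtype=object)
>>> pd.DataFrame(arr).dtypes
0    datetime64[ns]
dtype: object
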
def _check_values_indices_shape_match(
    values: np.ndarray, index: Index, columns: Index
) -> None:
    """
    Check that the shape implied by our axes matches the actual shape of the
    data.
    """
    if values.shape[1] != len(columns) or values.shape[0] != len(index):
        # Could let this raise in Block constructor, but we get a more
        #  helpful exception message this way.
        if values.shape[0] == 0 < len(index):
            raise ValueError("Empty data passed with indices specified.")

        passed = values.shape
        implied = (len(index), len(columns))
        raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
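
The shape check above is reachable from the public constructor; a quick illustration (editor's sketch, standard imports assumed):

>>> pd.DataFrame(np.ones((2, 2)), index=[0, 1, 2])
Traceback (most recent call last):
  ...
ValueError: Shape of passed values is (2, 2), indices imply (3, 2)
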
def dict_to_mgr(
    data: dict,
    index,
    columns,
    *,
    dtype: DtypeObj | None = None,
    typ: str = "block",
    copy: bool = True,
) -> Manager:
    """
    Segregate Series based on type and coerce into matrices.
    Needs to handle a lot of exceptional cases.

    Used in DataFrame.__init__
    """
    arrays: Sequence[Any] | Series

    if columns is not None:
        from pandas.core.series import Series

        arrays = Series(data, index=columns, dtype=object)
        missing = arrays.isna()
        if index is None:
            # GH10856
            # raise ValueError if only scalars in dict
            index = _extract_index(arrays[~missing])
        else:
            index = ensure_index(index)

        # no obvious "empty" int column
        if missing.any() and not is_integer_dtype(dtype):
            nan_dtype: DtypeObj

            if dtype is not None:
                # calling sanitize_array ensures we don't mix-and-match
                #  NA dtypes
                midxs = missing.values.nonzero()[0]
                for i in midxs:
                    arr = sanitize_array(arrays.iat[i], index, dtype=dtype)
                    arrays.iat[i] = arr
            else:
                # GH#1783
                nan_dtype = np.dtype("object")
                val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
                nmissing = missing.sum()
                if copy:
                    rhs = [val] * nmissing
                else:
                    # GH#45369
                    rhs = [val.copy() for _ in range(nmissing)]
                arrays.loc[missing] = rhs

        arrays = list(arrays)
        columns = ensure_index(columns)

    else:
        keys = list(data.keys())
        columns = Index(keys) if keys else default_index(0)
        arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]

    if copy:
        if typ == "block":
            # We only need to copy arrays that will not get consolidated, i.e.
            #  only EA arrays
            arrays = [
                x.copy()
                if isinstance(x, ExtensionArray)
                else x.copy(deep=True)
                if (
                    isinstance(x, Index)
                    or isinstance(x, ABCSeries)
                    and is_1d_only_ea_dtype(x.dtype)
                )
                else x
                for x in arrays
            ]
        else:
            # dtype check to exclude e.g. range objects, scalars
            arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays]

    return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy)
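
The missing-key handling above is what fills requested-but-absent columns with all-NaN object columns. An editor's sketch via the public API:

>>> pd.DataFrame({"a": [1, 2]}, columns=["a", "b"])
   a    b
0  1  NaN
1  2  NaN
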
def nested_data_to_arrays(
    data: Sequence,
    columns: Index | None,
    index: Index | None,
    dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index, Index]:
    """
    Convert a single sequence of arrays to multiple arrays.
    """
    # By the time we get here we have already checked treat_as_nested(data)

    if is_named_tuple(data[0]) and columns is None:
        columns = ensure_index(data[0]._fields)

    arrays, columns = to_arrays(data, columns, dtype=dtype)
    columns = ensure_index(columns)

    if index is None:
        if isinstance(data[0], ABCSeries):
            index = _get_names_from_index(data)
        else:
            index = default_index(len(data))

    return arrays, columns, index


def treat_as_nested(data) -> bool:
    """
    Check if we should use nested_data_to_arrays.
    """
    return (
        len(data) > 0
        and is_list_like(data[0])
        and getattr(data[0], "ndim", 1) == 1
        and not (isinstance(data, ExtensionArray) and data.ndim == 2)
    )


# ---------------------------------------------------------------------
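
The nested/flat distinction drawn by `treat_as_nested` is visible in the constructor (editor's sketch, standard imports assumed): a list of list-likes becomes rows and columns, while a flat list becomes a single column.

>>> pd.DataFrame([[1, 2], [3, 4]]).shape
(2, 2)
>>> pd.DataFrame([1, 2, 3]).shape
(3, 1)
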
def _prep_ndarraylike(values, copy: bool = True) -> np.ndarray:
    # values is specifically _not_ ndarray, EA, Index, or Series
    # We only get here with `not treat_as_nested(values)`

    if len(values) == 0:
        # TODO: check for length-zero range, in which case return int64 dtype?
        # TODO: reuse anything in try_cast?
        return np.empty((0, 0), dtype=object)
    elif isinstance(values, range):
        arr = range_to_ndarray(values)
        return arr[..., np.newaxis]

    def convert(v):
        if not is_list_like(v) or isinstance(v, ABCDataFrame):
            return v

        v = extract_array(v, extract_numpy=True)
        res = maybe_convert_platform(v)
        # We don't do maybe_infer_to_datetimelike here bc we will end up doing
        #  it column-by-column in ndarray_to_mgr
        return res

    # we could have a 1-dim or 2-dim list here
    # this is equiv of np.asarray, but does object conversion
    # and platform dtype preservation
    # does not convert e.g. [1, "a", True] to ["1", "a", "True"] like
    #  np.asarray would
    if is_list_like(values[0]):
        values = np.array([convert(v) for v in values])
    elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
        # GH#21861 see test_constructor_list_of_lists
        values = np.array([convert(v) for v in values])
    else:
        values = convert(values)

    return _ensure_2d(values)


def _ensure_2d(values: np.ndarray) -> np.ndarray:
    """
    Reshape 1D values, raise on anything else other than 2D.
    """
    if values.ndim == 1:
        values = values.reshape((values.shape[0], 1))
    elif values.ndim != 2:
        raise ValueError(f"Must pass 2-d input. shape={values.shape}")
    return values
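
The dedicated `range` branch above keeps integer dtype without materializing through object arrays. A quick sketch of the observable behavior (not part of the diff):

>>> pd.DataFrame(range(3))
   0
0  0
1  1
2  2
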
def _homogenize(
    data, index: Index, dtype: DtypeObj | None
) -> tuple[list[ArrayLike], list[Any]]:
    oindex = None
    homogenized = []
    # if the original array-like in `data` is a Series, keep track of this Series' refs
    refs: list[Any] = []

    for val in data:
        if isinstance(val, (ABCSeries, Index)):
            if dtype is not None:
                val = val.astype(dtype, copy=False)
            if isinstance(val, ABCSeries) and val.index is not index:
                # Forces alignment. No need to copy data since we
                # are putting it into an ndarray later
                val = val.reindex(index, copy=False)
            refs.append(val._references)
            val = val._values
        else:
            if isinstance(val, dict):
                # GH#41785 this _should_ be equivalent to (but faster than)
                #  val = Series(val, index=index)._values
                if oindex is None:
                    oindex = index.astype("O")

                if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
                    # see test_constructor_dict_datetime64_index
                    val = dict_compat(val)
                else:
                    # see test_constructor_subclass_dict
                    val = dict(val)
                val = lib.fast_multiget(val, oindex._values, default=np.nan)

            val = sanitize_array(val, index, dtype=dtype, copy=False)
            com.require_length_match(val, index)
            refs.append(None)

        homogenized.append(val)

    return homogenized, refs
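
The `fast_multiget` path above aligns dict-valued columns to the frame index, defaulting to NaN for missing keys. An editor's sketch of the resulting behavior:

>>> pd.DataFrame({"a": {"x": 1, "y": 2}}, index=["y", "x", "z"])
     a
y  2.0
x  1.0
z  NaN
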
def _extract_index(data) -> Index:
    """
    Try to infer an Index from the passed data, raise ValueError on failure.
    """
    index: Index
    if len(data) == 0:
        return default_index(0)

    raw_lengths = []
    indexes: list[list[Hashable] | Index] = []

    have_raw_arrays = False
    have_series = False
    have_dicts = False

    for val in data:
        if isinstance(val, ABCSeries):
            have_series = True
            indexes.append(val.index)
        elif isinstance(val, dict):
            have_dicts = True
            indexes.append(list(val.keys()))
        elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
            have_raw_arrays = True
            raw_lengths.append(len(val))
        elif isinstance(val, np.ndarray) and val.ndim > 1:
            raise ValueError("Per-column arrays must each be 1-dimensional")

    if not indexes and not raw_lengths:
        raise ValueError("If using all scalar values, you must pass an index")

    if have_series:
        index = union_indexes(indexes)
    elif have_dicts:
        index = union_indexes(indexes, sort=False)

    if have_raw_arrays:
        lengths = list(set(raw_lengths))
        if len(lengths) > 1:
            raise ValueError("All arrays must be of the same length")

        if have_dicts:
            raise ValueError(
                "Mixing dicts with non-Series may lead to ambiguous ordering."
            )

        if have_series:
            if lengths[0] != len(index):
                msg = (
                    f"array length {lengths[0]} does not match index "
                    f"length {len(index)}"
                )
                raise ValueError(msg)
        else:
            index = default_index(lengths[0])

    return ensure_index(index)
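
The "all scalar values" error above is the well-known constructor failure mode; it goes away once an index is supplied (editor's sketch):

>>> pd.DataFrame({"a": 1, "b": 2})
Traceback (most recent call last):
  ...
ValueError: If using all scalar values, you must pass an index
>>> pd.DataFrame({"a": 1, "b": 2}, index=[0])
   a  b
0  1  2
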
def reorder_arrays(
    arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int
) -> tuple[list[ArrayLike], Index]:
    """
    Pre-emptively (cheaply) reindex arrays with new columns.
    """
    # reorder according to the columns
    if columns is not None:
        if not columns.equals(arr_columns):
            # if they are equal, there is nothing to do
            new_arrays: list[ArrayLike] = []
            indexer = arr_columns.get_indexer(columns)
            for i, k in enumerate(indexer):
                if k == -1:
                    # by convention default is all-NaN object dtype
                    arr = np.empty(length, dtype=object)
                    arr.fill(np.nan)
                else:
                    arr = arrays[k]
                new_arrays.append(arr)

            arrays = new_arrays
            arr_columns = columns

    return arrays, arr_columns


def _get_names_from_index(data) -> Index:
    has_some_name = any(getattr(s, "name", None) is not None for s in data)
    if not has_some_name:
        return default_index(len(data))

    index: list[Hashable] = list(range(len(data)))
    count = 0
    for i, s in enumerate(data):
        n = getattr(s, "name", None)
        if n is not None:
            index[i] = n
        else:
            index[i] = f"Unnamed {count}"
            count += 1

    return Index(index)
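
When at least one Series in a list is named, `_get_names_from_index` labels the unnamed ones "Unnamed N" (a sketch of the expected outcome, not verified against every version):

>>> pd.DataFrame([pd.Series([1], name="r0"), pd.Series([2])]).index
Index(['r0', 'Unnamed 0'], dtype='object')
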
def _get_axes(
    N: int, K: int, index: Index | None, columns: Index | None
) -> tuple[Index, Index]:
    # helper to create the axes as indexes
    # return axes or defaults

    if index is None:
        index = default_index(N)
    else:
        index = ensure_index(index)

    if columns is None:
        columns = default_index(K)
    else:
        columns = ensure_index(columns)
    return index, columns


def dataclasses_to_dicts(data):
    """
    Converts a list of dataclass instances to a list of dictionaries.

    Parameters
    ----------
    data : List[Type[dataclass]]

    Returns
    -------
    list_dict : List[dict]

    Examples
    --------
    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Point:
    ...     x: int
    ...     y: int

    >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
    [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]

    """
    from dataclasses import asdict

    return list(map(asdict, data))


# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays


def to_arrays(
    data, columns: Index | None, dtype: DtypeObj | None = None
) -> tuple[list[ArrayLike], Index]:
    """
    Return list of arrays, columns.

    Returns
    -------
    list[ArrayLike]
        These will become columns in a DataFrame.
    Index
        This will become frame.columns.

    Notes
    -----
    Ensures that len(result_arrays) == len(result_index).
    """

    if not len(data):
        if isinstance(data, np.ndarray):
            if data.dtype.names is not None:
                # i.e. numpy structured array
                columns = ensure_index(data.dtype.names)
                arrays = [data[name] for name in columns]

                if len(data) == 0:
                    # GH#42456 the indexing above results in list of 2D ndarrays
                    # TODO: is that an issue with numpy?
                    for i, arr in enumerate(arrays):
                        if arr.ndim == 2:
                            arrays[i] = arr[:, 0]

                return arrays, columns
        return [], ensure_index([])

    elif isinstance(data, np.ndarray) and data.dtype.names is not None:
        # e.g. recarray
        columns = Index(list(data.dtype.names))
        arrays = [data[k] for k in columns]
        return arrays, columns

    if isinstance(data[0], (list, tuple)):
        arr = _list_to_arrays(data)
    elif isinstance(data[0], abc.Mapping):
        arr, columns = _list_of_dict_to_arrays(data, columns)
    elif isinstance(data[0], ABCSeries):
        arr, columns = _list_of_series_to_arrays(data, columns)
    else:
        # last ditch effort
        data = [tuple(x) for x in data]
        arr = _list_to_arrays(data)

    content, columns = _finalize_columns_and_data(arr, columns, dtype)
    return content, columns
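
The structured-array branch above is what lets field names become columns (editor's sketch, standard imports assumed):

>>> rec = np.array([(1, 2.0), (3, 4.0)], dtype=[("a", "i8"), ("b", "f8")])
>>> pd.DataFrame(rec)
   a    b
0  1  2.0
1  3  4.0
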
def _list_to_arrays(data: list[tuple | list]) -> np.ndarray:
    # Returned np.ndarray has ndim = 2
    # Note: we already check len(data) > 0 before getting here
    if isinstance(data[0], tuple):
        content = lib.to_object_array_tuples(data)
    else:
        # list of lists
        content = lib.to_object_array(data)
    return content


def _list_of_series_to_arrays(
    data: list,
    columns: Index | None,
) -> tuple[np.ndarray, Index]:
    # returned np.ndarray has ndim == 2

    if columns is None:
        # We know pass_data is non-empty because data[0] is a Series
        pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
        columns = get_objs_combined_axis(pass_data, sort=False)

    indexer_cache: dict[int, np.ndarray] = {}

    aligned_values = []
    for s in data:
        index = getattr(s, "index", None)
        if index is None:
            index = default_index(len(s))

        if id(index) in indexer_cache:
            indexer = indexer_cache[id(index)]
        else:
            indexer = indexer_cache[id(index)] = index.get_indexer(columns)

        values = extract_array(s, extract_numpy=True)
        aligned_values.append(algorithms.take_nd(values, indexer))

    content = np.vstack(aligned_values)
    return content, columns
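
With no `columns` passed, the combined axis of the Series becomes the columns, with NaN filled where a row's Series lacks a label (editor's sketch):

>>> pd.DataFrame([pd.Series({"a": 1}), pd.Series({"b": 2})])
     a    b
0  1.0  NaN
1  NaN  2.0
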
def _list_of_dict_to_arrays(
    data: list[dict],
    columns: Index | None,
) -> tuple[np.ndarray, Index]:
    """
    Convert list of dicts to numpy arrays

    if `columns` is not passed, column names are inferred from the records
    - for OrderedDict and dicts, the column names match
      the key insertion-order from the first record to the last.
    - For other kinds of dict-likes, the keys are lexically sorted.

    Parameters
    ----------
    data : iterable
        collection of records (OrderedDict, dict)
    columns: iterables or None

    Returns
    -------
    content : np.ndarray[object, ndim=2]
    columns : Index
    """
    if columns is None:
        gen = (list(x.keys()) for x in data)
        sort = not any(isinstance(d, dict) for d in data)
        pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort)
        columns = ensure_index(pre_cols)

    # assure that they are of the base dict class and not of derived
    # classes
    data = [d if type(d) is dict else dict(d) for d in data]  # noqa: E721

    content = lib.dicts_to_array(data, list(columns))
    return content, columns
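
For plain dicts, key insertion order across the records is preserved, as the docstring above promises (editor's sketch):

>>> pd.DataFrame([{"b": 1, "a": 2}, {"c": 3}]).columns
Index(['b', 'a', 'c'], dtype='object')
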
def _finalize_columns_and_data(
    content: np.ndarray,  # ndim == 2
    columns: Index | None,
    dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index]:
    """
    Ensure we have valid columns, cast object dtypes if possible.
    """
    contents = list(content.T)

    try:
        columns = _validate_or_indexify_columns(contents, columns)
    except AssertionError as err:
        # GH#26429 do not raise user-facing AssertionError
        raise ValueError(err) from err

    if len(contents) and contents[0].dtype == np.object_:
        contents = convert_object_array(contents, dtype=dtype)

    return contents, columns


def _validate_or_indexify_columns(
    content: list[np.ndarray], columns: Index | None
) -> Index:
    """
    If columns is None, make numbers as column names; Otherwise, validate that
    columns have valid length.

    Parameters
    ----------
    content : list of np.ndarrays
    columns : Index or None

    Returns
    -------
    Index
        If columns is None, assign positional column index value as columns.

    Raises
    ------
    1. AssertionError when content is not composed of list of lists, and if
       length of columns is not equal to length of content.
    2. ValueError when content is list of lists, but length of each sub-list
       is not equal
    3. ValueError when content is list of lists, but length of sub-list is
       not equal to length of content
    """
    if columns is None:
        columns = default_index(len(content))
    else:
        # Add mask for data which is composed of list of lists
        is_mi_list = isinstance(columns, list) and all(
            isinstance(col, list) for col in columns
        )

        if not is_mi_list and len(columns) != len(content):  # pragma: no cover
            # caller's responsibility to check for this...
            raise AssertionError(
                f"{len(columns)} columns passed, passed data had "
                f"{len(content)} columns"
            )
        if is_mi_list:
            # check if nested list column, length of each sub-list should be equal
            if len({len(col) for col in columns}) > 1:
                raise ValueError(
                    "Length of columns passed for MultiIndex columns is different"
                )

            # if columns is not empty and length of sublist is not equal to content
            if columns and len(columns[0]) != len(content):
                raise ValueError(
                    f"{len(columns[0])} columns passed, passed data had "
                    f"{len(content)} columns"
                )
    return columns


def convert_object_array(
    content: list[npt.NDArray[np.object_]],
    dtype: DtypeObj | None,
    dtype_backend: str = "numpy",
    coerce_float: bool = False,
) -> list[ArrayLike]:
    """
    Internal function to convert object array.

    Parameters
    ----------
    content: List[np.ndarray]
    dtype: np.dtype or ExtensionDtype
    dtype_backend: Controls if nullable/pyarrow dtypes are returned.
    coerce_float: Cast floats that are integers to int.

    Returns
    -------
    List[ArrayLike]
    """
    # provide soft conversion of object dtypes

    def convert(arr):
        if dtype != np.dtype("O"):
            arr = lib.maybe_convert_objects(
                arr,
                try_float=coerce_float,
                convert_to_nullable_dtype=dtype_backend != "numpy",
            )
            # Notes on cases that get here 2023-02-15
            # 1) we DO get here when arr is all Timestamps and dtype=None
            # 2) disabling this doesn't break the world, so this must be
            #    getting caught at a higher level
            # 3) passing convert_non_numeric to maybe_convert_objects gets this right
            # 4) convert_non_numeric?

            if dtype is None:
                if arr.dtype == np.dtype("O"):
                    # i.e. maybe_convert_objects didn't convert
                    arr = maybe_infer_to_datetimelike(arr)
                    if dtype_backend != "numpy" and arr.dtype == np.dtype("O"):
                        new_dtype = StringDtype()
                        arr_cls = new_dtype.construct_array_type()
                        arr = arr_cls._from_sequence(arr, dtype=new_dtype)
                elif dtype_backend != "numpy" and isinstance(arr, np.ndarray):
                    if arr.dtype.kind in "iufb":
                        arr = pd_array(arr, copy=False)

            elif isinstance(dtype, ExtensionDtype):
                # TODO: test(s) that get here
                # TODO: try to de-duplicate this convert function with
                #  core.construction functions
                cls = dtype.construct_array_type()
                arr = cls._from_sequence(arr, dtype=dtype, copy=False)
            elif dtype.kind in "mM":
                # This restriction is harmless bc these are the only cases
                #  where maybe_cast_to_datetime is not a no-op.
                # Here we know:
                #  1) dtype.kind in "mM" and
                #  2) arr is either object or numeric dtype
                arr = maybe_cast_to_datetime(arr, dtype)

        return arr

    arrays = [convert(arr) for arr in content]

    return arrays
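
Column-wise soft conversion is the reason nested-list inputs end up with per-column dtypes rather than one common dtype (editor's sketch, not part of the diff):

>>> pd.DataFrame([[1, "x"], [2, "y"]]).dtypes
0     int64
1    object
dtype: object
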
mgm/lib/python3.10/site-packages/pandas/core/ops/__init__.py
ADDED
@@ -0,0 +1,93 @@
"""
Arithmetic operations for PandasObjects

This is not a public API.
"""
from __future__ import annotations

from pandas.core.ops.array_ops import (
    arithmetic_op,
    comp_method_OBJECT_ARRAY,
    comparison_op,
    fill_binop,
    get_array_op,
    logical_op,
    maybe_prepare_scalar_for_op,
)
from pandas.core.ops.common import (
    get_op_result_name,
    unpack_zerodim_and_defer,
)
from pandas.core.ops.docstrings import make_flex_doc
from pandas.core.ops.invalid import invalid_comparison
from pandas.core.ops.mask_ops import (
    kleene_and,
    kleene_or,
    kleene_xor,
)
from pandas.core.roperator import (
    radd,
    rand_,
    rdiv,
    rdivmod,
    rfloordiv,
    rmod,
    rmul,
    ror_,
    rpow,
    rsub,
    rtruediv,
    rxor,
)

# -----------------------------------------------------------------------------
# constants
ARITHMETIC_BINOPS: set[str] = {
    "add",
    "sub",
    "mul",
    "pow",
    "mod",
    "floordiv",
    "truediv",
    "divmod",
    "radd",
    "rsub",
    "rmul",
    "rpow",
    "rmod",
    "rfloordiv",
    "rtruediv",
    "rdivmod",
}


__all__ = [
    "ARITHMETIC_BINOPS",
    "arithmetic_op",
    "comparison_op",
    "comp_method_OBJECT_ARRAY",
    "invalid_comparison",
    "fill_binop",
    "kleene_and",
    "kleene_or",
    "kleene_xor",
    "logical_op",
    "make_flex_doc",
    "radd",
    "rand_",
    "rdiv",
    "rdivmod",
    "rfloordiv",
    "rmod",
    "rmul",
    "ror_",
    "rpow",
    "rsub",
    "rtruediv",
    "rxor",
    "unpack_zerodim_and_defer",
    "get_op_result_name",
    "maybe_prepare_scalar_for_op",
    "get_array_op",
]

mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.4 kB)

mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc
ADDED
Binary file (11.5 kB)

mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc
ADDED
Binary file (3.39 kB)

mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/dispatch.cpython-310.pyc
ADDED
Binary file (891 Bytes)

mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/docstrings.cpython-310.pyc
ADDED
Binary file (15.2 kB)

mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/invalid.cpython-310.pyc
ADDED
Binary file (1.8 kB)

mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc
ADDED
Binary file (3.85 kB)

mgm/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc
ADDED
Binary file (4.07 kB)
mgm/lib/python3.10/site-packages/pandas/core/ops/array_ops.py
ADDED
@@ -0,0 +1,604 @@
"""
Functions for arithmetic and comparison operations on NumPy arrays and
ExtensionArrays.
"""
from __future__ import annotations

import datetime
from functools import partial
import operator
from typing import (
    TYPE_CHECKING,
    Any,
)
import warnings

import numpy as np

from pandas._libs import (
    NaT,
    Timedelta,
    Timestamp,
    lib,
    ops as libops,
)
from pandas._libs.tslibs import (
    BaseOffset,
    get_supported_dtype,
    is_supported_dtype,
    is_unitless,
)
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.cast import (
    construct_1d_object_array_from_listlike,
    find_common_type,
)
from pandas.core.dtypes.common import (
    ensure_object,
    is_bool_dtype,
    is_list_like,
    is_numeric_v_string_like,
    is_object_dtype,
    is_scalar,
)
from pandas.core.dtypes.generic import (
    ABCExtensionArray,
    ABCIndex,
    ABCSeries,
)
from pandas.core.dtypes.missing import (
    isna,
    notna,
)

from pandas.core import roperator
from pandas.core.computation import expressions
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.ops import missing
from pandas.core.ops.dispatch import should_extension_dispatch
from pandas.core.ops.invalid import invalid_comparison

if TYPE_CHECKING:
    from pandas._typing import (
        ArrayLike,
        Shape,
    )

# -----------------------------------------------------------------------------
# Masking NA values and fallbacks for operations numpy does not support


def fill_binop(left, right, fill_value):
    """
    If a non-None fill_value is given, replace null entries in left and right
    with this value, but only in positions where _one_ of left/right is null,
    not both.

    Parameters
    ----------
    left : array-like
    right : array-like
    fill_value : object

    Returns
    -------
    left : array-like
    right : array-like

    Notes
    -----
    Makes copies if fill_value is not None and NAs are present.
    """
    if fill_value is not None:
        left_mask = isna(left)
        right_mask = isna(right)

        # one but not both
        mask = left_mask ^ right_mask

        if left_mask.any():
            # Avoid making a copy if we can
            left = left.copy()
            left[left_mask & mask] = fill_value

        if right_mask.any():
            # Avoid making a copy if we can
            right = right.copy()
            right[right_mask & mask] = fill_value

    return left, right
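
`fill_binop` backs the `fill_value` argument of the flex arithmetic methods; positions where exactly one operand is null get the fill before the op runs (editor's sketch, standard imports assumed):

>>> a = pd.Series([1.0, np.nan, 3.0])
>>> b = pd.Series([np.nan, 2.0, 4.0])
>>> a.add(b, fill_value=0)
0    1.0
1    2.0
2    7.0
dtype: float64
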
def comp_method_OBJECT_ARRAY(op, x, y):
    if isinstance(y, list):
        # e.g. test_tuple_categories
        y = construct_1d_object_array_from_listlike(y)

    if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
        if not is_object_dtype(y.dtype):
            y = y.astype(np.object_)

        if isinstance(y, (ABCSeries, ABCIndex)):
            y = y._values

        if x.shape != y.shape:
            raise ValueError("Shapes must match", x.shape, y.shape)
        result = libops.vec_compare(x.ravel(), y.ravel(), op)
    else:
        result = libops.scalar_compare(x.ravel(), y, op)
    return result.reshape(x.shape)


def _masked_arith_op(x: np.ndarray, y, op):
    """
    If the given arithmetic operation fails, attempt it again on
    only the non-null elements of the input array(s).

    Parameters
    ----------
    x : np.ndarray
    y : np.ndarray, Series, Index
    op : binary operator
    """
    # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
    # the logic valid for both Series and DataFrame ops.
    xrav = x.ravel()

    if isinstance(y, np.ndarray):
        dtype = find_common_type([x.dtype, y.dtype])
        result = np.empty(x.size, dtype=dtype)

        if len(x) != len(y):
            raise ValueError(x.shape, y.shape)
        ymask = notna(y)

        # NB: ravel() is only safe since y is ndarray; for e.g. PeriodIndex
        #  we would get int64 dtype, see GH#19956
        yrav = y.ravel()
        mask = notna(xrav) & ymask.ravel()

        # See GH#5284, GH#5035, GH#19448 for historical reference
        if mask.any():
            result[mask] = op(xrav[mask], yrav[mask])

    else:
        if not is_scalar(y):
            raise TypeError(
                f"Cannot broadcast np.ndarray with operand of type { type(y) }"
            )

        # mask is only meaningful for x
        result = np.empty(x.size, dtype=x.dtype)
        mask = notna(xrav)

        # 1 ** np.nan is 1. So we have to unmask those.
        if op is pow:
            mask = np.where(x == 1, False, mask)
        elif op is roperator.rpow:
            mask = np.where(y == 1, False, mask)

        if mask.any():
            result[mask] = op(xrav[mask], y)

    np.putmask(result, ~mask, np.nan)
    result = result.reshape(x.shape)  # 2D compat
    return result


def _na_arithmetic_op(left: np.ndarray, right, op, is_cmp: bool = False):
    """
    Return the result of evaluating op on the passed in values.

    If native types are not compatible, try coercion to object dtype.

    Parameters
    ----------
    left : np.ndarray
    right : np.ndarray or scalar
        Excludes DataFrame, Series, Index, ExtensionArray.
    is_cmp : bool, default False
        If this is a comparison operation.

    Returns
    -------
    array-like

    Raises
    ------
    TypeError : invalid operation
    """
    if isinstance(right, str):
        # can never use numexpr
        func = op
    else:
        func = partial(expressions.evaluate, op)

    try:
        result = func(left, right)
    except TypeError:
        if not is_cmp and (
            left.dtype == object or getattr(right, "dtype", None) == object
        ):
            # For object dtype, fallback to a masked operation (only operating
            #  on the non-missing values)
            # Don't do this for comparisons, as that will handle complex numbers
            #  incorrectly, see GH#32047
            result = _masked_arith_op(left, right, op)
        else:
            raise

    if is_cmp and (is_scalar(result) or result is NotImplemented):
        # numpy returned a scalar instead of operating element-wise
        # e.g. numeric array vs str
        # TODO: can remove this after dropping some future numpy version?
        return invalid_comparison(left, right, op)

    return missing.dispatch_fill_zeros(op, left, right, result)


def arithmetic_op(left: ArrayLike, right: Any, op):
    """
    Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ...

    Note: the caller is responsible for ensuring that numpy warnings are
    suppressed (with np.errstate(all="ignore")) if needed.

    Parameters
    ----------
    left : np.ndarray or ExtensionArray
    right : object
        Cannot be a DataFrame or Index.  Series is *not* excluded.
    op : {operator.add, operator.sub, ...}
        Or one of the reversed variants from roperator.

    Returns
    -------
    ndarray or ExtensionArray
        Or a 2-tuple of these in the case of divmod or rdivmod.
    """
    # NB: We assume that extract_array and ensure_wrapped_if_datetimelike
    #  have already been called on `left` and `right`,
    #  and `maybe_prepare_scalar_for_op` has already been called on `right`
    # We need to special-case datetime64/timedelta64 dtypes (e.g. because numpy
    #  casts integer dtypes to timedelta64 when operating with timedelta64 - GH#22390)

    if (
        should_extension_dispatch(left, right)
        or isinstance(right, (Timedelta, BaseOffset, Timestamp))
        or right is NaT
    ):
        # Timedelta/Timestamp and other custom scalars are included in the check
        #  because numexpr will fail on it, see GH#31457
        res_values = op(left, right)
    else:
        # TODO we should handle EAs consistently and move this check before the if/else
        #  (https://github.com/pandas-dev/pandas/issues/41165)
        # error: Argument 2 to "_bool_arith_check" has incompatible type
        #  "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]"
        _bool_arith_check(op, left, right)  # type: ignore[arg-type]

        # error: Argument 1 to "_na_arithmetic_op" has incompatible type
        #  "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]"
        res_values = _na_arithmetic_op(left, right, op)  # type: ignore[arg-type]

    return res_values


def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike:
    """
    Evaluate a comparison operation `==`, `!=`, `>=`, `>`, `<=`, or `<`.

    Note: the caller is responsible for ensuring that numpy warnings are
    suppressed (with np.errstate(all="ignore")) if needed.

    Parameters
    ----------
    left : np.ndarray or ExtensionArray
    right : object
        Cannot be a DataFrame, Series, or Index.
    op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le}

    Returns
    -------
    ndarray or ExtensionArray
    """
    # NB: We assume extract_array has already been called on left and right
    lvalues = ensure_wrapped_if_datetimelike(left)
    rvalues = ensure_wrapped_if_datetimelike(right)

    rvalues = lib.item_from_zerodim(rvalues)
    if isinstance(rvalues, list):
        # We don't catch tuple here bc we may be comparing e.g. MultiIndex
        #  to a tuple that represents a single entry, see test_compare_tuple_strs
        rvalues = np.asarray(rvalues)

    if isinstance(rvalues, (np.ndarray, ABCExtensionArray)):
        # TODO: make this treatment consistent across ops and classes.
        #  We are not catching all listlikes here (e.g. frozenset, tuple)
        #  The ambiguous case is object-dtype.  See GH#27803
        if len(lvalues) != len(rvalues):
            raise ValueError(
                "Lengths must match to compare", lvalues.shape, rvalues.shape
            )

    if should_extension_dispatch(lvalues, rvalues) or (
        (isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT)
        and lvalues.dtype != object
    ):
        # Call the method on lvalues
        res_values = op(lvalues, rvalues)

    elif is_scalar(rvalues) and isna(rvalues):  # TODO: but not pd.NA?
        # numpy does not like comparisons vs None
        if op is operator.ne:
            res_values = np.ones(lvalues.shape, dtype=bool)
        else:
            res_values = np.zeros(lvalues.shape, dtype=bool)

    elif is_numeric_v_string_like(lvalues, rvalues):
        # GH#36377 going through the numexpr path would incorrectly raise
        return invalid_comparison(lvalues, rvalues, op)

    elif lvalues.dtype == object or isinstance(rvalues, str):
        res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues)

    else:
        res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True)

    return res_values
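
The NA-scalar branch above explains the familiar NaN comparison semantics: `==` against NaN is always False and `!=` always True (editor's sketch):

>>> s = pd.Series([1.0, 2.0])
>>> s == np.nan
0    False
1    False
dtype: bool
>>> s != np.nan
0    True
1    True
dtype: bool
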
def na_logical_op(x: np.ndarray, y, op):
    try:
        # For exposition, write:
        #  yarr = isinstance(y, np.ndarray)
        #  yint = is_integer(y) or (yarr and y.dtype.kind == "i")
        #  ybool = is_bool(y) or (yarr and y.dtype.kind == "b")
        #  xint = x.dtype.kind == "i"
        #  xbool = x.dtype.kind == "b"
        # Then Cases where this goes through without raising include:
        #  (xint or xbool) and (yint or ybool)
        result = op(x, y)
    except TypeError:
        if isinstance(y, np.ndarray):
            # bool-bool dtype operations should be OK, should not get here
            assert not (x.dtype.kind == "b" and y.dtype.kind == "b")
            x = ensure_object(x)
            y = ensure_object(y)
            result = libops.vec_binop(x.ravel(), y.ravel(), op)
        else:
            # let null fall thru
            assert lib.is_scalar(y)
            if not isna(y):
                y = bool(y)
            try:
                result = libops.scalar_binop(x, y, op)
            except (
                TypeError,
                ValueError,
                AttributeError,
                OverflowError,
                NotImplementedError,
            ) as err:
                typ = type(y).__name__
                raise TypeError(
                    f"Cannot perform '{op.__name__}' with a dtyped [{x.dtype}] array "
                    f"and scalar of type [{typ}]"
                ) from err

    return result.reshape(x.shape)


def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike:
    """
    Evaluate a logical operation `|`, `&`, or `^`.

    Parameters
    ----------
    left : np.ndarray or ExtensionArray
    right : object
        Cannot be a DataFrame, Series, or Index.
    op : {operator.and_, operator.or_, operator.xor}
        Or one of the reversed variants from roperator.

    Returns
    -------
    ndarray or ExtensionArray
    """

    def fill_bool(x, left=None):
        # if `left` is specifically not-boolean, we do not cast to bool
        if x.dtype.kind in "cfO":
            # dtypes that can hold NA
            mask = isna(x)
            if mask.any():
                x = x.astype(object)
                x[mask] = False

        if left is None or left.dtype.kind == "b":
            x = x.astype(bool)
        return x

    right = lib.item_from_zerodim(right)
    if is_list_like(right) and not hasattr(right, "dtype"):
        # e.g. list, tuple
        warnings.warn(
            "Logical ops (and, or, xor) between Pandas objects and dtype-less "
            "sequences (e.g. list, tuple) are deprecated and will raise in a "
            "future version. Wrap the object in a Series, Index, or np.array "
            "before operating instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        right = construct_1d_object_array_from_listlike(right)

    # NB: We assume extract_array has already been called on left and right
    lvalues = ensure_wrapped_if_datetimelike(left)
    rvalues = right

    if should_extension_dispatch(lvalues, rvalues):
        # Call the method on lvalues
        res_values = op(lvalues, rvalues)

    else:
        if isinstance(rvalues, np.ndarray):
            is_other_int_dtype = rvalues.dtype.kind in "iu"
            if not is_other_int_dtype:
                rvalues = fill_bool(rvalues, lvalues)

        else:
            # i.e. scalar
            is_other_int_dtype = lib.is_integer(rvalues)

        res_values = na_logical_op(lvalues, rvalues, op)

        # For int vs int `^`, `|`, `&` are bitwise operators and return
        #   integer dtypes.  Otherwise these are boolean ops
        if not (left.dtype.kind in "iu" and is_other_int_dtype):
            res_values = fill_bool(res_values)

    return res_values
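
The int-vs-bool distinction noted in the final comment is observable from the public API (editor's sketch): int operands keep bitwise semantics, bool operands produce bool.

>>> pd.Series([1, 2]) | pd.Series([4, 1])
0    5
1    3
dtype: int64
>>> pd.Series([True, False]) | pd.Series([False, False])
0     True
1    False
dtype: bool
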
def get_array_op(op):
    """
    Return a binary array operation corresponding to the given operator op.

    Parameters
    ----------
    op : function
        Binary operator from operator or roperator module.

    Returns
    -------
    functools.partial
    """
    if isinstance(op, partial):
        # We get here via dispatch_to_series in DataFrame case
        #  e.g. test_rolling_consistency_var_debiasing_factors
        return op

    op_name = op.__name__.strip("_").lstrip("r")
    if op_name == "arith_op":
        # Reached via DataFrame._combine_frame i.e. flex methods
        #  e.g. test_df_add_flex_filled_mixed_dtypes
        return op

    if op_name in {"eq", "ne", "lt", "le", "gt", "ge"}:
        return partial(comparison_op, op=op)
    elif op_name in {"and", "or", "xor", "rand", "ror", "rxor"}:
        return partial(logical_op, op=op)
    elif op_name in {
        "add",
        "sub",
        "mul",
        "truediv",
        "floordiv",
        "mod",
        "divmod",
        "pow",
    }:
        return partial(arithmetic_op, op=op)
    else:
        raise NotImplementedError(op_name)


def maybe_prepare_scalar_for_op(obj, shape: Shape):
    """
    Cast non-pandas objects to pandas types to unify behavior of arithmetic
    and comparison operations.

    Parameters
    ----------
    obj: object
    shape : tuple[int]

    Returns
    -------
    out : object

    Notes
    -----
    Be careful to call this *after* determining the `name` attribute to be
    attached to the result of the arithmetic operation.
    """
    if type(obj) is datetime.timedelta:
        # GH#22390  cast up to Timedelta to rely on Timedelta
        # implementation; otherwise operation against numeric-dtype
        # raises TypeError
        return Timedelta(obj)
    elif type(obj) is datetime.datetime:
        # cast up to Timestamp to rely on Timestamp implementation, see Timedelta above
        return Timestamp(obj)
    elif isinstance(obj, np.datetime64):
        # GH#28080 numpy casts integer-dtype to datetime64 when doing
        #  array[int] + datetime64, which we do not allow
        if isna(obj):
            from pandas.core.arrays import DatetimeArray

            # Avoid possible ambiguities with pd.NaT
            # GH 52295
            if is_unitless(obj.dtype):
                obj = obj.astype("datetime64[ns]")
            elif not is_supported_dtype(obj.dtype):
                new_dtype = get_supported_dtype(obj.dtype)
                obj = obj.astype(new_dtype)
            right = np.broadcast_to(obj, shape)
            return DatetimeArray._simple_new(right, dtype=right.dtype)

        return Timestamp(obj)

    elif isinstance(obj, np.timedelta64):
        if isna(obj):
            from pandas.core.arrays import TimedeltaArray

            # wrapping timedelta64("NaT") in Timedelta returns NaT,
            #  which would incorrectly be treated as a datetime-NaT, so
            #  we broadcast and wrap in a TimedeltaArray
            # GH 52295
            if is_unitless(obj.dtype):
                obj = obj.astype("timedelta64[ns]")
            elif not is_supported_dtype(obj.dtype):
                new_dtype = get_supported_dtype(obj.dtype)
                obj = obj.astype(new_dtype)
            right = np.broadcast_to(obj, shape)
            return TimedeltaArray._simple_new(right, dtype=right.dtype)

        # In particular non-nanosecond timedelta64 needs to be cast to
        #  nanoseconds, or else we get undesired behavior like
        #  np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
        return Timedelta(obj)

    # We want NumPy numeric scalars to behave like Python scalars
    # post NEP 50
    elif isinstance(obj, np.integer):
        return int(obj)

    elif isinstance(obj, np.floating):
        return float(obj)

    return obj
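
The `datetime.timedelta` upcast above is what makes stdlib timedeltas interoperate with datetime64 Series (editor's sketch):

>>> import datetime
>>> pd.Series([pd.Timestamp("2021-01-01")]) + datetime.timedelta(days=1)
0   2021-01-02
dtype: datetime64[ns]
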
_BOOL_OP_NOT_ALLOWED = {
    operator.truediv,
    roperator.rtruediv,
    operator.floordiv,
    roperator.rfloordiv,
    operator.pow,
    roperator.rpow,
}


def _bool_arith_check(op, a: np.ndarray, b):
    """
    In contrast to numpy, pandas raises an error for certain operations
    with booleans.
    """
    if op in _BOOL_OP_NOT_ALLOWED:
        if a.dtype.kind == "b" and (is_bool_dtype(b) or lib.is_bool(b)):
            op_name = op.__name__.strip("_").lstrip("r")
            raise NotImplementedError(
                f"operator '{op_name}' not implemented for bool dtypes"
            )
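
As the docstring says, pandas is stricter than numpy here; bool-with-bool division is rejected (editor's sketch):

>>> pd.Series([True, False]) / True
Traceback (most recent call last):
  ...
NotImplementedError: operator 'truediv' not implemented for bool dtypes
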
mgm/lib/python3.10/site-packages/pandas/core/ops/common.py
ADDED
@@ -0,0 +1,146 @@
"""
Boilerplate functions used in defining binary operations.
"""
from __future__ import annotations

from functools import wraps
from typing import (
    TYPE_CHECKING,
    Callable,
)

from pandas._libs.lib import item_from_zerodim
from pandas._libs.missing import is_matching_na

from pandas.core.dtypes.generic import (
    ABCIndex,
    ABCSeries,
)

if TYPE_CHECKING:
    from pandas._typing import F


def unpack_zerodim_and_defer(name: str) -> Callable[[F], F]:
    """
    Boilerplate for pandas conventions in arithmetic and comparison methods.

    Parameters
    ----------
    name : str

    Returns
    -------
    decorator
    """

    def wrapper(method: F) -> F:
        return _unpack_zerodim_and_defer(method, name)

    return wrapper


def _unpack_zerodim_and_defer(method, name: str):
    """
    Boilerplate for pandas conventions in arithmetic and comparison methods.

    Ensure method returns NotImplemented when operating against "senior"
    classes. Ensure zero-dimensional ndarrays are always unpacked.

    Parameters
    ----------
    method : binary method
    name : str

    Returns
    -------
    method
    """
    stripped_name = name.removeprefix("__").removesuffix("__")
    is_cmp = stripped_name in {"eq", "ne", "lt", "le", "gt", "ge"}

    @wraps(method)
    def new_method(self, other):
        if is_cmp and isinstance(self, ABCIndex) and isinstance(other, ABCSeries):
            # For comparison ops, Index does *not* defer to Series
            pass
        else:
            prio = getattr(other, "__pandas_priority__", None)
            if prio is not None:
                if prio > self.__pandas_priority__:
                    # e.g. other is DataFrame while self is Index/Series/EA
                    return NotImplemented

        other = item_from_zerodim(other)

        return method(self, other)

    return new_method


def get_op_result_name(left, right):
    """
    Find the appropriate name to pin to an operation result. This result
    should always be either an Index or a Series.

    Parameters
    ----------
    left : {Series, Index}
    right : object

    Returns
    -------
    name : object
        Usually a string
    """
    if isinstance(right, (ABCSeries, ABCIndex)):
        name = _maybe_match_name(left, right)
    else:
        name = left.name
    return name


def _maybe_match_name(a, b):
    """
    Try to find a name to attach to the result of an operation between
    a and b. If only one of these has a `name` attribute, return that
    name. Otherwise return a consensus name if they match or None if
    they have different names.

    Parameters
    ----------
    a : object
    b : object

    Returns
    -------
    name : str or None

    See Also
    --------
    pandas.core.common.consensus_name_attr
    """
    a_has = hasattr(a, "name")
    b_has = hasattr(b, "name")
    if a_has and b_has:
        try:
            if a.name == b.name:
                return a.name
            elif is_matching_na(a.name, b.name):
                # e.g. both are np.nan
                return a.name
            else:
                return None
        except TypeError:
            # pd.NA
            if is_matching_na(a.name, b.name):
                return a.name
            return None
        except ValueError:
            # e.g. np.int64(1) vs (np.int64(1), np.int64(2))
            return None
    elif a_has:
        return a.name
    elif b_has:
        return b.name
    return None
mgm/lib/python3.10/site-packages/pandas/core/ops/dispatch.py
ADDED
@@ -0,0 +1,30 @@
"""
Functions for defining unary operations.
"""
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
)

from pandas.core.dtypes.generic import ABCExtensionArray

if TYPE_CHECKING:
    from pandas._typing import ArrayLike


def should_extension_dispatch(left: ArrayLike, right: Any) -> bool:
    """
    Identify cases where Series operation should dispatch to ExtensionArray method.

    Parameters
    ----------
    left : np.ndarray or ExtensionArray
    right : object

    Returns
    -------
    bool
    """
    return isinstance(left, ABCExtensionArray) or isinstance(right, ABCExtensionArray)
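
A minimal sketch of the predicate, importing from the internal path as vendored in this tree (not public API):

import numpy as np

import pandas as pd
from pandas.core.ops.dispatch import should_extension_dispatch

left = np.array([1, 2, 3])
print(should_extension_dispatch(left, pd.array([1, 2, 3])))  # True: nullable Int64 is an ExtensionArray
print(should_extension_dispatch(left, np.array([4, 5, 6])))  # False: plain ndarrays on both sides
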
mgm/lib/python3.10/site-packages/pandas/core/ops/docstrings.py
ADDED
@@ -0,0 +1,772 @@
"""
Templating for ops docstrings
"""
from __future__ import annotations


def make_flex_doc(op_name: str, typ: str) -> str:
    """
    Make the appropriate substitutions for the given operation and class-typ
    into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
    to attach to a generated method.

    Parameters
    ----------
    op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
    typ : str {'series', 'dataframe'}

    Returns
    -------
    doc : str
    """
    op_name = op_name.replace("__", "")
    op_desc = _op_descriptions[op_name]

    op_desc_op = op_desc["op"]
    assert op_desc_op is not None  # for mypy
    if op_name.startswith("r"):
        equiv = f"other {op_desc_op} {typ}"
    elif op_name == "divmod":
        equiv = f"{op_name}({typ}, other)"
    else:
        equiv = f"{typ} {op_desc_op} other"

    if typ == "series":
        base_doc = _flex_doc_SERIES
        if op_desc["reverse"]:
            base_doc += _see_also_reverse_SERIES.format(
                reverse=op_desc["reverse"], see_also_desc=op_desc["see_also_desc"]
            )
        doc_no_examples = base_doc.format(
            desc=op_desc["desc"],
            op_name=op_name,
            equiv=equiv,
            series_returns=op_desc["series_returns"],
        )
        ser_example = op_desc["series_examples"]
        if ser_example:
            doc = doc_no_examples + ser_example
        else:
            doc = doc_no_examples
    elif typ == "dataframe":
        if op_name in ["eq", "ne", "le", "lt", "ge", "gt"]:
            base_doc = _flex_comp_doc_FRAME
            doc = _flex_comp_doc_FRAME.format(
                op_name=op_name,
                desc=op_desc["desc"],
            )
        else:
            base_doc = _flex_doc_FRAME
            doc = base_doc.format(
                desc=op_desc["desc"],
                op_name=op_name,
                equiv=equiv,
                reverse=op_desc["reverse"],
            )
    else:
        raise AssertionError("Invalid typ argument.")
    return doc


_common_examples_algebra_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a    1.0
b    1.0
c    1.0
d    NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a    1.0
b    NaN
d    1.0
e    NaN
dtype: float64"""

_common_examples_comparison_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan, 1], index=['a', 'b', 'c', 'd', 'e'])
>>> a
a    1.0
b    1.0
c    1.0
d    NaN
e    1.0
dtype: float64
>>> b = pd.Series([0, 1, 2, np.nan, 1], index=['a', 'b', 'c', 'd', 'f'])
>>> b
a    0.0
b    1.0
c    2.0
d    NaN
f    1.0
dtype: float64"""

_add_example_SERIES = (
    _common_examples_algebra_SERIES
    + """
>>> a.add(b, fill_value=0)
a    2.0
b    1.0
c    1.0
d    1.0
e    NaN
dtype: float64
"""
)

_sub_example_SERIES = (
    _common_examples_algebra_SERIES
    + """
>>> a.subtract(b, fill_value=0)
a    0.0
b    1.0
c    1.0
d   -1.0
e    NaN
dtype: float64
"""
)

_mul_example_SERIES = (
    _common_examples_algebra_SERIES
    + """
>>> a.multiply(b, fill_value=0)
a    1.0
b    0.0
c    0.0
d    0.0
e    NaN
dtype: float64
"""
)

_div_example_SERIES = (
    _common_examples_algebra_SERIES
    + """
>>> a.divide(b, fill_value=0)
a    1.0
b    inf
c    inf
d    0.0
e    NaN
dtype: float64
"""
)

_floordiv_example_SERIES = (
    _common_examples_algebra_SERIES
    + """
>>> a.floordiv(b, fill_value=0)
a    1.0
b    inf
c    inf
d    0.0
e    NaN
dtype: float64
"""
)

_divmod_example_SERIES = (
    _common_examples_algebra_SERIES
    + """
>>> a.divmod(b, fill_value=0)
(a    1.0
 b    inf
 c    inf
 d    0.0
 e    NaN
 dtype: float64,
 a    0.0
 b    NaN
 c    NaN
 d    0.0
 e    NaN
 dtype: float64)
"""
)

_mod_example_SERIES = (
    _common_examples_algebra_SERIES
    + """
>>> a.mod(b, fill_value=0)
a    0.0
b    NaN
c    NaN
d    0.0
e    NaN
dtype: float64
"""
)
_pow_example_SERIES = (
    _common_examples_algebra_SERIES
    + """
>>> a.pow(b, fill_value=0)
a    1.0
b    1.0
c    1.0
d    0.0
e    NaN
dtype: float64
"""
)

_ne_example_SERIES = (
    _common_examples_algebra_SERIES
    + """
>>> a.ne(b, fill_value=0)
a    False
b     True
c     True
d     True
e     True
dtype: bool
"""
)

_eq_example_SERIES = (
    _common_examples_algebra_SERIES
    + """
>>> a.eq(b, fill_value=0)
a     True
b    False
c    False
d    False
e    False
dtype: bool
"""
)

_lt_example_SERIES = (
    _common_examples_comparison_SERIES
    + """
>>> a.lt(b, fill_value=0)
a    False
b    False
c     True
d    False
e    False
f     True
dtype: bool
"""
)

_le_example_SERIES = (
    _common_examples_comparison_SERIES
    + """
>>> a.le(b, fill_value=0)
a    False
b     True
c     True
d    False
e    False
f     True
dtype: bool
"""
)

_gt_example_SERIES = (
    _common_examples_comparison_SERIES
    + """
>>> a.gt(b, fill_value=0)
a     True
b    False
c    False
d    False
e     True
f    False
dtype: bool
"""
)

_ge_example_SERIES = (
    _common_examples_comparison_SERIES
    + """
>>> a.ge(b, fill_value=0)
a     True
b     True
c    False
d    False
e     True
f    False
dtype: bool
"""
)

_returns_series = """Series\n    The result of the operation."""

_returns_tuple = """2-Tuple of Series\n    The result of the operation."""

_op_descriptions: dict[str, dict[str, str | None]] = {
    # Arithmetic Operators
    "add": {
        "op": "+",
        "desc": "Addition",
        "reverse": "radd",
        "series_examples": _add_example_SERIES,
        "series_returns": _returns_series,
    },
    "sub": {
        "op": "-",
        "desc": "Subtraction",
        "reverse": "rsub",
        "series_examples": _sub_example_SERIES,
        "series_returns": _returns_series,
    },
    "mul": {
        "op": "*",
        "desc": "Multiplication",
        "reverse": "rmul",
        "series_examples": _mul_example_SERIES,
        "series_returns": _returns_series,
        "df_examples": None,
    },
    "mod": {
        "op": "%",
        "desc": "Modulo",
        "reverse": "rmod",
        "series_examples": _mod_example_SERIES,
        "series_returns": _returns_series,
    },
    "pow": {
        "op": "**",
        "desc": "Exponential power",
        "reverse": "rpow",
        "series_examples": _pow_example_SERIES,
        "series_returns": _returns_series,
        "df_examples": None,
    },
    "truediv": {
        "op": "/",
        "desc": "Floating division",
        "reverse": "rtruediv",
        "series_examples": _div_example_SERIES,
        "series_returns": _returns_series,
        "df_examples": None,
    },
    "floordiv": {
        "op": "//",
        "desc": "Integer division",
        "reverse": "rfloordiv",
        "series_examples": _floordiv_example_SERIES,
        "series_returns": _returns_series,
        "df_examples": None,
    },
    "divmod": {
        "op": "divmod",
        "desc": "Integer division and modulo",
        "reverse": "rdivmod",
        "series_examples": _divmod_example_SERIES,
        "series_returns": _returns_tuple,
        "df_examples": None,
    },
    # Comparison Operators
    "eq": {
        "op": "==",
        "desc": "Equal to",
        "reverse": None,
        "series_examples": _eq_example_SERIES,
        "series_returns": _returns_series,
    },
    "ne": {
        "op": "!=",
        "desc": "Not equal to",
        "reverse": None,
        "series_examples": _ne_example_SERIES,
        "series_returns": _returns_series,
    },
    "lt": {
        "op": "<",
        "desc": "Less than",
        "reverse": None,
        "series_examples": _lt_example_SERIES,
        "series_returns": _returns_series,
    },
    "le": {
        "op": "<=",
        "desc": "Less than or equal to",
        "reverse": None,
        "series_examples": _le_example_SERIES,
        "series_returns": _returns_series,
    },
    "gt": {
        "op": ">",
        "desc": "Greater than",
        "reverse": None,
        "series_examples": _gt_example_SERIES,
        "series_returns": _returns_series,
    },
    "ge": {
        "op": ">=",
        "desc": "Greater than or equal to",
        "reverse": None,
        "series_examples": _ge_example_SERIES,
        "series_returns": _returns_series,
    },
}

_py_num_ref = """see
`Python documentation
<https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_
for more details"""
_op_names = list(_op_descriptions.keys())
for key in _op_names:
    reverse_op = _op_descriptions[key]["reverse"]
    if reverse_op is not None:
        _op_descriptions[reverse_op] = _op_descriptions[key].copy()
        _op_descriptions[reverse_op]["reverse"] = key
        _op_descriptions[key][
            "see_also_desc"
        ] = f"Reverse of the {_op_descriptions[key]['desc']} operator, {_py_num_ref}"
        _op_descriptions[reverse_op][
            "see_also_desc"
        ] = f"Element-wise {_op_descriptions[key]['desc']}, {_py_num_ref}"

_flex_doc_SERIES = """
|
| 430 |
+
Return {desc} of series and other, element-wise (binary operator `{op_name}`).
|
| 431 |
+
|
| 432 |
+
Equivalent to ``{equiv}``, but with support to substitute a fill_value for
|
| 433 |
+
missing data in either one of the inputs.
|
| 434 |
+
|
| 435 |
+
Parameters
|
| 436 |
+
----------
|
| 437 |
+
other : Series or scalar value
|
| 438 |
+
level : int or name
|
| 439 |
+
Broadcast across a level, matching Index values on the
|
| 440 |
+
passed MultiIndex level.
|
| 441 |
+
fill_value : None or float value, default None (NaN)
|
| 442 |
+
Fill existing missing (NaN) values, and any new element needed for
|
| 443 |
+
successful Series alignment, with this value before computation.
|
| 444 |
+
If data in both corresponding Series locations is missing
|
| 445 |
+
the result of filling (at that location) will be missing.
|
| 446 |
+
axis : {{0 or 'index'}}
|
| 447 |
+
Unused. Parameter needed for compatibility with DataFrame.
|
| 448 |
+
|
| 449 |
+
Returns
|
| 450 |
+
-------
|
| 451 |
+
{series_returns}
|
| 452 |
+
"""
|
| 453 |
+
|
| 454 |
+
_see_also_reverse_SERIES = """
|
| 455 |
+
See Also
|
| 456 |
+
--------
|
| 457 |
+
Series.{reverse} : {see_also_desc}.
|
| 458 |
+
"""
|
| 459 |
+
|
| 460 |
+
_flex_doc_FRAME = """
|
| 461 |
+
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
|
| 462 |
+
|
| 463 |
+
Equivalent to ``{equiv}``, but with support to substitute a fill_value
|
| 464 |
+
for missing data in one of the inputs. With reverse version, `{reverse}`.
|
| 465 |
+
|
| 466 |
+
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `floordiv`, `mod`, `pow`) to
|
| 467 |
+
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
|
| 468 |
+
|
| 469 |
+
Parameters
|
| 470 |
+
----------
|
| 471 |
+
other : scalar, sequence, Series, dict or DataFrame
|
| 472 |
+
Any single or multiple element data structure, or list-like object.
|
| 473 |
+
axis : {{0 or 'index', 1 or 'columns'}}
|
| 474 |
+
Whether to compare by the index (0 or 'index') or columns.
|
| 475 |
+
(1 or 'columns'). For Series input, axis to match Series index on.
|
| 476 |
+
level : int or label
|
| 477 |
+
Broadcast across a level, matching Index values on the
|
| 478 |
+
passed MultiIndex level.
|
| 479 |
+
fill_value : float or None, default None
|
| 480 |
+
Fill existing missing (NaN) values, and any new element needed for
|
| 481 |
+
successful DataFrame alignment, with this value before computation.
|
| 482 |
+
If data in both corresponding DataFrame locations is missing
|
| 483 |
+
the result will be missing.
|
| 484 |
+
|
| 485 |
+
Returns
|
| 486 |
+
-------
|
| 487 |
+
DataFrame
|
| 488 |
+
Result of the arithmetic operation.
|
| 489 |
+
|
| 490 |
+
See Also
|
| 491 |
+
--------
|
| 492 |
+
DataFrame.add : Add DataFrames.
|
| 493 |
+
DataFrame.sub : Subtract DataFrames.
|
| 494 |
+
DataFrame.mul : Multiply DataFrames.
|
| 495 |
+
DataFrame.div : Divide DataFrames (float division).
|
| 496 |
+
DataFrame.truediv : Divide DataFrames (float division).
|
| 497 |
+
DataFrame.floordiv : Divide DataFrames (integer division).
|
| 498 |
+
DataFrame.mod : Calculate modulo (remainder after division).
|
| 499 |
+
DataFrame.pow : Calculate exponential power.
|
| 500 |
+
|
| 501 |
+
Notes
|
| 502 |
+
-----
|
| 503 |
+
Mismatched indices will be unioned together.
|
| 504 |
+
|
| 505 |
+
Examples
|
| 506 |
+
--------
|
| 507 |
+
>>> df = pd.DataFrame({{'angles': [0, 3, 4],
|
| 508 |
+
... 'degrees': [360, 180, 360]}},
|
| 509 |
+
... index=['circle', 'triangle', 'rectangle'])
|
| 510 |
+
>>> df
|
| 511 |
+
angles degrees
|
| 512 |
+
circle 0 360
|
| 513 |
+
triangle 3 180
|
| 514 |
+
rectangle 4 360
|
| 515 |
+
|
| 516 |
+
Add a scalar with operator version which return the same
|
| 517 |
+
results.
|
| 518 |
+
|
| 519 |
+
>>> df + 1
|
| 520 |
+
angles degrees
|
| 521 |
+
circle 1 361
|
| 522 |
+
triangle 4 181
|
| 523 |
+
rectangle 5 361
|
| 524 |
+
|
| 525 |
+
>>> df.add(1)
|
| 526 |
+
angles degrees
|
| 527 |
+
circle 1 361
|
| 528 |
+
triangle 4 181
|
| 529 |
+
rectangle 5 361
|
| 530 |
+
|
| 531 |
+
Divide by constant with reverse version.
|
| 532 |
+
|
| 533 |
+
>>> df.div(10)
|
| 534 |
+
angles degrees
|
| 535 |
+
circle 0.0 36.0
|
| 536 |
+
triangle 0.3 18.0
|
| 537 |
+
rectangle 0.4 36.0
|
| 538 |
+
|
| 539 |
+
>>> df.rdiv(10)
|
| 540 |
+
angles degrees
|
| 541 |
+
circle inf 0.027778
|
| 542 |
+
triangle 3.333333 0.055556
|
| 543 |
+
rectangle 2.500000 0.027778
|
| 544 |
+
|
| 545 |
+
Subtract a list and Series by axis with operator version.
|
| 546 |
+
|
| 547 |
+
>>> df - [1, 2]
|
| 548 |
+
angles degrees
|
| 549 |
+
circle -1 358
|
| 550 |
+
triangle 2 178
|
| 551 |
+
rectangle 3 358
|
| 552 |
+
|
| 553 |
+
>>> df.sub([1, 2], axis='columns')
|
| 554 |
+
angles degrees
|
| 555 |
+
circle -1 358
|
| 556 |
+
triangle 2 178
|
| 557 |
+
rectangle 3 358
|
| 558 |
+
|
| 559 |
+
>>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']),
|
| 560 |
+
... axis='index')
|
| 561 |
+
angles degrees
|
| 562 |
+
circle -1 359
|
| 563 |
+
triangle 2 179
|
| 564 |
+
rectangle 3 359
|
| 565 |
+
|
| 566 |
+
Multiply a dictionary by axis.
|
| 567 |
+
|
| 568 |
+
>>> df.mul({{'angles': 0, 'degrees': 2}})
|
| 569 |
+
angles degrees
|
| 570 |
+
circle 0 720
|
| 571 |
+
triangle 0 360
|
| 572 |
+
rectangle 0 720
|
| 573 |
+
|
| 574 |
+
>>> df.mul({{'circle': 0, 'triangle': 2, 'rectangle': 3}}, axis='index')
|
| 575 |
+
angles degrees
|
| 576 |
+
circle 0 0
|
| 577 |
+
triangle 6 360
|
| 578 |
+
rectangle 12 1080
|
| 579 |
+
|
| 580 |
+
Multiply a DataFrame of different shape with operator version.
|
| 581 |
+
|
| 582 |
+
>>> other = pd.DataFrame({{'angles': [0, 3, 4]}},
|
| 583 |
+
... index=['circle', 'triangle', 'rectangle'])
|
| 584 |
+
>>> other
|
| 585 |
+
angles
|
| 586 |
+
circle 0
|
| 587 |
+
triangle 3
|
| 588 |
+
rectangle 4
|
| 589 |
+
|
| 590 |
+
>>> df * other
|
| 591 |
+
angles degrees
|
| 592 |
+
circle 0 NaN
|
| 593 |
+
triangle 9 NaN
|
| 594 |
+
rectangle 16 NaN
|
| 595 |
+
|
| 596 |
+
>>> df.mul(other, fill_value=0)
|
| 597 |
+
angles degrees
|
| 598 |
+
circle 0 0.0
|
| 599 |
+
triangle 9 0.0
|
| 600 |
+
rectangle 16 0.0
|
| 601 |
+
|
| 602 |
+
Divide by a MultiIndex by level.
|
| 603 |
+
|
| 604 |
+
>>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6],
|
| 605 |
+
... 'degrees': [360, 180, 360, 360, 540, 720]}},
|
| 606 |
+
... index=[['A', 'A', 'A', 'B', 'B', 'B'],
|
| 607 |
+
... ['circle', 'triangle', 'rectangle',
|
| 608 |
+
... 'square', 'pentagon', 'hexagon']])
|
| 609 |
+
>>> df_multindex
|
| 610 |
+
angles degrees
|
| 611 |
+
A circle 0 360
|
| 612 |
+
triangle 3 180
|
| 613 |
+
rectangle 4 360
|
| 614 |
+
B square 4 360
|
| 615 |
+
pentagon 5 540
|
| 616 |
+
hexagon 6 720
|
| 617 |
+
|
| 618 |
+
>>> df.div(df_multindex, level=1, fill_value=0)
|
| 619 |
+
angles degrees
|
| 620 |
+
A circle NaN 1.0
|
| 621 |
+
triangle 1.0 1.0
|
| 622 |
+
rectangle 1.0 1.0
|
| 623 |
+
B square 0.0 0.0
|
| 624 |
+
pentagon 0.0 0.0
|
| 625 |
+
hexagon 0.0 0.0
|
| 626 |
+
"""
|
| 627 |
+
|
| 628 |
+
_flex_comp_doc_FRAME = """
|
| 629 |
+
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
|
| 630 |
+
|
| 631 |
+
Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison
|
| 632 |
+
operators.
|
| 633 |
+
|
| 634 |
+
Equivalent to `==`, `!=`, `<=`, `<`, `>=`, `>` with support to choose axis
|
| 635 |
+
(rows or columns) and level for comparison.
|
| 636 |
+
|
| 637 |
+
Parameters
|
| 638 |
+
----------
|
| 639 |
+
other : scalar, sequence, Series, or DataFrame
|
| 640 |
+
Any single or multiple element data structure, or list-like object.
|
| 641 |
+
axis : {{0 or 'index', 1 or 'columns'}}, default 'columns'
|
| 642 |
+
Whether to compare by the index (0 or 'index') or columns
|
| 643 |
+
(1 or 'columns').
|
| 644 |
+
level : int or label
|
| 645 |
+
Broadcast across a level, matching Index values on the passed
|
| 646 |
+
MultiIndex level.
|
| 647 |
+
|
| 648 |
+
Returns
|
| 649 |
+
-------
|
| 650 |
+
DataFrame of bool
|
| 651 |
+
Result of the comparison.
|
| 652 |
+
|
| 653 |
+
See Also
|
| 654 |
+
--------
|
| 655 |
+
DataFrame.eq : Compare DataFrames for equality elementwise.
|
| 656 |
+
DataFrame.ne : Compare DataFrames for inequality elementwise.
|
| 657 |
+
DataFrame.le : Compare DataFrames for less than inequality
|
| 658 |
+
or equality elementwise.
|
| 659 |
+
DataFrame.lt : Compare DataFrames for strictly less than
|
| 660 |
+
inequality elementwise.
|
| 661 |
+
DataFrame.ge : Compare DataFrames for greater than inequality
|
| 662 |
+
or equality elementwise.
|
| 663 |
+
DataFrame.gt : Compare DataFrames for strictly greater than
|
| 664 |
+
inequality elementwise.
|
| 665 |
+
|
| 666 |
+
Notes
|
| 667 |
+
-----
|
| 668 |
+
Mismatched indices will be unioned together.
|
| 669 |
+
`NaN` values are considered different (i.e. `NaN` != `NaN`).
|
| 670 |
+
|
| 671 |
+
Examples
|
| 672 |
+
--------
|
| 673 |
+
>>> df = pd.DataFrame({{'cost': [250, 150, 100],
|
| 674 |
+
... 'revenue': [100, 250, 300]}},
|
| 675 |
+
... index=['A', 'B', 'C'])
|
| 676 |
+
>>> df
|
| 677 |
+
cost revenue
|
| 678 |
+
A 250 100
|
| 679 |
+
B 150 250
|
| 680 |
+
C 100 300
|
| 681 |
+
|
| 682 |
+
Comparison with a scalar, using either the operator or method:
|
| 683 |
+
|
| 684 |
+
>>> df == 100
|
| 685 |
+
cost revenue
|
| 686 |
+
A False True
|
| 687 |
+
B False False
|
| 688 |
+
C True False
|
| 689 |
+
|
| 690 |
+
>>> df.eq(100)
|
| 691 |
+
cost revenue
|
| 692 |
+
A False True
|
| 693 |
+
B False False
|
| 694 |
+
C True False
|
| 695 |
+
|
| 696 |
+
When `other` is a :class:`Series`, the columns of a DataFrame are aligned
|
| 697 |
+
with the index of `other` and broadcast:
|
| 698 |
+
|
| 699 |
+
>>> df != pd.Series([100, 250], index=["cost", "revenue"])
|
| 700 |
+
cost revenue
|
| 701 |
+
A True True
|
| 702 |
+
B True False
|
| 703 |
+
C False True
|
| 704 |
+
|
| 705 |
+
Use the method to control the broadcast axis:
|
| 706 |
+
|
| 707 |
+
>>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis='index')
|
| 708 |
+
cost revenue
|
| 709 |
+
A True False
|
| 710 |
+
B True True
|
| 711 |
+
C True True
|
| 712 |
+
D True True
|
| 713 |
+
|
| 714 |
+
When comparing to an arbitrary sequence, the number of columns must
|
| 715 |
+
match the number elements in `other`:
|
| 716 |
+
|
| 717 |
+
>>> df == [250, 100]
|
| 718 |
+
cost revenue
|
| 719 |
+
A True True
|
| 720 |
+
B False False
|
| 721 |
+
C False False
|
| 722 |
+
|
| 723 |
+
Use the method to control the axis:
|
| 724 |
+
|
| 725 |
+
>>> df.eq([250, 250, 100], axis='index')
|
| 726 |
+
cost revenue
|
| 727 |
+
A True False
|
| 728 |
+
B False True
|
| 729 |
+
C True False
|
| 730 |
+
|
| 731 |
+
Compare to a DataFrame of different shape.
|
| 732 |
+
|
| 733 |
+
>>> other = pd.DataFrame({{'revenue': [300, 250, 100, 150]}},
|
| 734 |
+
... index=['A', 'B', 'C', 'D'])
|
| 735 |
+
>>> other
|
| 736 |
+
revenue
|
| 737 |
+
A 300
|
| 738 |
+
B 250
|
| 739 |
+
C 100
|
| 740 |
+
D 150
|
| 741 |
+
|
| 742 |
+
>>> df.gt(other)
|
| 743 |
+
cost revenue
|
| 744 |
+
A False False
|
| 745 |
+
B False False
|
| 746 |
+
C False True
|
| 747 |
+
D False False
|
| 748 |
+
|
| 749 |
+
Compare to a MultiIndex by level.
|
| 750 |
+
|
| 751 |
+
>>> df_multindex = pd.DataFrame({{'cost': [250, 150, 100, 150, 300, 220],
|
| 752 |
+
... 'revenue': [100, 250, 300, 200, 175, 225]}},
|
| 753 |
+
... index=[['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2'],
|
| 754 |
+
... ['A', 'B', 'C', 'A', 'B', 'C']])
|
| 755 |
+
>>> df_multindex
|
| 756 |
+
cost revenue
|
| 757 |
+
Q1 A 250 100
|
| 758 |
+
B 150 250
|
| 759 |
+
C 100 300
|
| 760 |
+
Q2 A 150 200
|
| 761 |
+
B 300 175
|
| 762 |
+
C 220 225
|
| 763 |
+
|
| 764 |
+
>>> df.le(df_multindex, level=1)
|
| 765 |
+
cost revenue
|
| 766 |
+
Q1 A True True
|
| 767 |
+
B True True
|
| 768 |
+
C True True
|
| 769 |
+
Q2 A False True
|
| 770 |
+
B True False
|
| 771 |
+
C True False
|
| 772 |
+
"""
|
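
A minimal sketch of how the templating is consumed (again via the internal, non-public path as vendored here):

from pandas.core.ops.docstrings import make_flex_doc

doc = make_flex_doc("__radd__", "series")
print(doc.splitlines()[1])
# Return Addition of series and other, element-wise (binary operator `radd`).
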
mgm/lib/python3.10/site-packages/pandas/core/ops/invalid.py
ADDED
@@ -0,0 +1,62 @@
"""
Templates for invalid operations.
"""
from __future__ import annotations

import operator
from typing import TYPE_CHECKING

import numpy as np

if TYPE_CHECKING:
    from pandas._typing import npt


def invalid_comparison(left, right, op) -> npt.NDArray[np.bool_]:
    """
    If a comparison has mismatched types and is not necessarily meaningful,
    follow python3 conventions by:

        - returning all-False for equality
        - returning all-True for inequality
        - raising TypeError otherwise

    Parameters
    ----------
    left : array-like
    right : scalar, array-like
    op : operator.{eq, ne, lt, le, gt}

    Raises
    ------
    TypeError : on inequality comparisons
    """
    if op is operator.eq:
        res_values = np.zeros(left.shape, dtype=bool)
    elif op is operator.ne:
        res_values = np.ones(left.shape, dtype=bool)
    else:
        typ = type(right).__name__
        raise TypeError(f"Invalid comparison between dtype={left.dtype} and {typ}")
    return res_values


def make_invalid_op(name: str):
    """
    Return a binary method that always raises a TypeError.

    Parameters
    ----------
    name : str

    Returns
    -------
    invalid_op : function
    """

    def invalid_op(self, other=None):
        typ = type(self).__name__
        raise TypeError(f"cannot perform {name} with this index type: {typ}")

    invalid_op.__name__ = name
    return invalid_op
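
A minimal sketch of the python3 comparison conventions encoded above (internal, non-public import path as vendored here):

import operator

import numpy as np
from pandas.core.ops.invalid import invalid_comparison

arr = np.arange(3)
print(invalid_comparison(arr, "foo", operator.eq))  # [False False False]
print(invalid_comparison(arr, "foo", operator.ne))  # [ True  True  True]
# Ordered comparisons (operator.lt/le/gt/ge) raise TypeError instead.
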
mgm/lib/python3.10/site-packages/pandas/core/ops/mask_ops.py
ADDED
@@ -0,0 +1,189 @@
"""
Ops for masked arrays.
"""
from __future__ import annotations

import numpy as np

from pandas._libs import (
    lib,
    missing as libmissing,
)


def kleene_or(
    left: bool | np.ndarray | libmissing.NAType,
    right: bool | np.ndarray | libmissing.NAType,
    left_mask: np.ndarray | None,
    right_mask: np.ndarray | None,
):
    """
    Boolean ``or`` using Kleene logic.

    Values are NA where we have ``NA | NA`` or ``NA | False``.
    ``NA | True`` is considered True.

    Parameters
    ----------
    left, right : ndarray, NA, or bool
        The values of the array.
    left_mask, right_mask : ndarray, optional
        The masks. Only one of these may be None, which implies that
        the associated `left` or `right` value is a scalar.

    Returns
    -------
    result, mask: ndarray[bool]
        The result of the logical or, and the new mask.
    """
    # To reduce the number of cases, we ensure that `left` & `left_mask`
    # always come from an array, not a scalar. This is safe, since
    # A | B == B | A
    if left_mask is None:
        return kleene_or(right, left, right_mask, left_mask)

    if not isinstance(left, np.ndarray):
        raise TypeError("Either `left` or `right` need to be a np.ndarray.")

    raise_for_nan(right, method="or")

    if right is libmissing.NA:
        result = left.copy()
    else:
        result = left | right

    if right_mask is not None:
        # output is unknown where (False & NA), (NA & False), (NA & NA)
        left_false = ~(left | left_mask)
        right_false = ~(right | right_mask)
        mask = (
            (left_false & right_mask)
            | (right_false & left_mask)
            | (left_mask & right_mask)
        )
    else:
        if right is True:
            mask = np.zeros_like(left_mask)
        elif right is libmissing.NA:
            mask = (~left & ~left_mask) | left_mask
        else:
            # False
            mask = left_mask.copy()

    return result, mask


def kleene_xor(
    left: bool | np.ndarray | libmissing.NAType,
    right: bool | np.ndarray | libmissing.NAType,
    left_mask: np.ndarray | None,
    right_mask: np.ndarray | None,
):
    """
    Boolean ``xor`` using Kleene logic.

    This is the same as ``or``, with the following adjustments

    * True, True -> False
    * True, NA -> NA

    Parameters
    ----------
    left, right : ndarray, NA, or bool
        The values of the array.
    left_mask, right_mask : ndarray, optional
        The masks. Only one of these may be None, which implies that
        the associated `left` or `right` value is a scalar.

    Returns
    -------
    result, mask: ndarray[bool]
        The result of the logical xor, and the new mask.
    """
    # To reduce the number of cases, we ensure that `left` & `left_mask`
    # always come from an array, not a scalar. This is safe, since
    # A ^ B == B ^ A
    if left_mask is None:
        return kleene_xor(right, left, right_mask, left_mask)

    if not isinstance(left, np.ndarray):
        raise TypeError("Either `left` or `right` need to be a np.ndarray.")

    raise_for_nan(right, method="xor")
    if right is libmissing.NA:
        result = np.zeros_like(left)
    else:
        result = left ^ right

    if right_mask is None:
        if right is libmissing.NA:
            mask = np.ones_like(left_mask)
        else:
            mask = left_mask.copy()
    else:
        mask = left_mask | right_mask

    return result, mask


def kleene_and(
    left: bool | libmissing.NAType | np.ndarray,
    right: bool | libmissing.NAType | np.ndarray,
    left_mask: np.ndarray | None,
    right_mask: np.ndarray | None,
):
    """
    Boolean ``and`` using Kleene logic.

    Values are ``NA`` for ``NA & NA`` or ``True & NA``.

    Parameters
    ----------
    left, right : ndarray, NA, or bool
        The values of the array.
    left_mask, right_mask : ndarray, optional
        The masks. Only one of these may be None, which implies that
        the associated `left` or `right` value is a scalar.

    Returns
    -------
    result, mask: ndarray[bool]
        The result of the logical and, and the new mask.
    """
    # To reduce the number of cases, we ensure that `left` & `left_mask`
    # always come from an array, not a scalar. This is safe, since
    # A & B == B & A
    if left_mask is None:
        return kleene_and(right, left, right_mask, left_mask)

    if not isinstance(left, np.ndarray):
        raise TypeError("Either `left` or `right` need to be a np.ndarray.")
    raise_for_nan(right, method="and")

    if right is libmissing.NA:
        result = np.zeros_like(left)
    else:
        result = left & right

    if right_mask is None:
        # Scalar `right`
        if right is libmissing.NA:
            mask = (left & ~left_mask) | left_mask

        else:
            mask = left_mask.copy()
            if right is False:
                # unmask everything
                mask[:] = False
    else:
        # unmask where either left or right is False
        left_false = ~(left | left_mask)
        right_false = ~(right | right_mask)
        mask = (left_mask & ~right_false) | (right_mask & ~left_false)

    return result, mask


def raise_for_nan(value, method: str) -> None:
    if lib.is_float(value) and np.isnan(value):
        raise ValueError(f"Cannot perform logical '{method}' with floating NaN")
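
The Kleene rules above are what the nullable "boolean" dtype exposes publicly; a minimal sketch:

import pandas as pd

s = pd.Series([True, False, None], dtype="boolean")
print((s | True).tolist())   # [True, True, True]    NA | True is True
print((s & False).tolist())  # [False, False, False] NA & False is False
print((s ^ True).tolist())   # [False, True, <NA>]   NA ^ anything stays NA
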
mgm/lib/python3.10/site-packages/pandas/core/ops/missing.py
ADDED
@@ -0,0 +1,176 @@
"""
Missing data handling for arithmetic operations.

In particular, pandas conventions regarding division by zero differ
from numpy in the following ways:
    1) np.array([-1, 0, 1], dtype=dtype1) // np.array([0, 0, 0], dtype=dtype2)
       gives [nan, nan, nan] for most dtype combinations, and [0, 0, 0] for
       the remaining pairs
       (the remaining being dtype1==dtype2==intN and dtype1==dtype2==uintN).

       pandas convention is to return [-inf, nan, inf] for all dtype
       combinations.

       Note: the numpy behavior described here is py3-specific.

    2) np.array([-1, 0, 1], dtype=dtype1) % np.array([0, 0, 0], dtype=dtype2)
       gives precisely the same results as the // operation.

       pandas convention is to return [nan, nan, nan] for all dtype
       combinations.

    3) divmod behavior consistent with 1) and 2).
"""
from __future__ import annotations

import operator

import numpy as np

from pandas.core import roperator


def _fill_zeros(result: np.ndarray, x, y):
    """
    If this is a reversed op, then flip x,y

    If we have an integer value (or array in y)
    and we have 0's, fill them with np.nan,
    return the result.

    Mask the nan's from x.
    """
    if result.dtype.kind == "f":
        return result

    is_variable_type = hasattr(y, "dtype")
    is_scalar_type = not isinstance(y, np.ndarray)

    if not is_variable_type and not is_scalar_type:
        # e.g. test_series_ops_name_retention with mod we get here with list/tuple
        return result

    if is_scalar_type:
        y = np.array(y)

    if y.dtype.kind in "iu":
        ymask = y == 0
        if ymask.any():
            # GH#7325, mask and nans must be broadcastable
            mask = ymask & ~np.isnan(result)

            # GH#9308 doing ravel on result and mask can improve putmask perf,
            # but can also make unwanted copies.
            result = result.astype("float64", copy=False)

            np.putmask(result, mask, np.nan)

    return result


def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray:
    """
    Set results of 0 // 0 to np.nan, regardless of the dtypes
    of the numerator or the denominator.

    Parameters
    ----------
    x : ndarray
    y : ndarray
    result : ndarray

    Returns
    -------
    ndarray
        The filled result.

    Examples
    --------
    >>> x = np.array([1, 0, -1], dtype=np.int64)
    >>> x
    array([ 1,  0, -1])
    >>> y = 0  # int 0; numpy behavior is different with float
    >>> result = x // y
    >>> result  # raw numpy result does not fill division by zero
    array([0, 0, 0])
    >>> mask_zero_div_zero(x, y, result)
    array([ inf,  nan, -inf])
    """

    if not hasattr(y, "dtype"):
        # e.g. scalar, tuple
        y = np.array(y)
    if not hasattr(x, "dtype"):
        # e.g. scalar, tuple
        x = np.array(x)

    zmask = y == 0

    if zmask.any():
        # Flip sign if necessary for -0.0
        zneg_mask = zmask & np.signbit(y)
        zpos_mask = zmask & ~zneg_mask

        x_lt0 = x < 0
        x_gt0 = x > 0
        nan_mask = zmask & (x == 0)
        neginf_mask = (zpos_mask & x_lt0) | (zneg_mask & x_gt0)
        posinf_mask = (zpos_mask & x_gt0) | (zneg_mask & x_lt0)

        if nan_mask.any() or neginf_mask.any() or posinf_mask.any():
            # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN
            result = result.astype("float64", copy=False)

            result[nan_mask] = np.nan
            result[posinf_mask] = np.inf
            result[neginf_mask] = -np.inf

    return result


def dispatch_fill_zeros(op, left, right, result):
    """
    Call _fill_zeros with the appropriate fill value depending on the operation,
    with special logic for divmod and rdivmod.

    Parameters
    ----------
    op : function (operator.add, operator.div, ...)
    left : object (np.ndarray for non-reversed ops)
        We have excluded ExtensionArrays here
    right : object (np.ndarray for reversed ops)
        We have excluded ExtensionArrays here
    result : ndarray

    Returns
    -------
    result : np.ndarray

    Notes
    -----
    For divmod and rdivmod, the `result` parameter and returned `result`
    is a 2-tuple of ndarray objects.
    """
    if op is divmod:
        result = (
            mask_zero_div_zero(left, right, result[0]),
            _fill_zeros(result[1], left, right),
        )
    elif op is roperator.rdivmod:
        result = (
            mask_zero_div_zero(right, left, result[0]),
            _fill_zeros(result[1], right, left),
        )
    elif op is operator.floordiv:
        # Note: no need to do this for truediv; in py3 numpy behaves the way
        # we want.
        result = mask_zero_div_zero(left, right, result)
    elif op is roperator.rfloordiv:
        # Note: no need to do this for rtruediv; in py3 numpy behaves the way
        # we want.
        result = mask_zero_div_zero(right, left, result)
    elif op is operator.mod:
        result = _fill_zeros(result, left, right)
    elif op is roperator.rmod:
        result = _fill_zeros(result, right, left)
    return result
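
The division-by-zero conventions described in the module docstring are visible through ordinary Series arithmetic; a minimal sketch:

import pandas as pd

s = pd.Series([-1, 0, 1])
print((s // 0).tolist())  # [-inf, nan, inf] -- raw numpy integer floordiv gives [0, 0, 0]
print((s % 0).tolist())   # [nan, nan, nan]
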
mgm/lib/python3.10/site-packages/pandas/core/reshape/__init__.py
ADDED
File without changes

mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (168 Bytes)

mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/api.cpython-310.pyc
ADDED
Binary file (756 Bytes)

mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/concat.cpython-310.pyc
ADDED
Binary file (21.5 kB)

mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/encoding.cpython-310.pyc
ADDED
Binary file (14.6 kB)

mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/melt.cpython-310.pyc
ADDED
Binary file (16.5 kB)

mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/merge.cpython-310.pyc
ADDED
Binary file (61.3 kB)

mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/pivot.cpython-310.pyc
ADDED
Binary file (19.7 kB)

mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/reshape.cpython-310.pyc
ADDED
Binary file (24.4 kB)

mgm/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/tile.cpython-310.pyc
ADDED
Binary file (18.3 kB)