Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- parrot/lib/ossl-modules/legacy.so +3 -0
- parrot/share/terminfo/g/glasstty +0 -0
- parrot/share/terminfo/g/gnome-rh72 +0 -0
- parrot/share/terminfo/g/go225 +0 -0
- parrot/share/terminfo/g/graphos +0 -0
- parrot/share/terminfo/g/graphos-30 +0 -0
- parrot/share/terminfo/g/gs5430-22 +0 -0
- parrot/share/terminfo/g/gt40 +0 -0
- parrot/share/terminfo/g/guru+unk +0 -0
- parrot/share/terminfo/g/guru-76-w-s +0 -0
- parrot/share/terminfo/w/wsvt25 +0 -0
- parrot/share/terminfo/w/wy-99fgta +0 -0
- parrot/share/terminfo/w/wy100q +0 -0
- parrot/share/terminfo/w/wy160-43 +0 -0
- parrot/share/terminfo/w/wy160-43-w +0 -0
- parrot/share/terminfo/w/wy325-25w +0 -0
- parrot/share/terminfo/w/wy325-42 +0 -0
- parrot/share/terminfo/w/wy325-wvb +0 -0
- parrot/share/terminfo/w/wy520-48 +0 -0
- parrot/share/terminfo/w/wy520-48wpc +0 -0
- parrot/share/terminfo/w/wy520-wvb +0 -0
- parrot/share/terminfo/w/wy60-25-w +0 -0
- parrot/share/terminfo/w/wy60-PC +0 -0
- parrot/share/terminfo/w/wy99a-ansi +0 -0
- parrot/share/terminfo/w/wy99gt-wvb +0 -0
- parrot/share/terminfo/w/wyse-75ap +0 -0
- parrot/share/terminfo/w/wyse150-25-w +0 -0
- parrot/share/terminfo/w/wyse150-vb +0 -0
- parrot/share/terminfo/w/wyse150-w +0 -0
- parrot/share/terminfo/w/wyse160-43 +0 -0
- parrot/share/terminfo/w/wyse185-24 +0 -0
- parrot/share/terminfo/w/wyse325-w +0 -0
- parrot/share/terminfo/w/wyse50-vb +0 -0
- parrot/share/terminfo/w/wyse50-wvb +0 -0
- parrot/share/terminfo/w/wyse520-vb +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/apply.py +2062 -0
- videollama2/lib/python3.10/site-packages/pandas/core/array_algos/__init__.py +9 -0
- videollama2/lib/python3.10/site-packages/pandas/core/array_algos/datetimelike_accumulations.py +67 -0
- videollama2/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py +90 -0
- videollama2/lib/python3.10/site-packages/pandas/core/array_algos/putmask.py +149 -0
- videollama2/lib/python3.10/site-packages/pandas/core/array_algos/quantile.py +226 -0
- videollama2/lib/python3.10/site-packages/pandas/core/array_algos/replace.py +152 -0
- videollama2/lib/python3.10/site-packages/pandas/core/array_algos/take.py +594 -0
- videollama2/lib/python3.10/site-packages/pandas/core/array_algos/transforms.py +50 -0
- videollama2/lib/python3.10/site-packages/pandas/core/arraylike.py +530 -0
- videollama2/lib/python3.10/site-packages/pandas/core/base.py +1391 -0
- videollama2/lib/python3.10/site-packages/pandas/core/common.py +657 -0
- videollama2/lib/python3.10/site-packages/pandas/core/config_init.py +924 -0
- videollama2/lib/python3.10/site-packages/pandas/core/construction.py +824 -0
.gitattributes
CHANGED
|
@@ -973,3 +973,4 @@ videollama2/lib/python3.10/site-packages/fontTools/feaLib/lexer.cpython-310-x86_
|
|
| 973 |
videollama2/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 974 |
videollama2/lib/python3.10/site-packages/altair/vegalite/v5/__pycache__/api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 975 |
parrot/lib/python3.10/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansMono-Bold.ttf filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 973 |
videollama2/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 974 |
videollama2/lib/python3.10/site-packages/altair/vegalite/v5/__pycache__/api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 975 |
parrot/lib/python3.10/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansMono-Bold.ttf filter=lfs diff=lfs merge=lfs -text
|
| 976 |
+
parrot/lib/ossl-modules/legacy.so filter=lfs diff=lfs merge=lfs -text
|
parrot/lib/ossl-modules/legacy.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b8b8163a4a4cea7693a8d64c064788cc830bfb72e5aae56e9432fee32f5f45a0
|
| 3 |
+
size 158976
|
parrot/share/terminfo/g/glasstty
ADDED
|
Binary file (407 Bytes). View file
|
|
|
parrot/share/terminfo/g/gnome-rh72
ADDED
|
Binary file (1.62 kB). View file
|
|
|
parrot/share/terminfo/g/go225
ADDED
|
Binary file (950 Bytes). View file
|
|
|
parrot/share/terminfo/g/graphos
ADDED
|
Binary file (577 Bytes). View file
|
|
|
parrot/share/terminfo/g/graphos-30
ADDED
|
Binary file (595 Bytes). View file
|
|
|
parrot/share/terminfo/g/gs5430-22
ADDED
|
Binary file (1.18 kB). View file
|
|
|
parrot/share/terminfo/g/gt40
ADDED
|
Binary file (108 Bytes). View file
|
|
|
parrot/share/terminfo/g/guru+unk
ADDED
|
Binary file (1.27 kB). View file
|
|
|
parrot/share/terminfo/g/guru-76-w-s
ADDED
|
Binary file (1.35 kB). View file
|
|
|
parrot/share/terminfo/w/wsvt25
ADDED
|
Binary file (1.85 kB). View file
|
|
|
parrot/share/terminfo/w/wy-99fgta
ADDED
|
Binary file (1.28 kB). View file
|
|
|
parrot/share/terminfo/w/wy100q
ADDED
|
Binary file (466 Bytes). View file
|
|
|
parrot/share/terminfo/w/wy160-43
ADDED
|
Binary file (1.35 kB). View file
|
|
|
parrot/share/terminfo/w/wy160-43-w
ADDED
|
Binary file (1.36 kB). View file
|
|
|
parrot/share/terminfo/w/wy325-25w
ADDED
|
Binary file (1.2 kB). View file
|
|
|
parrot/share/terminfo/w/wy325-42
ADDED
|
Binary file (1.21 kB). View file
|
|
|
parrot/share/terminfo/w/wy325-wvb
ADDED
|
Binary file (1.24 kB). View file
|
|
|
parrot/share/terminfo/w/wy520-48
ADDED
|
Binary file (1.69 kB). View file
|
|
|
parrot/share/terminfo/w/wy520-48wpc
ADDED
|
Binary file (1.75 kB). View file
|
|
|
parrot/share/terminfo/w/wy520-wvb
ADDED
|
Binary file (1.75 kB). View file
|
|
|
parrot/share/terminfo/w/wy60-25-w
ADDED
|
Binary file (1.58 kB). View file
|
|
|
parrot/share/terminfo/w/wy60-PC
ADDED
|
Binary file (804 Bytes). View file
|
|
|
parrot/share/terminfo/w/wy99a-ansi
ADDED
|
Binary file (1.54 kB). View file
|
|
|
parrot/share/terminfo/w/wy99gt-wvb
ADDED
|
Binary file (1.64 kB). View file
|
|
|
parrot/share/terminfo/w/wyse-75ap
ADDED
|
Binary file (1.76 kB). View file
|
|
|
parrot/share/terminfo/w/wyse150-25-w
ADDED
|
Binary file (1.27 kB). View file
|
|
|
parrot/share/terminfo/w/wyse150-vb
ADDED
|
Binary file (1.28 kB). View file
|
|
|
parrot/share/terminfo/w/wyse150-w
ADDED
|
Binary file (1.27 kB). View file
|
|
|
parrot/share/terminfo/w/wyse160-43
ADDED
|
Binary file (1.35 kB). View file
|
|
|
parrot/share/terminfo/w/wyse185-24
ADDED
|
Binary file (1.7 kB). View file
|
|
|
parrot/share/terminfo/w/wyse325-w
ADDED
|
Binary file (1.23 kB). View file
|
|
|
parrot/share/terminfo/w/wyse50-vb
ADDED
|
Binary file (1.2 kB). View file
|
|
|
parrot/share/terminfo/w/wyse50-wvb
ADDED
|
Binary file (1.21 kB). View file
|
|
|
parrot/share/terminfo/w/wyse520-vb
ADDED
|
Binary file (1.74 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/apply.py
ADDED
|
@@ -0,0 +1,2062 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import abc
|
| 4 |
+
from collections import defaultdict
|
| 5 |
+
import functools
|
| 6 |
+
from functools import partial
|
| 7 |
+
import inspect
|
| 8 |
+
from typing import (
|
| 9 |
+
TYPE_CHECKING,
|
| 10 |
+
Any,
|
| 11 |
+
Callable,
|
| 12 |
+
Literal,
|
| 13 |
+
cast,
|
| 14 |
+
)
|
| 15 |
+
import warnings
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
from pandas._config import option_context
|
| 20 |
+
|
| 21 |
+
from pandas._libs import lib
|
| 22 |
+
from pandas._libs.internals import BlockValuesRefs
|
| 23 |
+
from pandas._typing import (
|
| 24 |
+
AggFuncType,
|
| 25 |
+
AggFuncTypeBase,
|
| 26 |
+
AggFuncTypeDict,
|
| 27 |
+
AggObjType,
|
| 28 |
+
Axis,
|
| 29 |
+
AxisInt,
|
| 30 |
+
NDFrameT,
|
| 31 |
+
npt,
|
| 32 |
+
)
|
| 33 |
+
from pandas.compat._optional import import_optional_dependency
|
| 34 |
+
from pandas.errors import SpecificationError
|
| 35 |
+
from pandas.util._decorators import cache_readonly
|
| 36 |
+
from pandas.util._exceptions import find_stack_level
|
| 37 |
+
|
| 38 |
+
from pandas.core.dtypes.cast import is_nested_object
|
| 39 |
+
from pandas.core.dtypes.common import (
|
| 40 |
+
is_dict_like,
|
| 41 |
+
is_extension_array_dtype,
|
| 42 |
+
is_list_like,
|
| 43 |
+
is_numeric_dtype,
|
| 44 |
+
is_sequence,
|
| 45 |
+
)
|
| 46 |
+
from pandas.core.dtypes.dtypes import (
|
| 47 |
+
CategoricalDtype,
|
| 48 |
+
ExtensionDtype,
|
| 49 |
+
)
|
| 50 |
+
from pandas.core.dtypes.generic import (
|
| 51 |
+
ABCDataFrame,
|
| 52 |
+
ABCNDFrame,
|
| 53 |
+
ABCSeries,
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
from pandas.core._numba.executor import generate_apply_looper
|
| 57 |
+
import pandas.core.common as com
|
| 58 |
+
from pandas.core.construction import ensure_wrapped_if_datetimelike
|
| 59 |
+
|
| 60 |
+
if TYPE_CHECKING:
|
| 61 |
+
from collections.abc import (
|
| 62 |
+
Generator,
|
| 63 |
+
Hashable,
|
| 64 |
+
Iterable,
|
| 65 |
+
MutableMapping,
|
| 66 |
+
Sequence,
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
from pandas import (
|
| 70 |
+
DataFrame,
|
| 71 |
+
Index,
|
| 72 |
+
Series,
|
| 73 |
+
)
|
| 74 |
+
from pandas.core.groupby import GroupBy
|
| 75 |
+
from pandas.core.resample import Resampler
|
| 76 |
+
from pandas.core.window.rolling import BaseWindow
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
ResType = dict[int, Any]
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def frame_apply(
|
| 83 |
+
obj: DataFrame,
|
| 84 |
+
func: AggFuncType,
|
| 85 |
+
axis: Axis = 0,
|
| 86 |
+
raw: bool = False,
|
| 87 |
+
result_type: str | None = None,
|
| 88 |
+
by_row: Literal[False, "compat"] = "compat",
|
| 89 |
+
engine: str = "python",
|
| 90 |
+
engine_kwargs: dict[str, bool] | None = None,
|
| 91 |
+
args=None,
|
| 92 |
+
kwargs=None,
|
| 93 |
+
) -> FrameApply:
|
| 94 |
+
"""construct and return a row or column based frame apply object"""
|
| 95 |
+
axis = obj._get_axis_number(axis)
|
| 96 |
+
klass: type[FrameApply]
|
| 97 |
+
if axis == 0:
|
| 98 |
+
klass = FrameRowApply
|
| 99 |
+
elif axis == 1:
|
| 100 |
+
klass = FrameColumnApply
|
| 101 |
+
|
| 102 |
+
_, func, _, _ = reconstruct_func(func, **kwargs)
|
| 103 |
+
assert func is not None
|
| 104 |
+
|
| 105 |
+
return klass(
|
| 106 |
+
obj,
|
| 107 |
+
func,
|
| 108 |
+
raw=raw,
|
| 109 |
+
result_type=result_type,
|
| 110 |
+
by_row=by_row,
|
| 111 |
+
engine=engine,
|
| 112 |
+
engine_kwargs=engine_kwargs,
|
| 113 |
+
args=args,
|
| 114 |
+
kwargs=kwargs,
|
| 115 |
+
)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class Apply(metaclass=abc.ABCMeta):
|
| 119 |
+
axis: AxisInt
|
| 120 |
+
|
| 121 |
+
def __init__(
|
| 122 |
+
self,
|
| 123 |
+
obj: AggObjType,
|
| 124 |
+
func: AggFuncType,
|
| 125 |
+
raw: bool,
|
| 126 |
+
result_type: str | None,
|
| 127 |
+
*,
|
| 128 |
+
by_row: Literal[False, "compat", "_compat"] = "compat",
|
| 129 |
+
engine: str = "python",
|
| 130 |
+
engine_kwargs: dict[str, bool] | None = None,
|
| 131 |
+
args,
|
| 132 |
+
kwargs,
|
| 133 |
+
) -> None:
|
| 134 |
+
self.obj = obj
|
| 135 |
+
self.raw = raw
|
| 136 |
+
|
| 137 |
+
assert by_row is False or by_row in ["compat", "_compat"]
|
| 138 |
+
self.by_row = by_row
|
| 139 |
+
|
| 140 |
+
self.args = args or ()
|
| 141 |
+
self.kwargs = kwargs or {}
|
| 142 |
+
|
| 143 |
+
self.engine = engine
|
| 144 |
+
self.engine_kwargs = {} if engine_kwargs is None else engine_kwargs
|
| 145 |
+
|
| 146 |
+
if result_type not in [None, "reduce", "broadcast", "expand"]:
|
| 147 |
+
raise ValueError(
|
| 148 |
+
"invalid value for result_type, must be one "
|
| 149 |
+
"of {None, 'reduce', 'broadcast', 'expand'}"
|
| 150 |
+
)
|
| 151 |
+
|
| 152 |
+
self.result_type = result_type
|
| 153 |
+
|
| 154 |
+
self.func = func
|
| 155 |
+
|
| 156 |
+
@abc.abstractmethod
|
| 157 |
+
def apply(self) -> DataFrame | Series:
|
| 158 |
+
pass
|
| 159 |
+
|
| 160 |
+
@abc.abstractmethod
|
| 161 |
+
def agg_or_apply_list_like(
|
| 162 |
+
self, op_name: Literal["agg", "apply"]
|
| 163 |
+
) -> DataFrame | Series:
|
| 164 |
+
pass
|
| 165 |
+
|
| 166 |
+
@abc.abstractmethod
|
| 167 |
+
def agg_or_apply_dict_like(
|
| 168 |
+
self, op_name: Literal["agg", "apply"]
|
| 169 |
+
) -> DataFrame | Series:
|
| 170 |
+
pass
|
| 171 |
+
|
| 172 |
+
def agg(self) -> DataFrame | Series | None:
|
| 173 |
+
"""
|
| 174 |
+
Provide an implementation for the aggregators.
|
| 175 |
+
|
| 176 |
+
Returns
|
| 177 |
+
-------
|
| 178 |
+
Result of aggregation, or None if agg cannot be performed by
|
| 179 |
+
this method.
|
| 180 |
+
"""
|
| 181 |
+
obj = self.obj
|
| 182 |
+
func = self.func
|
| 183 |
+
args = self.args
|
| 184 |
+
kwargs = self.kwargs
|
| 185 |
+
|
| 186 |
+
if isinstance(func, str):
|
| 187 |
+
return self.apply_str()
|
| 188 |
+
|
| 189 |
+
if is_dict_like(func):
|
| 190 |
+
return self.agg_dict_like()
|
| 191 |
+
elif is_list_like(func):
|
| 192 |
+
# we require a list, but not a 'str'
|
| 193 |
+
return self.agg_list_like()
|
| 194 |
+
|
| 195 |
+
if callable(func):
|
| 196 |
+
f = com.get_cython_func(func)
|
| 197 |
+
if f and not args and not kwargs:
|
| 198 |
+
warn_alias_replacement(obj, func, f)
|
| 199 |
+
return getattr(obj, f)()
|
| 200 |
+
|
| 201 |
+
# caller can react
|
| 202 |
+
return None
|
| 203 |
+
|
| 204 |
+
def transform(self) -> DataFrame | Series:
|
| 205 |
+
"""
|
| 206 |
+
Transform a DataFrame or Series.
|
| 207 |
+
|
| 208 |
+
Returns
|
| 209 |
+
-------
|
| 210 |
+
DataFrame or Series
|
| 211 |
+
Result of applying ``func`` along the given axis of the
|
| 212 |
+
Series or DataFrame.
|
| 213 |
+
|
| 214 |
+
Raises
|
| 215 |
+
------
|
| 216 |
+
ValueError
|
| 217 |
+
If the transform function fails or does not transform.
|
| 218 |
+
"""
|
| 219 |
+
obj = self.obj
|
| 220 |
+
func = self.func
|
| 221 |
+
axis = self.axis
|
| 222 |
+
args = self.args
|
| 223 |
+
kwargs = self.kwargs
|
| 224 |
+
|
| 225 |
+
is_series = obj.ndim == 1
|
| 226 |
+
|
| 227 |
+
if obj._get_axis_number(axis) == 1:
|
| 228 |
+
assert not is_series
|
| 229 |
+
return obj.T.transform(func, 0, *args, **kwargs).T
|
| 230 |
+
|
| 231 |
+
if is_list_like(func) and not is_dict_like(func):
|
| 232 |
+
func = cast(list[AggFuncTypeBase], func)
|
| 233 |
+
# Convert func equivalent dict
|
| 234 |
+
if is_series:
|
| 235 |
+
func = {com.get_callable_name(v) or v: v for v in func}
|
| 236 |
+
else:
|
| 237 |
+
func = {col: func for col in obj}
|
| 238 |
+
|
| 239 |
+
if is_dict_like(func):
|
| 240 |
+
func = cast(AggFuncTypeDict, func)
|
| 241 |
+
return self.transform_dict_like(func)
|
| 242 |
+
|
| 243 |
+
# func is either str or callable
|
| 244 |
+
func = cast(AggFuncTypeBase, func)
|
| 245 |
+
try:
|
| 246 |
+
result = self.transform_str_or_callable(func)
|
| 247 |
+
except TypeError:
|
| 248 |
+
raise
|
| 249 |
+
except Exception as err:
|
| 250 |
+
raise ValueError("Transform function failed") from err
|
| 251 |
+
|
| 252 |
+
# Functions that transform may return empty Series/DataFrame
|
| 253 |
+
# when the dtype is not appropriate
|
| 254 |
+
if (
|
| 255 |
+
isinstance(result, (ABCSeries, ABCDataFrame))
|
| 256 |
+
and result.empty
|
| 257 |
+
and not obj.empty
|
| 258 |
+
):
|
| 259 |
+
raise ValueError("Transform function failed")
|
| 260 |
+
# error: Argument 1 to "__get__" of "AxisProperty" has incompatible type
|
| 261 |
+
# "Union[Series, DataFrame, GroupBy[Any], SeriesGroupBy,
|
| 262 |
+
# DataFrameGroupBy, BaseWindow, Resampler]"; expected "Union[DataFrame,
|
| 263 |
+
# Series]"
|
| 264 |
+
if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
|
| 265 |
+
obj.index # type: ignore[arg-type]
|
| 266 |
+
):
|
| 267 |
+
raise ValueError("Function did not transform")
|
| 268 |
+
|
| 269 |
+
return result
|
| 270 |
+
|
| 271 |
+
def transform_dict_like(self, func) -> DataFrame:
|
| 272 |
+
"""
|
| 273 |
+
Compute transform in the case of a dict-like func
|
| 274 |
+
"""
|
| 275 |
+
from pandas.core.reshape.concat import concat
|
| 276 |
+
|
| 277 |
+
obj = self.obj
|
| 278 |
+
args = self.args
|
| 279 |
+
kwargs = self.kwargs
|
| 280 |
+
|
| 281 |
+
# transform is currently only for Series/DataFrame
|
| 282 |
+
assert isinstance(obj, ABCNDFrame)
|
| 283 |
+
|
| 284 |
+
if len(func) == 0:
|
| 285 |
+
raise ValueError("No transform functions were provided")
|
| 286 |
+
|
| 287 |
+
func = self.normalize_dictlike_arg("transform", obj, func)
|
| 288 |
+
|
| 289 |
+
results: dict[Hashable, DataFrame | Series] = {}
|
| 290 |
+
for name, how in func.items():
|
| 291 |
+
colg = obj._gotitem(name, ndim=1)
|
| 292 |
+
results[name] = colg.transform(how, 0, *args, **kwargs)
|
| 293 |
+
return concat(results, axis=1)
|
| 294 |
+
|
| 295 |
+
def transform_str_or_callable(self, func) -> DataFrame | Series:
|
| 296 |
+
"""
|
| 297 |
+
Compute transform in the case of a string or callable func
|
| 298 |
+
"""
|
| 299 |
+
obj = self.obj
|
| 300 |
+
args = self.args
|
| 301 |
+
kwargs = self.kwargs
|
| 302 |
+
|
| 303 |
+
if isinstance(func, str):
|
| 304 |
+
return self._apply_str(obj, func, *args, **kwargs)
|
| 305 |
+
|
| 306 |
+
if not args and not kwargs:
|
| 307 |
+
f = com.get_cython_func(func)
|
| 308 |
+
if f:
|
| 309 |
+
warn_alias_replacement(obj, func, f)
|
| 310 |
+
return getattr(obj, f)()
|
| 311 |
+
|
| 312 |
+
# Two possible ways to use a UDF - apply or call directly
|
| 313 |
+
try:
|
| 314 |
+
return obj.apply(func, args=args, **kwargs)
|
| 315 |
+
except Exception:
|
| 316 |
+
return func(obj, *args, **kwargs)
|
| 317 |
+
|
| 318 |
+
def agg_list_like(self) -> DataFrame | Series:
|
| 319 |
+
"""
|
| 320 |
+
Compute aggregation in the case of a list-like argument.
|
| 321 |
+
|
| 322 |
+
Returns
|
| 323 |
+
-------
|
| 324 |
+
Result of aggregation.
|
| 325 |
+
"""
|
| 326 |
+
return self.agg_or_apply_list_like(op_name="agg")
|
| 327 |
+
|
| 328 |
+
def compute_list_like(
|
| 329 |
+
self,
|
| 330 |
+
op_name: Literal["agg", "apply"],
|
| 331 |
+
selected_obj: Series | DataFrame,
|
| 332 |
+
kwargs: dict[str, Any],
|
| 333 |
+
) -> tuple[list[Hashable] | Index, list[Any]]:
|
| 334 |
+
"""
|
| 335 |
+
Compute agg/apply results for like-like input.
|
| 336 |
+
|
| 337 |
+
Parameters
|
| 338 |
+
----------
|
| 339 |
+
op_name : {"agg", "apply"}
|
| 340 |
+
Operation being performed.
|
| 341 |
+
selected_obj : Series or DataFrame
|
| 342 |
+
Data to perform operation on.
|
| 343 |
+
kwargs : dict
|
| 344 |
+
Keyword arguments to pass to the functions.
|
| 345 |
+
|
| 346 |
+
Returns
|
| 347 |
+
-------
|
| 348 |
+
keys : list[Hashable] or Index
|
| 349 |
+
Index labels for result.
|
| 350 |
+
results : list
|
| 351 |
+
Data for result. When aggregating with a Series, this can contain any
|
| 352 |
+
Python objects.
|
| 353 |
+
"""
|
| 354 |
+
func = cast(list[AggFuncTypeBase], self.func)
|
| 355 |
+
obj = self.obj
|
| 356 |
+
|
| 357 |
+
results = []
|
| 358 |
+
keys = []
|
| 359 |
+
|
| 360 |
+
# degenerate case
|
| 361 |
+
if selected_obj.ndim == 1:
|
| 362 |
+
for a in func:
|
| 363 |
+
colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
|
| 364 |
+
args = (
|
| 365 |
+
[self.axis, *self.args]
|
| 366 |
+
if include_axis(op_name, colg)
|
| 367 |
+
else self.args
|
| 368 |
+
)
|
| 369 |
+
new_res = getattr(colg, op_name)(a, *args, **kwargs)
|
| 370 |
+
results.append(new_res)
|
| 371 |
+
|
| 372 |
+
# make sure we find a good name
|
| 373 |
+
name = com.get_callable_name(a) or a
|
| 374 |
+
keys.append(name)
|
| 375 |
+
|
| 376 |
+
else:
|
| 377 |
+
indices = []
|
| 378 |
+
for index, col in enumerate(selected_obj):
|
| 379 |
+
colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
|
| 380 |
+
args = (
|
| 381 |
+
[self.axis, *self.args]
|
| 382 |
+
if include_axis(op_name, colg)
|
| 383 |
+
else self.args
|
| 384 |
+
)
|
| 385 |
+
new_res = getattr(colg, op_name)(func, *args, **kwargs)
|
| 386 |
+
results.append(new_res)
|
| 387 |
+
indices.append(index)
|
| 388 |
+
# error: Incompatible types in assignment (expression has type "Any |
|
| 389 |
+
# Index", variable has type "list[Any | Callable[..., Any] | str]")
|
| 390 |
+
keys = selected_obj.columns.take(indices) # type: ignore[assignment]
|
| 391 |
+
|
| 392 |
+
return keys, results
|
| 393 |
+
|
| 394 |
+
def wrap_results_list_like(
|
| 395 |
+
self, keys: Iterable[Hashable], results: list[Series | DataFrame]
|
| 396 |
+
):
|
| 397 |
+
from pandas.core.reshape.concat import concat
|
| 398 |
+
|
| 399 |
+
obj = self.obj
|
| 400 |
+
|
| 401 |
+
try:
|
| 402 |
+
return concat(results, keys=keys, axis=1, sort=False)
|
| 403 |
+
except TypeError as err:
|
| 404 |
+
# we are concatting non-NDFrame objects,
|
| 405 |
+
# e.g. a list of scalars
|
| 406 |
+
from pandas import Series
|
| 407 |
+
|
| 408 |
+
result = Series(results, index=keys, name=obj.name)
|
| 409 |
+
if is_nested_object(result):
|
| 410 |
+
raise ValueError(
|
| 411 |
+
"cannot combine transform and aggregation operations"
|
| 412 |
+
) from err
|
| 413 |
+
return result
|
| 414 |
+
|
| 415 |
+
def agg_dict_like(self) -> DataFrame | Series:
|
| 416 |
+
"""
|
| 417 |
+
Compute aggregation in the case of a dict-like argument.
|
| 418 |
+
|
| 419 |
+
Returns
|
| 420 |
+
-------
|
| 421 |
+
Result of aggregation.
|
| 422 |
+
"""
|
| 423 |
+
return self.agg_or_apply_dict_like(op_name="agg")
|
| 424 |
+
|
| 425 |
+
def compute_dict_like(
|
| 426 |
+
self,
|
| 427 |
+
op_name: Literal["agg", "apply"],
|
| 428 |
+
selected_obj: Series | DataFrame,
|
| 429 |
+
selection: Hashable | Sequence[Hashable],
|
| 430 |
+
kwargs: dict[str, Any],
|
| 431 |
+
) -> tuple[list[Hashable], list[Any]]:
|
| 432 |
+
"""
|
| 433 |
+
Compute agg/apply results for dict-like input.
|
| 434 |
+
|
| 435 |
+
Parameters
|
| 436 |
+
----------
|
| 437 |
+
op_name : {"agg", "apply"}
|
| 438 |
+
Operation being performed.
|
| 439 |
+
selected_obj : Series or DataFrame
|
| 440 |
+
Data to perform operation on.
|
| 441 |
+
selection : hashable or sequence of hashables
|
| 442 |
+
Used by GroupBy, Window, and Resample if selection is applied to the object.
|
| 443 |
+
kwargs : dict
|
| 444 |
+
Keyword arguments to pass to the functions.
|
| 445 |
+
|
| 446 |
+
Returns
|
| 447 |
+
-------
|
| 448 |
+
keys : list[hashable]
|
| 449 |
+
Index labels for result.
|
| 450 |
+
results : list
|
| 451 |
+
Data for result. When aggregating with a Series, this can contain any
|
| 452 |
+
Python object.
|
| 453 |
+
"""
|
| 454 |
+
from pandas.core.groupby.generic import (
|
| 455 |
+
DataFrameGroupBy,
|
| 456 |
+
SeriesGroupBy,
|
| 457 |
+
)
|
| 458 |
+
|
| 459 |
+
obj = self.obj
|
| 460 |
+
is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))
|
| 461 |
+
func = cast(AggFuncTypeDict, self.func)
|
| 462 |
+
func = self.normalize_dictlike_arg(op_name, selected_obj, func)
|
| 463 |
+
|
| 464 |
+
is_non_unique_col = (
|
| 465 |
+
selected_obj.ndim == 2
|
| 466 |
+
and selected_obj.columns.nunique() < len(selected_obj.columns)
|
| 467 |
+
)
|
| 468 |
+
|
| 469 |
+
if selected_obj.ndim == 1:
|
| 470 |
+
# key only used for output
|
| 471 |
+
colg = obj._gotitem(selection, ndim=1)
|
| 472 |
+
results = [getattr(colg, op_name)(how, **kwargs) for _, how in func.items()]
|
| 473 |
+
keys = list(func.keys())
|
| 474 |
+
elif not is_groupby and is_non_unique_col:
|
| 475 |
+
# key used for column selection and output
|
| 476 |
+
# GH#51099
|
| 477 |
+
results = []
|
| 478 |
+
keys = []
|
| 479 |
+
for key, how in func.items():
|
| 480 |
+
indices = selected_obj.columns.get_indexer_for([key])
|
| 481 |
+
labels = selected_obj.columns.take(indices)
|
| 482 |
+
label_to_indices = defaultdict(list)
|
| 483 |
+
for index, label in zip(indices, labels):
|
| 484 |
+
label_to_indices[label].append(index)
|
| 485 |
+
|
| 486 |
+
key_data = [
|
| 487 |
+
getattr(selected_obj._ixs(indice, axis=1), op_name)(how, **kwargs)
|
| 488 |
+
for label, indices in label_to_indices.items()
|
| 489 |
+
for indice in indices
|
| 490 |
+
]
|
| 491 |
+
|
| 492 |
+
keys += [key] * len(key_data)
|
| 493 |
+
results += key_data
|
| 494 |
+
else:
|
| 495 |
+
# key used for column selection and output
|
| 496 |
+
results = [
|
| 497 |
+
getattr(obj._gotitem(key, ndim=1), op_name)(how, **kwargs)
|
| 498 |
+
for key, how in func.items()
|
| 499 |
+
]
|
| 500 |
+
keys = list(func.keys())
|
| 501 |
+
|
| 502 |
+
return keys, results
|
| 503 |
+
|
| 504 |
+
def wrap_results_dict_like(
|
| 505 |
+
self,
|
| 506 |
+
selected_obj: Series | DataFrame,
|
| 507 |
+
result_index: list[Hashable],
|
| 508 |
+
result_data: list,
|
| 509 |
+
):
|
| 510 |
+
from pandas import Index
|
| 511 |
+
from pandas.core.reshape.concat import concat
|
| 512 |
+
|
| 513 |
+
obj = self.obj
|
| 514 |
+
|
| 515 |
+
# Avoid making two isinstance calls in all and any below
|
| 516 |
+
is_ndframe = [isinstance(r, ABCNDFrame) for r in result_data]
|
| 517 |
+
|
| 518 |
+
if all(is_ndframe):
|
| 519 |
+
results = dict(zip(result_index, result_data))
|
| 520 |
+
keys_to_use: Iterable[Hashable]
|
| 521 |
+
keys_to_use = [k for k in result_index if not results[k].empty]
|
| 522 |
+
# Have to check, if at least one DataFrame is not empty.
|
| 523 |
+
keys_to_use = keys_to_use if keys_to_use != [] else result_index
|
| 524 |
+
if selected_obj.ndim == 2:
|
| 525 |
+
# keys are columns, so we can preserve names
|
| 526 |
+
ktu = Index(keys_to_use)
|
| 527 |
+
ktu._set_names(selected_obj.columns.names)
|
| 528 |
+
keys_to_use = ktu
|
| 529 |
+
|
| 530 |
+
axis: AxisInt = 0 if isinstance(obj, ABCSeries) else 1
|
| 531 |
+
result = concat(
|
| 532 |
+
{k: results[k] for k in keys_to_use},
|
| 533 |
+
axis=axis,
|
| 534 |
+
keys=keys_to_use,
|
| 535 |
+
)
|
| 536 |
+
elif any(is_ndframe):
|
| 537 |
+
# There is a mix of NDFrames and scalars
|
| 538 |
+
raise ValueError(
|
| 539 |
+
"cannot perform both aggregation "
|
| 540 |
+
"and transformation operations "
|
| 541 |
+
"simultaneously"
|
| 542 |
+
)
|
| 543 |
+
else:
|
| 544 |
+
from pandas import Series
|
| 545 |
+
|
| 546 |
+
# we have a list of scalars
|
| 547 |
+
# GH 36212 use name only if obj is a series
|
| 548 |
+
if obj.ndim == 1:
|
| 549 |
+
obj = cast("Series", obj)
|
| 550 |
+
name = obj.name
|
| 551 |
+
else:
|
| 552 |
+
name = None
|
| 553 |
+
|
| 554 |
+
result = Series(result_data, index=result_index, name=name)
|
| 555 |
+
|
| 556 |
+
return result
|
| 557 |
+
|
| 558 |
+
def apply_str(self) -> DataFrame | Series:
|
| 559 |
+
"""
|
| 560 |
+
Compute apply in case of a string.
|
| 561 |
+
|
| 562 |
+
Returns
|
| 563 |
+
-------
|
| 564 |
+
result: Series or DataFrame
|
| 565 |
+
"""
|
| 566 |
+
# Caller is responsible for checking isinstance(self.f, str)
|
| 567 |
+
func = cast(str, self.func)
|
| 568 |
+
|
| 569 |
+
obj = self.obj
|
| 570 |
+
|
| 571 |
+
from pandas.core.groupby.generic import (
|
| 572 |
+
DataFrameGroupBy,
|
| 573 |
+
SeriesGroupBy,
|
| 574 |
+
)
|
| 575 |
+
|
| 576 |
+
# Support for `frame.transform('method')`
|
| 577 |
+
# Some methods (shift, etc.) require the axis argument, others
|
| 578 |
+
# don't, so inspect and insert if necessary.
|
| 579 |
+
method = getattr(obj, func, None)
|
| 580 |
+
if callable(method):
|
| 581 |
+
sig = inspect.getfullargspec(method)
|
| 582 |
+
arg_names = (*sig.args, *sig.kwonlyargs)
|
| 583 |
+
if self.axis != 0 and (
|
| 584 |
+
"axis" not in arg_names or func in ("corrwith", "skew")
|
| 585 |
+
):
|
| 586 |
+
raise ValueError(f"Operation {func} does not support axis=1")
|
| 587 |
+
if "axis" in arg_names:
|
| 588 |
+
if isinstance(obj, (SeriesGroupBy, DataFrameGroupBy)):
|
| 589 |
+
# Try to avoid FutureWarning for deprecated axis keyword;
|
| 590 |
+
# If self.axis matches the axis we would get by not passing
|
| 591 |
+
# axis, we safely exclude the keyword.
|
| 592 |
+
|
| 593 |
+
default_axis = 0
|
| 594 |
+
if func in ["idxmax", "idxmin"]:
|
| 595 |
+
# DataFrameGroupBy.idxmax, idxmin axis defaults to self.axis,
|
| 596 |
+
# whereas other axis keywords default to 0
|
| 597 |
+
default_axis = self.obj.axis
|
| 598 |
+
|
| 599 |
+
if default_axis != self.axis:
|
| 600 |
+
self.kwargs["axis"] = self.axis
|
| 601 |
+
else:
|
| 602 |
+
self.kwargs["axis"] = self.axis
|
| 603 |
+
return self._apply_str(obj, func, *self.args, **self.kwargs)
|
| 604 |
+
|
| 605 |
+
def apply_list_or_dict_like(self) -> DataFrame | Series:
|
| 606 |
+
"""
|
| 607 |
+
Compute apply in case of a list-like or dict-like.
|
| 608 |
+
|
| 609 |
+
Returns
|
| 610 |
+
-------
|
| 611 |
+
result: Series, DataFrame, or None
|
| 612 |
+
Result when self.func is a list-like or dict-like, None otherwise.
|
| 613 |
+
"""
|
| 614 |
+
|
| 615 |
+
if self.engine == "numba":
|
| 616 |
+
raise NotImplementedError(
|
| 617 |
+
"The 'numba' engine doesn't support list-like/"
|
| 618 |
+
"dict likes of callables yet."
|
| 619 |
+
)
|
| 620 |
+
|
| 621 |
+
if self.axis == 1 and isinstance(self.obj, ABCDataFrame):
|
| 622 |
+
return self.obj.T.apply(self.func, 0, args=self.args, **self.kwargs).T
|
| 623 |
+
|
| 624 |
+
func = self.func
|
| 625 |
+
kwargs = self.kwargs
|
| 626 |
+
|
| 627 |
+
if is_dict_like(func):
|
| 628 |
+
result = self.agg_or_apply_dict_like(op_name="apply")
|
| 629 |
+
else:
|
| 630 |
+
result = self.agg_or_apply_list_like(op_name="apply")
|
| 631 |
+
|
| 632 |
+
result = reconstruct_and_relabel_result(result, func, **kwargs)
|
| 633 |
+
|
| 634 |
+
return result
|
| 635 |
+
|
| 636 |
+
def normalize_dictlike_arg(
|
| 637 |
+
self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict
|
| 638 |
+
) -> AggFuncTypeDict:
|
| 639 |
+
"""
|
| 640 |
+
Handler for dict-like argument.
|
| 641 |
+
|
| 642 |
+
Ensures that necessary columns exist if obj is a DataFrame, and
|
| 643 |
+
that a nested renamer is not passed. Also normalizes to all lists
|
| 644 |
+
when values consists of a mix of list and non-lists.
|
| 645 |
+
"""
|
| 646 |
+
assert how in ("apply", "agg", "transform")
|
| 647 |
+
|
| 648 |
+
# Can't use func.values(); wouldn't work for a Series
|
| 649 |
+
if (
|
| 650 |
+
how == "agg"
|
| 651 |
+
and isinstance(obj, ABCSeries)
|
| 652 |
+
and any(is_list_like(v) for _, v in func.items())
|
| 653 |
+
) or (any(is_dict_like(v) for _, v in func.items())):
|
| 654 |
+
# GH 15931 - deprecation of renaming keys
|
| 655 |
+
raise SpecificationError("nested renamer is not supported")
|
| 656 |
+
|
| 657 |
+
if obj.ndim != 1:
|
| 658 |
+
# Check for missing columns on a frame
|
| 659 |
+
from pandas import Index
|
| 660 |
+
|
| 661 |
+
cols = Index(list(func.keys())).difference(obj.columns, sort=True)
|
| 662 |
+
if len(cols) > 0:
|
| 663 |
+
raise KeyError(f"Column(s) {list(cols)} do not exist")
|
| 664 |
+
|
| 665 |
+
aggregator_types = (list, tuple, dict)
|
| 666 |
+
|
| 667 |
+
# if we have a dict of any non-scalars
|
| 668 |
+
# eg. {'A' : ['mean']}, normalize all to
|
| 669 |
+
# be list-likes
|
| 670 |
+
# Cannot use func.values() because arg may be a Series
|
| 671 |
+
if any(isinstance(x, aggregator_types) for _, x in func.items()):
|
| 672 |
+
new_func: AggFuncTypeDict = {}
|
| 673 |
+
for k, v in func.items():
|
| 674 |
+
if not isinstance(v, aggregator_types):
|
| 675 |
+
new_func[k] = [v]
|
| 676 |
+
else:
|
| 677 |
+
new_func[k] = v
|
| 678 |
+
func = new_func
|
| 679 |
+
return func
|
| 680 |
+
|
| 681 |
+
def _apply_str(self, obj, func: str, *args, **kwargs):
|
| 682 |
+
"""
|
| 683 |
+
if arg is a string, then try to operate on it:
|
| 684 |
+
- try to find a function (or attribute) on obj
|
| 685 |
+
- try to find a numpy function
|
| 686 |
+
- raise
|
| 687 |
+
"""
|
| 688 |
+
assert isinstance(func, str)
|
| 689 |
+
|
| 690 |
+
if hasattr(obj, func):
|
| 691 |
+
f = getattr(obj, func)
|
| 692 |
+
if callable(f):
|
| 693 |
+
return f(*args, **kwargs)
|
| 694 |
+
|
| 695 |
+
# people may aggregate on a non-callable attribute
|
| 696 |
+
# but don't let them think they can pass args to it
|
| 697 |
+
assert len(args) == 0
|
| 698 |
+
assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0
|
| 699 |
+
return f
|
| 700 |
+
elif hasattr(np, func) and hasattr(obj, "__array__"):
|
| 701 |
+
# in particular exclude Window
|
| 702 |
+
f = getattr(np, func)
|
| 703 |
+
return f(obj, *args, **kwargs)
|
| 704 |
+
else:
|
| 705 |
+
msg = f"'{func}' is not a valid function for '{type(obj).__name__}' object"
|
| 706 |
+
raise AttributeError(msg)
|
| 707 |
+
|
| 708 |
+
|
| 709 |
+
class NDFrameApply(Apply):
|
| 710 |
+
"""
|
| 711 |
+
Methods shared by FrameApply and SeriesApply but
|
| 712 |
+
not GroupByApply or ResamplerWindowApply
|
| 713 |
+
"""
|
| 714 |
+
|
| 715 |
+
obj: DataFrame | Series
|
| 716 |
+
|
| 717 |
+
@property
|
| 718 |
+
def index(self) -> Index:
|
| 719 |
+
return self.obj.index
|
| 720 |
+
|
| 721 |
+
@property
|
| 722 |
+
def agg_axis(self) -> Index:
|
| 723 |
+
return self.obj._get_agg_axis(self.axis)
|
| 724 |
+
|
| 725 |
+
def agg_or_apply_list_like(
|
| 726 |
+
self, op_name: Literal["agg", "apply"]
|
| 727 |
+
) -> DataFrame | Series:
|
| 728 |
+
obj = self.obj
|
| 729 |
+
kwargs = self.kwargs
|
| 730 |
+
|
| 731 |
+
if op_name == "apply":
|
| 732 |
+
if isinstance(self, FrameApply):
|
| 733 |
+
by_row = self.by_row
|
| 734 |
+
|
| 735 |
+
elif isinstance(self, SeriesApply):
|
| 736 |
+
by_row = "_compat" if self.by_row else False
|
| 737 |
+
else:
|
| 738 |
+
by_row = False
|
| 739 |
+
kwargs = {**kwargs, "by_row": by_row}
|
| 740 |
+
|
| 741 |
+
if getattr(obj, "axis", 0) == 1:
|
| 742 |
+
raise NotImplementedError("axis other than 0 is not supported")
|
| 743 |
+
|
| 744 |
+
keys, results = self.compute_list_like(op_name, obj, kwargs)
|
| 745 |
+
result = self.wrap_results_list_like(keys, results)
|
| 746 |
+
return result
|
| 747 |
+
|
| 748 |
+
def agg_or_apply_dict_like(
|
| 749 |
+
self, op_name: Literal["agg", "apply"]
|
| 750 |
+
) -> DataFrame | Series:
|
| 751 |
+
assert op_name in ["agg", "apply"]
|
| 752 |
+
obj = self.obj
|
| 753 |
+
|
| 754 |
+
kwargs = {}
|
| 755 |
+
if op_name == "apply":
|
| 756 |
+
by_row = "_compat" if self.by_row else False
|
| 757 |
+
kwargs.update({"by_row": by_row})
|
| 758 |
+
|
| 759 |
+
if getattr(obj, "axis", 0) == 1:
|
| 760 |
+
raise NotImplementedError("axis other than 0 is not supported")
|
| 761 |
+
|
| 762 |
+
selection = None
|
| 763 |
+
result_index, result_data = self.compute_dict_like(
|
| 764 |
+
op_name, obj, selection, kwargs
|
| 765 |
+
)
|
| 766 |
+
result = self.wrap_results_dict_like(obj, result_index, result_data)
|
| 767 |
+
return result
|
| 768 |
+
|
| 769 |
+
|
| 770 |
+
class FrameApply(NDFrameApply):
|
| 771 |
+
obj: DataFrame
|
| 772 |
+
|
| 773 |
+
def __init__(
|
| 774 |
+
self,
|
| 775 |
+
obj: AggObjType,
|
| 776 |
+
func: AggFuncType,
|
| 777 |
+
raw: bool,
|
| 778 |
+
result_type: str | None,
|
| 779 |
+
*,
|
| 780 |
+
by_row: Literal[False, "compat"] = False,
|
| 781 |
+
engine: str = "python",
|
| 782 |
+
engine_kwargs: dict[str, bool] | None = None,
|
| 783 |
+
args,
|
| 784 |
+
kwargs,
|
| 785 |
+
) -> None:
|
| 786 |
+
if by_row is not False and by_row != "compat":
|
| 787 |
+
raise ValueError(f"by_row={by_row} not allowed")
|
| 788 |
+
super().__init__(
|
| 789 |
+
obj,
|
| 790 |
+
func,
|
| 791 |
+
raw,
|
| 792 |
+
result_type,
|
| 793 |
+
by_row=by_row,
|
| 794 |
+
engine=engine,
|
| 795 |
+
engine_kwargs=engine_kwargs,
|
| 796 |
+
args=args,
|
| 797 |
+
kwargs=kwargs,
|
| 798 |
+
)
|
| 799 |
+
|
| 800 |
+
# ---------------------------------------------------------------
|
| 801 |
+
# Abstract Methods
|
| 802 |
+
|
| 803 |
+
@property
|
| 804 |
+
@abc.abstractmethod
|
| 805 |
+
def result_index(self) -> Index:
|
| 806 |
+
pass
|
| 807 |
+
|
| 808 |
+
@property
|
| 809 |
+
@abc.abstractmethod
|
| 810 |
+
def result_columns(self) -> Index:
|
| 811 |
+
pass
|
| 812 |
+
|
| 813 |
+
@property
|
| 814 |
+
@abc.abstractmethod
|
| 815 |
+
def series_generator(self) -> Generator[Series, None, None]:
|
| 816 |
+
pass
|
| 817 |
+
|
| 818 |
+
@staticmethod
|
| 819 |
+
@functools.cache
|
| 820 |
+
@abc.abstractmethod
|
| 821 |
+
def generate_numba_apply_func(
|
| 822 |
+
func, nogil=True, nopython=True, parallel=False
|
| 823 |
+
) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
|
| 824 |
+
pass
|
| 825 |
+
|
| 826 |
+
@abc.abstractmethod
|
| 827 |
+
def apply_with_numba(self):
|
| 828 |
+
pass
|
| 829 |
+
|
| 830 |
+
def validate_values_for_numba(self):
|
| 831 |
+
# Validate column dtyps all OK
|
| 832 |
+
for colname, dtype in self.obj.dtypes.items():
|
| 833 |
+
if not is_numeric_dtype(dtype):
|
| 834 |
+
raise ValueError(
|
| 835 |
+
f"Column {colname} must have a numeric dtype. "
|
| 836 |
+
f"Found '{dtype}' instead"
|
| 837 |
+
)
|
| 838 |
+
if is_extension_array_dtype(dtype):
|
| 839 |
+
raise ValueError(
|
| 840 |
+
f"Column {colname} is backed by an extension array, "
|
| 841 |
+
f"which is not supported by the numba engine."
|
| 842 |
+
)
|
| 843 |
+
|
| 844 |
+
@abc.abstractmethod
|
| 845 |
+
def wrap_results_for_axis(
|
| 846 |
+
self, results: ResType, res_index: Index
|
| 847 |
+
) -> DataFrame | Series:
|
| 848 |
+
pass
|
| 849 |
+
|
| 850 |
+
# ---------------------------------------------------------------
|
| 851 |
+
|
| 852 |
+
@property
|
| 853 |
+
def res_columns(self) -> Index:
|
| 854 |
+
return self.result_columns
|
| 855 |
+
|
| 856 |
+
@property
|
| 857 |
+
def columns(self) -> Index:
|
| 858 |
+
return self.obj.columns
|
| 859 |
+
|
| 860 |
+
@cache_readonly
|
| 861 |
+
def values(self):
|
| 862 |
+
return self.obj.values
|
| 863 |
+
|
| 864 |
+
def apply(self) -> DataFrame | Series:
|
| 865 |
+
"""compute the results"""
|
| 866 |
+
|
| 867 |
+
# dispatch to handle list-like or dict-like
|
| 868 |
+
if is_list_like(self.func):
|
| 869 |
+
if self.engine == "numba":
|
| 870 |
+
raise NotImplementedError(
|
| 871 |
+
"the 'numba' engine doesn't support lists of callables yet"
|
| 872 |
+
)
|
| 873 |
+
return self.apply_list_or_dict_like()
|
| 874 |
+
|
| 875 |
+
# all empty
|
| 876 |
+
if len(self.columns) == 0 and len(self.index) == 0:
|
| 877 |
+
return self.apply_empty_result()
|
| 878 |
+
|
| 879 |
+
# string dispatch
|
| 880 |
+
if isinstance(self.func, str):
|
| 881 |
+
if self.engine == "numba":
|
| 882 |
+
raise NotImplementedError(
|
| 883 |
+
"the 'numba' engine doesn't support using "
|
| 884 |
+
"a string as the callable function"
|
| 885 |
+
)
|
| 886 |
+
return self.apply_str()
|
| 887 |
+
|
| 888 |
+
# ufunc
|
| 889 |
+
elif isinstance(self.func, np.ufunc):
|
| 890 |
+
if self.engine == "numba":
|
| 891 |
+
raise NotImplementedError(
|
| 892 |
+
"the 'numba' engine doesn't support "
|
| 893 |
+
"using a numpy ufunc as the callable function"
|
| 894 |
+
)
|
| 895 |
+
with np.errstate(all="ignore"):
|
| 896 |
+
results = self.obj._mgr.apply("apply", func=self.func)
|
| 897 |
+
# _constructor will retain self.index and self.columns
|
| 898 |
+
return self.obj._constructor_from_mgr(results, axes=results.axes)
|
| 899 |
+
|
| 900 |
+
# broadcasting
|
| 901 |
+
if self.result_type == "broadcast":
|
| 902 |
+
if self.engine == "numba":
|
| 903 |
+
raise NotImplementedError(
|
| 904 |
+
"the 'numba' engine doesn't support result_type='broadcast'"
|
| 905 |
+
)
|
| 906 |
+
return self.apply_broadcast(self.obj)
|
| 907 |
+
|
| 908 |
+
# one axis empty
|
| 909 |
+
elif not all(self.obj.shape):
|
| 910 |
+
return self.apply_empty_result()
|
| 911 |
+
|
| 912 |
+
# raw
|
| 913 |
+
elif self.raw:
|
| 914 |
+
return self.apply_raw(engine=self.engine, engine_kwargs=self.engine_kwargs)
|
| 915 |
+
|
| 916 |
+
return self.apply_standard()
|
| 917 |
+
|
| 918 |
+
def agg(self):
|
| 919 |
+
obj = self.obj
|
| 920 |
+
axis = self.axis
|
| 921 |
+
|
| 922 |
+
# TODO: Avoid having to change state
|
| 923 |
+
self.obj = self.obj if self.axis == 0 else self.obj.T
|
| 924 |
+
self.axis = 0
|
| 925 |
+
|
| 926 |
+
result = None
|
| 927 |
+
try:
|
| 928 |
+
result = super().agg()
|
| 929 |
+
finally:
|
| 930 |
+
self.obj = obj
|
| 931 |
+
self.axis = axis
|
| 932 |
+
|
| 933 |
+
if axis == 1:
|
| 934 |
+
result = result.T if result is not None else result
|
| 935 |
+
|
| 936 |
+
if result is None:
|
| 937 |
+
result = self.obj.apply(self.func, axis, args=self.args, **self.kwargs)
|
| 938 |
+
|
| 939 |
+
return result
|
| 940 |
+
|
| 941 |
+
def apply_empty_result(self):
|
| 942 |
+
"""
|
| 943 |
+
we have an empty result; at least 1 axis is 0
|
| 944 |
+
|
| 945 |
+
we will try to apply the function to an empty
|
| 946 |
+
series in order to see if this is a reduction function
|
| 947 |
+
"""
|
| 948 |
+
assert callable(self.func)
|
| 949 |
+
|
| 950 |
+
# we are not asked to reduce or infer reduction
|
| 951 |
+
# so just return a copy of the existing object
|
| 952 |
+
if self.result_type not in ["reduce", None]:
|
| 953 |
+
return self.obj.copy()
|
| 954 |
+
|
| 955 |
+
# we may need to infer
|
| 956 |
+
should_reduce = self.result_type == "reduce"
|
| 957 |
+
|
| 958 |
+
from pandas import Series
|
| 959 |
+
|
| 960 |
+
if not should_reduce:
|
| 961 |
+
try:
|
| 962 |
+
if self.axis == 0:
|
| 963 |
+
r = self.func(
|
| 964 |
+
Series([], dtype=np.float64), *self.args, **self.kwargs
|
| 965 |
+
)
|
| 966 |
+
else:
|
| 967 |
+
r = self.func(
|
| 968 |
+
Series(index=self.columns, dtype=np.float64),
|
| 969 |
+
*self.args,
|
| 970 |
+
**self.kwargs,
|
| 971 |
+
)
|
| 972 |
+
except Exception:
|
| 973 |
+
pass
|
| 974 |
+
else:
|
| 975 |
+
should_reduce = not isinstance(r, Series)
|
| 976 |
+
|
| 977 |
+
if should_reduce:
|
| 978 |
+
if len(self.agg_axis):
|
| 979 |
+
r = self.func(Series([], dtype=np.float64), *self.args, **self.kwargs)
|
| 980 |
+
else:
|
| 981 |
+
r = np.nan
|
| 982 |
+
|
| 983 |
+
return self.obj._constructor_sliced(r, index=self.agg_axis)
|
| 984 |
+
else:
|
| 985 |
+
return self.obj.copy()
|
| 986 |
+
|
| 987 |
+
def apply_raw(self, engine="python", engine_kwargs=None):
|
| 988 |
+
"""apply to the values as a numpy array"""
|
| 989 |
+
|
| 990 |
+
def wrap_function(func):
|
| 991 |
+
"""
|
| 992 |
+
Wrap user supplied function to work around numpy issue.
|
| 993 |
+
|
| 994 |
+
see https://github.com/numpy/numpy/issues/8352
|
| 995 |
+
"""
|
| 996 |
+
|
| 997 |
+
def wrapper(*args, **kwargs):
|
| 998 |
+
result = func(*args, **kwargs)
|
| 999 |
+
if isinstance(result, str):
|
| 1000 |
+
result = np.array(result, dtype=object)
|
| 1001 |
+
return result
|
| 1002 |
+
|
| 1003 |
+
return wrapper
|
| 1004 |
+
|
| 1005 |
+
if engine == "numba":
|
| 1006 |
+
engine_kwargs = {} if engine_kwargs is None else engine_kwargs
|
| 1007 |
+
|
| 1008 |
+
# error: Argument 1 to "__call__" of "_lru_cache_wrapper" has
|
| 1009 |
+
# incompatible type "Callable[..., Any] | str | list[Callable
|
| 1010 |
+
# [..., Any] | str] | dict[Hashable,Callable[..., Any] | str |
|
| 1011 |
+
# list[Callable[..., Any] | str]]"; expected "Hashable"
|
| 1012 |
+
nb_looper = generate_apply_looper(
|
| 1013 |
+
self.func, **engine_kwargs # type: ignore[arg-type]
|
| 1014 |
+
)
|
| 1015 |
+
result = nb_looper(self.values, self.axis)
|
| 1016 |
+
# If we made the result 2-D, squeeze it back to 1-D
|
| 1017 |
+
result = np.squeeze(result)
|
| 1018 |
+
else:
|
| 1019 |
+
result = np.apply_along_axis(
|
| 1020 |
+
wrap_function(self.func),
|
| 1021 |
+
self.axis,
|
| 1022 |
+
self.values,
|
| 1023 |
+
*self.args,
|
| 1024 |
+
**self.kwargs,
|
| 1025 |
+
)
|
| 1026 |
+
|
| 1027 |
+
# TODO: mixed type case
|
| 1028 |
+
if result.ndim == 2:
|
| 1029 |
+
return self.obj._constructor(result, index=self.index, columns=self.columns)
|
| 1030 |
+
else:
|
| 1031 |
+
return self.obj._constructor_sliced(result, index=self.agg_axis)
|
| 1032 |
+
|
| 1033 |
+
def apply_broadcast(self, target: DataFrame) -> DataFrame:
|
| 1034 |
+
assert callable(self.func)
|
| 1035 |
+
|
| 1036 |
+
result_values = np.empty_like(target.values)
|
| 1037 |
+
|
| 1038 |
+
# axis which we want to compare compliance
|
| 1039 |
+
result_compare = target.shape[0]
|
| 1040 |
+
|
| 1041 |
+
for i, col in enumerate(target.columns):
|
| 1042 |
+
res = self.func(target[col], *self.args, **self.kwargs)
|
| 1043 |
+
ares = np.asarray(res).ndim
|
| 1044 |
+
|
| 1045 |
+
# must be a scalar or 1d
|
| 1046 |
+
if ares > 1:
|
| 1047 |
+
raise ValueError("too many dims to broadcast")
|
| 1048 |
+
if ares == 1:
|
| 1049 |
+
# must match return dim
|
| 1050 |
+
if result_compare != len(res):
|
| 1051 |
+
raise ValueError("cannot broadcast result")
|
| 1052 |
+
|
| 1053 |
+
result_values[:, i] = res
|
| 1054 |
+
|
| 1055 |
+
# we *always* preserve the original index / columns
|
| 1056 |
+
result = self.obj._constructor(
|
| 1057 |
+
result_values, index=target.index, columns=target.columns
|
| 1058 |
+
)
|
| 1059 |
+
return result
|
| 1060 |
+
|
| 1061 |
+
def apply_standard(self):
|
| 1062 |
+
if self.engine == "python":
|
| 1063 |
+
results, res_index = self.apply_series_generator()
|
| 1064 |
+
else:
|
| 1065 |
+
results, res_index = self.apply_series_numba()
|
| 1066 |
+
|
| 1067 |
+
# wrap results
|
| 1068 |
+
return self.wrap_results(results, res_index)
|
| 1069 |
+
|
| 1070 |
+
def apply_series_generator(self) -> tuple[ResType, Index]:
|
| 1071 |
+
assert callable(self.func)
|
| 1072 |
+
|
| 1073 |
+
series_gen = self.series_generator
|
| 1074 |
+
res_index = self.result_index
|
| 1075 |
+
|
| 1076 |
+
results = {}
|
| 1077 |
+
|
| 1078 |
+
with option_context("mode.chained_assignment", None):
|
| 1079 |
+
for i, v in enumerate(series_gen):
|
| 1080 |
+
# ignore SettingWithCopy here in case the user mutates
|
| 1081 |
+
results[i] = self.func(v, *self.args, **self.kwargs)
|
| 1082 |
+
if isinstance(results[i], ABCSeries):
|
| 1083 |
+
# If we have a view on v, we need to make a copy because
|
| 1084 |
+
# series_generator will swap out the underlying data
|
| 1085 |
+
results[i] = results[i].copy(deep=False)
|
| 1086 |
+
|
| 1087 |
+
return results, res_index
|
| 1088 |
+
|
| 1089 |
+
def apply_series_numba(self):
|
| 1090 |
+
if self.engine_kwargs.get("parallel", False):
|
| 1091 |
+
raise NotImplementedError(
|
| 1092 |
+
"Parallel apply is not supported when raw=False and engine='numba'"
|
| 1093 |
+
)
|
| 1094 |
+
if not self.obj.index.is_unique or not self.columns.is_unique:
|
| 1095 |
+
raise NotImplementedError(
|
| 1096 |
+
"The index/columns must be unique when raw=False and engine='numba'"
|
| 1097 |
+
)
|
| 1098 |
+
self.validate_values_for_numba()
|
| 1099 |
+
results = self.apply_with_numba()
|
| 1100 |
+
return results, self.result_index
|
| 1101 |
+
|
| 1102 |
+
def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series:
|
| 1103 |
+
from pandas import Series
|
| 1104 |
+
|
| 1105 |
+
# see if we can infer the results
|
| 1106 |
+
if len(results) > 0 and 0 in results and is_sequence(results[0]):
|
| 1107 |
+
return self.wrap_results_for_axis(results, res_index)
|
| 1108 |
+
|
| 1109 |
+
# dict of scalars
|
| 1110 |
+
|
| 1111 |
+
# the default dtype of an empty Series is `object`, but this
|
| 1112 |
+
# code can be hit by df.mean() where the result should have dtype
|
| 1113 |
+
# float64 even if it's an empty Series.
|
| 1114 |
+
constructor_sliced = self.obj._constructor_sliced
|
| 1115 |
+
if len(results) == 0 and constructor_sliced is Series:
|
| 1116 |
+
result = constructor_sliced(results, dtype=np.float64)
|
| 1117 |
+
else:
|
| 1118 |
+
result = constructor_sliced(results)
|
| 1119 |
+
result.index = res_index
|
| 1120 |
+
|
| 1121 |
+
return result
|
| 1122 |
+
|
| 1123 |
+
def apply_str(self) -> DataFrame | Series:
|
| 1124 |
+
# Caller is responsible for checking isinstance(self.func, str)
|
| 1125 |
+
# TODO: GH#39993 - Avoid special-casing by replacing with lambda
|
| 1126 |
+
if self.func == "size":
|
| 1127 |
+
# Special-cased because DataFrame.size returns a single scalar
|
| 1128 |
+
obj = self.obj
|
| 1129 |
+
value = obj.shape[self.axis]
|
| 1130 |
+
return obj._constructor_sliced(value, index=self.agg_axis)
|
| 1131 |
+
return super().apply_str()
|
| 1132 |
+
|
| 1133 |
+
|
| 1134 |
+
class FrameRowApply(FrameApply):
|
| 1135 |
+
axis: AxisInt = 0
|
| 1136 |
+
|
| 1137 |
+
@property
|
| 1138 |
+
def series_generator(self) -> Generator[Series, None, None]:
|
| 1139 |
+
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
|
| 1140 |
+
|
| 1141 |
+
@staticmethod
|
| 1142 |
+
@functools.cache
|
| 1143 |
+
def generate_numba_apply_func(
|
| 1144 |
+
func, nogil=True, nopython=True, parallel=False
|
| 1145 |
+
) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
|
| 1146 |
+
numba = import_optional_dependency("numba")
|
| 1147 |
+
from pandas import Series
|
| 1148 |
+
|
| 1149 |
+
# Import helper from extensions to cast string object -> np strings
|
| 1150 |
+
# Note: This also has the side effect of loading our numba extensions
|
| 1151 |
+
from pandas.core._numba.extensions import maybe_cast_str
|
| 1152 |
+
|
| 1153 |
+
jitted_udf = numba.extending.register_jitable(func)
|
| 1154 |
+
|
| 1155 |
+
# Currently the parallel argument doesn't get passed through here
|
| 1156 |
+
# (it's disabled) since the dicts in numba aren't thread-safe.
|
| 1157 |
+
@numba.jit(nogil=nogil, nopython=nopython, parallel=parallel)
|
| 1158 |
+
def numba_func(values, col_names, df_index):
|
| 1159 |
+
results = {}
|
| 1160 |
+
for j in range(values.shape[1]):
|
| 1161 |
+
# Create the series
|
| 1162 |
+
ser = Series(
|
| 1163 |
+
values[:, j], index=df_index, name=maybe_cast_str(col_names[j])
|
| 1164 |
+
)
|
| 1165 |
+
results[j] = jitted_udf(ser)
|
| 1166 |
+
return results
|
| 1167 |
+
|
| 1168 |
+
return numba_func
|
| 1169 |
+
|
| 1170 |
+
def apply_with_numba(self) -> dict[int, Any]:
|
| 1171 |
+
nb_func = self.generate_numba_apply_func(
|
| 1172 |
+
cast(Callable, self.func), **self.engine_kwargs
|
| 1173 |
+
)
|
| 1174 |
+
from pandas.core._numba.extensions import set_numba_data
|
| 1175 |
+
|
| 1176 |
+
index = self.obj.index
|
| 1177 |
+
if index.dtype == "string":
|
| 1178 |
+
index = index.astype(object)
|
| 1179 |
+
|
| 1180 |
+
columns = self.obj.columns
|
| 1181 |
+
if columns.dtype == "string":
|
| 1182 |
+
columns = columns.astype(object)
|
| 1183 |
+
|
| 1184 |
+
# Convert from numba dict to regular dict
|
| 1185 |
+
# Our isinstance checks in the df constructor don't pass for numbas typed dict
|
| 1186 |
+
with set_numba_data(index) as index, set_numba_data(columns) as columns:
|
| 1187 |
+
res = dict(nb_func(self.values, columns, index))
|
| 1188 |
+
return res
|
| 1189 |
+
|
| 1190 |
+
@property
|
| 1191 |
+
def result_index(self) -> Index:
|
| 1192 |
+
return self.columns
|
| 1193 |
+
|
| 1194 |
+
@property
|
| 1195 |
+
def result_columns(self) -> Index:
|
| 1196 |
+
return self.index
|
| 1197 |
+
|
| 1198 |
+
def wrap_results_for_axis(
|
| 1199 |
+
self, results: ResType, res_index: Index
|
| 1200 |
+
) -> DataFrame | Series:
|
| 1201 |
+
"""return the results for the rows"""
|
| 1202 |
+
|
| 1203 |
+
if self.result_type == "reduce":
|
| 1204 |
+
# e.g. test_apply_dict GH#8735
|
| 1205 |
+
res = self.obj._constructor_sliced(results)
|
| 1206 |
+
res.index = res_index
|
| 1207 |
+
return res
|
| 1208 |
+
|
| 1209 |
+
elif self.result_type is None and all(
|
| 1210 |
+
isinstance(x, dict) for x in results.values()
|
| 1211 |
+
):
|
| 1212 |
+
# Our operation was a to_dict op e.g.
|
| 1213 |
+
# test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544
|
| 1214 |
+
res = self.obj._constructor_sliced(results)
|
| 1215 |
+
res.index = res_index
|
| 1216 |
+
return res
|
| 1217 |
+
|
| 1218 |
+
try:
|
| 1219 |
+
result = self.obj._constructor(data=results)
|
| 1220 |
+
except ValueError as err:
|
| 1221 |
+
if "All arrays must be of the same length" in str(err):
|
| 1222 |
+
# e.g. result = [[2, 3], [1.5], ['foo', 'bar']]
|
| 1223 |
+
# see test_agg_listlike_result GH#29587
|
| 1224 |
+
res = self.obj._constructor_sliced(results)
|
| 1225 |
+
res.index = res_index
|
| 1226 |
+
return res
|
| 1227 |
+
else:
|
| 1228 |
+
raise
|
| 1229 |
+
|
| 1230 |
+
if not isinstance(results[0], ABCSeries):
|
| 1231 |
+
if len(result.index) == len(self.res_columns):
|
| 1232 |
+
result.index = self.res_columns
|
| 1233 |
+
|
| 1234 |
+
if len(result.columns) == len(res_index):
|
| 1235 |
+
result.columns = res_index
|
| 1236 |
+
|
| 1237 |
+
return result
|
| 1238 |
+
|
| 1239 |
+
|
| 1240 |
+
class FrameColumnApply(FrameApply):
|
| 1241 |
+
axis: AxisInt = 1
|
| 1242 |
+
|
| 1243 |
+
def apply_broadcast(self, target: DataFrame) -> DataFrame:
|
| 1244 |
+
result = super().apply_broadcast(target.T)
|
| 1245 |
+
return result.T
|
| 1246 |
+
|
| 1247 |
+
@property
|
| 1248 |
+
def series_generator(self) -> Generator[Series, None, None]:
|
| 1249 |
+
values = self.values
|
| 1250 |
+
values = ensure_wrapped_if_datetimelike(values)
|
| 1251 |
+
assert len(values) > 0
|
| 1252 |
+
|
| 1253 |
+
# We create one Series object, and will swap out the data inside
|
| 1254 |
+
# of it. Kids: don't do this at home.
|
| 1255 |
+
ser = self.obj._ixs(0, axis=0)
|
| 1256 |
+
mgr = ser._mgr
|
| 1257 |
+
|
| 1258 |
+
is_view = mgr.blocks[0].refs.has_reference() # type: ignore[union-attr]
|
| 1259 |
+
|
| 1260 |
+
if isinstance(ser.dtype, ExtensionDtype):
|
| 1261 |
+
# values will be incorrect for this block
|
| 1262 |
+
# TODO(EA2D): special case would be unnecessary with 2D EAs
|
| 1263 |
+
obj = self.obj
|
| 1264 |
+
for i in range(len(obj)):
|
| 1265 |
+
yield obj._ixs(i, axis=0)
|
| 1266 |
+
|
| 1267 |
+
else:
|
| 1268 |
+
for arr, name in zip(values, self.index):
|
| 1269 |
+
# GH#35462 re-pin mgr in case setitem changed it
|
| 1270 |
+
ser._mgr = mgr
|
| 1271 |
+
mgr.set_values(arr)
|
| 1272 |
+
object.__setattr__(ser, "_name", name)
|
| 1273 |
+
if not is_view:
|
| 1274 |
+
# In apply_series_generator we store the a shallow copy of the
|
| 1275 |
+
# result, which potentially increases the ref count of this reused
|
| 1276 |
+
# `ser` object (depending on the result of the applied function)
|
| 1277 |
+
# -> if that happened and `ser` is already a copy, then we reset
|
| 1278 |
+
# the refs here to avoid triggering a unnecessary CoW inside the
|
| 1279 |
+
# applied function (https://github.com/pandas-dev/pandas/pull/56212)
|
| 1280 |
+
mgr.blocks[0].refs = BlockValuesRefs(mgr.blocks[0]) # type: ignore[union-attr]
|
| 1281 |
+
yield ser
|
| 1282 |
+
|
| 1283 |
+
@staticmethod
|
| 1284 |
+
@functools.cache
|
| 1285 |
+
def generate_numba_apply_func(
|
| 1286 |
+
func, nogil=True, nopython=True, parallel=False
|
| 1287 |
+
) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
|
| 1288 |
+
numba = import_optional_dependency("numba")
|
| 1289 |
+
from pandas import Series
|
| 1290 |
+
from pandas.core._numba.extensions import maybe_cast_str
|
| 1291 |
+
|
| 1292 |
+
jitted_udf = numba.extending.register_jitable(func)
|
| 1293 |
+
|
| 1294 |
+
@numba.jit(nogil=nogil, nopython=nopython, parallel=parallel)
|
| 1295 |
+
def numba_func(values, col_names_index, index):
|
| 1296 |
+
results = {}
|
| 1297 |
+
# Currently the parallel argument doesn't get passed through here
|
| 1298 |
+
# (it's disabled) since the dicts in numba aren't thread-safe.
|
| 1299 |
+
for i in range(values.shape[0]):
|
| 1300 |
+
# Create the series
|
| 1301 |
+
# TODO: values corrupted without the copy
|
| 1302 |
+
ser = Series(
|
| 1303 |
+
values[i].copy(),
|
| 1304 |
+
index=col_names_index,
|
| 1305 |
+
name=maybe_cast_str(index[i]),
|
| 1306 |
+
)
|
| 1307 |
+
results[i] = jitted_udf(ser)
|
| 1308 |
+
|
| 1309 |
+
return results
|
| 1310 |
+
|
| 1311 |
+
return numba_func
|
| 1312 |
+
|
| 1313 |
+
def apply_with_numba(self) -> dict[int, Any]:
|
| 1314 |
+
nb_func = self.generate_numba_apply_func(
|
| 1315 |
+
cast(Callable, self.func), **self.engine_kwargs
|
| 1316 |
+
)
|
| 1317 |
+
|
| 1318 |
+
from pandas.core._numba.extensions import set_numba_data
|
| 1319 |
+
|
| 1320 |
+
# Convert from numba dict to regular dict
|
| 1321 |
+
# Our isinstance checks in the df constructor don't pass for numbas typed dict
|
| 1322 |
+
with set_numba_data(self.obj.index) as index, set_numba_data(
|
| 1323 |
+
self.columns
|
| 1324 |
+
) as columns:
|
| 1325 |
+
res = dict(nb_func(self.values, columns, index))
|
| 1326 |
+
|
| 1327 |
+
return res
|
| 1328 |
+
|
| 1329 |
+
@property
|
| 1330 |
+
def result_index(self) -> Index:
|
| 1331 |
+
return self.index
|
| 1332 |
+
|
| 1333 |
+
@property
|
| 1334 |
+
def result_columns(self) -> Index:
|
| 1335 |
+
return self.columns
|
| 1336 |
+
|
| 1337 |
+
def wrap_results_for_axis(
|
| 1338 |
+
self, results: ResType, res_index: Index
|
| 1339 |
+
) -> DataFrame | Series:
|
| 1340 |
+
"""return the results for the columns"""
|
| 1341 |
+
result: DataFrame | Series
|
| 1342 |
+
|
| 1343 |
+
# we have requested to expand
|
| 1344 |
+
if self.result_type == "expand":
|
| 1345 |
+
result = self.infer_to_same_shape(results, res_index)
|
| 1346 |
+
|
| 1347 |
+
# we have a non-series and don't want inference
|
| 1348 |
+
elif not isinstance(results[0], ABCSeries):
|
| 1349 |
+
result = self.obj._constructor_sliced(results)
|
| 1350 |
+
result.index = res_index
|
| 1351 |
+
|
| 1352 |
+
# we may want to infer results
|
| 1353 |
+
else:
|
| 1354 |
+
result = self.infer_to_same_shape(results, res_index)
|
| 1355 |
+
|
| 1356 |
+
return result
|
| 1357 |
+
|
| 1358 |
+
def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:
|
| 1359 |
+
"""infer the results to the same shape as the input object"""
|
| 1360 |
+
result = self.obj._constructor(data=results)
|
| 1361 |
+
result = result.T
|
| 1362 |
+
|
| 1363 |
+
# set the index
|
| 1364 |
+
result.index = res_index
|
| 1365 |
+
|
| 1366 |
+
# infer dtypes
|
| 1367 |
+
result = result.infer_objects(copy=False)
|
| 1368 |
+
|
| 1369 |
+
return result
|
| 1370 |
+
|
| 1371 |
+
|
| 1372 |
+
class SeriesApply(NDFrameApply):
|
| 1373 |
+
obj: Series
|
| 1374 |
+
axis: AxisInt = 0
|
| 1375 |
+
by_row: Literal[False, "compat", "_compat"] # only relevant for apply()
|
| 1376 |
+
|
| 1377 |
+
def __init__(
|
| 1378 |
+
self,
|
| 1379 |
+
obj: Series,
|
| 1380 |
+
func: AggFuncType,
|
| 1381 |
+
*,
|
| 1382 |
+
convert_dtype: bool | lib.NoDefault = lib.no_default,
|
| 1383 |
+
by_row: Literal[False, "compat", "_compat"] = "compat",
|
| 1384 |
+
args,
|
| 1385 |
+
kwargs,
|
| 1386 |
+
) -> None:
|
| 1387 |
+
if convert_dtype is lib.no_default:
|
| 1388 |
+
convert_dtype = True
|
| 1389 |
+
else:
|
| 1390 |
+
warnings.warn(
|
| 1391 |
+
"the convert_dtype parameter is deprecated and will be removed in a "
|
| 1392 |
+
"future version. Do ``ser.astype(object).apply()`` "
|
| 1393 |
+
"instead if you want ``convert_dtype=False``.",
|
| 1394 |
+
FutureWarning,
|
| 1395 |
+
stacklevel=find_stack_level(),
|
| 1396 |
+
)
|
| 1397 |
+
self.convert_dtype = convert_dtype
|
| 1398 |
+
|
| 1399 |
+
super().__init__(
|
| 1400 |
+
obj,
|
| 1401 |
+
func,
|
| 1402 |
+
raw=False,
|
| 1403 |
+
result_type=None,
|
| 1404 |
+
by_row=by_row,
|
| 1405 |
+
args=args,
|
| 1406 |
+
kwargs=kwargs,
|
| 1407 |
+
)
|
| 1408 |
+
|
| 1409 |
+
def apply(self) -> DataFrame | Series:
|
| 1410 |
+
obj = self.obj
|
| 1411 |
+
|
| 1412 |
+
if len(obj) == 0:
|
| 1413 |
+
return self.apply_empty_result()
|
| 1414 |
+
|
| 1415 |
+
# dispatch to handle list-like or dict-like
|
| 1416 |
+
if is_list_like(self.func):
|
| 1417 |
+
return self.apply_list_or_dict_like()
|
| 1418 |
+
|
| 1419 |
+
if isinstance(self.func, str):
|
| 1420 |
+
# if we are a string, try to dispatch
|
| 1421 |
+
return self.apply_str()
|
| 1422 |
+
|
| 1423 |
+
if self.by_row == "_compat":
|
| 1424 |
+
return self.apply_compat()
|
| 1425 |
+
|
| 1426 |
+
# self.func is Callable
|
| 1427 |
+
return self.apply_standard()
|
| 1428 |
+
|
| 1429 |
+
def agg(self):
|
| 1430 |
+
result = super().agg()
|
| 1431 |
+
if result is None:
|
| 1432 |
+
obj = self.obj
|
| 1433 |
+
func = self.func
|
| 1434 |
+
# string, list-like, and dict-like are entirely handled in super
|
| 1435 |
+
assert callable(func)
|
| 1436 |
+
|
| 1437 |
+
# GH53325: The setup below is just to keep current behavior while emitting a
|
| 1438 |
+
# deprecation message. In the future this will all be replaced with a simple
|
| 1439 |
+
# `result = f(self.obj, *self.args, **self.kwargs)`.
|
| 1440 |
+
try:
|
| 1441 |
+
result = obj.apply(func, args=self.args, **self.kwargs)
|
| 1442 |
+
except (ValueError, AttributeError, TypeError):
|
| 1443 |
+
result = func(obj, *self.args, **self.kwargs)
|
| 1444 |
+
else:
|
| 1445 |
+
msg = (
|
| 1446 |
+
f"using {func} in {type(obj).__name__}.agg cannot aggregate and "
|
| 1447 |
+
f"has been deprecated. Use {type(obj).__name__}.transform to "
|
| 1448 |
+
f"keep behavior unchanged."
|
| 1449 |
+
)
|
| 1450 |
+
warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
|
| 1451 |
+
|
| 1452 |
+
return result
|
| 1453 |
+
|
| 1454 |
+
def apply_empty_result(self) -> Series:
|
| 1455 |
+
obj = self.obj
|
| 1456 |
+
return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__(
|
| 1457 |
+
obj, method="apply"
|
| 1458 |
+
)
|
| 1459 |
+
|
| 1460 |
+
def apply_compat(self):
|
| 1461 |
+
"""compat apply method for funcs in listlikes and dictlikes.
|
| 1462 |
+
|
| 1463 |
+
Used for each callable when giving listlikes and dictlikes of callables to
|
| 1464 |
+
apply. Needed for compatibility with Pandas < v2.1.
|
| 1465 |
+
|
| 1466 |
+
.. versionadded:: 2.1.0
|
| 1467 |
+
"""
|
| 1468 |
+
obj = self.obj
|
| 1469 |
+
func = self.func
|
| 1470 |
+
|
| 1471 |
+
if callable(func):
|
| 1472 |
+
f = com.get_cython_func(func)
|
| 1473 |
+
if f and not self.args and not self.kwargs:
|
| 1474 |
+
return obj.apply(func, by_row=False)
|
| 1475 |
+
|
| 1476 |
+
try:
|
| 1477 |
+
result = obj.apply(func, by_row="compat")
|
| 1478 |
+
except (ValueError, AttributeError, TypeError):
|
| 1479 |
+
result = obj.apply(func, by_row=False)
|
| 1480 |
+
return result
|
| 1481 |
+
|
| 1482 |
+
def apply_standard(self) -> DataFrame | Series:
|
| 1483 |
+
# caller is responsible for ensuring that f is Callable
|
| 1484 |
+
func = cast(Callable, self.func)
|
| 1485 |
+
obj = self.obj
|
| 1486 |
+
|
| 1487 |
+
if isinstance(func, np.ufunc):
|
| 1488 |
+
with np.errstate(all="ignore"):
|
| 1489 |
+
return func(obj, *self.args, **self.kwargs)
|
| 1490 |
+
elif not self.by_row:
|
| 1491 |
+
return func(obj, *self.args, **self.kwargs)
|
| 1492 |
+
|
| 1493 |
+
if self.args or self.kwargs:
|
| 1494 |
+
# _map_values does not support args/kwargs
|
| 1495 |
+
def curried(x):
|
| 1496 |
+
return func(x, *self.args, **self.kwargs)
|
| 1497 |
+
|
| 1498 |
+
else:
|
| 1499 |
+
curried = func
|
| 1500 |
+
|
| 1501 |
+
# row-wise access
|
| 1502 |
+
# apply doesn't have a `na_action` keyword and for backward compat reasons
|
| 1503 |
+
# we need to give `na_action="ignore"` for categorical data.
|
| 1504 |
+
# TODO: remove the `na_action="ignore"` when that default has been changed in
|
| 1505 |
+
# Categorical (GH51645).
|
| 1506 |
+
action = "ignore" if isinstance(obj.dtype, CategoricalDtype) else None
|
| 1507 |
+
mapped = obj._map_values(
|
| 1508 |
+
mapper=curried, na_action=action, convert=self.convert_dtype
|
| 1509 |
+
)
|
| 1510 |
+
|
| 1511 |
+
if len(mapped) and isinstance(mapped[0], ABCSeries):
|
| 1512 |
+
# GH#43986 Need to do list(mapped) in order to get treated as nested
|
| 1513 |
+
# See also GH#25959 regarding EA support
|
| 1514 |
+
return obj._constructor_expanddim(list(mapped), index=obj.index)
|
| 1515 |
+
else:
|
| 1516 |
+
return obj._constructor(mapped, index=obj.index).__finalize__(
|
| 1517 |
+
obj, method="apply"
|
| 1518 |
+
)
|
| 1519 |
+
|
| 1520 |
+
|
| 1521 |
+
class GroupByApply(Apply):
|
| 1522 |
+
obj: GroupBy | Resampler | BaseWindow
|
| 1523 |
+
|
| 1524 |
+
def __init__(
|
| 1525 |
+
self,
|
| 1526 |
+
obj: GroupBy[NDFrameT],
|
| 1527 |
+
func: AggFuncType,
|
| 1528 |
+
*,
|
| 1529 |
+
args,
|
| 1530 |
+
kwargs,
|
| 1531 |
+
) -> None:
|
| 1532 |
+
kwargs = kwargs.copy()
|
| 1533 |
+
self.axis = obj.obj._get_axis_number(kwargs.get("axis", 0))
|
| 1534 |
+
super().__init__(
|
| 1535 |
+
obj,
|
| 1536 |
+
func,
|
| 1537 |
+
raw=False,
|
| 1538 |
+
result_type=None,
|
| 1539 |
+
args=args,
|
| 1540 |
+
kwargs=kwargs,
|
| 1541 |
+
)
|
| 1542 |
+
|
| 1543 |
+
def apply(self):
|
| 1544 |
+
raise NotImplementedError
|
| 1545 |
+
|
| 1546 |
+
def transform(self):
|
| 1547 |
+
raise NotImplementedError
|
| 1548 |
+
|
| 1549 |
+
def agg_or_apply_list_like(
|
| 1550 |
+
self, op_name: Literal["agg", "apply"]
|
| 1551 |
+
) -> DataFrame | Series:
|
| 1552 |
+
obj = self.obj
|
| 1553 |
+
kwargs = self.kwargs
|
| 1554 |
+
if op_name == "apply":
|
| 1555 |
+
kwargs = {**kwargs, "by_row": False}
|
| 1556 |
+
|
| 1557 |
+
if getattr(obj, "axis", 0) == 1:
|
| 1558 |
+
raise NotImplementedError("axis other than 0 is not supported")
|
| 1559 |
+
|
| 1560 |
+
if obj._selected_obj.ndim == 1:
|
| 1561 |
+
# For SeriesGroupBy this matches _obj_with_exclusions
|
| 1562 |
+
selected_obj = obj._selected_obj
|
| 1563 |
+
else:
|
| 1564 |
+
selected_obj = obj._obj_with_exclusions
|
| 1565 |
+
|
| 1566 |
+
# Only set as_index=True on groupby objects, not Window or Resample
|
| 1567 |
+
# that inherit from this class.
|
| 1568 |
+
with com.temp_setattr(
|
| 1569 |
+
obj, "as_index", True, condition=hasattr(obj, "as_index")
|
| 1570 |
+
):
|
| 1571 |
+
keys, results = self.compute_list_like(op_name, selected_obj, kwargs)
|
| 1572 |
+
result = self.wrap_results_list_like(keys, results)
|
| 1573 |
+
return result
|
| 1574 |
+
|
| 1575 |
+
def agg_or_apply_dict_like(
|
| 1576 |
+
self, op_name: Literal["agg", "apply"]
|
| 1577 |
+
) -> DataFrame | Series:
|
| 1578 |
+
from pandas.core.groupby.generic import (
|
| 1579 |
+
DataFrameGroupBy,
|
| 1580 |
+
SeriesGroupBy,
|
| 1581 |
+
)
|
| 1582 |
+
|
| 1583 |
+
assert op_name in ["agg", "apply"]
|
| 1584 |
+
|
| 1585 |
+
obj = self.obj
|
| 1586 |
+
kwargs = {}
|
| 1587 |
+
if op_name == "apply":
|
| 1588 |
+
by_row = "_compat" if self.by_row else False
|
| 1589 |
+
kwargs.update({"by_row": by_row})
|
| 1590 |
+
|
| 1591 |
+
if getattr(obj, "axis", 0) == 1:
|
| 1592 |
+
raise NotImplementedError("axis other than 0 is not supported")
|
| 1593 |
+
|
| 1594 |
+
selected_obj = obj._selected_obj
|
| 1595 |
+
selection = obj._selection
|
| 1596 |
+
|
| 1597 |
+
is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))
|
| 1598 |
+
|
| 1599 |
+
# Numba Groupby engine/engine-kwargs passthrough
|
| 1600 |
+
if is_groupby:
|
| 1601 |
+
engine = self.kwargs.get("engine", None)
|
| 1602 |
+
engine_kwargs = self.kwargs.get("engine_kwargs", None)
|
| 1603 |
+
kwargs.update({"engine": engine, "engine_kwargs": engine_kwargs})
|
| 1604 |
+
|
| 1605 |
+
with com.temp_setattr(
|
| 1606 |
+
obj, "as_index", True, condition=hasattr(obj, "as_index")
|
| 1607 |
+
):
|
| 1608 |
+
result_index, result_data = self.compute_dict_like(
|
| 1609 |
+
op_name, selected_obj, selection, kwargs
|
| 1610 |
+
)
|
| 1611 |
+
result = self.wrap_results_dict_like(selected_obj, result_index, result_data)
|
| 1612 |
+
return result
|
| 1613 |
+
|
| 1614 |
+
|
| 1615 |
+
class ResamplerWindowApply(GroupByApply):
|
| 1616 |
+
axis: AxisInt = 0
|
| 1617 |
+
obj: Resampler | BaseWindow
|
| 1618 |
+
|
| 1619 |
+
def __init__(
|
| 1620 |
+
self,
|
| 1621 |
+
obj: Resampler | BaseWindow,
|
| 1622 |
+
func: AggFuncType,
|
| 1623 |
+
*,
|
| 1624 |
+
args,
|
| 1625 |
+
kwargs,
|
| 1626 |
+
) -> None:
|
| 1627 |
+
super(GroupByApply, self).__init__(
|
| 1628 |
+
obj,
|
| 1629 |
+
func,
|
| 1630 |
+
raw=False,
|
| 1631 |
+
result_type=None,
|
| 1632 |
+
args=args,
|
| 1633 |
+
kwargs=kwargs,
|
| 1634 |
+
)
|
| 1635 |
+
|
| 1636 |
+
def apply(self):
|
| 1637 |
+
raise NotImplementedError
|
| 1638 |
+
|
| 1639 |
+
def transform(self):
|
| 1640 |
+
raise NotImplementedError
|
| 1641 |
+
|
| 1642 |
+
|
| 1643 |
+
def reconstruct_func(
|
| 1644 |
+
func: AggFuncType | None, **kwargs
|
| 1645 |
+
) -> tuple[bool, AggFuncType, tuple[str, ...] | None, npt.NDArray[np.intp] | None]:
|
| 1646 |
+
"""
|
| 1647 |
+
This is the internal function to reconstruct func given if there is relabeling
|
| 1648 |
+
or not and also normalize the keyword to get new order of columns.
|
| 1649 |
+
|
| 1650 |
+
If named aggregation is applied, `func` will be None, and kwargs contains the
|
| 1651 |
+
column and aggregation function information to be parsed;
|
| 1652 |
+
If named aggregation is not applied, `func` is either string (e.g. 'min') or
|
| 1653 |
+
Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name
|
| 1654 |
+
and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]})
|
| 1655 |
+
|
| 1656 |
+
If relabeling is True, will return relabeling, reconstructed func, column
|
| 1657 |
+
names, and the reconstructed order of columns.
|
| 1658 |
+
If relabeling is False, the columns and order will be None.
|
| 1659 |
+
|
| 1660 |
+
Parameters
|
| 1661 |
+
----------
|
| 1662 |
+
func: agg function (e.g. 'min' or Callable) or list of agg functions
|
| 1663 |
+
(e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
|
| 1664 |
+
**kwargs: dict, kwargs used in is_multi_agg_with_relabel and
|
| 1665 |
+
normalize_keyword_aggregation function for relabelling
|
| 1666 |
+
|
| 1667 |
+
Returns
|
| 1668 |
+
-------
|
| 1669 |
+
relabelling: bool, if there is relabelling or not
|
| 1670 |
+
func: normalized and mangled func
|
| 1671 |
+
columns: tuple of column names
|
| 1672 |
+
order: array of columns indices
|
| 1673 |
+
|
| 1674 |
+
Examples
|
| 1675 |
+
--------
|
| 1676 |
+
>>> reconstruct_func(None, **{"foo": ("col", "min")})
|
| 1677 |
+
(True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))
|
| 1678 |
+
|
| 1679 |
+
>>> reconstruct_func("min")
|
| 1680 |
+
(False, 'min', None, None)
|
| 1681 |
+
"""
|
| 1682 |
+
relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
|
| 1683 |
+
columns: tuple[str, ...] | None = None
|
| 1684 |
+
order: npt.NDArray[np.intp] | None = None
|
| 1685 |
+
|
| 1686 |
+
if not relabeling:
|
| 1687 |
+
if isinstance(func, list) and len(func) > len(set(func)):
|
| 1688 |
+
# GH 28426 will raise error if duplicated function names are used and
|
| 1689 |
+
# there is no reassigned name
|
| 1690 |
+
raise SpecificationError(
|
| 1691 |
+
"Function names must be unique if there is no new column names "
|
| 1692 |
+
"assigned"
|
| 1693 |
+
)
|
| 1694 |
+
if func is None:
|
| 1695 |
+
# nicer error message
|
| 1696 |
+
raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")
|
| 1697 |
+
|
| 1698 |
+
if relabeling:
|
| 1699 |
+
# error: Incompatible types in assignment (expression has type
|
| 1700 |
+
# "MutableMapping[Hashable, list[Callable[..., Any] | str]]", variable has type
|
| 1701 |
+
# "Callable[..., Any] | str | list[Callable[..., Any] | str] |
|
| 1702 |
+
# MutableMapping[Hashable, Callable[..., Any] | str | list[Callable[..., Any] |
|
| 1703 |
+
# str]] | None")
|
| 1704 |
+
func, columns, order = normalize_keyword_aggregation( # type: ignore[assignment]
|
| 1705 |
+
kwargs
|
| 1706 |
+
)
|
| 1707 |
+
assert func is not None
|
| 1708 |
+
|
| 1709 |
+
return relabeling, func, columns, order
|
| 1710 |
+
|
| 1711 |
+
|
| 1712 |
+
def is_multi_agg_with_relabel(**kwargs) -> bool:
|
| 1713 |
+
"""
|
| 1714 |
+
Check whether kwargs passed to .agg look like multi-agg with relabeling.
|
| 1715 |
+
|
| 1716 |
+
Parameters
|
| 1717 |
+
----------
|
| 1718 |
+
**kwargs : dict
|
| 1719 |
+
|
| 1720 |
+
Returns
|
| 1721 |
+
-------
|
| 1722 |
+
bool
|
| 1723 |
+
|
| 1724 |
+
Examples
|
| 1725 |
+
--------
|
| 1726 |
+
>>> is_multi_agg_with_relabel(a="max")
|
| 1727 |
+
False
|
| 1728 |
+
>>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
|
| 1729 |
+
True
|
| 1730 |
+
>>> is_multi_agg_with_relabel()
|
| 1731 |
+
False
|
| 1732 |
+
"""
|
| 1733 |
+
return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
|
| 1734 |
+
len(kwargs) > 0
|
| 1735 |
+
)
|
| 1736 |
+
|
| 1737 |
+
|
| 1738 |
+
def normalize_keyword_aggregation(
|
| 1739 |
+
kwargs: dict,
|
| 1740 |
+
) -> tuple[
|
| 1741 |
+
MutableMapping[Hashable, list[AggFuncTypeBase]],
|
| 1742 |
+
tuple[str, ...],
|
| 1743 |
+
npt.NDArray[np.intp],
|
| 1744 |
+
]:
|
| 1745 |
+
"""
|
| 1746 |
+
Normalize user-provided "named aggregation" kwargs.
|
| 1747 |
+
Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
|
| 1748 |
+
to the old Dict[str, List[scalar]]].
|
| 1749 |
+
|
| 1750 |
+
Parameters
|
| 1751 |
+
----------
|
| 1752 |
+
kwargs : dict
|
| 1753 |
+
|
| 1754 |
+
Returns
|
| 1755 |
+
-------
|
| 1756 |
+
aggspec : dict
|
| 1757 |
+
The transformed kwargs.
|
| 1758 |
+
columns : tuple[str, ...]
|
| 1759 |
+
The user-provided keys.
|
| 1760 |
+
col_idx_order : List[int]
|
| 1761 |
+
List of columns indices.
|
| 1762 |
+
|
| 1763 |
+
Examples
|
| 1764 |
+
--------
|
| 1765 |
+
>>> normalize_keyword_aggregation({"output": ("input", "sum")})
|
| 1766 |
+
(defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
|
| 1767 |
+
"""
|
| 1768 |
+
from pandas.core.indexes.base import Index
|
| 1769 |
+
|
| 1770 |
+
# Normalize the aggregation functions as Mapping[column, List[func]],
|
| 1771 |
+
# process normally, then fixup the names.
|
| 1772 |
+
# TODO: aggspec type: typing.Dict[str, List[AggScalar]]
|
| 1773 |
+
aggspec = defaultdict(list)
|
| 1774 |
+
order = []
|
| 1775 |
+
columns, pairs = list(zip(*kwargs.items()))
|
| 1776 |
+
|
| 1777 |
+
for column, aggfunc in pairs:
|
| 1778 |
+
aggspec[column].append(aggfunc)
|
| 1779 |
+
order.append((column, com.get_callable_name(aggfunc) or aggfunc))
|
| 1780 |
+
|
| 1781 |
+
# uniquify aggfunc name if duplicated in order list
|
| 1782 |
+
uniquified_order = _make_unique_kwarg_list(order)
|
| 1783 |
+
|
| 1784 |
+
# GH 25719, due to aggspec will change the order of assigned columns in aggregation
|
| 1785 |
+
# uniquified_aggspec will store uniquified order list and will compare it with order
|
| 1786 |
+
# based on index
|
| 1787 |
+
aggspec_order = [
|
| 1788 |
+
(column, com.get_callable_name(aggfunc) or aggfunc)
|
| 1789 |
+
for column, aggfuncs in aggspec.items()
|
| 1790 |
+
for aggfunc in aggfuncs
|
| 1791 |
+
]
|
| 1792 |
+
uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)
|
| 1793 |
+
|
| 1794 |
+
# get the new index of columns by comparison
|
| 1795 |
+
col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
|
| 1796 |
+
return aggspec, columns, col_idx_order
|
| 1797 |
+
|
| 1798 |
+
|
| 1799 |
+
def _make_unique_kwarg_list(
|
| 1800 |
+
seq: Sequence[tuple[Any, Any]]
|
| 1801 |
+
) -> Sequence[tuple[Any, Any]]:
|
| 1802 |
+
"""
|
| 1803 |
+
Uniquify aggfunc name of the pairs in the order list
|
| 1804 |
+
|
| 1805 |
+
Examples:
|
| 1806 |
+
--------
|
| 1807 |
+
>>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
|
| 1808 |
+
>>> _make_unique_kwarg_list(kwarg_list)
|
| 1809 |
+
[('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
|
| 1810 |
+
"""
|
| 1811 |
+
return [
|
| 1812 |
+
(pair[0], f"{pair[1]}_{seq[:i].count(pair)}") if seq.count(pair) > 1 else pair
|
| 1813 |
+
for i, pair in enumerate(seq)
|
| 1814 |
+
]
|
| 1815 |
+
|
| 1816 |
+
|
| 1817 |
+
def relabel_result(
|
| 1818 |
+
result: DataFrame | Series,
|
| 1819 |
+
func: dict[str, list[Callable | str]],
|
| 1820 |
+
columns: Iterable[Hashable],
|
| 1821 |
+
order: Iterable[int],
|
| 1822 |
+
) -> dict[Hashable, Series]:
|
| 1823 |
+
"""
|
| 1824 |
+
Internal function to reorder result if relabelling is True for
|
| 1825 |
+
dataframe.agg, and return the reordered result in dict.
|
| 1826 |
+
|
| 1827 |
+
Parameters:
|
| 1828 |
+
----------
|
| 1829 |
+
result: Result from aggregation
|
| 1830 |
+
func: Dict of (column name, funcs)
|
| 1831 |
+
columns: New columns name for relabelling
|
| 1832 |
+
order: New order for relabelling
|
| 1833 |
+
|
| 1834 |
+
Examples
|
| 1835 |
+
--------
|
| 1836 |
+
>>> from pandas.core.apply import relabel_result
|
| 1837 |
+
>>> result = pd.DataFrame(
|
| 1838 |
+
... {"A": [np.nan, 2, np.nan], "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]},
|
| 1839 |
+
... index=["max", "mean", "min"]
|
| 1840 |
+
... )
|
| 1841 |
+
>>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
|
| 1842 |
+
>>> columns = ("foo", "aab", "bar", "dat")
|
| 1843 |
+
>>> order = [0, 1, 2, 3]
|
| 1844 |
+
>>> result_in_dict = relabel_result(result, funcs, columns, order)
|
| 1845 |
+
>>> pd.DataFrame(result_in_dict, index=columns)
|
| 1846 |
+
A C B
|
| 1847 |
+
foo 2.0 NaN NaN
|
| 1848 |
+
aab NaN 6.0 NaN
|
| 1849 |
+
bar NaN NaN 4.0
|
| 1850 |
+
dat NaN NaN 2.5
|
| 1851 |
+
"""
|
| 1852 |
+
from pandas.core.indexes.base import Index
|
| 1853 |
+
|
| 1854 |
+
reordered_indexes = [
|
| 1855 |
+
pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
|
| 1856 |
+
]
|
| 1857 |
+
reordered_result_in_dict: dict[Hashable, Series] = {}
|
| 1858 |
+
idx = 0
|
| 1859 |
+
|
| 1860 |
+
reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
|
| 1861 |
+
for col, fun in func.items():
|
| 1862 |
+
s = result[col].dropna()
|
| 1863 |
+
|
| 1864 |
+
# In the `_aggregate`, the callable names are obtained and used in `result`, and
|
| 1865 |
+
# these names are ordered alphabetically. e.g.
|
| 1866 |
+
# C2 C1
|
| 1867 |
+
# <lambda> 1 NaN
|
| 1868 |
+
# amax NaN 4.0
|
| 1869 |
+
# max NaN 4.0
|
| 1870 |
+
# sum 18.0 6.0
|
| 1871 |
+
# Therefore, the order of functions for each column could be shuffled
|
| 1872 |
+
# accordingly so need to get the callable name if it is not parsed names, and
|
| 1873 |
+
# reorder the aggregated result for each column.
|
| 1874 |
+
# e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is
|
| 1875 |
+
# [sum, <lambda>], but in `result`, it will be [<lambda>, sum], and we need to
|
| 1876 |
+
# reorder so that aggregated values map to their functions regarding the order.
|
| 1877 |
+
|
| 1878 |
+
# However there is only one column being used for aggregation, not need to
|
| 1879 |
+
# reorder since the index is not sorted, and keep as is in `funcs`, e.g.
|
| 1880 |
+
# A
|
| 1881 |
+
# min 1.0
|
| 1882 |
+
# mean 1.5
|
| 1883 |
+
# mean 1.5
|
| 1884 |
+
if reorder_mask:
|
| 1885 |
+
fun = [
|
| 1886 |
+
com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
|
| 1887 |
+
]
|
| 1888 |
+
col_idx_order = Index(s.index).get_indexer(fun)
|
| 1889 |
+
s = s.iloc[col_idx_order]
|
| 1890 |
+
|
| 1891 |
+
# assign the new user-provided "named aggregation" as index names, and reindex
|
| 1892 |
+
# it based on the whole user-provided names.
|
| 1893 |
+
s.index = reordered_indexes[idx : idx + len(fun)]
|
| 1894 |
+
reordered_result_in_dict[col] = s.reindex(columns, copy=False)
|
| 1895 |
+
idx = idx + len(fun)
|
| 1896 |
+
return reordered_result_in_dict
|
| 1897 |
+
|
| 1898 |
+
|
| 1899 |
+
def reconstruct_and_relabel_result(result, func, **kwargs) -> DataFrame | Series:
|
| 1900 |
+
from pandas import DataFrame
|
| 1901 |
+
|
| 1902 |
+
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
|
| 1903 |
+
|
| 1904 |
+
if relabeling:
|
| 1905 |
+
# This is to keep the order to columns occurrence unchanged, and also
|
| 1906 |
+
# keep the order of new columns occurrence unchanged
|
| 1907 |
+
|
| 1908 |
+
# For the return values of reconstruct_func, if relabeling is
|
| 1909 |
+
# False, columns and order will be None.
|
| 1910 |
+
assert columns is not None
|
| 1911 |
+
assert order is not None
|
| 1912 |
+
|
| 1913 |
+
result_in_dict = relabel_result(result, func, columns, order)
|
| 1914 |
+
result = DataFrame(result_in_dict, index=columns)
|
| 1915 |
+
|
| 1916 |
+
return result
|
| 1917 |
+
|
| 1918 |
+
|
| 1919 |
+
# TODO: Can't use, because mypy doesn't like us setting __name__
|
| 1920 |
+
# error: "partial[Any]" has no attribute "__name__"
|
| 1921 |
+
# the type is:
|
| 1922 |
+
# typing.Sequence[Callable[..., ScalarResult]]
|
| 1923 |
+
# -> typing.Sequence[Callable[..., ScalarResult]]:
|
| 1924 |
+
|
| 1925 |
+
|
| 1926 |
+
def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
|
| 1927 |
+
"""
|
| 1928 |
+
Possibly mangle a list of aggfuncs.
|
| 1929 |
+
|
| 1930 |
+
Parameters
|
| 1931 |
+
----------
|
| 1932 |
+
aggfuncs : Sequence
|
| 1933 |
+
|
| 1934 |
+
Returns
|
| 1935 |
+
-------
|
| 1936 |
+
mangled: list-like
|
| 1937 |
+
A new AggSpec sequence, where lambdas have been converted
|
| 1938 |
+
to have unique names.
|
| 1939 |
+
|
| 1940 |
+
Notes
|
| 1941 |
+
-----
|
| 1942 |
+
If just one aggfunc is passed, the name will not be mangled.
|
| 1943 |
+
"""
|
| 1944 |
+
if len(aggfuncs) <= 1:
|
| 1945 |
+
# don't mangle for .agg([lambda x: .])
|
| 1946 |
+
return aggfuncs
|
| 1947 |
+
i = 0
|
| 1948 |
+
mangled_aggfuncs = []
|
| 1949 |
+
for aggfunc in aggfuncs:
|
| 1950 |
+
if com.get_callable_name(aggfunc) == "<lambda>":
|
| 1951 |
+
aggfunc = partial(aggfunc)
|
| 1952 |
+
aggfunc.__name__ = f"<lambda_{i}>"
|
| 1953 |
+
i += 1
|
| 1954 |
+
mangled_aggfuncs.append(aggfunc)
|
| 1955 |
+
|
| 1956 |
+
return mangled_aggfuncs
|
| 1957 |
+
|
| 1958 |
+
|
| 1959 |
+
def maybe_mangle_lambdas(agg_spec: Any) -> Any:
|
| 1960 |
+
"""
|
| 1961 |
+
Make new lambdas with unique names.
|
| 1962 |
+
|
| 1963 |
+
Parameters
|
| 1964 |
+
----------
|
| 1965 |
+
agg_spec : Any
|
| 1966 |
+
An argument to GroupBy.agg.
|
| 1967 |
+
Non-dict-like `agg_spec` are pass through as is.
|
| 1968 |
+
For dict-like `agg_spec` a new spec is returned
|
| 1969 |
+
with name-mangled lambdas.
|
| 1970 |
+
|
| 1971 |
+
Returns
|
| 1972 |
+
-------
|
| 1973 |
+
mangled : Any
|
| 1974 |
+
Same type as the input.
|
| 1975 |
+
|
| 1976 |
+
Examples
|
| 1977 |
+
--------
|
| 1978 |
+
>>> maybe_mangle_lambdas('sum')
|
| 1979 |
+
'sum'
|
| 1980 |
+
>>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
|
| 1981 |
+
[<function __main__.<lambda_0>,
|
| 1982 |
+
<function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
|
| 1983 |
+
"""
|
| 1984 |
+
is_dict = is_dict_like(agg_spec)
|
| 1985 |
+
if not (is_dict or is_list_like(agg_spec)):
|
| 1986 |
+
return agg_spec
|
| 1987 |
+
mangled_aggspec = type(agg_spec)() # dict or OrderedDict
|
| 1988 |
+
|
| 1989 |
+
if is_dict:
|
| 1990 |
+
for key, aggfuncs in agg_spec.items():
|
| 1991 |
+
if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
|
| 1992 |
+
mangled_aggfuncs = _managle_lambda_list(aggfuncs)
|
| 1993 |
+
else:
|
| 1994 |
+
mangled_aggfuncs = aggfuncs
|
| 1995 |
+
|
| 1996 |
+
mangled_aggspec[key] = mangled_aggfuncs
|
| 1997 |
+
else:
|
| 1998 |
+
mangled_aggspec = _managle_lambda_list(agg_spec)
|
| 1999 |
+
|
| 2000 |
+
return mangled_aggspec
|
| 2001 |
+
|
| 2002 |
+
|
| 2003 |
+
def validate_func_kwargs(
|
| 2004 |
+
kwargs: dict,
|
| 2005 |
+
) -> tuple[list[str], list[str | Callable[..., Any]]]:
|
| 2006 |
+
"""
|
| 2007 |
+
Validates types of user-provided "named aggregation" kwargs.
|
| 2008 |
+
`TypeError` is raised if aggfunc is not `str` or callable.
|
| 2009 |
+
|
| 2010 |
+
Parameters
|
| 2011 |
+
----------
|
| 2012 |
+
kwargs : dict
|
| 2013 |
+
|
| 2014 |
+
Returns
|
| 2015 |
+
-------
|
| 2016 |
+
columns : List[str]
|
| 2017 |
+
List of user-provided keys.
|
| 2018 |
+
func : List[Union[str, callable[...,Any]]]
|
| 2019 |
+
List of user-provided aggfuncs
|
| 2020 |
+
|
| 2021 |
+
Examples
|
| 2022 |
+
--------
|
| 2023 |
+
>>> validate_func_kwargs({'one': 'min', 'two': 'max'})
|
| 2024 |
+
(['one', 'two'], ['min', 'max'])
|
| 2025 |
+
"""
|
| 2026 |
+
tuple_given_message = "func is expected but received {} in **kwargs."
|
| 2027 |
+
columns = list(kwargs)
|
| 2028 |
+
func = []
|
| 2029 |
+
for col_func in kwargs.values():
|
| 2030 |
+
if not (isinstance(col_func, str) or callable(col_func)):
|
| 2031 |
+
raise TypeError(tuple_given_message.format(type(col_func).__name__))
|
| 2032 |
+
func.append(col_func)
|
| 2033 |
+
if not columns:
|
| 2034 |
+
no_arg_message = "Must provide 'func' or named aggregation **kwargs."
|
| 2035 |
+
raise TypeError(no_arg_message)
|
| 2036 |
+
return columns, func
|
| 2037 |
+
|
| 2038 |
+
|
| 2039 |
+
def include_axis(op_name: Literal["agg", "apply"], colg: Series | DataFrame) -> bool:
|
| 2040 |
+
return isinstance(colg, ABCDataFrame) or (
|
| 2041 |
+
isinstance(colg, ABCSeries) and op_name == "agg"
|
| 2042 |
+
)
|
| 2043 |
+
|
| 2044 |
+
|
| 2045 |
+
def warn_alias_replacement(
    obj: AggObjType,
    func: Callable,
    alias: str,
) -> None:
    """
    Emit a FutureWarning telling the user that the callable ``func`` is being
    mapped to the named alias (e.g. ``"sum"`` -> ``Series.sum``) and that this
    substitution will stop in a future pandas version.
    """
    is_np_alias = alias.startswith("np.")
    if is_np_alias:
        full_alias = alias
    else:
        # method alias: qualify with the object's class name for the message
        full_alias = f"{type(obj).__name__}.{alias}"
        alias = f'"{alias}"'
    message = (
        f"The provided callable {func} is currently using "
        f"{full_alias}. In a future version of pandas, "
        f"the provided callable will be used directly. To keep current "
        f"behavior pass the string {alias} instead."
    )
    warnings.warn(message, category=FutureWarning, stacklevel=find_stack_level())
|
videollama2/lib/python3.10/site-packages/pandas/core/array_algos/__init__.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
core.array_algos is for algorithms that operate on ndarray and ExtensionArray.
|
| 3 |
+
These should:
|
| 4 |
+
|
| 5 |
+
- Assume that any Index, Series, or DataFrame objects have already been unwrapped.
|
| 6 |
+
- Assume that any list arguments have already been cast to ndarray/EA.
|
| 7 |
+
- Not depend on Index, Series, or DataFrame, nor import any of these.
|
| 8 |
+
- May dispatch to ExtensionArray methods, but should not import from core.arrays.
|
| 9 |
+
"""
|
videollama2/lib/python3.10/site-packages/pandas/core/array_algos/datetimelike_accumulations.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
datetimelike_accumulations.py is for accumulations of datetimelike extension arrays
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
from typing import Callable
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from pandas._libs import iNaT
|
| 12 |
+
|
| 13 |
+
from pandas.core.dtypes.missing import isna
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _cum_func(
|
| 17 |
+
func: Callable,
|
| 18 |
+
values: np.ndarray,
|
| 19 |
+
*,
|
| 20 |
+
skipna: bool = True,
|
| 21 |
+
):
|
| 22 |
+
"""
|
| 23 |
+
Accumulations for 1D datetimelike arrays.
|
| 24 |
+
|
| 25 |
+
Parameters
|
| 26 |
+
----------
|
| 27 |
+
func : np.cumsum, np.maximum.accumulate, np.minimum.accumulate
|
| 28 |
+
values : np.ndarray
|
| 29 |
+
Numpy array with the values (can be of any dtype that support the
|
| 30 |
+
operation). Values is changed is modified inplace.
|
| 31 |
+
skipna : bool, default True
|
| 32 |
+
Whether to skip NA.
|
| 33 |
+
"""
|
| 34 |
+
try:
|
| 35 |
+
fill_value = {
|
| 36 |
+
np.maximum.accumulate: np.iinfo(np.int64).min,
|
| 37 |
+
np.cumsum: 0,
|
| 38 |
+
np.minimum.accumulate: np.iinfo(np.int64).max,
|
| 39 |
+
}[func]
|
| 40 |
+
except KeyError:
|
| 41 |
+
raise ValueError(f"No accumulation for {func} implemented on BaseMaskedArray")
|
| 42 |
+
|
| 43 |
+
mask = isna(values)
|
| 44 |
+
y = values.view("i8")
|
| 45 |
+
y[mask] = fill_value
|
| 46 |
+
|
| 47 |
+
if not skipna:
|
| 48 |
+
mask = np.maximum.accumulate(mask)
|
| 49 |
+
|
| 50 |
+
result = func(y)
|
| 51 |
+
result[mask] = iNaT
|
| 52 |
+
|
| 53 |
+
if values.dtype.kind in "mM":
|
| 54 |
+
return result.view(values.dtype.base)
|
| 55 |
+
return result
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def cumsum(values: np.ndarray, *, skipna: bool = True) -> np.ndarray:
    """NaT-aware cumulative sum for a 1D datetimelike array (mutates ``values`` via its i8 view)."""
    return _cum_func(np.cumsum, values, skipna=skipna)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def cummin(values: np.ndarray, *, skipna: bool = True) -> np.ndarray:
    """NaT-aware cumulative minimum for a 1D datetimelike array (mutates ``values`` via its i8 view)."""
    return _cum_func(np.minimum.accumulate, values, skipna=skipna)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def cummax(values: np.ndarray, *, skipna: bool = True) -> np.ndarray:
    """NaT-aware cumulative maximum for a 1D datetimelike array (mutates ``values`` via its i8 view)."""
    return _cum_func(np.maximum.accumulate, values, skipna=skipna)
|
videollama2/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
masked_accumulations.py is for accumulation algorithms using a mask-based approach
|
| 3 |
+
for missing values.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from __future__ import annotations
|
| 7 |
+
|
| 8 |
+
from typing import (
|
| 9 |
+
TYPE_CHECKING,
|
| 10 |
+
Callable,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
|
| 15 |
+
if TYPE_CHECKING:
|
| 16 |
+
from pandas._typing import npt
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _cum_func(
|
| 20 |
+
func: Callable,
|
| 21 |
+
values: np.ndarray,
|
| 22 |
+
mask: npt.NDArray[np.bool_],
|
| 23 |
+
*,
|
| 24 |
+
skipna: bool = True,
|
| 25 |
+
):
|
| 26 |
+
"""
|
| 27 |
+
Accumulations for 1D masked array.
|
| 28 |
+
|
| 29 |
+
We will modify values in place to replace NAs with the appropriate fill value.
|
| 30 |
+
|
| 31 |
+
Parameters
|
| 32 |
+
----------
|
| 33 |
+
func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate
|
| 34 |
+
values : np.ndarray
|
| 35 |
+
Numpy array with the values (can be of any dtype that support the
|
| 36 |
+
operation).
|
| 37 |
+
mask : np.ndarray
|
| 38 |
+
Boolean numpy array (True values indicate missing values).
|
| 39 |
+
skipna : bool, default True
|
| 40 |
+
Whether to skip NA.
|
| 41 |
+
"""
|
| 42 |
+
dtype_info: np.iinfo | np.finfo
|
| 43 |
+
if values.dtype.kind == "f":
|
| 44 |
+
dtype_info = np.finfo(values.dtype.type)
|
| 45 |
+
elif values.dtype.kind in "iu":
|
| 46 |
+
dtype_info = np.iinfo(values.dtype.type)
|
| 47 |
+
elif values.dtype.kind == "b":
|
| 48 |
+
# Max value of bool is 1, but since we are setting into a boolean
|
| 49 |
+
# array, 255 is fine as well. Min value has to be 0 when setting
|
| 50 |
+
# into the boolean array.
|
| 51 |
+
dtype_info = np.iinfo(np.uint8)
|
| 52 |
+
else:
|
| 53 |
+
raise NotImplementedError(
|
| 54 |
+
f"No masked accumulation defined for dtype {values.dtype.type}"
|
| 55 |
+
)
|
| 56 |
+
try:
|
| 57 |
+
fill_value = {
|
| 58 |
+
np.cumprod: 1,
|
| 59 |
+
np.maximum.accumulate: dtype_info.min,
|
| 60 |
+
np.cumsum: 0,
|
| 61 |
+
np.minimum.accumulate: dtype_info.max,
|
| 62 |
+
}[func]
|
| 63 |
+
except KeyError:
|
| 64 |
+
raise NotImplementedError(
|
| 65 |
+
f"No accumulation for {func} implemented on BaseMaskedArray"
|
| 66 |
+
)
|
| 67 |
+
|
| 68 |
+
values[mask] = fill_value
|
| 69 |
+
|
| 70 |
+
if not skipna:
|
| 71 |
+
mask = np.maximum.accumulate(mask)
|
| 72 |
+
|
| 73 |
+
values = func(values)
|
| 74 |
+
return values, mask
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def cumsum(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
    """Masked cumulative sum; returns ``(values, mask)``. Mutates ``values`` in place."""
    return _cum_func(np.cumsum, values, mask, skipna=skipna)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def cumprod(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
    """Masked cumulative product; returns ``(values, mask)``. Mutates ``values`` in place."""
    return _cum_func(np.cumprod, values, mask, skipna=skipna)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def cummin(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
    """Masked cumulative minimum; returns ``(values, mask)``. Mutates ``values`` in place."""
    return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def cummax(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
    """Masked cumulative maximum; returns ``(values, mask)``. Mutates ``values`` in place."""
    return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna)
|
videollama2/lib/python3.10/site-packages/pandas/core/array_algos/putmask.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
EA-compatible analogue to np.putmask
|
| 3 |
+
"""
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
from typing import (
|
| 7 |
+
TYPE_CHECKING,
|
| 8 |
+
Any,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
from pandas._libs import lib
|
| 14 |
+
|
| 15 |
+
from pandas.core.dtypes.cast import infer_dtype_from
|
| 16 |
+
from pandas.core.dtypes.common import is_list_like
|
| 17 |
+
|
| 18 |
+
from pandas.core.arrays import ExtensionArray
|
| 19 |
+
|
| 20 |
+
if TYPE_CHECKING:
|
| 21 |
+
from pandas._typing import (
|
| 22 |
+
ArrayLike,
|
| 23 |
+
npt,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
from pandas import MultiIndex
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None:
    """
    ExtensionArray-compatible implementation of np.putmask. The main
    difference is we do not handle repeating or truncating like numpy.

    Parameters
    ----------
    values: np.ndarray or ExtensionArray
        Modified in place.
    mask : np.ndarray[bool]
        We assume extract_bool_array has already been called.
    value : Any
        Scalar or listlike; a listlike with the same length as ``values`` is
        aligned positionally (only the masked positions are taken from it).
    """

    if (
        not isinstance(values, np.ndarray)
        or (values.dtype == object and not lib.is_scalar(value))
        # GH#43424: np.putmask raises TypeError if we cannot cast between types with
        # rule = "safe", a stricter guarantee we may not have here
        or (
            isinstance(value, np.ndarray) and not np.can_cast(value.dtype, values.dtype)
        )
    ):
        # GH#19266 using np.putmask gives unexpected results with listlike value
        # along with object dtype
        if is_list_like(value) and len(value) == len(values):
            values[mask] = value[mask]
        else:
            values[mask] = value
    else:
        # GH#37833 np.putmask is more performant than __setitem__
        np.putmask(values, mask, value)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def putmask_without_repeat(
    values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any
) -> None:
    """
    np.putmask will truncate or repeat if `new` is a listlike with
    len(new) != len(values). We require an exact match.

    Parameters
    ----------
    values : np.ndarray
        Modified in place.
    mask : np.ndarray[bool]
    new : Any

    Raises
    ------
    ValueError
        If ``new`` is 1D listlike and its length matches neither the number
        of masked positions nor the mask length.
    """
    if getattr(new, "ndim", 0) >= 1:
        # cast `new` to the target dtype up front (no copy when it already matches)
        new = new.astype(values.dtype, copy=False)

    # TODO: this prob needs some better checking for 2D cases
    nlocs = mask.sum()
    if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1:
        shape = np.shape(new)
        # np.shape compat for if setitem_datetimelike_compat
        # changed arraylike to list e.g. test_where_dt64_2d
        if nlocs == shape[-1]:
            # GH#30567
            # If length of ``new`` is less than the length of ``values``,
            # `np.putmask` would first repeat the ``new`` array and then
            # assign the masked values hence produces incorrect result.
            # `np.place` on the other hand uses the ``new`` values at it is
            # to place in the masked locations of ``values``
            np.place(values, mask, new)
            # i.e. values[mask] = new
        elif mask.shape[-1] == shape[-1] or shape[-1] == 1:
            np.putmask(values, mask, new)
        else:
            raise ValueError("cannot assign mismatch length to masked array")
    else:
        np.putmask(values, mask, new)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def validate_putmask(
    values: ArrayLike | MultiIndex, mask: np.ndarray
) -> tuple[npt.NDArray[np.bool_], bool]:
    """
    Validate mask and check if this putmask operation is a no-op.

    Returns the mask coerced to ndarray[bool] plus a flag that is True when
    no position is selected (so callers can skip the write entirely).
    """
    bool_mask = extract_bool_array(mask)
    if bool_mask.shape != values.shape:
        raise ValueError("putmask: mask and data must be the same size")
    return bool_mask, not bool_mask.any()
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def extract_bool_array(mask: ArrayLike) -> npt.NDArray[np.bool_]:
    """
    If we have a SparseArray or BooleanArray, convert it to ndarray[bool].
    """
    if isinstance(mask, ExtensionArray):
        # BooleanArray needs explicit NA handling; for the other EAs this is
        # equivalent to np.asarray(mask, dtype=bool)
        mask = mask.to_numpy(dtype=bool, na_value=False)

    return np.asarray(mask, dtype=bool)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other):
|
| 130 |
+
"""
|
| 131 |
+
Parameters
|
| 132 |
+
----------
|
| 133 |
+
values : np.ndarray
|
| 134 |
+
num_set : int
|
| 135 |
+
For putmask, this is mask.sum()
|
| 136 |
+
other : Any
|
| 137 |
+
"""
|
| 138 |
+
if values.dtype == object:
|
| 139 |
+
dtype, _ = infer_dtype_from(other)
|
| 140 |
+
|
| 141 |
+
if lib.is_np_dtype(dtype, "mM"):
|
| 142 |
+
# https://github.com/numpy/numpy/issues/12550
|
| 143 |
+
# timedelta64 will incorrectly cast to int
|
| 144 |
+
if not is_list_like(other):
|
| 145 |
+
other = [other] * num_set
|
| 146 |
+
else:
|
| 147 |
+
other = list(other)
|
| 148 |
+
|
| 149 |
+
return other
|
videollama2/lib/python3.10/site-packages/pandas/core/array_algos/quantile.py
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from pandas.core.dtypes.missing import (
|
| 8 |
+
isna,
|
| 9 |
+
na_value_for_dtype,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
if TYPE_CHECKING:
|
| 13 |
+
from pandas._typing import (
|
| 14 |
+
ArrayLike,
|
| 15 |
+
Scalar,
|
| 16 |
+
npt,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def quantile_compat(
    values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str
) -> ArrayLike:
    """
    Compute the quantiles of the given values for each quantile in `qs`.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
    qs : np.ndarray[float64]
    interpolation : str

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    if not isinstance(values, np.ndarray):
        # ExtensionArrays implement their own quantile logic
        return values._quantile(qs, interpolation)

    fill_value = na_value_for_dtype(values.dtype, compat=False)
    return quantile_with_mask(values, isna(values), fill_value, qs, interpolation)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def quantile_with_mask(
    values: np.ndarray,
    mask: npt.NDArray[np.bool_],
    fill_value,
    qs: npt.NDArray[np.float64],
    interpolation: str,
) -> np.ndarray:
    """
    Compute the quantiles of the given values for each quantile in `qs`.

    Parameters
    ----------
    values : np.ndarray
        For ExtensionArray, this is _values_for_factorize()[0]
    mask : np.ndarray[bool]
        mask = isna(values)
        For ExtensionArray, this is computed before calling _value_for_factorize
    fill_value : Scalar
        The value to interpret fill NA entries with
        For ExtensionArray, this is _values_for_factorize()[1]
    qs : np.ndarray[float64]
        Quantiles as fractions in [0, 1].
    interpolation : str
        Type of interpolation

    Returns
    -------
    np.ndarray
        Shape (len(values), len(qs)) in the 2D case.

    Notes
    -----
    Assumes values is already 2D. For ExtensionArray this means np.atleast_2d
    has been called on _values_for_factorize()[0]

    Quantile is computed along axis=1.
    """
    assert values.shape == mask.shape
    if values.ndim == 1:
        # unsqueeze, operate, re-squeeze
        values = np.atleast_2d(values)
        mask = np.atleast_2d(mask)
        res_values = quantile_with_mask(values, mask, fill_value, qs, interpolation)
        return res_values[0]

    assert values.ndim == 2

    is_empty = values.shape[1] == 0

    if is_empty:
        # create the array of na_values
        # 2d len(values) * len(qs)
        flat = np.array([fill_value] * len(qs))
        result = np.repeat(flat, len(values)).reshape(len(values), len(qs))
    else:
        # _nanpercentile works in percent units, hence qs * 100
        result = _nanpercentile(
            values,
            qs * 100.0,
            na_value=fill_value,
            mask=mask,
            interpolation=interpolation,
        )

        result = np.asarray(result)
        # _nanpercentile returns shape (len(qs), len(values)); transpose so
        # rows correspond to the original rows again
        result = result.T

    return result
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def _nanpercentile_1d(
|
| 112 |
+
values: np.ndarray,
|
| 113 |
+
mask: npt.NDArray[np.bool_],
|
| 114 |
+
qs: npt.NDArray[np.float64],
|
| 115 |
+
na_value: Scalar,
|
| 116 |
+
interpolation: str,
|
| 117 |
+
) -> Scalar | np.ndarray:
|
| 118 |
+
"""
|
| 119 |
+
Wrapper for np.percentile that skips missing values, specialized to
|
| 120 |
+
1-dimensional case.
|
| 121 |
+
|
| 122 |
+
Parameters
|
| 123 |
+
----------
|
| 124 |
+
values : array over which to find quantiles
|
| 125 |
+
mask : ndarray[bool]
|
| 126 |
+
locations in values that should be considered missing
|
| 127 |
+
qs : np.ndarray[float64] of quantile indices to find
|
| 128 |
+
na_value : scalar
|
| 129 |
+
value to return for empty or all-null values
|
| 130 |
+
interpolation : str
|
| 131 |
+
|
| 132 |
+
Returns
|
| 133 |
+
-------
|
| 134 |
+
quantiles : scalar or array
|
| 135 |
+
"""
|
| 136 |
+
# mask is Union[ExtensionArray, ndarray]
|
| 137 |
+
values = values[~mask]
|
| 138 |
+
|
| 139 |
+
if len(values) == 0:
|
| 140 |
+
# Can't pass dtype=values.dtype here bc we might have na_value=np.nan
|
| 141 |
+
# with values.dtype=int64 see test_quantile_empty
|
| 142 |
+
# equiv: 'np.array([na_value] * len(qs))' but much faster
|
| 143 |
+
return np.full(len(qs), na_value)
|
| 144 |
+
|
| 145 |
+
return np.percentile(
|
| 146 |
+
values,
|
| 147 |
+
qs,
|
| 148 |
+
# error: No overload variant of "percentile" matches argument
|
| 149 |
+
# types "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]"
|
| 150 |
+
# , "Dict[str, str]" [call-overload]
|
| 151 |
+
method=interpolation, # type: ignore[call-overload]
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def _nanpercentile(
    values: np.ndarray,
    qs: npt.NDArray[np.float64],
    *,
    na_value,
    mask: npt.NDArray[np.bool_],
    interpolation: str,
):
    """
    Wrapper for np.percentile that skips missing values.

    Parameters
    ----------
    values : np.ndarray[ndim=2] over which to find quantiles
    qs : np.ndarray[float64] of quantile indices to find
    na_value : scalar
        value to return for empty or all-null values
    mask : np.ndarray[bool]
        locations in values that should be considered missing
    interpolation : str

    Returns
    -------
    quantiles : scalar or array
    """

    if values.dtype.kind in "mM":
        # need to cast to integer to avoid rounding errors in numpy
        result = _nanpercentile(
            values.view("i8"),
            qs=qs,
            na_value=na_value.view("i8"),
            mask=mask,
            interpolation=interpolation,
        )

        # Note: we have to do `astype` and not view because in general we
        # have float result at this point, not i8
        return result.astype(values.dtype)

    if mask.any():
        # Caller is responsible for ensuring mask shape match
        assert mask.shape == values.shape
        # row-by-row fallback: each row may drop a different number of NAs
        result = [
            _nanpercentile_1d(val, m, qs, na_value, interpolation=interpolation)
            for (val, m) in zip(list(values), list(mask))
        ]
        if values.dtype.kind == "f":
            # preserve itemsize
            result = np.asarray(result, dtype=values.dtype).T
        else:
            result = np.asarray(result).T
            if (
                result.dtype != values.dtype
                and not mask.all()
                and (result == result.astype(values.dtype, copy=False)).all()
            ):
                # e.g. values is integer dtype and result is floating dtype:
                # only cast back to integer dtype if result values are
                # all-integer. An all-NA input (mask.all()) never casts back.
                result = result.astype(values.dtype, copy=False)
        return result
    else:
        return np.percentile(
            values,
            qs,
            axis=1,
            # error: No overload variant of "percentile" matches argument types
            # "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]",
            # "int", "Dict[str, str]" [call-overload]
            method=interpolation,  # type: ignore[call-overload]
        )
|
videollama2/lib/python3.10/site-packages/pandas/core/array_algos/replace.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Methods used by Block.replace and related methods.
|
| 3 |
+
"""
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import operator
|
| 7 |
+
import re
|
| 8 |
+
from re import Pattern
|
| 9 |
+
from typing import (
|
| 10 |
+
TYPE_CHECKING,
|
| 11 |
+
Any,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
|
| 16 |
+
from pandas.core.dtypes.common import (
|
| 17 |
+
is_bool,
|
| 18 |
+
is_re,
|
| 19 |
+
is_re_compilable,
|
| 20 |
+
)
|
| 21 |
+
from pandas.core.dtypes.missing import isna
|
| 22 |
+
|
| 23 |
+
if TYPE_CHECKING:
|
| 24 |
+
from pandas._typing import (
|
| 25 |
+
ArrayLike,
|
| 26 |
+
Scalar,
|
| 27 |
+
npt,
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def should_use_regex(regex: bool, to_replace: Any) -> bool:
    """
    Decide whether to treat `to_replace` as a regular expression.
    """
    # an already-compiled pattern always means regex mode
    use = regex or is_re(to_replace)
    use = use and is_re_compilable(to_replace)
    # Don't use regex if the pattern is empty.
    return use and re.compile(to_replace).pattern != ""
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def compare_or_regex_search(
    a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_]
) -> ArrayLike:
    """
    Compare two array-like inputs of the same shape or two scalar values

    Calls operator.eq or re.search, depending on regex argument. If regex is
    True, perform an element-wise regex matching.

    Parameters
    ----------
    a : array-like
    b : scalar or regex pattern
    regex : bool
    mask : np.ndarray[bool]
        Positions eligible for comparison (False positions are skipped).

    Returns
    -------
    mask : array-like of bool
    """
    if isna(b):
        # b is NA: no eligible position matches
        return ~mask

    def _check_comparison_types(
        result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern
    ):
        """
        Raises an error if the two arrays (a,b) cannot be compared.
        Otherwise, returns the comparison result as expected.
        """
        # an array compared to an incompatible scalar collapses to a single
        # bool instead of an elementwise result — treat that as an error
        if is_bool(result) and isinstance(a, np.ndarray):
            type_names = [type(a).__name__, type(b).__name__]

            type_names[0] = f"ndarray(dtype={a.dtype})"

            raise TypeError(
                f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}"
            )

    if not regex or not should_use_regex(regex, b):
        # TODO: should use missing.mask_missing?
        op = lambda x: operator.eq(x, b)
    else:
        op = np.vectorize(
            lambda x: bool(re.search(b, x))
            if isinstance(x, str) and isinstance(b, (str, Pattern))
            else False
        )

    # GH#32621 use mask to avoid comparing to NAs
    if isinstance(a, np.ndarray):
        a = a[mask]

    result = op(a)

    if isinstance(result, np.ndarray) and mask is not None:
        # The shape of the mask can differ to that of the result
        # since we may compare only a subset of a's or b's elements
        tmp = np.zeros(mask.shape, dtype=np.bool_)
        np.place(tmp, mask, result)
        result = tmp

    _check_comparison_types(result, a, b)
    return result
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def replace_regex(
    values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None
) -> None:
    """
    Replace elements of ``values`` matching ``rx`` in place.

    Parameters
    ----------
    values : ArrayLike
        Object dtype.
    rx : re.Pattern
    value : Any
        A string replacement is substituted with ``rx.sub``; any non-string
        (including NA) replaces the whole matching element.
    mask : np.ndarray[bool], optional
        When given, only True positions are rewritten.

    Notes
    -----
    Alters values in-place.
    """
    if isinstance(value, str) and not isna(value):
        # string replacement: substitute only the matched portion
        def re_replacer(s):
            return rx.sub(value, s) if is_re(rx) and isinstance(s, str) else s

    else:
        # non-string (numeric, nan, object) replacement: swap the whole
        # element on a match; nulls / non-strings pass through unchanged
        def re_replacer(s):
            if is_re(rx) and isinstance(s, str) and rx.search(s) is not None:
                return value
            return s

    f = np.vectorize(re_replacer, otypes=[np.object_])

    if mask is None:
        values[:] = f(values)
    else:
        values[mask] = f(values[mask])
|
videollama2/lib/python3.10/site-packages/pandas/core/array_algos/take.py
ADDED
|
@@ -0,0 +1,594 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
from typing import (
|
| 5 |
+
TYPE_CHECKING,
|
| 6 |
+
cast,
|
| 7 |
+
overload,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
from pandas._libs import (
|
| 13 |
+
algos as libalgos,
|
| 14 |
+
lib,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
from pandas.core.dtypes.cast import maybe_promote
|
| 18 |
+
from pandas.core.dtypes.common import (
|
| 19 |
+
ensure_platform_int,
|
| 20 |
+
is_1d_only_ea_dtype,
|
| 21 |
+
)
|
| 22 |
+
from pandas.core.dtypes.missing import na_value_for_dtype
|
| 23 |
+
|
| 24 |
+
from pandas.core.construction import ensure_wrapped_if_datetimelike
|
| 25 |
+
|
| 26 |
+
if TYPE_CHECKING:
|
| 27 |
+
from pandas._typing import (
|
| 28 |
+
ArrayLike,
|
| 29 |
+
AxisInt,
|
| 30 |
+
npt,
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
|
| 34 |
+
from pandas.core.arrays.base import ExtensionArray
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@overload
def take_nd(
    arr: np.ndarray,
    indexer,
    axis: AxisInt = ...,
    fill_value=...,
    allow_fill: bool = ...,
) -> np.ndarray:
    ...


@overload
def take_nd(
    arr: ExtensionArray,
    indexer,
    axis: AxisInt = ...,
    fill_value=...,
    allow_fill: bool = ...,
) -> ArrayLike:
    ...


def take_nd(
    arr: ArrayLike,
    indexer,
    axis: AxisInt = 0,
    fill_value=lib.no_default,
    allow_fill: bool = True,
) -> ArrayLike:
    """
    Specialized Cython take which sets NaN values in one pass.

    This dispatches to ``take`` defined on ExtensionArrays; plain ndarrays
    go through the cython-backed helpers via ``_take_nd_ndarray``.

    Note: this function assumes that the indexer is a valid(ated) indexer
    with no out-of-bound indices.

    Parameters
    ----------
    arr : np.ndarray or ExtensionArray
        Input array.
    indexer : ndarray
        1-D array of indices to take; subarrays corresponding to -1 value
        indices are filled with ``fill_value``.
    axis : int, default 0
        Axis to take from.
    fill_value : any, default np.nan
        Fill value to replace -1 values with.
    allow_fill : bool, default True
        If False, indexer is assumed to contain no -1 values so no filling
        will be done.  This short-circuits computation of a mask.  Result is
        undefined if allow_fill == False and -1 is present in indexer.

    Returns
    -------
    subarray : np.ndarray or ExtensionArray
        May be the same type as the input, or cast to an ndarray.
    """
    if fill_value is lib.no_default:
        fill_value = na_value_for_dtype(arr.dtype, compat=False)
    elif lib.is_np_dtype(arr.dtype, "mM"):
        # dt64/td64: promote up-front, because EA.take is strict about
        # returning a new object of the same type.
        promoted, fill_value = maybe_promote(arr.dtype, fill_value)
        if arr.dtype != promoted:
            arr = arr.astype(promoted)

    if isinstance(arr, np.ndarray):
        return _take_nd_ndarray(np.asarray(arr), indexer, axis, fill_value, allow_fill)

    # ExtensionArray path (also catches DatetimeArray, TimedeltaArray)
    if is_1d_only_ea_dtype(arr.dtype):
        return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)

    # 2D-capable EAs (i.e. DatetimeArray, TimedeltaArray) accept an axis
    arr = cast("NDArrayBackedExtensionArray", arr)
    return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def _take_nd_ndarray(
    arr: np.ndarray,
    indexer: npt.NDArray[np.intp] | None,
    axis: AxisInt,
    fill_value,
    allow_fill: bool,
) -> np.ndarray:
    """
    ndarray implementation of ``take_nd``: normalize the indexer, allocate
    the output array, and run the matching cython (or object-fallback)
    take function into it.
    """
    if indexer is None:
        # identity take along `axis`; no -1 entries, so a dummy fill_value
        indexer = np.arange(arr.shape[axis], dtype=np.intp)
        dtype, fill_value = arr.dtype, arr.dtype.type()
    else:
        indexer = ensure_platform_int(indexer)

    dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
        arr, indexer, fill_value, allow_fill
    )

    # operate on the transpose of an F-contiguous 2-D array so the cython
    # helpers see C-contiguous data; transpose back at the end
    transposed = arr.ndim == 2 and arr.flags.f_contiguous
    if transposed:
        arr = arr.T
        axis = arr.ndim - axis - 1

    # at this point dtype is guaranteed to hold both the arr values and
    # the fill_value
    shape = list(arr.shape)
    shape[axis] = len(indexer)
    if arr.flags.f_contiguous and axis == arr.ndim - 1:
        # minor tweak that can make an order-of-magnitude difference for
        # dataframes initialized directly from 2-d ndarrays (s.t. df.values
        # is c-contiguous and df._mgr.blocks[0] is its f-contiguous
        # transpose)
        out = np.empty(tuple(shape), dtype=dtype, order="F")
    else:
        out = np.empty(tuple(shape), dtype=dtype)

    take_func = _get_take_nd_function(
        arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info
    )
    take_func(arr, indexer, out, fill_value)

    return out.T if transposed else out
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def take_1d(
    arr: ArrayLike,
    indexer: npt.NDArray[np.intp],
    fill_value=None,
    allow_fill: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> ArrayLike:
    """
    Specialized version of ``take_nd`` for 1-D arrays, tuned for the lowest
    possible overhead.

    Differences compared to ``take_nd``:

    - Assumes input array has already been converted to numpy array / EA.
    - Assumes indexer is already guaranteed to be intp dtype ndarray.
    - Only works for 1D arrays.

    Note: like ``take_nd``, this function assumes that the indexer is a
    valid(ated) indexer with no out-of-bound indices.

    Parameters
    ----------
    arr : np.ndarray or ExtensionArray
        Input array.
    indexer : ndarray
        1-D array of indices to take (validated indices, intp dtype).
    fill_value : any, default np.nan
        Fill value to replace -1 values with.
    allow_fill : bool, default True
        If False, indexer is assumed to contain no -1 values so no filling
        will be done.  This short-circuits computation of a mask.  Result
        is undefined if allow_fill == False and -1 is present in indexer.
    mask : np.ndarray, optional, default None
        If `allow_fill` is True, and the mask (where indexer == -1) is
        already known, it can be passed to avoid recomputation.
    """
    if not isinstance(arr, np.ndarray):
        # ExtensionArray -> use its own take
        return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)

    if not allow_fill:
        # no -1 entries expected; plain numpy take is enough
        return arr.take(indexer)

    dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
        arr, indexer, fill_value, True, mask
    )

    # at this point dtype is guaranteed to hold both the arr values and
    # the fill_value
    result = np.empty(indexer.shape, dtype=dtype)
    take_func = _get_take_nd_function(
        arr.ndim, arr.dtype, result.dtype, axis=0, mask_info=mask_info
    )
    take_func(arr, indexer, result, fill_value)
    return result
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def take_2d_multi(
    arr: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    fill_value=np.nan,
) -> np.ndarray:
    """
    Specialized Cython take along both axes of a 2-D array, setting NaN
    values in one pass.

    Only called from DataFrame._reindex_multi, so the indexer pair is
    known to be well-behaved.
    """
    assert indexer is not None
    assert indexer[0] is not None
    assert indexer[1] is not None

    row_idx = ensure_platform_int(indexer[0])
    col_idx = ensure_platform_int(indexer[1])
    indexer = row_idx, col_idx

    # check for promotion based on types only; doing this first is faster
    # than computing a mask
    mask_info = None
    dtype, fill_value = maybe_promote(arr.dtype, fill_value)
    if dtype != arr.dtype:
        # was promotion actually required for this particular indexer?
        row_mask = row_idx == -1
        col_mask = col_idx == -1
        row_needs = row_mask.any()
        col_needs = col_mask.any()
        mask_info = (row_mask, col_mask), (row_needs, col_needs)

        if not (row_needs or col_needs):
            # depromote; fill_value becomes a dummy the cython code can
            # still cast to dtype without crashing (it won't be used)
            dtype, fill_value = arr.dtype, arr.dtype.type()

    # at this point dtype is guaranteed to hold both the arr values and
    # the fill_value
    out = np.empty((len(row_idx), len(col_idx)), dtype=dtype)

    func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
    if func is None and arr.dtype != out.dtype:
        func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
        if func is not None:
            func = _convert_wrapper(func, out.dtype)

    if func is not None:
        func(arr, indexer, out=out, fill_value=fill_value)
    else:
        # object fallback; exercised in test_reindex_multi
        _take_2d_multi_object(
            arr, indexer, out, fill_value=fill_value, mask_info=mask_info
        )

    return out
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
@functools.lru_cache
def _get_take_nd_function_cached(
    ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt
):
    """
    Part of _get_take_nd_function that doesn't need `mask_info` and thus
    can be cached (mask_info potentially contains a numpy ndarray which is
    not hashable and thus cannot be used as an argument for a cached
    function).
    """

    def lookup(key: tuple[str, str]):
        # pick the per-(ndim, axis) lookup table and query it
        if ndim == 1:
            return _take_1d_dict.get(key, None)
        table = _take_2d_axis0_dict if axis == 0 else _take_2d_axis1_dict
        return table.get(key, None)

    func = lookup((arr_dtype.name, out_dtype.name))
    if func is not None:
        return func

    # We get here with string, uint, float16, and complex dtypes that could
    # potentially be handled in algos_take_helper.
    # Also a couple with (M8[ns], object) and (m8[ns], object):
    # fall back to taking in out_dtype after conversion.
    func = lookup((out_dtype.name, out_dtype.name))
    if func is not None:
        return _convert_wrapper(func, out_dtype)

    return None
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def _get_take_nd_function(
    ndim: int,
    arr_dtype: np.dtype,
    out_dtype: np.dtype,
    axis: AxisInt = 0,
    mask_info=None,
):
    """
    Get the appropriate "take" implementation for the given dimension,
    axis and dtypes.
    """
    if ndim <= 2:
        # this part doesn't need `mask_info` -> use the cached algo lookup
        cached = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis)
        if cached is not None:
            return cached

    # no cython kernel available: fall back to the generic object take,
    # closing over axis/mask_info
    def object_take(arr, indexer, out, fill_value=np.nan) -> None:
        indexer = ensure_platform_int(indexer)
        _take_nd_object(
            arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info
        )

    return object_take
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
|
| 353 |
+
def wrapper(
|
| 354 |
+
arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
|
| 355 |
+
) -> None:
|
| 356 |
+
if arr_dtype is not None:
|
| 357 |
+
arr = arr.view(arr_dtype)
|
| 358 |
+
if out_dtype is not None:
|
| 359 |
+
out = out.view(out_dtype)
|
| 360 |
+
if fill_wrap is not None:
|
| 361 |
+
# FIXME: if we get here with dt64/td64 we need to be sure we have
|
| 362 |
+
# matching resos
|
| 363 |
+
if fill_value.dtype.kind == "m":
|
| 364 |
+
fill_value = fill_value.astype("m8[ns]")
|
| 365 |
+
else:
|
| 366 |
+
fill_value = fill_value.astype("M8[ns]")
|
| 367 |
+
fill_value = fill_wrap(fill_value)
|
| 368 |
+
|
| 369 |
+
f(arr, indexer, out, fill_value=fill_value)
|
| 370 |
+
|
| 371 |
+
return wrapper
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def _convert_wrapper(f, conv_dtype):
|
| 375 |
+
def wrapper(
|
| 376 |
+
arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
|
| 377 |
+
) -> None:
|
| 378 |
+
if conv_dtype == object:
|
| 379 |
+
# GH#39755 avoid casting dt64/td64 to integers
|
| 380 |
+
arr = ensure_wrapped_if_datetimelike(arr)
|
| 381 |
+
arr = arr.astype(conv_dtype)
|
| 382 |
+
f(arr, indexer, out, fill_value=fill_value)
|
| 383 |
+
|
| 384 |
+
return wrapper
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
# ---------------------------------------------------------------------
# Lookup tables mapping (arr_dtype.name, out_dtype.name) pairs to the
# cython take kernels in pandas._libs.algos.  dtypes without a direct
# kernel (bool, datetime64[ns], timedelta64[ns]) are routed through
# _view_wrapper so the kernel operates on a uint8/int64 view of the data.
# ---------------------------------------------------------------------

# 1-D take kernels.
_take_1d_dict = {
    ("int8", "int8"): libalgos.take_1d_int8_int8,
    ("int8", "int32"): libalgos.take_1d_int8_int32,
    ("int8", "int64"): libalgos.take_1d_int8_int64,
    ("int8", "float64"): libalgos.take_1d_int8_float64,
    ("int16", "int16"): libalgos.take_1d_int16_int16,
    ("int16", "int32"): libalgos.take_1d_int16_int32,
    ("int16", "int64"): libalgos.take_1d_int16_int64,
    ("int16", "float64"): libalgos.take_1d_int16_float64,
    ("int32", "int32"): libalgos.take_1d_int32_int32,
    ("int32", "int64"): libalgos.take_1d_int32_int64,
    ("int32", "float64"): libalgos.take_1d_int32_float64,
    ("int64", "int64"): libalgos.take_1d_int64_int64,
    ("int64", "float64"): libalgos.take_1d_int64_float64,
    ("float32", "float32"): libalgos.take_1d_float32_float32,
    ("float32", "float64"): libalgos.take_1d_float32_float64,
    ("float64", "float64"): libalgos.take_1d_float64_float64,
    ("object", "object"): libalgos.take_1d_object_object,
    ("bool", "bool"): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8),
    ("bool", "object"): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None),
    ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
        libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
    ),
    ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
        libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
    ),
}

# 2-D take kernels along axis 0 (rows).
_take_2d_axis0_dict = {
    ("int8", "int8"): libalgos.take_2d_axis0_int8_int8,
    ("int8", "int32"): libalgos.take_2d_axis0_int8_int32,
    ("int8", "int64"): libalgos.take_2d_axis0_int8_int64,
    ("int8", "float64"): libalgos.take_2d_axis0_int8_float64,
    ("int16", "int16"): libalgos.take_2d_axis0_int16_int16,
    ("int16", "int32"): libalgos.take_2d_axis0_int16_int32,
    ("int16", "int64"): libalgos.take_2d_axis0_int16_int64,
    ("int16", "float64"): libalgos.take_2d_axis0_int16_float64,
    ("int32", "int32"): libalgos.take_2d_axis0_int32_int32,
    ("int32", "int64"): libalgos.take_2d_axis0_int32_int64,
    ("int32", "float64"): libalgos.take_2d_axis0_int32_float64,
    ("int64", "int64"): libalgos.take_2d_axis0_int64_int64,
    ("int64", "float64"): libalgos.take_2d_axis0_int64_float64,
    ("float32", "float32"): libalgos.take_2d_axis0_float32_float32,
    ("float32", "float64"): libalgos.take_2d_axis0_float32_float64,
    ("float64", "float64"): libalgos.take_2d_axis0_float64_float64,
    ("object", "object"): libalgos.take_2d_axis0_object_object,
    ("bool", "bool"): _view_wrapper(
        libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8
    ),
    ("bool", "object"): _view_wrapper(
        libalgos.take_2d_axis0_bool_object, np.uint8, None
    ),
    ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
        libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
    ),
    ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
        libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
    ),
}

# 2-D take kernels along axis 1 (columns).
_take_2d_axis1_dict = {
    ("int8", "int8"): libalgos.take_2d_axis1_int8_int8,
    ("int8", "int32"): libalgos.take_2d_axis1_int8_int32,
    ("int8", "int64"): libalgos.take_2d_axis1_int8_int64,
    ("int8", "float64"): libalgos.take_2d_axis1_int8_float64,
    ("int16", "int16"): libalgos.take_2d_axis1_int16_int16,
    ("int16", "int32"): libalgos.take_2d_axis1_int16_int32,
    ("int16", "int64"): libalgos.take_2d_axis1_int16_int64,
    ("int16", "float64"): libalgos.take_2d_axis1_int16_float64,
    ("int32", "int32"): libalgos.take_2d_axis1_int32_int32,
    ("int32", "int64"): libalgos.take_2d_axis1_int32_int64,
    ("int32", "float64"): libalgos.take_2d_axis1_int32_float64,
    ("int64", "int64"): libalgos.take_2d_axis1_int64_int64,
    ("int64", "float64"): libalgos.take_2d_axis1_int64_float64,
    ("float32", "float32"): libalgos.take_2d_axis1_float32_float32,
    ("float32", "float64"): libalgos.take_2d_axis1_float32_float64,
    ("float64", "float64"): libalgos.take_2d_axis1_float64_float64,
    ("object", "object"): libalgos.take_2d_axis1_object_object,
    ("bool", "bool"): _view_wrapper(
        libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8
    ),
    ("bool", "object"): _view_wrapper(
        libalgos.take_2d_axis1_bool_object, np.uint8, None
    ),
    ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
        libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
    ),
    ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
        libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
    ),
}

# 2-D take kernels indexing along both axes simultaneously
# (used by take_2d_multi).
_take_2d_multi_dict = {
    ("int8", "int8"): libalgos.take_2d_multi_int8_int8,
    ("int8", "int32"): libalgos.take_2d_multi_int8_int32,
    ("int8", "int64"): libalgos.take_2d_multi_int8_int64,
    ("int8", "float64"): libalgos.take_2d_multi_int8_float64,
    ("int16", "int16"): libalgos.take_2d_multi_int16_int16,
    ("int16", "int32"): libalgos.take_2d_multi_int16_int32,
    ("int16", "int64"): libalgos.take_2d_multi_int16_int64,
    ("int16", "float64"): libalgos.take_2d_multi_int16_float64,
    ("int32", "int32"): libalgos.take_2d_multi_int32_int32,
    ("int32", "int64"): libalgos.take_2d_multi_int32_int64,
    ("int32", "float64"): libalgos.take_2d_multi_int32_float64,
    ("int64", "int64"): libalgos.take_2d_multi_int64_int64,
    ("int64", "float64"): libalgos.take_2d_multi_int64_float64,
    ("float32", "float32"): libalgos.take_2d_multi_float32_float32,
    ("float32", "float64"): libalgos.take_2d_multi_float32_float64,
    ("float64", "float64"): libalgos.take_2d_multi_float64_float64,
    ("object", "object"): libalgos.take_2d_multi_object_object,
    ("bool", "bool"): _view_wrapper(
        libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8
    ),
    ("bool", "object"): _view_wrapper(
        libalgos.take_2d_multi_bool_object, np.uint8, None
    ),
    ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
        libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
    ),
    ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
        libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
    ),
}
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
def _take_nd_object(
|
| 513 |
+
arr: np.ndarray,
|
| 514 |
+
indexer: npt.NDArray[np.intp],
|
| 515 |
+
out: np.ndarray,
|
| 516 |
+
axis: AxisInt,
|
| 517 |
+
fill_value,
|
| 518 |
+
mask_info,
|
| 519 |
+
) -> None:
|
| 520 |
+
if mask_info is not None:
|
| 521 |
+
mask, needs_masking = mask_info
|
| 522 |
+
else:
|
| 523 |
+
mask = indexer == -1
|
| 524 |
+
needs_masking = mask.any()
|
| 525 |
+
if arr.dtype != out.dtype:
|
| 526 |
+
arr = arr.astype(out.dtype)
|
| 527 |
+
if arr.shape[axis] > 0:
|
| 528 |
+
arr.take(indexer, axis=axis, out=out)
|
| 529 |
+
if needs_masking:
|
| 530 |
+
outindexer = [slice(None)] * arr.ndim
|
| 531 |
+
outindexer[axis] = mask
|
| 532 |
+
out[tuple(outindexer)] = fill_value
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
def _take_2d_multi_object(
|
| 536 |
+
arr: np.ndarray,
|
| 537 |
+
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
|
| 538 |
+
out: np.ndarray,
|
| 539 |
+
fill_value,
|
| 540 |
+
mask_info,
|
| 541 |
+
) -> None:
|
| 542 |
+
# this is not ideal, performance-wise, but it's better than raising
|
| 543 |
+
# an exception (best to optimize in Cython to avoid getting here)
|
| 544 |
+
row_idx, col_idx = indexer # both np.intp
|
| 545 |
+
if mask_info is not None:
|
| 546 |
+
(row_mask, col_mask), (row_needs, col_needs) = mask_info
|
| 547 |
+
else:
|
| 548 |
+
row_mask = row_idx == -1
|
| 549 |
+
col_mask = col_idx == -1
|
| 550 |
+
row_needs = row_mask.any()
|
| 551 |
+
col_needs = col_mask.any()
|
| 552 |
+
if fill_value is not None:
|
| 553 |
+
if row_needs:
|
| 554 |
+
out[row_mask, :] = fill_value
|
| 555 |
+
if col_needs:
|
| 556 |
+
out[:, col_mask] = fill_value
|
| 557 |
+
for i, u_ in enumerate(row_idx):
|
| 558 |
+
if u_ != -1:
|
| 559 |
+
for j, v in enumerate(col_idx):
|
| 560 |
+
if v != -1:
|
| 561 |
+
out[i, j] = arr[u_, v]
|
| 562 |
+
|
| 563 |
+
|
| 564 |
+
def _take_preprocess_indexer_and_fill_value(
    arr: np.ndarray,
    indexer: npt.NDArray[np.intp],
    fill_value,
    allow_fill: bool,
    mask: npt.NDArray[np.bool_] | None = None,
):
    """
    Decide the output dtype, effective fill_value and (mask, needs_masking)
    info for an ndarray take.
    """
    if not allow_fill:
        # no -1 entries expected: keep the dtype, use a dummy fill_value
        return arr.dtype, arr.dtype.type(), (None, False)

    # check for promotion based on types only (do this first because
    # it's faster than computing a mask)
    dtype, fill_value = maybe_promote(arr.dtype, fill_value)
    mask_info: tuple[np.ndarray | None, bool] | None = None
    if dtype != arr.dtype:
        # check if promotion is actually required based on indexer
        if mask is not None:
            needs_masking = True
        else:
            mask = indexer == -1
            needs_masking = bool(mask.any())
        mask_info = mask, needs_masking
        if not needs_masking:
            # depromote; fill_value becomes a dummy the cython code can
            # still cast to dtype without crashing (it won't be used)
            dtype, fill_value = arr.dtype, arr.dtype.type()

    return dtype, fill_value, mask_info
|
videollama2/lib/python3.10/site-packages/pandas/core/array_algos/transforms.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
transforms.py is for shape-preserving functions.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
from typing import TYPE_CHECKING
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
if TYPE_CHECKING:
|
| 12 |
+
from pandas._typing import (
|
| 13 |
+
AxisInt,
|
| 14 |
+
Scalar,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def shift(
    values: np.ndarray, periods: int, axis: AxisInt, fill_value: Scalar
) -> np.ndarray:
    """
    Shift ``values`` by ``periods`` positions along ``axis``, writing
    ``fill_value`` into the vacated slots.  Returns a new array.
    """
    if periods == 0 or values.size == 0:
        return values.copy()

    # make sure the array handed to np.roll is c_contiguous; work on the
    # transpose of an F-ordered input and transpose back at the end
    was_f_ordered = values.flags.f_contiguous
    shifted = values
    if was_f_ordered:
        shifted = shifted.T
        axis = shifted.ndim - axis - 1

    if shifted.size:
        shifted = np.roll(
            shifted,
            np.intp(periods),
            axis=axis,
        )

    # blank out the wrapped-around positions
    fill_sel = [slice(None)] * values.ndim
    fill_sel[axis] = slice(None, periods) if periods > 0 else slice(periods, None)
    shifted[tuple(fill_sel)] = fill_value

    return shifted.T if was_f_ordered else shifted
|
videollama2/lib/python3.10/site-packages/pandas/core/arraylike.py
ADDED
|
@@ -0,0 +1,530 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Methods that can be shared by many array-like classes or subclasses:
|
| 3 |
+
Series
|
| 4 |
+
Index
|
| 5 |
+
ExtensionArray
|
| 6 |
+
"""
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
import operator
|
| 10 |
+
from typing import Any
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
|
| 14 |
+
from pandas._libs import lib
|
| 15 |
+
from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
|
| 16 |
+
|
| 17 |
+
from pandas.core.dtypes.generic import ABCNDFrame
|
| 18 |
+
|
| 19 |
+
from pandas.core import roperator
|
| 20 |
+
from pandas.core.construction import extract_array
|
| 21 |
+
from pandas.core.ops.common import unpack_zerodim_and_defer
|
| 22 |
+
|
| 23 |
+
# Maps numpy ufunc names to pandas reduction-method names.  NOTE(review):
# the consumer is not visible in this chunk — presumably used when a ufunc
# reduction (e.g. np.add.reduce) is dispatched to a pandas object; confirm
# against the __array_ufunc__ dispatch code further down this module.
REDUCTION_ALIASES = {
    "maximum": "max",
    "minimum": "min",
    "add": "sum",
    "multiply": "prod",
}
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class OpsMixin:
    """
    Mixin providing the comparison, logical, and arithmetic dunder methods
    shared by Series, Index, and ExtensionArray.

    Subclasses override the hook methods ``_cmp_method``, ``_logical_method``
    and ``_arith_method``; the dunders here only unwrap zero-dim ndarray
    arguments (via ``unpack_zerodim_and_defer``) and forward to those hooks.
    """

    # -------------------------------------------------------------
    # Comparisons

    def _cmp_method(self, other, op):
        # Hook: subclasses implement the actual comparison logic.
        return NotImplemented

    @unpack_zerodim_and_defer("__eq__")
    def __eq__(self, other):
        return self._cmp_method(other, operator.eq)

    @unpack_zerodim_and_defer("__ne__")
    def __ne__(self, other):
        return self._cmp_method(other, operator.ne)

    @unpack_zerodim_and_defer("__lt__")
    def __lt__(self, other):
        return self._cmp_method(other, operator.lt)

    @unpack_zerodim_and_defer("__le__")
    def __le__(self, other):
        return self._cmp_method(other, operator.le)

    @unpack_zerodim_and_defer("__gt__")
    def __gt__(self, other):
        return self._cmp_method(other, operator.gt)

    @unpack_zerodim_and_defer("__ge__")
    def __ge__(self, other):
        return self._cmp_method(other, operator.ge)

    # -------------------------------------------------------------
    # Logical Methods

    def _logical_method(self, other, op):
        # Hook: subclasses implement the actual boolean logic.
        return NotImplemented

    @unpack_zerodim_and_defer("__and__")
    def __and__(self, other):
        return self._logical_method(other, operator.and_)

    @unpack_zerodim_and_defer("__rand__")
    def __rand__(self, other):
        return self._logical_method(other, roperator.rand_)

    @unpack_zerodim_and_defer("__or__")
    def __or__(self, other):
        return self._logical_method(other, operator.or_)

    @unpack_zerodim_and_defer("__ror__")
    def __ror__(self, other):
        return self._logical_method(other, roperator.ror_)

    @unpack_zerodim_and_defer("__xor__")
    def __xor__(self, other):
        return self._logical_method(other, operator.xor)

    @unpack_zerodim_and_defer("__rxor__")
    def __rxor__(self, other):
        return self._logical_method(other, roperator.rxor)

    # -------------------------------------------------------------
    # Arithmetic Methods

    def _arith_method(self, other, op):
        # Hook: subclasses implement the actual arithmetic logic.
        return NotImplemented

    @unpack_zerodim_and_defer("__add__")
    def __add__(self, other):
        """
        Get Addition of DataFrame and other, column-wise.

        Equivalent to ``DataFrame.add(other)``.

        Parameters
        ----------
        other : scalar, sequence, Series, dict or DataFrame
            Object to be added to the DataFrame.

        Returns
        -------
        DataFrame
            The result of adding ``other`` to DataFrame.

        See Also
        --------
        DataFrame.add : Add a DataFrame and another object, with option for index-
            or column-oriented addition.

        Examples
        --------
        >>> df = pd.DataFrame({'height': [1.5, 2.6], 'weight': [500, 800]},
        ...                   index=['elk', 'moose'])
        >>> df
               height  weight
        elk       1.5     500
        moose     2.6     800

        Adding a scalar affects all rows and columns.

        >>> df[['height', 'weight']] + 1.5
               height  weight
        elk       3.0   501.5
        moose     4.1   801.5

        Each element of a list is added to a column of the DataFrame, in order.

        >>> df[['height', 'weight']] + [0.5, 1.5]
               height  weight
        elk       2.0   501.5
        moose     3.1   801.5

        Keys of a dictionary are aligned to the DataFrame, based on column names;
        each value in the dictionary is added to the corresponding column.

        >>> df[['height', 'weight']] + {'height': 0.5, 'weight': 1.5}
               height  weight
        elk       2.0   501.5
        moose     3.1   801.5

        When `other` is a :class:`Series`, the index of `other` is aligned with the
        columns of the DataFrame.

        >>> s1 = pd.Series([0.5, 1.5], index=['weight', 'height'])
        >>> df[['height', 'weight']] + s1
               height  weight
        elk       3.0   500.5
        moose     4.1   800.5

        Even when the index of `other` is the same as the index of the DataFrame,
        the :class:`Series` will not be reoriented. If index-wise alignment is desired,
        :meth:`DataFrame.add` should be used with `axis='index'`.

        >>> s2 = pd.Series([0.5, 1.5], index=['elk', 'moose'])
        >>> df[['height', 'weight']] + s2
               elk  height  moose  weight
        elk    NaN     NaN    NaN     NaN
        moose  NaN     NaN    NaN     NaN

        >>> df[['height', 'weight']].add(s2, axis='index')
               height  weight
        elk       2.0   500.5
        moose     4.1   801.5

        When `other` is a :class:`DataFrame`, both columns names and the
        index are aligned.

        >>> other = pd.DataFrame({'height': [0.2, 0.4, 0.6]},
        ...                      index=['elk', 'moose', 'deer'])
        >>> df[['height', 'weight']] + other
               height  weight
        deer      NaN     NaN
        elk       1.7     NaN
        moose     3.0     NaN
        """
        return self._arith_method(other, operator.add)

    @unpack_zerodim_and_defer("__radd__")
    def __radd__(self, other):
        return self._arith_method(other, roperator.radd)

    @unpack_zerodim_and_defer("__sub__")
    def __sub__(self, other):
        return self._arith_method(other, operator.sub)

    @unpack_zerodim_and_defer("__rsub__")
    def __rsub__(self, other):
        return self._arith_method(other, roperator.rsub)

    @unpack_zerodim_and_defer("__mul__")
    def __mul__(self, other):
        return self._arith_method(other, operator.mul)

    @unpack_zerodim_and_defer("__rmul__")
    def __rmul__(self, other):
        return self._arith_method(other, roperator.rmul)

    @unpack_zerodim_and_defer("__truediv__")
    def __truediv__(self, other):
        return self._arith_method(other, operator.truediv)

    @unpack_zerodim_and_defer("__rtruediv__")
    def __rtruediv__(self, other):
        return self._arith_method(other, roperator.rtruediv)

    @unpack_zerodim_and_defer("__floordiv__")
    def __floordiv__(self, other):
        return self._arith_method(other, operator.floordiv)

    # BUG FIX: the decorator was previously passed "__rfloordiv" (missing the
    # trailing underscores), giving the deferral machinery a wrong method name.
    @unpack_zerodim_and_defer("__rfloordiv__")
    def __rfloordiv__(self, other):
        return self._arith_method(other, roperator.rfloordiv)

    @unpack_zerodim_and_defer("__mod__")
    def __mod__(self, other):
        return self._arith_method(other, operator.mod)

    @unpack_zerodim_and_defer("__rmod__")
    def __rmod__(self, other):
        return self._arith_method(other, roperator.rmod)

    @unpack_zerodim_and_defer("__divmod__")
    def __divmod__(self, other):
        return self._arith_method(other, divmod)

    @unpack_zerodim_and_defer("__rdivmod__")
    def __rdivmod__(self, other):
        return self._arith_method(other, roperator.rdivmod)

    @unpack_zerodim_and_defer("__pow__")
    def __pow__(self, other):
        return self._arith_method(other, operator.pow)

    @unpack_zerodim_and_defer("__rpow__")
    def __rpow__(self, other):
        return self._arith_method(other, roperator.rpow)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
# -----------------------------------------------------------------------------
|
| 250 |
+
# Helpers to implement __array_ufunc__
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any):
    """
    Compatibility with numpy ufuncs.

    Shared implementation of ``__array_ufunc__`` for NDFrame subclasses:
    normalizes kwargs, defers to higher-priority array types, aligns NDFrame
    inputs, dispatches the ufunc, and re-wraps the result in a pandas object.

    See also
    --------
    numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__
    """
    # Imported locally to avoid circular imports at module load time.
    from pandas.core.frame import (
        DataFrame,
        Series,
    )
    from pandas.core.generic import NDFrame
    from pandas.core.internals import (
        ArrayManager,
        BlockManager,
    )

    cls = type(self)

    # Collapse out1=/out2= into a single out=(out1, out2) tuple.
    kwargs = _standardize_out_kwarg(**kwargs)

    # for binary ops, use our custom dunder methods
    result = maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs)
    if result is not NotImplemented:
        return result

    # Determine if we should defer.
    no_defer = (
        np.ndarray.__array_ufunc__,
        cls.__array_ufunc__,
    )

    for item in inputs:
        higher_priority = (
            hasattr(item, "__array_priority__")
            and item.__array_priority__ > self.__array_priority__
        )
        has_array_ufunc = (
            hasattr(item, "__array_ufunc__")
            and type(item).__array_ufunc__ not in no_defer
            and not isinstance(item, self._HANDLED_TYPES)
        )
        if higher_priority or has_array_ufunc:
            # Another input claims ufunc handling; let numpy try it instead.
            return NotImplemented

    # align all the inputs.
    types = tuple(type(x) for x in inputs)
    alignable = [x for x, t in zip(inputs, types) if issubclass(t, NDFrame)]

    if len(alignable) > 1:
        # This triggers alignment.
        # At the moment, there aren't any ufuncs with more than two inputs
        # so this ends up just being x1.index | x2.index, but we write
        # it to handle *args.
        set_types = set(types)
        if len(set_types) > 1 and {DataFrame, Series}.issubset(set_types):
            # We currently don't handle ufunc(DataFrame, Series)
            # well. Previously this raised an internal ValueError. We might
            # support it someday, so raise a NotImplementedError.
            raise NotImplementedError(
                f"Cannot apply ufunc {ufunc} to mixed DataFrame and Series inputs."
            )
        axes = self.axes
        for obj in alignable[1:]:
            # this relies on the fact that we aren't handling mixed
            # series / frame ufuncs.
            for i, (ax1, ax2) in enumerate(zip(axes, obj.axes)):
                axes[i] = ax1.union(ax2)

        reconstruct_axes = dict(zip(self._AXIS_ORDERS, axes))
        inputs = tuple(
            x.reindex(**reconstruct_axes) if issubclass(t, NDFrame) else x
            for x, t in zip(inputs, types)
        )
    else:
        reconstruct_axes = dict(zip(self._AXIS_ORDERS, self.axes))

    if self.ndim == 1:
        # Preserve the Series name only when all named inputs agree on it.
        names = [getattr(x, "name") for x in inputs if hasattr(x, "name")]
        name = names[0] if len(set(names)) == 1 else None
        reconstruct_kwargs = {"name": name}
    else:
        reconstruct_kwargs = {}

    def reconstruct(result):
        # Re-wrap a raw ufunc result (or tuple of results) as pandas objects.
        if ufunc.nout > 1:
            # np.modf, np.frexp, np.divmod
            return tuple(_reconstruct(x) for x in result)

        return _reconstruct(result)

    def _reconstruct(result):
        if lib.is_scalar(result):
            return result

        if result.ndim != self.ndim:
            if method == "outer":
                raise NotImplementedError
            return result
        if isinstance(result, (BlockManager, ArrayManager)):
            # we went through BlockManager.apply e.g. np.sqrt
            result = self._constructor_from_mgr(result, axes=result.axes)
        else:
            # we converted an array, lost our axes
            result = self._constructor(
                result, **reconstruct_axes, **reconstruct_kwargs, copy=False
            )
        # TODO: When we support multiple values in __finalize__, this
        # should pass alignable to `__finalize__` instead of self.
        # Then `np.add(a, b)` would consider attrs from both a and b
        # when a and b are NDFrames.
        if len(alignable) == 1:
            result = result.__finalize__(self)
        return result

    if "out" in kwargs:
        # e.g. test_multiindex_get_loc
        result = dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs)
        return reconstruct(result)

    if method == "reduce":
        # e.g. test.series.test_ufunc.test_reduce
        result = dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs)
        if result is not NotImplemented:
            return result

    # We still get here with kwargs `axis` for e.g. np.maximum.accumulate
    # and `dtype` and `keepdims` for np.ptp

    if self.ndim > 1 and (len(inputs) > 1 or ufunc.nout > 1):
        # Just give up on preserving types in the complex case.
        # In theory we could preserve them for them.
        # * nout>1 is doable if BlockManager.apply took nout and
        #   returned a Tuple[BlockManager].
        # * len(inputs) > 1 is doable when we know that we have
        #   aligned blocks / dtypes.

        # e.g. my_ufunc, modf, logaddexp, heaviside, subtract, add
        inputs = tuple(np.asarray(x) for x in inputs)
        # Note: we can't use default_array_ufunc here bc reindexing means
        # that `self` may not be among `inputs`
        result = getattr(ufunc, method)(*inputs, **kwargs)
    elif self.ndim == 1:
        # ufunc(series, ...)
        inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
        result = getattr(ufunc, method)(*inputs, **kwargs)
    else:
        # ufunc(dataframe)
        if method == "__call__" and not kwargs:
            # for np.<ufunc>(..) calls
            # kwargs cannot necessarily be handled block-by-block, so only
            # take this path if there are no kwargs
            mgr = inputs[0]._mgr
            result = mgr.apply(getattr(ufunc, method))
        else:
            # otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..))
            # Those can have an axis keyword and thus can't be called block-by-block
            result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs)
            # e.g. np.negative (only one reached), with "where" and "out" in kwargs

    result = reconstruct(result)
    return result
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
def _standardize_out_kwarg(**kwargs) -> dict:
|
| 419 |
+
"""
|
| 420 |
+
If kwargs contain "out1" and "out2", replace that with a tuple "out"
|
| 421 |
+
|
| 422 |
+
np.divmod, np.modf, np.frexp can have either `out=(out1, out2)` or
|
| 423 |
+
`out1=out1, out2=out2)`
|
| 424 |
+
"""
|
| 425 |
+
if "out" not in kwargs and "out1" in kwargs and "out2" in kwargs:
|
| 426 |
+
out1 = kwargs.pop("out1")
|
| 427 |
+
out2 = kwargs.pop("out2")
|
| 428 |
+
out = (out1, out2)
|
| 429 |
+
kwargs["out"] = out
|
| 430 |
+
return kwargs
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
    """
    Call the ufunc without the `out` keyword, then copy the result into the
    caller-provided `out` (honoring a `where` mask when one was given).
    """
    # Assumes _standardize_out_kwarg already collapsed out1/out2 into "out".
    out = kwargs.pop("out")
    where = kwargs.pop("where", None)

    result = getattr(ufunc, method)(*inputs, **kwargs)

    if result is NotImplemented:
        return NotImplemented

    if isinstance(result, tuple):
        # Multi-output ufuncs (np.divmod, np.modf, np.frexp): `out` must be a
        # tuple of destination arrays of matching length.
        if not (isinstance(out, tuple) and len(out) == len(result)):
            raise NotImplementedError

        for target, piece in zip(out, result):
            _assign_where(target, piece, where)

        return out

    if isinstance(out, tuple):
        # A 1-tuple destination is unwrapped; anything longer is unsupported
        # for a single-output result.
        if len(out) != 1:
            raise NotImplementedError
        out = out[0]

    _assign_where(out, result, where)
    return out
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
def _assign_where(out, result, where) -> None:
|
| 469 |
+
"""
|
| 470 |
+
Set a ufunc result into 'out', masking with a 'where' argument if necessary.
|
| 471 |
+
"""
|
| 472 |
+
if where is None:
|
| 473 |
+
# no 'where' arg passed to ufunc
|
| 474 |
+
out[:] = result
|
| 475 |
+
else:
|
| 476 |
+
np.putmask(out, where, result)
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
def default_array_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
    """
    Fallback to the behavior we would get if we did not define __array_ufunc__.

    Notes
    -----
    We are assuming that `self` is among `inputs`.
    """
    if all(candidate is not self for candidate in inputs):
        # The contract requires self to appear (by identity) in inputs.
        raise NotImplementedError

    # Replace `self` (and only `self`) with a plain ndarray, then let numpy
    # process the call as if no __array_ufunc__ override existed.
    converted = [np.asarray(arg) if arg is self else arg for arg in inputs]

    func = getattr(ufunc, method)
    return func(*converted, **kwargs)
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def dispatch_reduction_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
    """
    Translate a ufunc ``reduce`` call into the matching pandas reduction.
    """
    assert method == "reduce"

    # Only handle the simple unary case where the reduction target is self.
    if len(inputs) != 1 or inputs[0] is not self:
        return NotImplemented

    reduction_name = REDUCTION_ALIASES.get(ufunc.__name__)
    if reduction_name is None:
        # Not one of the ufuncs we know how to map (max/min/sum/prod).
        return NotImplemented

    # NB: we are assuming that min/max represent minimum/maximum methods,
    # which would not be accurate for e.g. Timestamp.min
    if not hasattr(self, reduction_name):
        return NotImplemented

    if self.ndim > 1:
        if isinstance(self, ABCNDFrame):
            # TODO: test cases where this doesn't hold, i.e. 2D DTA/TDA
            kwargs["numeric_only"] = False

        if "axis" not in kwargs:
            # For DataFrame reductions we don't want the default axis=0.
            # Note: np.min is not a ufunc, but uses array_function_dispatch,
            # so calls DataFrame.min (without ever getting here) with the
            # np.min default of axis=None, which DataFrame.min catches and
            # changes to axis=0.
            # np.minimum.reduce(df) gets here bc axis is not in kwargs, so we
            # set axis=0 to match the behavior of np.minimum.reduce(df.values).
            kwargs["axis"] = 0

    # By default, numpy's reductions do not skip NaNs, so we have to
    # pass skipna=False
    return getattr(self, reduction_name)(skipna=False, **kwargs)
|
videollama2/lib/python3.10/site-packages/pandas/core/base.py
ADDED
|
@@ -0,0 +1,1391 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Base and utility classes for pandas objects.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
import textwrap
|
| 8 |
+
from typing import (
|
| 9 |
+
TYPE_CHECKING,
|
| 10 |
+
Any,
|
| 11 |
+
Generic,
|
| 12 |
+
Literal,
|
| 13 |
+
cast,
|
| 14 |
+
final,
|
| 15 |
+
overload,
|
| 16 |
+
)
|
| 17 |
+
import warnings
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
|
| 21 |
+
from pandas._config import using_copy_on_write
|
| 22 |
+
|
| 23 |
+
from pandas._libs import lib
|
| 24 |
+
from pandas._typing import (
|
| 25 |
+
AxisInt,
|
| 26 |
+
DtypeObj,
|
| 27 |
+
IndexLabel,
|
| 28 |
+
NDFrameT,
|
| 29 |
+
Self,
|
| 30 |
+
Shape,
|
| 31 |
+
npt,
|
| 32 |
+
)
|
| 33 |
+
from pandas.compat import PYPY
|
| 34 |
+
from pandas.compat.numpy import function as nv
|
| 35 |
+
from pandas.errors import AbstractMethodError
|
| 36 |
+
from pandas.util._decorators import (
|
| 37 |
+
cache_readonly,
|
| 38 |
+
doc,
|
| 39 |
+
)
|
| 40 |
+
from pandas.util._exceptions import find_stack_level
|
| 41 |
+
|
| 42 |
+
from pandas.core.dtypes.cast import can_hold_element
|
| 43 |
+
from pandas.core.dtypes.common import (
|
| 44 |
+
is_object_dtype,
|
| 45 |
+
is_scalar,
|
| 46 |
+
)
|
| 47 |
+
from pandas.core.dtypes.dtypes import ExtensionDtype
|
| 48 |
+
from pandas.core.dtypes.generic import (
|
| 49 |
+
ABCDataFrame,
|
| 50 |
+
ABCIndex,
|
| 51 |
+
ABCSeries,
|
| 52 |
+
)
|
| 53 |
+
from pandas.core.dtypes.missing import (
|
| 54 |
+
isna,
|
| 55 |
+
remove_na_arraylike,
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
from pandas.core import (
|
| 59 |
+
algorithms,
|
| 60 |
+
nanops,
|
| 61 |
+
ops,
|
| 62 |
+
)
|
| 63 |
+
from pandas.core.accessor import DirNamesMixin
|
| 64 |
+
from pandas.core.arraylike import OpsMixin
|
| 65 |
+
from pandas.core.arrays import ExtensionArray
|
| 66 |
+
from pandas.core.construction import (
|
| 67 |
+
ensure_wrapped_if_datetimelike,
|
| 68 |
+
extract_array,
|
| 69 |
+
)
|
| 70 |
+
|
| 71 |
+
if TYPE_CHECKING:
|
| 72 |
+
from collections.abc import (
|
| 73 |
+
Hashable,
|
| 74 |
+
Iterator,
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
from pandas._typing import (
|
| 78 |
+
DropKeep,
|
| 79 |
+
NumpySorter,
|
| 80 |
+
NumpyValueArrayLike,
|
| 81 |
+
ScalarLike_co,
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
from pandas import (
|
| 85 |
+
DataFrame,
|
| 86 |
+
Index,
|
| 87 |
+
Series,
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
# Registry of docstring templates shared across pandas classes; populated
# elsewhere via ``_shared_docs[...] = ...`` and consumed by @doc decorators.
_shared_docs: dict[str, str] = {}
# Substitution values used when formatting shared docstrings for
# IndexOpsMixin-based classes.
_indexops_doc_kwargs = {
    "klass": "IndexOpsMixin",
    "inplace": "",
    "unique": "IndexOpsMixin",
    "duplicated": "IndexOpsMixin",
}
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
class PandasObject(DirNamesMixin):
    """
    Baseclass for various pandas objects.
    """

    # Results of methods decorated with cache_readonly get stored here.
    _cache: dict[str, Any]

    @property
    def _constructor(self):
        """
        Class constructor (for this class it's just `__class__`).
        """
        return type(self)

    def __repr__(self) -> str:
        """
        Return a string representation for a particular object.
        """
        # Subclasses are expected to override this with something richer.
        return object.__repr__(self)

    def _reset_cache(self, key: str | None = None) -> None:
        """
        Reset cached properties. If ``key`` is passed, only clears that key.
        """
        if not hasattr(self, "_cache"):
            # Nothing cached yet, so there is nothing to clear.
            return
        if key is not None:
            self._cache.pop(key, None)
        else:
            self._cache.clear()

    def __sizeof__(self) -> int:
        """
        Generates the total memory usage for an object that returns
        either a value or Series of values
        """
        usage_fn = getattr(self, "memory_usage", None)
        if not usage_fn:
            # No memory_usage attribute, so fall back to object's 'sizeof'
            return super().__sizeof__()
        mem = usage_fn(deep=True)  # pylint: disable=not-callable
        if is_scalar(mem):
            return int(mem)
        return int(mem.sum())
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class NoNewAttributesMixin:
    """
    Mixin which prevents adding new attributes.

    Prevents additional attributes via xxx.attribute = "something" after a
    call to `self.__freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Series.cat/.str/.dt`).

    If you really want to add a new attribute at a later time, you need to use
    `object.__setattr__(self, key, value)`.
    """

    def _freeze(self) -> None:
        """
        Prevents setting additional attributes.
        """
        # Go through object.__setattr__ so our own guard below is bypassed.
        object.__setattr__(self, "__frozen", True)

    # prevent adding any attribute via s.xxx.new_attribute = ...
    def __setattr__(self, key: str, value) -> None:
        # A write is permitted when any of the following holds:
        #   * the instance has not been frozen yet
        #   * it targets the "_cache" slot used by caching decorators
        #   * the name is declared on the class (or checked via getattr for
        #     base classes, since cls.__dict__ doesn't traverse into them;
        #     getattr is also falsy for attributes that raise)
        #   * the attribute already holds a non-None value on the instance
        if getattr(self, "__frozen", False):
            permitted = (
                key == "_cache"
                or key in type(self).__dict__
                or getattr(self, key, None) is not None
            )
            if not permitted:
                raise AttributeError(f"You cannot add any new attribute '{key}'")
        object.__setattr__(self, key, value)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
class SelectionMixin(Generic[NDFrameT]):
    """
    mixin implementing the selection & aggregation interface on a group-like
    object sub-classes need to define: obj, exclusions
    """

    obj: NDFrameT
    _selection: IndexLabel | None = None
    exclusions: frozenset[Hashable]
    _internal_names = ["_cache", "__setstate__"]
    _internal_names_set = set(_internal_names)

    @final
    @property
    def _selection_list(self):
        # Normalize the current selection to a list-like container.
        sel = self._selection
        if isinstance(sel, (list, tuple, ABCSeries, ABCIndex, np.ndarray)):
            return sel
        return [sel]

    @cache_readonly
    def _selected_obj(self):
        # A Series has nothing to sub-select; otherwise apply the selection.
        if self._selection is None or isinstance(self.obj, ABCSeries):
            return self.obj
        return self.obj[self._selection]

    @final
    @cache_readonly
    def ndim(self) -> int:
        return self._selected_obj.ndim

    @final
    @cache_readonly
    def _obj_with_exclusions(self):
        if isinstance(self.obj, ABCSeries):
            return self.obj

        if self._selection is not None:
            return self.obj._getitem_nocopy(self._selection_list)

        if len(self.exclusions) == 0:
            return self.obj
        # equivalent to `self.obj.drop(self.exclusions, axis=1)
        # but this avoids consolidating and making a copy
        # TODO: following GH#45287 can we now use .drop directly without
        # making a copy?
        return self.obj._drop_axis(self.exclusions, axis=1, only_slice=True)

    def __getitem__(self, key):
        if self._selection is not None:
            raise IndexError(f"Column(s) {self._selection} already selected")

        if not isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)):
            # scalar key: select a single column
            if key not in self.obj:
                raise KeyError(f"Column not found: {key}")
            return self._gotitem(key, ndim=self.obj[key].ndim)

        # list-like key: every requested label must exist
        if len(self.obj.columns.intersection(key)) != len(set(key)):
            bad_keys = list(set(key).difference(self.obj.columns))
            raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
        return self._gotitem(list(key), ndim=2)

    def _gotitem(self, key, ndim: int, subset=None):
        """
        sub-classes to define
        return a sliced object

        Parameters
        ----------
        key : str / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        raise AbstractMethodError(self)

    @final
    def _infer_selection(self, key, subset: Series | DataFrame):
        """
        Infer the `selection` to pass to our constructor in _gotitem.
        """
        # Shared by Rolling and Resample
        selection = None
        if subset.ndim == 2 and (
            (lib.is_scalar(key) and key in subset) or lib.is_list_like(key)
        ):
            selection = key
        elif subset.ndim == 1 and lib.is_scalar(key) and key == subset.name:
            selection = key
        return selection

    def aggregate(self, func, *args, **kwargs):
        raise AbstractMethodError(self)

    agg = aggregate
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
class IndexOpsMixin(OpsMixin):
|
| 285 |
+
"""
|
| 286 |
+
Common ops mixin to support a unified interface / docs for Series / Index
|
| 287 |
+
"""
|
| 288 |
+
|
| 289 |
+
# ndarray compatibility
|
| 290 |
+
__array_priority__ = 1000
|
| 291 |
+
_hidden_attrs: frozenset[str] = frozenset(
|
| 292 |
+
["tolist"] # tolist is not deprecated, just suppressed in the __dir__
|
| 293 |
+
)
|
| 294 |
+
|
| 295 |
+
@property
def dtype(self) -> DtypeObj:
    """Abstract: dtype of the underlying data; implemented by Series/Index."""
    # must be defined here as a property for mypy
    raise AbstractMethodError(self)
|
| 299 |
+
|
| 300 |
+
@property
def _values(self) -> ExtensionArray | np.ndarray:
    """Abstract: the backing array; implemented by Series/Index."""
    # must be defined here as a property for mypy
    raise AbstractMethodError(self)
|
| 304 |
+
|
| 305 |
+
@final
def transpose(self, *args, **kwargs) -> Self:
    """
    Return the transpose, which is by definition self.

    Returns
    -------
    %(klass)s
    """
    # numpy-compat: accept and validate ndarray.transpose's signature, then
    # no-op because these objects are one-dimensional.
    nv.validate_transpose(args, kwargs)
    return self
|
| 316 |
+
|
| 317 |
+
# ``.T`` alias: a 1-D pandas object is its own transpose.
T = property(
    transpose,
    doc="""
    Return the transpose, which is by definition self.

    Examples
    --------
    For Series:

    >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
    >>> s.T
    0     Ant
    1    Bear
    2     Cow
    dtype: object

    For Index:

    >>> idx = pd.Index([1, 2, 3])
    >>> idx.T
    Index([1, 2, 3], dtype='int64')
    """,
)
|
| 345 |
+
|
| 346 |
+
@property
def shape(self) -> Shape:
    """
    Return a tuple of the shape of the underlying data.

    Examples
    --------
    >>> pd.Series([1, 2, 3]).shape
    (3,)
    """
    # Delegate to the backing ndarray/ExtensionArray.
    values = self._values
    return values.shape
|
| 358 |
+
|
| 359 |
+
def __len__(self) -> int:
    """Abstract: number of elements; implemented by Series/Index."""
    # We need this defined here for mypy
    raise AbstractMethodError(self)
|
| 362 |
+
|
| 363 |
+
@property
def ndim(self) -> Literal[1]:
    """
    Number of dimensions of the underlying data, by definition 1.

    Examples
    --------
    >>> pd.Series(['Ant', 'Bear', 'Cow']).ndim
    1

    >>> pd.Index([1, 2, 3]).ndim
    1
    """
    # Series and Index are always one-dimensional.
    return 1
|
| 388 |
+
|
| 389 |
+
@final
def item(self):
    """
    Return the first element of the underlying data as a Python scalar.

    Returns
    -------
    scalar
        The first element of Series or Index.

    Raises
    ------
    ValueError
        If the data is not length = 1.

    Examples
    --------
    >>> pd.Series([1]).item()
    1

    >>> pd.Series([1], index=['a']).index.item()
    'a'
    """
    # Guard clause: only length-1 objects can be collapsed to a scalar.
    if len(self) != 1:
        raise ValueError("can only convert an array of size 1 to a Python scalar")
    return next(iter(self))
|
| 419 |
+
|
| 420 |
+
@property
def nbytes(self) -> int:
    """
    Return the number of bytes in the underlying data.

    Examples
    --------
    >>> pd.Series(['Ant', 'Bear', 'Cow']).nbytes
    24

    >>> pd.Index([1, 2, 3]).nbytes
    24
    """
    # Both ndarray and ExtensionArray expose ``nbytes``.
    values = self._values
    return values.nbytes
|
| 447 |
+
|
| 448 |
+
@property
def size(self) -> int:
    """
    Return the number of elements in the underlying data.

    Examples
    --------
    >>> pd.Series(['Ant', 'Bear', 'Cow']).size
    3

    >>> pd.Index([1, 2, 3]).size
    3
    """
    # Length of the backing array, not of ``self`` (avoids recursion for
    # subclasses whose __len__ is defined in terms of the values).
    values = self._values
    return len(values)
|
| 475 |
+
|
| 476 |
+
@property
def array(self) -> ExtensionArray:
    """
    The ExtensionArray of the data backing this Series or Index.

    Returns
    -------
    ExtensionArray
        An ExtensionArray of the values stored within. For extension
        types, this is the actual array. For NumPy native types, this
        is a thin (no copy) wrapper around :class:`numpy.ndarray`.

        ``.array`` differs from ``.values``, which may require converting
        the data to a different form.

    See Also
    --------
    Index.to_numpy : Similar method that always returns a NumPy array.
    Series.to_numpy : Similar method that always returns a NumPy array.

    Notes
    -----
    For extension dtypes (category, period, interval, nullable integer,
    string, boolean, tz-aware datetime, and any 3rd-party extension type)
    the actual ExtensionArray is returned. For all remaining dtypes
    ``.array`` will be a :class:`arrays.NumpyExtensionArray` wrapping the
    actual ndarray stored within. If you absolutely need a NumPy array
    (possibly with copying / coercing data), then use
    :meth:`Series.to_numpy` instead.

    Examples
    --------
    >>> pd.Series([1, 2, 3]).array
    <NumpyExtensionArray>
    [1, 2, 3]
    Length: 3, dtype: int64

    >>> pd.Series(pd.Categorical(['a', 'b', 'a'])).array
    ['a', 'b', 'a']
    Categories (2, object): ['a', 'b']
    """
    # Abstract here; Series and Index supply the concrete implementation.
    raise AbstractMethodError(self)
|
| 540 |
+
|
| 541 |
+
@final
def to_numpy(
    self,
    dtype: npt.DTypeLike | None = None,
    copy: bool = False,
    na_value: object = lib.no_default,
    **kwargs,
) -> np.ndarray:
    """
    A NumPy ndarray representing the values in this Series or Index.

    Parameters
    ----------
    dtype : str or numpy.dtype, optional
        The dtype to pass to :meth:`numpy.asarray`.
    copy : bool, default False
        Whether to ensure that the returned value is not a view on
        another array. Note that ``copy=False`` does not *ensure* that
        ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
        a copy is made, even if not strictly necessary.
    na_value : Any, optional
        The value to use for missing values. The default value depends
        on `dtype` and the type of the array.
    **kwargs
        Additional keywords passed through to the ``to_numpy`` method
        of the underlying array (for extension arrays).

    Returns
    -------
    numpy.ndarray

    See Also
    --------
    Series.array : Get the actual data stored within.
    Index.array : Get the actual data stored within.
    DataFrame.to_numpy : Similar method for DataFrame.

    Notes
    -----
    For NumPy dtypes, this will be a reference to the actual data stored
    in this Series or Index (assuming ``copy=False``). For extension
    types, ``to_numpy()`` *may* require copying data and coercing the
    result to a NumPy type (possibly object), which may be expensive.
    When you need a no-copy reference to the underlying data,
    :attr:`Series.array` should be used instead.

    Examples
    --------
    >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
    >>> ser.to_numpy()
    array(['a', 'b', 'a'], dtype=object)
    """
    # Extension arrays implement their own conversion (and accept kwargs).
    if isinstance(self.dtype, ExtensionDtype):
        return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs)
    if kwargs:
        bad_keys = next(iter(kwargs.keys()))
        raise TypeError(
            f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
        )

    # Only fill when a na_value was supplied, and even then writing np.nan
    # into an already-float array would be a no-op.
    needs_fill = na_value is not lib.no_default and not (
        na_value is np.nan and np.issubdtype(self.dtype, np.floating)
    )

    values = self._values
    if needs_fill:
        if can_hold_element(values, na_value):
            values = values.copy()
        else:
            # if we can't hold the na_value, asarray either makes a copy or
            # we error before modifying values. The asarray later on thus
            # won't make another copy.
            values = np.asarray(values, dtype=dtype)

        values[np.asanyarray(isna(self))] = na_value

    result = np.asarray(values, dtype=dtype)

    if (copy and not needs_fill) or (not copy and using_copy_on_write()):
        # Take slices to improve performance of the shares-memory check.
        if np.shares_memory(self._values[:2], result[:2]):
            if using_copy_on_write() and not copy:
                # under CoW, hand out a read-only view instead of copying
                result = result.view()
                result.flags.writeable = False
            else:
                result = result.copy()

    return result
|
| 674 |
+
|
| 675 |
+
@final
@property
def empty(self) -> bool:
    """Whether the object contains zero elements."""
    return self.size == 0
|
| 679 |
+
|
| 680 |
+
@doc(op="max", oppose="min", value="largest")
def argmax(
    self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
) -> int:
    """
    Return int position of the {value} value in the Series.

    If the {op}imum is achieved in multiple locations,
    the first row position is returned.

    Parameters
    ----------
    axis : {{None}}
        Unused. Parameter needed for compatibility with DataFrame.
    skipna : bool, default True
        Exclude NA/null values when showing the result.
    *args, **kwargs
        Additional arguments and keywords for compatibility with NumPy.

    Returns
    -------
    int
        Row position of the {op}imum value.

    See Also
    --------
    Series.arg{op} : Return position of the {op}imum value.
    Series.arg{oppose} : Return position of the {oppose}imum value.
    numpy.ndarray.arg{op} : Equivalent method for numpy arrays.
    Series.idxmax : Return index label of the maximum values.
    Series.idxmin : Return index label of the minimum values.

    Examples
    --------
    >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,
    ...                'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})
    >>> s.argmax()
    2
    >>> s.argmin()
    0
    """
    delegate = self._values
    nv.validate_minmax_axis(axis)
    skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)

    if isinstance(delegate, ExtensionArray):
        if skipna or not delegate.isna().any():
            return delegate.argmax()
        # skipna=False with NAs present: deprecated sentinel behavior
        warnings.warn(
            f"The behavior of {type(self).__name__}.argmax/argmin "
            "with skipna=False and NAs, or with all-NAs is deprecated. "
            "In a future version this will raise ValueError.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return -1

    result = nanops.nanargmax(delegate, skipna=skipna)
    if result == -1:
        warnings.warn(
            f"The behavior of {type(self).__name__}.argmax/argmin "
            "with skipna=False and NAs, or with all-NAs is deprecated. "
            "In a future version this will raise ValueError.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
    # error: Incompatible return value type (got "Union[int, ndarray]", expected
    # "int")
    return result  # type: ignore[return-value]
|
| 763 |
+
|
| 764 |
+
@doc(argmax, op="min", oppose="max", value="smallest")
def argmin(
    self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
) -> int:
    # Docstring is inherited from ``argmax`` via the @doc decorator.
    delegate = self._values
    nv.validate_minmax_axis(axis)
    skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)

    if isinstance(delegate, ExtensionArray):
        if skipna or not delegate.isna().any():
            return delegate.argmin()
        # skipna=False with NAs present: deprecated sentinel behavior
        warnings.warn(
            f"The behavior of {type(self).__name__}.argmax/argmin "
            "with skipna=False and NAs, or with all-NAs is deprecated. "
            "In a future version this will raise ValueError.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return -1

    result = nanops.nanargmin(delegate, skipna=skipna)
    if result == -1:
        warnings.warn(
            f"The behavior of {type(self).__name__}.argmax/argmin "
            "with skipna=False and NAs, or with all-NAs is deprecated. "
            "In a future version this will raise ValueError.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
    # error: Incompatible return value type (got "Union[int, ndarray]", expected
    # "int")
    return result  # type: ignore[return-value]
|
| 797 |
+
|
| 798 |
+
def tolist(self):
    """
    Return a list of the values.

    These are each a scalar type, which is a Python scalar
    (for str, int, float) or a pandas scalar
    (for Timestamp/Timedelta/Interval/Period)

    Returns
    -------
    list

    See Also
    --------
    numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
        nested list of Python scalars.

    Examples
    --------
    >>> pd.Series([1, 2, 3]).to_list()
    [1, 2, 3]

    >>> pd.Index([1, 2, 3]).to_list()
    [1, 2, 3]
    """
    # Both ndarray and ExtensionArray implement ``tolist``.
    return self._values.tolist()

# Public alias.
to_list = tolist
|
| 835 |
+
|
| 836 |
+
def __iter__(self) -> Iterator:
    """
    Return an iterator of the values.

    These are each a scalar type, which is a Python scalar
    (for str, int, float) or a pandas scalar
    (for Timestamp/Timedelta/Interval/Period)

    Returns
    -------
    iterator

    Examples
    --------
    >>> for x in pd.Series([1, 2, 3]):
    ...     print(x)
    1
    2
    3
    """
    # We are explicitly making element iterators.
    values = self._values
    if isinstance(values, np.ndarray):
        # ``ndarray.item`` boxes numpy scalars into Python scalars.
        return map(values.item, range(values.size))
    # Check type instead of dtype to catch DTA/TDA
    return iter(values)
|
| 863 |
+
|
| 864 |
+
@cache_readonly
def hasnans(self) -> bool:
    """
    Return True if there are any NaNs.

    Enables various performance speedups.

    Returns
    -------
    bool

    Examples
    --------
    >>> pd.Series([1, 2, 3, None]).hasnans
    True
    """
    # error: Item "bool" of "Union[bool, ndarray[Any, dtype[bool_]], NDFrame]"
    # has no attribute "any"
    mask = isna(self)
    return bool(mask.any())  # type: ignore[union-attr]
|
| 890 |
+
|
| 891 |
+
@final
def _map_values(self, mapper, na_action=None, convert: bool = True):
    """
    An internal function that maps values using the input
    correspondence (which can be a dict, Series, or function).

    Parameters
    ----------
    mapper : function, dict, or Series
        The input correspondence object
    na_action : {None, 'ignore'}
        If 'ignore', propagate NA values, without passing them to the
        mapping function
    convert : bool, default True
        Try to find better dtype for elementwise function results. If
        False, leave as dtype=object. Note that the dtype is always
        preserved for some extension array dtypes, such as Categorical.

    Returns
    -------
    Union[Index, MultiIndex], inferred
        The output of the mapping function applied to the index.
        If the function returns a tuple with more than one element
        a MultiIndex will be returned.
    """
    arr = self._values

    # ExtensionArrays map themselves (``convert`` does not apply there).
    if isinstance(arr, ExtensionArray):
        return arr.map(mapper, na_action=na_action)

    return algorithms.map_array(arr, mapper, na_action=na_action, convert=convert)
|
| 922 |
+
|
| 923 |
+
@final
def value_counts(
    self,
    normalize: bool = False,
    sort: bool = True,
    ascending: bool = False,
    bins=None,
    dropna: bool = True,
) -> Series:
    """
    Return a Series containing counts of unique values.

    The resulting object will be in descending order so that the
    first element is the most frequently-occurring element.
    Excludes NA values by default.

    Parameters
    ----------
    normalize : bool, default False
        If True then the object returned will contain the relative
        frequencies of the unique values.
    sort : bool, default True
        Sort by frequencies when True. Preserve the order of the data when False.
    ascending : bool, default False
        Sort in ascending order.
    bins : int, optional
        Rather than count values, group them into half-open bins,
        a convenience for ``pd.cut``, only works with numeric data.
    dropna : bool, default True
        Don't include counts of NaN.

    Returns
    -------
    Series

    See Also
    --------
    Series.count: Number of non-NA elements in a Series.
    DataFrame.count: Number of non-NA elements in a DataFrame.
    DataFrame.value_counts: Equivalent method on DataFrames.

    Examples
    --------
    >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
    >>> s.value_counts()
    3.0    2
    1.0    1
    2.0    1
    4.0    1
    Name: count, dtype: int64

    >>> s.value_counts(dropna=False)
    3.0    2
    1.0    1
    2.0    1
    4.0    1
    NaN    1
    Name: count, dtype: int64
    """
    # All heavy lifting lives in the shared algorithms layer.
    return algorithms.value_counts_internal(
        self,
        sort=sort,
        ascending=ascending,
        normalize=normalize,
        bins=bins,
        dropna=dropna,
    )
|
| 1018 |
+
|
| 1019 |
+
def unique(self):
    # ExtensionArrays implement their own ``unique``; plain ndarrays go
    # through the shared hashtable-based helper.
    values = self._values
    if isinstance(values, np.ndarray):
        return algorithms.unique1d(values)
    return values.unique()
|
| 1027 |
+
|
| 1028 |
+
@final
def nunique(self, dropna: bool = True) -> int:
    """
    Return number of unique elements in the object.

    Excludes NA values by default.

    Parameters
    ----------
    dropna : bool, default True
        Don't include NaN in the count.

    Returns
    -------
    int

    See Also
    --------
    DataFrame.nunique: Method nunique for DataFrame.
    Series.count: Count non-NA/null observations in the Series.

    Examples
    --------
    >>> pd.Series([1, 3, 5, 7, 7]).nunique()
    4
    """
    distinct = self.unique()
    if dropna:
        # Remove NA entries before counting.
        distinct = remove_na_arraylike(distinct)
    return len(distinct)
|
| 1067 |
+
|
| 1068 |
+
@property
def is_unique(self) -> bool:
    """
    Return boolean if values in the object are unique.

    Returns
    -------
    bool

    Examples
    --------
    >>> pd.Series([1, 2, 3]).is_unique
    True

    >>> pd.Series([1, 2, 3, 1]).is_unique
    False
    """
    # dropna=False so repeated NaNs also make the object non-unique.
    return len(self) == self.nunique(dropna=False)
|
| 1088 |
+
|
| 1089 |
+
@property
def is_monotonic_increasing(self) -> bool:
    """
    Return boolean if values in the object are monotonically increasing.

    Returns
    -------
    bool

    Examples
    --------
    >>> s = pd.Series([1, 2, 2])
    >>> s.is_monotonic_increasing
    True

    >>> s = pd.Series([3, 2, 1])
    >>> s.is_monotonic_increasing
    False
    """
    from pandas import Index

    # Index implements the monotonicity checks; wrap self and delegate.
    idx = Index(self)
    return idx.is_monotonic_increasing
|
| 1111 |
+
|
| 1112 |
+
@property
def is_monotonic_decreasing(self) -> bool:
    """
    Return boolean if values in the object are monotonically decreasing.

    Returns
    -------
    bool

    Examples
    --------
    >>> s = pd.Series([3, 2, 2, 1])
    >>> s.is_monotonic_decreasing
    True

    >>> s = pd.Series([1, 2, 3])
    >>> s.is_monotonic_decreasing
    False
    """
    from pandas import Index

    # Index implements the monotonicity checks; wrap self and delegate.
    idx = Index(self)
    return idx.is_monotonic_decreasing
|
| 1134 |
+
|
| 1135 |
+
@final
def _memory_usage(self, deep: bool = False) -> int:
    """
    Memory usage of the values.

    Parameters
    ----------
    deep : bool, default False
        Introspect the data deeply, interrogate
        `object` dtypes for system-level memory consumption.

    Returns
    -------
    bytes used

    See Also
    --------
    numpy.ndarray.nbytes : Total bytes consumed by the elements of the
        array.

    Notes
    -----
    Memory usage does not include memory consumed by elements that
    are not components of the array if deep=False or if used on PyPy

    Examples
    --------
    >>> idx = pd.Index([1, 2, 3])
    >>> idx.memory_usage()
    24
    """
    arr = self.array
    if hasattr(arr, "memory_usage"):
        # ExtensionArrays report their own usage
        return arr.memory_usage(  # pyright: ignore[reportGeneralTypeIssues]
            deep=deep,
        )

    nbytes = arr.nbytes
    if deep and is_object_dtype(self.dtype) and not PYPY:
        # object dtype stores pointers; add the per-object footprint
        nbytes += lib.memory_usage_of_objects(cast(np.ndarray, self._values))
    return nbytes
|
| 1176 |
+
|
| 1177 |
+
@doc(
    algorithms.factorize,
    values="",
    order="",
    size_hint="",
    sort=textwrap.dedent(
        """\
        sort : bool, default False
            Sort `uniques` and shuffle `codes` to maintain the
            relationship.
        """
    ),
)
def factorize(
    self,
    sort: bool = False,
    use_na_sentinel: bool = True,
) -> tuple[npt.NDArray[np.intp], Index]:
    codes, uniques = algorithms.factorize(
        self._values, sort=sort, use_na_sentinel=use_na_sentinel
    )
    if uniques.dtype == np.float16:
        # float16 is not a supported Index dtype; upcast to float32
        uniques = uniques.astype(np.float32)

    if isinstance(self, ABCIndex):
        # preserve e.g. MultiIndex
        return codes, self._constructor(uniques)

    from pandas import Index

    return codes, Index(uniques)
|
| 1209 |
+
|
| 1210 |
+
_shared_docs[
|
| 1211 |
+
"searchsorted"
|
| 1212 |
+
] = """
|
| 1213 |
+
Find indices where elements should be inserted to maintain order.
|
| 1214 |
+
|
| 1215 |
+
Find the indices into a sorted {klass} `self` such that, if the
|
| 1216 |
+
corresponding elements in `value` were inserted before the indices,
|
| 1217 |
+
the order of `self` would be preserved.
|
| 1218 |
+
|
| 1219 |
+
.. note::
|
| 1220 |
+
|
| 1221 |
+
The {klass} *must* be monotonically sorted, otherwise
|
| 1222 |
+
wrong locations will likely be returned. Pandas does *not*
|
| 1223 |
+
check this for you.
|
| 1224 |
+
|
| 1225 |
+
Parameters
|
| 1226 |
+
----------
|
| 1227 |
+
value : array-like or scalar
|
| 1228 |
+
Values to insert into `self`.
|
| 1229 |
+
side : {{'left', 'right'}}, optional
|
| 1230 |
+
If 'left', the index of the first suitable location found is given.
|
| 1231 |
+
If 'right', return the last such index. If there is no suitable
|
| 1232 |
+
index, return either 0 or N (where N is the length of `self`).
|
| 1233 |
+
sorter : 1-D array-like, optional
|
| 1234 |
+
Optional array of integer indices that sort `self` into ascending
|
| 1235 |
+
order. They are typically the result of ``np.argsort``.
|
| 1236 |
+
|
| 1237 |
+
Returns
|
| 1238 |
+
-------
|
| 1239 |
+
int or array of int
|
| 1240 |
+
A scalar or array of insertion points with the
|
| 1241 |
+
same shape as `value`.
|
| 1242 |
+
|
| 1243 |
+
See Also
|
| 1244 |
+
--------
|
| 1245 |
+
sort_values : Sort by the values along either axis.
|
| 1246 |
+
numpy.searchsorted : Similar method from NumPy.
|
| 1247 |
+
|
| 1248 |
+
Notes
|
| 1249 |
+
-----
|
| 1250 |
+
Binary search is used to find the required insertion points.
|
| 1251 |
+
|
| 1252 |
+
Examples
|
| 1253 |
+
--------
|
| 1254 |
+
>>> ser = pd.Series([1, 2, 3])
|
| 1255 |
+
>>> ser
|
| 1256 |
+
0 1
|
| 1257 |
+
1 2
|
| 1258 |
+
2 3
|
| 1259 |
+
dtype: int64
|
| 1260 |
+
|
| 1261 |
+
>>> ser.searchsorted(4)
|
| 1262 |
+
3
|
| 1263 |
+
|
| 1264 |
+
>>> ser.searchsorted([0, 4])
|
| 1265 |
+
array([0, 3])
|
| 1266 |
+
|
| 1267 |
+
>>> ser.searchsorted([1, 3], side='left')
|
| 1268 |
+
array([0, 2])
|
| 1269 |
+
|
| 1270 |
+
>>> ser.searchsorted([1, 3], side='right')
|
| 1271 |
+
array([1, 3])
|
| 1272 |
+
|
| 1273 |
+
>>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000']))
|
| 1274 |
+
>>> ser
|
| 1275 |
+
0 2000-03-11
|
| 1276 |
+
1 2000-03-12
|
| 1277 |
+
2 2000-03-13
|
| 1278 |
+
dtype: datetime64[ns]
|
| 1279 |
+
|
| 1280 |
+
>>> ser.searchsorted('3/14/2000')
|
| 1281 |
+
3
|
| 1282 |
+
|
| 1283 |
+
>>> ser = pd.Categorical(
|
| 1284 |
+
... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True
|
| 1285 |
+
... )
|
| 1286 |
+
>>> ser
|
| 1287 |
+
['apple', 'bread', 'bread', 'cheese', 'milk']
|
| 1288 |
+
Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk']
|
| 1289 |
+
|
| 1290 |
+
>>> ser.searchsorted('bread')
|
| 1291 |
+
1
|
| 1292 |
+
|
| 1293 |
+
>>> ser.searchsorted(['bread'], side='right')
|
| 1294 |
+
array([3])
|
| 1295 |
+
|
| 1296 |
+
If the values are not monotonically sorted, wrong locations
|
| 1297 |
+
may be returned:
|
| 1298 |
+
|
| 1299 |
+
>>> ser = pd.Series([2, 1, 3])
|
| 1300 |
+
>>> ser
|
| 1301 |
+
0 2
|
| 1302 |
+
1 1
|
| 1303 |
+
2 3
|
| 1304 |
+
dtype: int64
|
| 1305 |
+
|
| 1306 |
+
>>> ser.searchsorted(1) # doctest: +SKIP
|
| 1307 |
+
0 # wrong result, correct would be 1
|
| 1308 |
+
"""
|
| 1309 |
+
|
| 1310 |
+
# This overload is needed so that the call to searchsorted in
# pandas.core.resample.TimeGrouper._get_period_bins picks the correct result

# error: Overloaded function signatures 1 and 2 overlap with incompatible
# return types
@overload
def searchsorted(  # type: ignore[overload-overlap]
    self,
    value: ScalarLike_co,
    side: Literal["left", "right"] = ...,
    sorter: NumpySorter = ...,
) -> np.intp:
    # scalar `value` -> single scalar insertion point
    ...

@overload
def searchsorted(
    self,
    value: npt.ArrayLike | ExtensionArray,
    side: Literal["left", "right"] = ...,
    sorter: NumpySorter = ...,
) -> npt.NDArray[np.intp]:
    # array-like `value` -> array of insertion points
    ...
|
| 1332 |
+
|
| 1333 |
+
@doc(_shared_docs["searchsorted"], klass="Index")
def searchsorted(
    self,
    value: NumpyValueArrayLike | ExtensionArray,
    side: Literal["left", "right"] = "left",
    sorter: NumpySorter | None = None,
) -> npt.NDArray[np.intp] | np.intp:
    # DataFrame input is never a valid search target
    if isinstance(value, ABCDataFrame):
        raise ValueError(
            "Value must be 1-D array-like or scalar, "
            f"{type(value).__name__} is not supported"
        )

    vals = self._values
    if isinstance(vals, np.ndarray):
        return algorithms.searchsorted(
            vals,
            value,
            side=side,
            sorter=sorter,
        )
    # Going through EA.searchsorted directly improves performance GH#38083
    return vals.searchsorted(value, side=side, sorter=sorter)
|
| 1358 |
+
|
| 1359 |
+
def drop_duplicates(self, *, keep: DropKeep = "first"):
    """Return the object with duplicate values removed."""
    mask = self._duplicated(keep=keep)
    # error: Value of type "IndexOpsMixin" is not indexable
    return self[~mask]  # type: ignore[index]
|
| 1363 |
+
|
| 1364 |
+
@final
def _duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
    """Boolean mask of duplicated entries, per the `keep` policy."""
    vals = self._values
    if not isinstance(vals, ExtensionArray):
        return algorithms.duplicated(vals, keep=keep)
    return vals.duplicated(keep=keep)
|
| 1370 |
+
|
| 1371 |
+
def _arith_method(self, other, op):
    """Shared implementation of arithmetic dunders (``+``, ``-``, ...)."""
    res_name = ops.get_op_result_name(self, other)

    left = self._values
    right = extract_array(other, extract_numpy=True, extract_range=True)
    right = ops.maybe_prepare_scalar_for_op(right, left.shape)
    right = ensure_wrapped_if_datetimelike(right)
    if isinstance(right, range):
        # materialize RangeIndex-backed operands as a plain ndarray
        right = np.arange(right.start, right.stop, right.step)

    with np.errstate(all="ignore"):
        result = ops.arithmetic_op(left, right, op)

    return self._construct_result(result, name=res_name)
|
| 1385 |
+
|
| 1386 |
+
def _construct_result(self, result, name):
    """
    Wrap the ArrayLike result of an arithmetic-like operation in the
    appropriate pandas container; concrete subclasses must implement this.
    """
    raise AbstractMethodError(self)
|
videollama2/lib/python3.10/site-packages/pandas/core/common.py
ADDED
|
@@ -0,0 +1,657 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Misc tools for implementing data structures
|
| 3 |
+
|
| 4 |
+
Note: pandas.core.common is *not* part of the public API.
|
| 5 |
+
"""
|
| 6 |
+
from __future__ import annotations
|
| 7 |
+
|
| 8 |
+
import builtins
|
| 9 |
+
from collections import (
|
| 10 |
+
abc,
|
| 11 |
+
defaultdict,
|
| 12 |
+
)
|
| 13 |
+
from collections.abc import (
|
| 14 |
+
Collection,
|
| 15 |
+
Generator,
|
| 16 |
+
Hashable,
|
| 17 |
+
Iterable,
|
| 18 |
+
Sequence,
|
| 19 |
+
)
|
| 20 |
+
import contextlib
|
| 21 |
+
from functools import partial
|
| 22 |
+
import inspect
|
| 23 |
+
from typing import (
|
| 24 |
+
TYPE_CHECKING,
|
| 25 |
+
Any,
|
| 26 |
+
Callable,
|
| 27 |
+
cast,
|
| 28 |
+
overload,
|
| 29 |
+
)
|
| 30 |
+
import warnings
|
| 31 |
+
|
| 32 |
+
import numpy as np
|
| 33 |
+
|
| 34 |
+
from pandas._libs import lib
|
| 35 |
+
from pandas.compat.numpy import np_version_gte1p24
|
| 36 |
+
|
| 37 |
+
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
|
| 38 |
+
from pandas.core.dtypes.common import (
|
| 39 |
+
is_bool_dtype,
|
| 40 |
+
is_integer,
|
| 41 |
+
)
|
| 42 |
+
from pandas.core.dtypes.generic import (
|
| 43 |
+
ABCExtensionArray,
|
| 44 |
+
ABCIndex,
|
| 45 |
+
ABCMultiIndex,
|
| 46 |
+
ABCSeries,
|
| 47 |
+
)
|
| 48 |
+
from pandas.core.dtypes.inference import iterable_not_string
|
| 49 |
+
|
| 50 |
+
if TYPE_CHECKING:
|
| 51 |
+
from pandas._typing import (
|
| 52 |
+
AnyArrayLike,
|
| 53 |
+
ArrayLike,
|
| 54 |
+
NpDtype,
|
| 55 |
+
RandomState,
|
| 56 |
+
T,
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
from pandas import Index
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def flatten(line):
    """
    Flatten an arbitrarily nested sequence.

    Parameters
    ----------
    line : sequence
        The non string sequence to flatten

    Notes
    -----
    This doesn't consider strings sequences.

    Returns
    -------
    flattened : generator
    """
    for item in line:
        if not iterable_not_string(item):
            # scalars and strings are emitted as-is
            yield item
        else:
            yield from flatten(item)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def consensus_name_attr(objs):
    """Return the shared ``name`` of *objs*, or None if they disagree."""
    name = objs[0].name
    for other in objs[1:]:
        try:
            # array-valued names may raise on ambiguous comparison
            if other.name != name:
                name = None
        except ValueError:
            name = None
    return name
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def is_bool_indexer(key: Any) -> bool:
    """
    Check whether `key` is a valid boolean indexer.

    Parameters
    ----------
    key : Any
        Only list-likes may be considered boolean indexers.
        All other types are not considered a boolean indexer.
        For array-like input, boolean ndarrays or ExtensionArrays
        with ``_is_boolean`` set are considered boolean indexers.

    Returns
    -------
    bool
        Whether `key` is a valid boolean indexer.

    Raises
    ------
    ValueError
        When the array is an object-dtype ndarray or ExtensionArray
        and contains missing values.

    See Also
    --------
    check_array_indexer : Check that `key` is a valid array to index,
        and convert to an ndarray.
    """
    if isinstance(
        key, (ABCSeries, np.ndarray, ABCIndex, ABCExtensionArray)
    ) and not isinstance(key, ABCMultiIndex):
        if key.dtype == np.object_:
            arr = np.asarray(key)
            if lib.is_bool_array(arr):
                return True
            if lib.is_bool_array(arr, skipna=True):
                # All non-missing entries are boolean, so the NAs make the
                # mask ambiguous.  Don't raise on e.g. ["A", "B", np.nan]; see
                # test_loc_getitem_list_of_labels_categoricalindex_with_na
                raise ValueError(
                    "Cannot mask with non-boolean array containing NA / NaN values"
                )
            return False
        if is_bool_dtype(key.dtype):
            return True
    elif isinstance(key, list) and len(key) > 0:
        # check if np.array(key).dtype would be bool
        if type(key) is not list:  # noqa: E721
            # GH#42461 cython will raise TypeError if we pass a subclass
            key = list(key)
        return lib.is_bool_list(key)

    return False
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def cast_scalar_indexer(val):
    """
    Disallow indexing with a float key, even if that key is a round number.

    Parameters
    ----------
    val : scalar

    Returns
    -------
    outval : scalar
    """
    # assumes lib.is_scalar(val)
    if not lib.is_float(val) or not val.is_integer():
        return val
    # GH#34193
    raise IndexError(
        "Indexing with a float is no longer supported. Manually convert "
        "to an integer key instead."
    )
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def not_none(*args):
    """
    Returns a generator consisting of the arguments that are not None.
    """
    return (value for value in args if value is not None)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def any_none(*args) -> bool:
    """
    Returns a boolean indicating if any argument is None.
    """
    for value in args:
        if value is None:
            return True
    return False
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def all_none(*args) -> bool:
    """
    Returns a boolean indicating if all arguments are None.
    """
    for value in args:
        if value is not None:
            return False
    return True
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def any_not_none(*args) -> bool:
    """
    Returns a boolean indicating if any argument is not None.
    """
    for value in args:
        if value is not None:
            return True
    return False
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def all_not_none(*args) -> bool:
    """
    Returns a boolean indicating if all arguments are not None.
    """
    for value in args:
        if value is None:
            return False
    return True
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def count_not_none(*args) -> int:
    """
    Returns the count of arguments that are not None.
    """
    total = 0
    for value in args:
        if value is not None:
            total += 1
    return total
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
@overload
def asarray_tuplesafe(
    values: ArrayLike | list | tuple | zip, dtype: NpDtype | None = ...
) -> np.ndarray:
    # ExtensionArray can only be returned when values is an Index, all other iterables
    # will return np.ndarray. Unfortunately "all other" cannot be encoded in a type
    # signature, so instead we special-case some common types.
    ...


@overload
def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike:
    ...


def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLike:
    """Convert an iterable to an array, keeping tuples as scalar elements."""
    if isinstance(values, ABCIndex):
        return values._values
    if isinstance(values, ABCSeries):
        return values._values
    if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")):
        # generic iterators must be materialized first
        values = list(values)

    if isinstance(values, list) and dtype in [np.object_, object]:
        return construct_1d_object_array_from_listlike(values)

    try:
        with warnings.catch_warnings():
            # Can remove warning filter once NumPy 1.24 is min version
            if not np_version_gte1p24:
                warnings.simplefilter("ignore", np.VisibleDeprecationWarning)
            out = np.asarray(values, dtype=dtype)
    except ValueError:
        # Using try/except since it's more performant than checking is_list_like
        # over each element
        # error: Argument 1 to "construct_1d_object_array_from_listlike"
        # has incompatible type "Iterable[Any]"; expected "Sized"
        return construct_1d_object_array_from_listlike(values)  # type: ignore[arg-type]

    if issubclass(out.dtype.type, str):
        # keep strings as Python objects rather than fixed-width unicode
        out = np.asarray(values, dtype=object)

    if out.ndim == 2:
        # Avoid building an array of arrays:
        values = [tuple(x) for x in values]
        out = construct_1d_object_array_from_listlike(values)

    return out
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def index_labels_to_array(
    labels: np.ndarray | Iterable, dtype: NpDtype | None = None
) -> np.ndarray:
    """
    Transform label or iterable of labels to array, for use in Index.

    Parameters
    ----------
    dtype : dtype
        If specified, use as dtype of the resulting array, otherwise infer.

    Returns
    -------
    array
    """
    if isinstance(labels, (str, tuple)):
        # a single composite label, not an iterable of labels
        labels = [labels]
    elif not isinstance(labels, (list, np.ndarray)):
        try:
            labels = list(labels)
        except TypeError:  # non-iterable
            labels = [labels]

    return asarray_tuplesafe(labels, dtype=dtype)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
def maybe_make_list(obj):
    """Wrap a non-None scalar in a list; pass lists/tuples/None through."""
    if obj is None or isinstance(obj, (tuple, list)):
        return obj
    return [obj]
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
def maybe_iterable_to_list(obj: Iterable[T] | T) -> Collection[T] | T:
    """
    If obj is Iterable but not list-like, consume into list.
    """
    if not isinstance(obj, abc.Iterable) or isinstance(obj, abc.Sized):
        # scalars and sized containers pass through unchanged
        return cast(Collection, obj)
    return list(obj)
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def is_null_slice(obj) -> bool:
    """
    We have a null slice.
    """
    if not isinstance(obj, slice):
        return False
    return obj.start is None and obj.stop is None and obj.step is None
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def is_empty_slice(obj) -> bool:
    """
    We have an empty slice, e.g. no values are selected.
    """
    if not isinstance(obj, slice):
        return False
    return (
        obj.start is not None
        and obj.stop is not None
        and obj.start == obj.stop
    )
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
def is_true_slices(line) -> list[bool]:
    """
    Find non-trivial slices in "line": return a list of booleans with same length.
    """
    out = []
    for k in line:
        out.append(isinstance(k, slice) and not is_null_slice(k))
    return out
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
# TODO: used only once in indexing; belongs elsewhere?
|
| 343 |
+
def is_full_slice(obj, line: int) -> bool:
    """
    We have a full length slice.
    """
    if not isinstance(obj, slice):
        return False
    return obj.start == 0 and obj.stop == line and obj.step is None
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
def get_callable_name(obj):
    """Best-effort name for a callable, or None if obj isn't callable."""
    # typical case has name
    if hasattr(obj, "__name__"):
        return obj.__name__
    # some objects don't; could recurse
    if isinstance(obj, partial):
        return get_callable_name(obj.func)
    # fall back to class name
    if callable(obj):
        return type(obj).__name__
    # everything failed (probably because the argument
    # wasn't actually callable); we return None
    # instead of the empty string in this case to allow
    # distinguishing between no name and a name of ''
    return None
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
def apply_if_callable(maybe_callable, obj, **kwargs):
    """
    Evaluate possibly callable input using obj and kwargs if it is callable,
    otherwise return as it is.

    Parameters
    ----------
    maybe_callable : possibly a callable
    obj : NDFrame
    **kwargs
    """
    if not callable(maybe_callable):
        return maybe_callable
    return maybe_callable(obj, **kwargs)
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
def standardize_mapping(into):
    """
    Helper function to standardize a supplied mapping.

    Parameters
    ----------
    into : instance or subclass of collections.abc.Mapping
        Must be a class, an initialized collections.defaultdict,
        or an instance of a collections.abc.Mapping subclass.

    Returns
    -------
    mapping : a collections.abc.Mapping subclass or other constructor
        a callable object that can accept an iterator to create
        the desired Mapping.

    See Also
    --------
    DataFrame.to_dict
    Series.to_dict
    """
    if inspect.isclass(into):
        cls = into
    elif isinstance(into, defaultdict):
        # preserve the instance's default_factory
        return partial(defaultdict, into.default_factory)
    else:
        cls = type(into)
    if not issubclass(cls, abc.Mapping):
        raise TypeError(f"unsupported type: {cls}")
    if cls == defaultdict:
        # a bare defaultdict class carries no default_factory
        raise TypeError("to_dict() only accepts initialized defaultdicts")
    return cls
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
@overload
def random_state(state: np.random.Generator) -> np.random.Generator:
    ...


@overload
def random_state(
    state: int | np.ndarray | np.random.BitGenerator | np.random.RandomState | None,
) -> np.random.RandomState:
    ...


def random_state(state: RandomState | None = None):
    """
    Helper function for processing random_state arguments.

    Parameters
    ----------
    state : int, array-like, BitGenerator, Generator, np.random.RandomState, None.
        If receives an int, array-like, or BitGenerator, passes to
        np.random.RandomState() as seed.
        If receives an np.random RandomState or Generator, just returns that unchanged.
        If receives `None`, returns np.random.
        If receives anything else, raises an informative ValueError.

        Default None.

    Returns
    -------
    np.random.RandomState or np.random.Generator. If state is None, returns np.random

    """
    if state is None:
        return np.random
    if isinstance(state, (np.random.RandomState, np.random.Generator)):
        # already a usable source of randomness
        return state
    if is_integer(state) or isinstance(state, (np.ndarray, np.random.BitGenerator)):
        return np.random.RandomState(state)
    raise ValueError(
        "random_state must be an integer, array-like, a BitGenerator, Generator, "
        "a numpy RandomState, or None"
    )
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
def pipe(
    obj, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs
) -> T:
    """
    Apply a function ``func`` to object ``obj`` either by passing obj as the
    first argument to the function or, in the case that the func is a tuple,
    interpret the first element of the tuple as a function and pass the obj to
    that function as a keyword argument whose key is the value of the second
    element of the tuple.

    Parameters
    ----------
    func : callable or tuple of (callable, str)
        Function to apply to this object or, alternatively, a
        ``(callable, data_keyword)`` tuple where ``data_keyword`` is a
        string indicating the keyword of ``callable`` that expects the
        object.
    *args : iterable, optional
        Positional arguments passed into ``func``.
    **kwargs : dict, optional
        A dictionary of keyword arguments passed into ``func``.

    Returns
    -------
    object : the return type of ``func``.
    """
    if not isinstance(func, tuple):
        return func(obj, *args, **kwargs)

    callable_, target = func
    if target in kwargs:
        # caller supplied the data keyword twice - ambiguous
        raise ValueError(f"{target} is both the pipe target and a keyword argument")
    kwargs[target] = obj
    return callable_(*args, **kwargs)
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
def get_rename_function(mapper):
    """
    Returns a function that will map names/labels, dependent if mapper
    is a dict, Series or just a function.
    """
    if not isinstance(mapper, (abc.Mapping, ABCSeries)):
        # mapper is already callable -> use it as the renamer directly.
        return mapper

    def lookup(label):
        # Labels without an entry in the mapping pass through unchanged.
        if label in mapper:
            return mapper[label]
        return label

    return lookup
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
def convert_to_list_like(
    values: Hashable | Iterable | AnyArrayLike,
) -> list | AnyArrayLike:
    """
    Convert list-like or scalar input to list-like. List, numpy and pandas array-like
    inputs are returned unmodified whereas others are converted to list.
    """
    # These container types are already list-like enough; return as-is.
    passthrough = (list, np.ndarray, ABCIndex, ABCSeries, ABCExtensionArray)
    if isinstance(values, passthrough):
        return values
    # Other iterables (but not strings) are materialized into a list.
    if isinstance(values, abc.Iterable) and not isinstance(values, str):
        return list(values)

    # Scalar (or string): wrap in a single-element list.
    return [values]
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
@contextlib.contextmanager
def temp_setattr(
    obj, attr: str, value, condition: bool = True
) -> Generator[None, None, None]:
    """
    Temporarily set attribute on an object.

    Parameters
    ----------
    obj : object
        Object whose attribute will be modified.
    attr : str
        Attribute to modify.
    value : Any
        Value to temporarily set attribute to.
    condition : bool, default True
        Whether to set the attribute. Provided in order to not have to
        conditionally use this context manager.

    Yields
    ------
    object : obj with modified attribute.
    """
    if not condition:
        # Nothing to change; still yield so callers can use the manager uniformly.
        yield obj
        return
    saved = getattr(obj, attr)
    setattr(obj, attr, value)
    try:
        yield obj
    finally:
        # Restore the previous value even if the body raised.
        setattr(obj, attr, saved)
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
def require_length_match(data, index: Index) -> None:
    """
    Check the length of data matches the length of the index.

    Raises
    ------
    ValueError
        If the two lengths differ.
    """
    if len(data) == len(index):
        return
    raise ValueError(
        "Length of values "
        f"({len(data)}) "
        "does not match length of index "
        f"({len(index)})"
    )
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
# the ufuncs np.maximum.reduce and np.minimum.reduce default to axis=0,
# whereas np.min and np.max (which directly call obj.min and obj.max)
# default to axis=None.
_builtin_table = {
    builtins.sum: np.sum,
    builtins.max: np.maximum.reduce,
    builtins.min: np.minimum.reduce,
}

# GH#53425: Only for deprecation
# String spellings of the replacements above, used for human-readable
# messages when a builtin is swapped for its numpy equivalent.
_builtin_table_alias = {
    builtins.sum: "np.sum",
    builtins.max: "np.maximum.reduce",
    builtins.min: "np.minimum.reduce",
}

# Maps builtin / numpy reduction and accumulation callables to the name of
# the internal (cython) implementation. Note the nan-variants map to the
# same names as the plain variants — presumably the internal ops already
# skip NaNs; confirm against the consumers of this table.
_cython_table = {
    builtins.sum: "sum",
    builtins.max: "max",
    builtins.min: "min",
    np.all: "all",
    np.any: "any",
    np.sum: "sum",
    np.nansum: "sum",
    np.mean: "mean",
    np.nanmean: "mean",
    np.prod: "prod",
    np.nanprod: "prod",
    np.std: "std",
    np.nanstd: "std",
    np.var: "var",
    np.nanvar: "var",
    np.median: "median",
    np.nanmedian: "median",
    np.max: "max",
    np.nanmax: "max",
    np.min: "min",
    np.nanmin: "min",
    np.cumprod: "cumprod",
    np.nancumprod: "cumprod",
    np.cumsum: "cumsum",
    np.nancumsum: "cumsum",
}
|
| 624 |
+
|
| 625 |
+
|
| 626 |
+
def get_cython_func(arg: Callable) -> str | None:
    """
    Return the name of the internal (cython) implementation registered for
    ``arg``, or ``None`` when no such implementation is defined.
    """
    return _cython_table.get(arg, None)
|
| 631 |
+
|
| 632 |
+
|
| 633 |
+
def is_builtin_func(arg):
    """
    Map a python builtin (sum/max/min) to its registered numpy replacement.

    Returns the replacement from ``_builtin_table`` when one is defined,
    otherwise returns ``arg`` unchanged.
    """
    return _builtin_table.get(arg, arg)
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
def fill_missing_names(names: Sequence[Hashable | None]) -> list[Hashable]:
    """
    If a name is missing then replace it by level_n, where n is the count

    .. versionadded:: 1.4.0

    Parameters
    ----------
    names : list-like
        list of column names or None values.

    Returns
    -------
    list
        list of column names with the None values replaced.
    """
    # Only None is treated as missing; any other label is kept verbatim.
    return [
        label if label is not None else f"level_{position}"
        for position, label in enumerate(names)
    ]
|
videollama2/lib/python3.10/site-packages/pandas/core/config_init.py
ADDED
|
@@ -0,0 +1,924 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module is imported from the pandas package __init__.py file
|
| 3 |
+
in order to ensure that the core.config options registered here will
|
| 4 |
+
be available as soon as the user loads the package. if register_option
|
| 5 |
+
is invoked inside specific modules, they will not be registered until that
|
| 6 |
+
module is imported, which may or may not be a problem.
|
| 7 |
+
|
| 8 |
+
If you need to make sure options are available even before a certain
|
| 9 |
+
module is imported, register them here rather than in the module.
|
| 10 |
+
|
| 11 |
+
"""
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import os
|
| 15 |
+
from typing import Callable
|
| 16 |
+
|
| 17 |
+
import pandas._config.config as cf
|
| 18 |
+
from pandas._config.config import (
|
| 19 |
+
is_bool,
|
| 20 |
+
is_callable,
|
| 21 |
+
is_instance_factory,
|
| 22 |
+
is_int,
|
| 23 |
+
is_nonnegative_int,
|
| 24 |
+
is_one_of_factory,
|
| 25 |
+
is_str,
|
| 26 |
+
is_text,
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
# compute

# Help text shown for the "compute.use_bottleneck" option (used in the
# register_option call below).
use_bottleneck_doc = """
: bool
    Use the bottleneck library to accelerate if it is installed,
    the default is True
    Valid values: False,True
"""
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def use_bottleneck_cb(key) -> None:
    """Option callback: push the ``compute.use_bottleneck`` value into nanops."""
    # Imported lazily to avoid import cycles at module load time.
    from pandas.core import nanops

    enabled = cf.get_option(key)
    nanops.set_use_bottleneck(enabled)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
use_numexpr_doc = """
|
| 46 |
+
: bool
|
| 47 |
+
Use the numexpr library to accelerate computation if it is installed,
|
| 48 |
+
the default is True
|
| 49 |
+
Valid values: False,True
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def use_numexpr_cb(key) -> None:
    """Option callback: push the ``compute.use_numexpr`` value into expressions."""
    # Imported lazily to avoid import cycles at module load time.
    from pandas.core.computation import expressions

    enabled = cf.get_option(key)
    expressions.set_use_numexpr(enabled)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
use_numba_doc = """
|
| 60 |
+
: bool
|
| 61 |
+
Use the numba engine option for select operations if it is installed,
|
| 62 |
+
the default is False
|
| 63 |
+
Valid values: False,True
|
| 64 |
+
"""
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def use_numba_cb(key) -> None:
    """Option callback: push the ``compute.use_numba`` value into numba_ utils."""
    # Imported lazily to avoid import cycles at module load time.
    from pandas.core.util import numba_

    enabled = cf.get_option(key)
    numba_.set_use_numba(enabled)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
# Register the "compute.*" options; each callback immediately propagates the
# new value into the module that consumes it.
with cf.config_prefix("compute"):
    cf.register_option(
        "use_bottleneck",
        True,
        use_bottleneck_doc,
        validator=is_bool,
        cb=use_bottleneck_cb,
    )
    cf.register_option(
        "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb
    )
    cf.register_option(
        "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb
    )
|
| 87 |
+
#
# options from the "display" namespace

# Help texts for the "display.*" options; each is passed as the doc argument
# of the corresponding cf.register_option call further down.

pc_precision_doc = """
: int
    Floating point output precision in terms of number of places after the
    decimal, for regular formatting as well as scientific notation. Similar
    to ``precision`` in :meth:`numpy.set_printoptions`.
"""

pc_colspace_doc = """
: int
    Default space for DataFrame columns.
"""

pc_max_rows_doc = """
: int
    If max_rows is exceeded, switch to truncate view. Depending on
    `large_repr`, objects are either centrally truncated or printed as
    a summary view. 'None' value means unlimited.

    In case python/IPython is running in a terminal and `large_repr`
    equals 'truncate' this can be set to 0 and pandas will auto-detect
    the height of the terminal and print a truncated object which fits
    the screen height. The IPython notebook, IPython qtconsole, or
    IDLE do not run in a terminal and hence it is not possible to do
    correct auto-detection.
"""

pc_min_rows_doc = """
: int
    The numbers of rows to show in a truncated view (when `max_rows` is
    exceeded). Ignored when `max_rows` is set to None or 0. When set to
    None, follows the value of `max_rows`.
"""

pc_max_cols_doc = """
: int
    If max_cols is exceeded, switch to truncate view. Depending on
    `large_repr`, objects are either centrally truncated or printed as
    a summary view. 'None' value means unlimited.

    In case python/IPython is running in a terminal and `large_repr`
    equals 'truncate' this can be set to 0 or None and pandas will auto-detect
    the width of the terminal and print a truncated object which fits
    the screen width. The IPython notebook, IPython qtconsole, or IDLE
    do not run in a terminal and hence it is not possible to do
    correct auto-detection and defaults to 20.
"""

pc_max_categories_doc = """
: int
    This sets the maximum number of categories pandas should output when
    printing out a `Categorical` or a Series of dtype "category".
"""

pc_max_info_cols_doc = """
: int
    max_info_columns is used in DataFrame.info method to decide if
    per column information will be printed.
"""

pc_nb_repr_h_doc = """
: boolean
    When True, IPython notebook will use html representation for
    pandas objects (if it is available).
"""

pc_pprint_nest_depth = """
: int
    Controls the number of nested levels to process when pretty-printing
"""

pc_multi_sparse_doc = """
: boolean
    "sparsify" MultiIndex display (don't display repeated
    elements in outer levels within groups)
"""

float_format_doc = """
: callable
    The callable should accept a floating point number and return
    a string with the desired format of the number. This is used
    in some places like SeriesFormatter.
    See formats.format.EngFormatter for an example.
"""

max_colwidth_doc = """
: int or None
    The maximum width in characters of a column in the repr of
    a pandas data structure. When the column overflows, a "..."
    placeholder is embedded in the output. A 'None' value means unlimited.
"""

colheader_justify_doc = """
: 'left'/'right'
    Controls the justification of column headers. used by DataFrameFormatter.
"""

pc_expand_repr_doc = """
: boolean
    Whether to print out the full DataFrame repr for wide DataFrames across
    multiple lines, `max_columns` is still respected, but the output will
    wrap-around across multiple "pages" if its width exceeds `display.width`.
"""

pc_show_dimensions_doc = """
: boolean or 'truncate'
    Whether to print out dimensions at the end of DataFrame repr.
    If 'truncate' is specified, only print out the dimensions if the
    frame is truncated (e.g. not display all rows and/or columns)
"""

pc_east_asian_width_doc = """
: boolean
    Whether to use the Unicode East Asian Width to calculate the display text
    width.
    Enabling this may affect to the performance (default: False)
"""

pc_ambiguous_as_wide_doc = """
: boolean
    Whether to handle Unicode characters belong to Ambiguous as Wide (width=2)
    (default: False)
"""

pc_table_schema_doc = """
: boolean
    Whether to publish a Table Schema representation for frontends
    that support it.
    (default: False)
"""

pc_html_border_doc = """
: int
    A ``border=value`` attribute is inserted in the ``<table>`` tag
    for the DataFrame HTML repr.
"""

pc_html_use_mathjax_doc = """\
: boolean
    When True, Jupyter notebook will process table contents using MathJax,
    rendering mathematical expressions enclosed by the dollar symbol.
    (default: True)
"""

pc_max_dir_items = """\
: int
    The number of items that will be added to `dir(...)`. 'None' value means
    unlimited. Because dir is cached, changing this option will not immediately
    affect already existing dataframes until a column is deleted or added.

    This is for instance used to suggest columns from a dataframe to tab
    completion.
"""

pc_width_doc = """
: int
    Width of the display in characters. In case python/IPython is running in
    a terminal this can be set to None and pandas will correctly auto-detect
    the width.
    Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a
    terminal and hence it is not possible to correctly detect the width.
"""

pc_chop_threshold_doc = """
: float or None
    if set to a float value, all float values smaller than the given threshold
    will be displayed as exactly 0 by repr and friends.
"""

pc_max_seq_items = """
: int or None
    When pretty-printing a long sequence, no more then `max_seq_items`
    will be printed. If items are omitted, they will be denoted by the
    addition of "..." to the resulting string.

    If set to None, the number of items to be printed is unlimited.
"""

pc_max_info_rows_doc = """
: int
    df.info() will usually show null-counts for each column.
    For large frames this can be quite slow. max_info_rows and max_info_cols
    limit this null check only to frames with smaller dimensions than
    specified.
"""

pc_large_repr_doc = """
: 'truncate'/'info'
    For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can
    show a truncated table, or switch to the view from
    df.info() (the behaviour in earlier versions of pandas).
"""

pc_memory_usage_doc = """
: bool, string or None
    This specifies if the memory usage of a DataFrame should be displayed when
    df.info() is called. Valid values True,False,'deep'
"""
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
def table_schema_cb(key) -> None:
    """Option callback: (de)activate the Table Schema repr formatter."""
    # Imported lazily to avoid import cycles at module load time.
    from pandas.io.formats.printing import enable_data_resource_formatter

    enabled = cf.get_option(key)
    enable_data_resource_formatter(enabled)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
def is_terminal() -> bool:
    """
    Detect if Python is running in a terminal.

    Returns True if Python is running in a terminal or False if not.
    """
    try:
        # error: Name 'get_ipython' is not defined
        ip = get_ipython()  # type: ignore[name-defined]
    except NameError:
        # No IPython at all -> assume a standard interpreter in a terminal.
        return True
    # A Jupyter kernel exposes ``kernel``; terminal IPython does not.
    return not hasattr(ip, "kernel")
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
# Register the "display.*" options. Order matters only for readability;
# each option is independent except "max_columns", whose default depends
# on whether we appear to be running in a terminal.
with cf.config_prefix("display"):
    cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int)
    cf.register_option(
        "float_format",
        None,
        float_format_doc,
        validator=is_one_of_factory([None, is_callable]),
    )
    cf.register_option(
        "max_info_rows",
        1690785,
        pc_max_info_rows_doc,
        validator=is_int,
    )
    cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int)
    cf.register_option(
        "min_rows",
        10,
        pc_min_rows_doc,
        validator=is_instance_factory([type(None), int]),
    )
    cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int)

    cf.register_option(
        "max_colwidth",
        50,
        max_colwidth_doc,
        validator=is_nonnegative_int,
    )
    if is_terminal():
        max_cols = 0  # automatically determine optimal number of columns
    else:
        max_cols = 20  # cannot determine optimal number of columns
    cf.register_option(
        "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int
    )
    cf.register_option(
        "large_repr",
        "truncate",
        pc_large_repr_doc,
        validator=is_one_of_factory(["truncate", "info"]),
    )
    cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int)
    cf.register_option(
        "colheader_justify", "right", colheader_justify_doc, validator=is_text
    )
    cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool)
    cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int)
    cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool)
    cf.register_option("expand_frame_repr", True, pc_expand_repr_doc)
    cf.register_option(
        "show_dimensions",
        "truncate",
        pc_show_dimensions_doc,
        validator=is_one_of_factory([True, False, "truncate"]),
    )
    cf.register_option("chop_threshold", None, pc_chop_threshold_doc)
    cf.register_option("max_seq_items", 100, pc_max_seq_items)
    cf.register_option(
        "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int])
    )
    cf.register_option(
        "memory_usage",
        True,
        pc_memory_usage_doc,
        validator=is_one_of_factory([None, True, False, "deep"]),
    )
    cf.register_option(
        "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool
    )
    # BUGFIX: this option was previously registered with
    # pc_east_asian_width_doc, leaving pc_ambiguous_as_wide_doc unused and
    # showing the wrong help text; use the matching doc constant.
    cf.register_option(
        "unicode.ambiguous_as_wide", False, pc_ambiguous_as_wide_doc, validator=is_bool
    )
    cf.register_option(
        "html.table_schema",
        False,
        pc_table_schema_doc,
        validator=is_bool,
        cb=table_schema_cb,
    )
    cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int)
    cf.register_option(
        "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool
    )
    cf.register_option(
        "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int
    )
|
| 400 |
+
|
| 401 |
+
# Help text for the testing-only "mode.sim_interactive" option.
tc_sim_interactive_doc = """
: boolean
    Whether to simulate interactive mode for purposes of testing
"""

with cf.config_prefix("mode"):
    cf.register_option("sim_interactive", False, tc_sim_interactive_doc)
|
| 408 |
+
|
| 409 |
+
# Help text for the deprecated "mode.use_inf_as_na" option.
use_inf_as_na_doc = """
: boolean
    True means treat None, NaN, INF, -INF as NA (old way),
    False means None and NaN are null, but INF, -INF are not NA
    (new way).

    This option is deprecated in pandas 2.1.0 and will be removed in 3.0.
"""

# We don't want to start importing everything at the global context level
# or we'll hit circular deps.
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
def use_inf_as_na_cb(key) -> None:
    """Option callback: re-apply the (deprecated) ``use_inf_as_na`` setting."""
    # TODO(3.0): enforcing this deprecation will close GH#52501
    # Imported lazily to avoid import cycles at module load time.
    from pandas.core.dtypes.missing import _use_inf_as_na

    _use_inf_as_na(key)
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
with cf.config_prefix("mode"):
|
| 430 |
+
cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb)
|
| 431 |
+
|
| 432 |
+
cf.deprecate_option(
|
| 433 |
+
# GH#51684
|
| 434 |
+
"mode.use_inf_as_na",
|
| 435 |
+
"use_inf_as_na option is deprecated and will be removed in a future "
|
| 436 |
+
"version. Convert inf values to NaN before operating instead.",
|
| 437 |
+
)
|
| 438 |
+
|
| 439 |
+
data_manager_doc = """
|
| 440 |
+
: string
|
| 441 |
+
Internal data manager type; can be "block" or "array". Defaults to "block",
|
| 442 |
+
unless overridden by the 'PANDAS_DATA_MANAGER' environment variable (needs
|
| 443 |
+
to be set before pandas is imported).
|
| 444 |
+
"""
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
with cf.config_prefix("mode"):
|
| 448 |
+
cf.register_option(
|
| 449 |
+
"data_manager",
|
| 450 |
+
# Get the default from an environment variable, if set, otherwise defaults
|
| 451 |
+
# to "block". This environment variable can be set for testing.
|
| 452 |
+
os.environ.get("PANDAS_DATA_MANAGER", "block"),
|
| 453 |
+
data_manager_doc,
|
| 454 |
+
validator=is_one_of_factory(["block", "array"]),
|
| 455 |
+
)
|
| 456 |
+
|
| 457 |
+
cf.deprecate_option(
|
| 458 |
+
# GH#55043
|
| 459 |
+
"mode.data_manager",
|
| 460 |
+
"data_manager option is deprecated and will be removed in a future "
|
| 461 |
+
"version. Only the BlockManager will be available.",
|
| 462 |
+
)
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
# TODO better name?
|
| 466 |
+
copy_on_write_doc = """
|
| 467 |
+
: bool
|
| 468 |
+
Use new copy-view behaviour using Copy-on-Write. Defaults to False,
|
| 469 |
+
unless overridden by the 'PANDAS_COPY_ON_WRITE' environment variable
|
| 470 |
+
(if set to "1" for True, needs to be set before pandas is imported).
|
| 471 |
+
"""
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
with cf.config_prefix("mode"):
|
| 475 |
+
cf.register_option(
|
| 476 |
+
"copy_on_write",
|
| 477 |
+
# Get the default from an environment variable, if set, otherwise defaults
|
| 478 |
+
# to False. This environment variable can be set for testing.
|
| 479 |
+
"warn"
|
| 480 |
+
if os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "warn"
|
| 481 |
+
else os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1",
|
| 482 |
+
copy_on_write_doc,
|
| 483 |
+
validator=is_one_of_factory([True, False, "warn"]),
|
| 484 |
+
)
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
# user warnings
|
| 488 |
+
chained_assignment = """
|
| 489 |
+
: string
|
| 490 |
+
Raise an exception, warn, or no action if trying to use chained assignment,
|
| 491 |
+
The default is warn
|
| 492 |
+
"""
|
| 493 |
+
|
| 494 |
+
with cf.config_prefix("mode"):
|
| 495 |
+
cf.register_option(
|
| 496 |
+
"chained_assignment",
|
| 497 |
+
"warn",
|
| 498 |
+
chained_assignment,
|
| 499 |
+
validator=is_one_of_factory([None, "warn", "raise"]),
|
| 500 |
+
)
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
string_storage_doc = """
|
| 504 |
+
: string
|
| 505 |
+
The default storage for StringDtype. This option is ignored if
|
| 506 |
+
``future.infer_string`` is set to True.
|
| 507 |
+
"""
|
| 508 |
+
|
| 509 |
+
with cf.config_prefix("mode"):
|
| 510 |
+
cf.register_option(
|
| 511 |
+
"string_storage",
|
| 512 |
+
"python",
|
| 513 |
+
string_storage_doc,
|
| 514 |
+
validator=is_one_of_factory(["python", "pyarrow", "pyarrow_numpy"]),
|
| 515 |
+
)
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
# Set up the io.excel specific reader configuration.
|
| 519 |
+
reader_engine_doc = """
|
| 520 |
+
: string
|
| 521 |
+
The default Excel reader engine for '{ext}' files. Available options:
|
| 522 |
+
auto, {others}.
|
| 523 |
+
"""
|
| 524 |
+
|
| 525 |
+
_xls_options = ["xlrd", "calamine"]
|
| 526 |
+
_xlsm_options = ["xlrd", "openpyxl", "calamine"]
|
| 527 |
+
_xlsx_options = ["xlrd", "openpyxl", "calamine"]
|
| 528 |
+
_ods_options = ["odf", "calamine"]
|
| 529 |
+
_xlsb_options = ["pyxlsb", "calamine"]
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
with cf.config_prefix("io.excel.xls"):
|
| 533 |
+
cf.register_option(
|
| 534 |
+
"reader",
|
| 535 |
+
"auto",
|
| 536 |
+
reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)),
|
| 537 |
+
validator=is_one_of_factory(_xls_options + ["auto"]),
|
| 538 |
+
)
|
| 539 |
+
|
| 540 |
+
with cf.config_prefix("io.excel.xlsm"):
|
| 541 |
+
cf.register_option(
|
| 542 |
+
"reader",
|
| 543 |
+
"auto",
|
| 544 |
+
reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)),
|
| 545 |
+
validator=is_one_of_factory(_xlsm_options + ["auto"]),
|
| 546 |
+
)
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
with cf.config_prefix("io.excel.xlsx"):
|
| 550 |
+
cf.register_option(
|
| 551 |
+
"reader",
|
| 552 |
+
"auto",
|
| 553 |
+
reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)),
|
| 554 |
+
validator=is_one_of_factory(_xlsx_options + ["auto"]),
|
| 555 |
+
)
|
| 556 |
+
|
| 557 |
+
|
| 558 |
+
with cf.config_prefix("io.excel.ods"):
|
| 559 |
+
cf.register_option(
|
| 560 |
+
"reader",
|
| 561 |
+
"auto",
|
| 562 |
+
reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)),
|
| 563 |
+
validator=is_one_of_factory(_ods_options + ["auto"]),
|
| 564 |
+
)
|
| 565 |
+
|
| 566 |
+
with cf.config_prefix("io.excel.xlsb"):
|
| 567 |
+
cf.register_option(
|
| 568 |
+
"reader",
|
| 569 |
+
"auto",
|
| 570 |
+
reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)),
|
| 571 |
+
validator=is_one_of_factory(_xlsb_options + ["auto"]),
|
| 572 |
+
)
|
| 573 |
+
|
| 574 |
+
# Set up the io.excel specific writer configuration.
|
| 575 |
+
writer_engine_doc = """
|
| 576 |
+
: string
|
| 577 |
+
The default Excel writer engine for '{ext}' files. Available options:
|
| 578 |
+
auto, {others}.
|
| 579 |
+
"""
|
| 580 |
+
|
| 581 |
+
_xlsm_options = ["openpyxl"]
|
| 582 |
+
_xlsx_options = ["openpyxl", "xlsxwriter"]
|
| 583 |
+
_ods_options = ["odf"]
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
with cf.config_prefix("io.excel.xlsm"):
|
| 587 |
+
cf.register_option(
|
| 588 |
+
"writer",
|
| 589 |
+
"auto",
|
| 590 |
+
writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)),
|
| 591 |
+
validator=str,
|
| 592 |
+
)
|
| 593 |
+
|
| 594 |
+
|
| 595 |
+
with cf.config_prefix("io.excel.xlsx"):
|
| 596 |
+
cf.register_option(
|
| 597 |
+
"writer",
|
| 598 |
+
"auto",
|
| 599 |
+
writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)),
|
| 600 |
+
validator=str,
|
| 601 |
+
)
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
with cf.config_prefix("io.excel.ods"):
|
| 605 |
+
cf.register_option(
|
| 606 |
+
"writer",
|
| 607 |
+
"auto",
|
| 608 |
+
writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)),
|
| 609 |
+
validator=str,
|
| 610 |
+
)
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
# Set up the io.parquet specific configuration.
|
| 614 |
+
parquet_engine_doc = """
|
| 615 |
+
: string
|
| 616 |
+
The default parquet reader/writer engine. Available options:
|
| 617 |
+
'auto', 'pyarrow', 'fastparquet', the default is 'auto'
|
| 618 |
+
"""
|
| 619 |
+
|
| 620 |
+
with cf.config_prefix("io.parquet"):
|
| 621 |
+
cf.register_option(
|
| 622 |
+
"engine",
|
| 623 |
+
"auto",
|
| 624 |
+
parquet_engine_doc,
|
| 625 |
+
validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]),
|
| 626 |
+
)
|
| 627 |
+
|
| 628 |
+
|
| 629 |
+
# Set up the io.sql specific configuration.
|
| 630 |
+
sql_engine_doc = """
|
| 631 |
+
: string
|
| 632 |
+
The default sql reader/writer engine. Available options:
|
| 633 |
+
'auto', 'sqlalchemy', the default is 'auto'
|
| 634 |
+
"""
|
| 635 |
+
|
| 636 |
+
with cf.config_prefix("io.sql"):
|
| 637 |
+
cf.register_option(
|
| 638 |
+
"engine",
|
| 639 |
+
"auto",
|
| 640 |
+
sql_engine_doc,
|
| 641 |
+
validator=is_one_of_factory(["auto", "sqlalchemy"]),
|
| 642 |
+
)
|
| 643 |
+
|
| 644 |
+
# ---------
|
| 645 |
+
# Plotting
|
| 646 |
+
# ---------
|
| 647 |
+
|
| 648 |
+
plotting_backend_doc = """
|
| 649 |
+
: str
|
| 650 |
+
The plotting backend to use. The default value is "matplotlib", the
|
| 651 |
+
backend provided with pandas. Other backends can be specified by
|
| 652 |
+
providing the name of the module that implements the backend.
|
| 653 |
+
"""
|
| 654 |
+
|
| 655 |
+
|
| 656 |
+
def register_plotting_backend_cb(key) -> None:
    """Option callback/validator for ``plotting.backend``.

    Eagerly loads any non-default backend so an unusable value is rejected
    at ``set_option`` time.

    Parameters
    ----------
    key : str
        The backend name being assigned to the option.
    """
    if key != "matplotlib":
        # Any third-party backend is validated by attempting to resolve it now.
        from pandas.plotting._core import _get_plot_backend

        _get_plot_backend(key)
    # "matplotlib" is the default; its validation is deliberately deferred.
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
with cf.config_prefix("plotting"):
|
| 666 |
+
cf.register_option(
|
| 667 |
+
"backend",
|
| 668 |
+
defval="matplotlib",
|
| 669 |
+
doc=plotting_backend_doc,
|
| 670 |
+
validator=register_plotting_backend_cb,
|
| 671 |
+
)
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
register_converter_doc = """
|
| 675 |
+
: bool or 'auto'.
|
| 676 |
+
Whether to register converters with matplotlib's units registry for
|
| 677 |
+
dates, times, datetimes, and Periods. Toggling to False will remove
|
| 678 |
+
the converters, restoring any converters that pandas overwrote.
|
| 679 |
+
"""
|
| 680 |
+
|
| 681 |
+
|
| 682 |
+
def register_converter_cb(key) -> None:
    """Option callback for ``plotting.matplotlib.register_converters``.

    Registers pandas' date/time/Period converters with matplotlib's units
    registry when the option is truthy, and deregisters them (restoring any
    converters pandas had overwritten) otherwise.

    Parameters
    ----------
    key : str
        The option key whose current value decides the toggle direction.
    """
    # Deferred import: avoid importing the plotting machinery at module load.
    from pandas.plotting import (
        deregister_matplotlib_converters,
        register_matplotlib_converters,
    )

    enabled = cf.get_option(key)
    if enabled:
        register_matplotlib_converters()
    else:
        deregister_matplotlib_converters()
|
| 692 |
+
|
| 693 |
+
|
| 694 |
+
with cf.config_prefix("plotting.matplotlib"):
|
| 695 |
+
cf.register_option(
|
| 696 |
+
"register_converters",
|
| 697 |
+
"auto",
|
| 698 |
+
register_converter_doc,
|
| 699 |
+
validator=is_one_of_factory(["auto", True, False]),
|
| 700 |
+
cb=register_converter_cb,
|
| 701 |
+
)
|
| 702 |
+
|
| 703 |
+
# ------
|
| 704 |
+
# Styler
|
| 705 |
+
# ------
|
| 706 |
+
|
| 707 |
+
styler_sparse_index_doc = """
|
| 708 |
+
: bool
|
| 709 |
+
Whether to sparsify the display of a hierarchical index. Setting to False will
|
| 710 |
+
display each explicit level element in a hierarchical key for each row.
|
| 711 |
+
"""
|
| 712 |
+
|
| 713 |
+
styler_sparse_columns_doc = """
|
| 714 |
+
: bool
|
| 715 |
+
Whether to sparsify the display of hierarchical columns. Setting to False will
|
| 716 |
+
display each explicit level element in a hierarchical key for each column.
|
| 717 |
+
"""
|
| 718 |
+
|
| 719 |
+
styler_render_repr = """
|
| 720 |
+
: str
|
| 721 |
+
Determine which output to use in Jupyter Notebook in {"html", "latex"}.
|
| 722 |
+
"""
|
| 723 |
+
|
| 724 |
+
styler_max_elements = """
|
| 725 |
+
: int
|
| 726 |
+
The maximum number of data-cell (<td>) elements that will be rendered before
|
| 727 |
+
trimming will occur over columns, rows or both if needed.
|
| 728 |
+
"""
|
| 729 |
+
|
| 730 |
+
styler_max_rows = """
|
| 731 |
+
: int, optional
|
| 732 |
+
The maximum number of rows that will be rendered. May still be reduced to
|
| 733 |
+
satisfy ``max_elements``, which takes precedence.
|
| 734 |
+
"""
|
| 735 |
+
|
| 736 |
+
styler_max_columns = """
|
| 737 |
+
: int, optional
|
| 738 |
+
The maximum number of columns that will be rendered. May still be reduced to
|
| 739 |
+
satisfy ``max_elements``, which takes precedence.
|
| 740 |
+
"""
|
| 741 |
+
|
| 742 |
+
styler_precision = """
|
| 743 |
+
: int
|
| 744 |
+
The precision for floats and complex numbers.
|
| 745 |
+
"""
|
| 746 |
+
|
| 747 |
+
styler_decimal = """
|
| 748 |
+
: str
|
| 749 |
+
The character representation for the decimal separator for floats and complex.
|
| 750 |
+
"""
|
| 751 |
+
|
| 752 |
+
styler_thousands = """
|
| 753 |
+
: str, optional
|
| 754 |
+
The character representation for thousands separator for floats, int and complex.
|
| 755 |
+
"""
|
| 756 |
+
|
| 757 |
+
styler_na_rep = """
|
| 758 |
+
: str, optional
|
| 759 |
+
The string representation for values identified as missing.
|
| 760 |
+
"""
|
| 761 |
+
|
| 762 |
+
styler_escape = """
|
| 763 |
+
: str, optional
|
| 764 |
+
Whether to escape certain characters according to the given context; html or latex.
|
| 765 |
+
"""
|
| 766 |
+
|
| 767 |
+
styler_formatter = """
|
| 768 |
+
: str, callable, dict, optional
|
| 769 |
+
A formatter object to be used as default within ``Styler.format``.
|
| 770 |
+
"""
|
| 771 |
+
|
| 772 |
+
styler_multirow_align = """
|
| 773 |
+
: {"c", "t", "b"}
|
| 774 |
+
The specifier for vertical alignment of sparsified LaTeX multirows.
|
| 775 |
+
"""
|
| 776 |
+
|
| 777 |
+
styler_multicol_align = r"""
|
| 778 |
+
: {"r", "c", "l", "naive-l", "naive-r"}
|
| 779 |
+
The specifier for horizontal alignment of sparsified LaTeX multicolumns. Pipe
|
| 780 |
+
decorators can also be added to non-naive values to draw vertical
|
| 781 |
+
rules, e.g. "\|r" will draw a rule on the left side of right aligned merged cells.
|
| 782 |
+
"""
|
| 783 |
+
|
| 784 |
+
styler_hrules = """
|
| 785 |
+
: bool
|
| 786 |
+
Whether to add horizontal rules on top and bottom and below the headers.
|
| 787 |
+
"""
|
| 788 |
+
|
| 789 |
+
styler_environment = """
|
| 790 |
+
: str
|
| 791 |
+
The environment to replace ``\\begin{table}``. If "longtable" is used results
|
| 792 |
+
in a specific longtable environment format.
|
| 793 |
+
"""
|
| 794 |
+
|
| 795 |
+
styler_encoding = """
|
| 796 |
+
: str
|
| 797 |
+
The encoding used for output HTML and LaTeX files.
|
| 798 |
+
"""
|
| 799 |
+
|
| 800 |
+
styler_mathjax = """
|
| 801 |
+
: bool
|
| 802 |
+
If False will render special CSS classes to table attributes that indicate Mathjax
|
| 803 |
+
will not be used in Jupyter Notebook.
|
| 804 |
+
"""
|
| 805 |
+
|
| 806 |
+
with cf.config_prefix("styler"):
|
| 807 |
+
cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool)
|
| 808 |
+
|
| 809 |
+
cf.register_option(
|
| 810 |
+
"sparse.columns", True, styler_sparse_columns_doc, validator=is_bool
|
| 811 |
+
)
|
| 812 |
+
|
| 813 |
+
cf.register_option(
|
| 814 |
+
"render.repr",
|
| 815 |
+
"html",
|
| 816 |
+
styler_render_repr,
|
| 817 |
+
validator=is_one_of_factory(["html", "latex"]),
|
| 818 |
+
)
|
| 819 |
+
|
| 820 |
+
cf.register_option(
|
| 821 |
+
"render.max_elements",
|
| 822 |
+
2**18,
|
| 823 |
+
styler_max_elements,
|
| 824 |
+
validator=is_nonnegative_int,
|
| 825 |
+
)
|
| 826 |
+
|
| 827 |
+
cf.register_option(
|
| 828 |
+
"render.max_rows",
|
| 829 |
+
None,
|
| 830 |
+
styler_max_rows,
|
| 831 |
+
validator=is_nonnegative_int,
|
| 832 |
+
)
|
| 833 |
+
|
| 834 |
+
cf.register_option(
|
| 835 |
+
"render.max_columns",
|
| 836 |
+
None,
|
| 837 |
+
styler_max_columns,
|
| 838 |
+
validator=is_nonnegative_int,
|
| 839 |
+
)
|
| 840 |
+
|
| 841 |
+
cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str)
|
| 842 |
+
|
| 843 |
+
cf.register_option("format.decimal", ".", styler_decimal, validator=is_str)
|
| 844 |
+
|
| 845 |
+
cf.register_option(
|
| 846 |
+
"format.precision", 6, styler_precision, validator=is_nonnegative_int
|
| 847 |
+
)
|
| 848 |
+
|
| 849 |
+
cf.register_option(
|
| 850 |
+
"format.thousands",
|
| 851 |
+
None,
|
| 852 |
+
styler_thousands,
|
| 853 |
+
validator=is_instance_factory([type(None), str]),
|
| 854 |
+
)
|
| 855 |
+
|
| 856 |
+
cf.register_option(
|
| 857 |
+
"format.na_rep",
|
| 858 |
+
None,
|
| 859 |
+
styler_na_rep,
|
| 860 |
+
validator=is_instance_factory([type(None), str]),
|
| 861 |
+
)
|
| 862 |
+
|
| 863 |
+
cf.register_option(
|
| 864 |
+
"format.escape",
|
| 865 |
+
None,
|
| 866 |
+
styler_escape,
|
| 867 |
+
validator=is_one_of_factory([None, "html", "latex", "latex-math"]),
|
| 868 |
+
)
|
| 869 |
+
|
| 870 |
+
cf.register_option(
|
| 871 |
+
"format.formatter",
|
| 872 |
+
None,
|
| 873 |
+
styler_formatter,
|
| 874 |
+
validator=is_instance_factory([type(None), dict, Callable, str]),
|
| 875 |
+
)
|
| 876 |
+
|
| 877 |
+
cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool)
|
| 878 |
+
|
| 879 |
+
cf.register_option(
|
| 880 |
+
"latex.multirow_align",
|
| 881 |
+
"c",
|
| 882 |
+
styler_multirow_align,
|
| 883 |
+
validator=is_one_of_factory(["c", "t", "b", "naive"]),
|
| 884 |
+
)
|
| 885 |
+
|
| 886 |
+
val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"]
|
| 887 |
+
val_mca += ["naive-l", "naive-r"]
|
| 888 |
+
cf.register_option(
|
| 889 |
+
"latex.multicol_align",
|
| 890 |
+
"r",
|
| 891 |
+
styler_multicol_align,
|
| 892 |
+
validator=is_one_of_factory(val_mca),
|
| 893 |
+
)
|
| 894 |
+
|
| 895 |
+
cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool)
|
| 896 |
+
|
| 897 |
+
cf.register_option(
|
| 898 |
+
"latex.environment",
|
| 899 |
+
None,
|
| 900 |
+
styler_environment,
|
| 901 |
+
validator=is_instance_factory([type(None), str]),
|
| 902 |
+
)
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
with cf.config_prefix("future"):
|
| 906 |
+
cf.register_option(
|
| 907 |
+
"infer_string",
|
| 908 |
+
False,
|
| 909 |
+
"Whether to infer sequence of str objects as pyarrow string "
|
| 910 |
+
"dtype, which will be the default in pandas 3.0 "
|
| 911 |
+
"(at which point this option will be deprecated).",
|
| 912 |
+
validator=is_one_of_factory([True, False]),
|
| 913 |
+
)
|
| 914 |
+
|
| 915 |
+
cf.register_option(
|
| 916 |
+
"no_silent_downcasting",
|
| 917 |
+
False,
|
| 918 |
+
"Whether to opt-in to the future behavior which will *not* silently "
|
| 919 |
+
"downcast results from Series and DataFrame `where`, `mask`, and `clip` "
|
| 920 |
+
"methods. "
|
| 921 |
+
"Silent downcasting will be removed in pandas 3.0 "
|
| 922 |
+
"(at which point this option will be deprecated).",
|
| 923 |
+
validator=is_one_of_factory([True, False]),
|
| 924 |
+
)
|
videollama2/lib/python3.10/site-packages/pandas/core/construction.py
ADDED
|
@@ -0,0 +1,824 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Constructor functions intended to be shared by pd.array, Series.__init__,
|
| 3 |
+
and Index.__new__.
|
| 4 |
+
|
| 5 |
+
These should not depend on core.internals.
|
| 6 |
+
"""
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
from collections.abc import Sequence
|
| 10 |
+
from typing import (
|
| 11 |
+
TYPE_CHECKING,
|
| 12 |
+
Optional,
|
| 13 |
+
Union,
|
| 14 |
+
cast,
|
| 15 |
+
overload,
|
| 16 |
+
)
|
| 17 |
+
import warnings
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
from numpy import ma
|
| 21 |
+
|
| 22 |
+
from pandas._config import using_pyarrow_string_dtype
|
| 23 |
+
|
| 24 |
+
from pandas._libs import lib
|
| 25 |
+
from pandas._libs.tslibs import (
|
| 26 |
+
Period,
|
| 27 |
+
get_supported_dtype,
|
| 28 |
+
is_supported_dtype,
|
| 29 |
+
)
|
| 30 |
+
from pandas._typing import (
|
| 31 |
+
AnyArrayLike,
|
| 32 |
+
ArrayLike,
|
| 33 |
+
Dtype,
|
| 34 |
+
DtypeObj,
|
| 35 |
+
T,
|
| 36 |
+
)
|
| 37 |
+
from pandas.util._exceptions import find_stack_level
|
| 38 |
+
|
| 39 |
+
from pandas.core.dtypes.base import ExtensionDtype
|
| 40 |
+
from pandas.core.dtypes.cast import (
|
| 41 |
+
construct_1d_arraylike_from_scalar,
|
| 42 |
+
construct_1d_object_array_from_listlike,
|
| 43 |
+
maybe_cast_to_datetime,
|
| 44 |
+
maybe_cast_to_integer_array,
|
| 45 |
+
maybe_convert_platform,
|
| 46 |
+
maybe_infer_to_datetimelike,
|
| 47 |
+
maybe_promote,
|
| 48 |
+
)
|
| 49 |
+
from pandas.core.dtypes.common import (
|
| 50 |
+
is_list_like,
|
| 51 |
+
is_object_dtype,
|
| 52 |
+
is_string_dtype,
|
| 53 |
+
pandas_dtype,
|
| 54 |
+
)
|
| 55 |
+
from pandas.core.dtypes.dtypes import NumpyEADtype
|
| 56 |
+
from pandas.core.dtypes.generic import (
|
| 57 |
+
ABCDataFrame,
|
| 58 |
+
ABCExtensionArray,
|
| 59 |
+
ABCIndex,
|
| 60 |
+
ABCSeries,
|
| 61 |
+
)
|
| 62 |
+
from pandas.core.dtypes.missing import isna
|
| 63 |
+
|
| 64 |
+
import pandas.core.common as com
|
| 65 |
+
|
| 66 |
+
if TYPE_CHECKING:
|
| 67 |
+
from pandas import (
|
| 68 |
+
Index,
|
| 69 |
+
Series,
|
| 70 |
+
)
|
| 71 |
+
from pandas.core.arrays.base import ExtensionArray
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def array(
|
| 75 |
+
data: Sequence[object] | AnyArrayLike,
|
| 76 |
+
dtype: Dtype | None = None,
|
| 77 |
+
copy: bool = True,
|
| 78 |
+
) -> ExtensionArray:
|
| 79 |
+
"""
|
| 80 |
+
Create an array.
|
| 81 |
+
|
| 82 |
+
Parameters
|
| 83 |
+
----------
|
| 84 |
+
data : Sequence of objects
|
| 85 |
+
The scalars inside `data` should be instances of the
|
| 86 |
+
scalar type for `dtype`. It's expected that `data`
|
| 87 |
+
represents a 1-dimensional array of data.
|
| 88 |
+
|
| 89 |
+
When `data` is an Index or Series, the underlying array
|
| 90 |
+
will be extracted from `data`.
|
| 91 |
+
|
| 92 |
+
dtype : str, np.dtype, or ExtensionDtype, optional
|
| 93 |
+
The dtype to use for the array. This may be a NumPy
|
| 94 |
+
dtype or an extension type registered with pandas using
|
| 95 |
+
:meth:`pandas.api.extensions.register_extension_dtype`.
|
| 96 |
+
|
| 97 |
+
If not specified, there are two possibilities:
|
| 98 |
+
|
| 99 |
+
1. When `data` is a :class:`Series`, :class:`Index`, or
|
| 100 |
+
:class:`ExtensionArray`, the `dtype` will be taken
|
| 101 |
+
from the data.
|
| 102 |
+
2. Otherwise, pandas will attempt to infer the `dtype`
|
| 103 |
+
from the data.
|
| 104 |
+
|
| 105 |
+
Note that when `data` is a NumPy array, ``data.dtype`` is
|
| 106 |
+
*not* used for inferring the array type. This is because
|
| 107 |
+
NumPy cannot represent all the types of data that can be
|
| 108 |
+
held in extension arrays.
|
| 109 |
+
|
| 110 |
+
Currently, pandas will infer an extension dtype for sequences of
|
| 111 |
+
|
| 112 |
+
============================== =======================================
|
| 113 |
+
Scalar Type Array Type
|
| 114 |
+
============================== =======================================
|
| 115 |
+
:class:`pandas.Interval` :class:`pandas.arrays.IntervalArray`
|
| 116 |
+
:class:`pandas.Period` :class:`pandas.arrays.PeriodArray`
|
| 117 |
+
:class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray`
|
| 118 |
+
:class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray`
|
| 119 |
+
:class:`int` :class:`pandas.arrays.IntegerArray`
|
| 120 |
+
:class:`float` :class:`pandas.arrays.FloatingArray`
|
| 121 |
+
:class:`str` :class:`pandas.arrays.StringArray` or
|
| 122 |
+
:class:`pandas.arrays.ArrowStringArray`
|
| 123 |
+
:class:`bool` :class:`pandas.arrays.BooleanArray`
|
| 124 |
+
============================== =======================================
|
| 125 |
+
|
| 126 |
+
The ExtensionArray created when the scalar type is :class:`str` is determined by
|
| 127 |
+
``pd.options.mode.string_storage`` if the dtype is not explicitly given.
|
| 128 |
+
|
| 129 |
+
For all other cases, NumPy's usual inference rules will be used.
|
| 130 |
+
copy : bool, default True
|
| 131 |
+
Whether to copy the data, even if not necessary. Depending
|
| 132 |
+
on the type of `data`, creating the new array may require
|
| 133 |
+
copying data, even if ``copy=False``.
|
| 134 |
+
|
| 135 |
+
Returns
|
| 136 |
+
-------
|
| 137 |
+
ExtensionArray
|
| 138 |
+
The newly created array.
|
| 139 |
+
|
| 140 |
+
Raises
|
| 141 |
+
------
|
| 142 |
+
ValueError
|
| 143 |
+
When `data` is not 1-dimensional.
|
| 144 |
+
|
| 145 |
+
See Also
|
| 146 |
+
--------
|
| 147 |
+
numpy.array : Construct a NumPy array.
|
| 148 |
+
Series : Construct a pandas Series.
|
| 149 |
+
Index : Construct a pandas Index.
|
| 150 |
+
arrays.NumpyExtensionArray : ExtensionArray wrapping a NumPy array.
|
| 151 |
+
Series.array : Extract the array stored within a Series.
|
| 152 |
+
|
| 153 |
+
Notes
|
| 154 |
+
-----
|
| 155 |
+
Omitting the `dtype` argument means pandas will attempt to infer the
|
| 156 |
+
best array type from the values in the data. As new array types are
|
| 157 |
+
added by pandas and 3rd party libraries, the "best" array type may
|
| 158 |
+
change. We recommend specifying `dtype` to ensure that
|
| 159 |
+
|
| 160 |
+
1. the correct array type for the data is returned
|
| 161 |
+
2. the returned array type doesn't change as new extension types
|
| 162 |
+
are added by pandas and third-party libraries
|
| 163 |
+
|
| 164 |
+
Additionally, if the underlying memory representation of the returned
|
| 165 |
+
array matters, we recommend specifying the `dtype` as a concrete object
|
| 166 |
+
rather than a string alias or allowing it to be inferred. For example,
|
| 167 |
+
a future version of pandas or a 3rd-party library may include a
|
| 168 |
+
dedicated ExtensionArray for string data. In this event, the following
|
| 169 |
+
would no longer return a :class:`arrays.NumpyExtensionArray` backed by a
|
| 170 |
+
NumPy array.
|
| 171 |
+
|
| 172 |
+
>>> pd.array(['a', 'b'], dtype=str)
|
| 173 |
+
<NumpyExtensionArray>
|
| 174 |
+
['a', 'b']
|
| 175 |
+
Length: 2, dtype: str32
|
| 176 |
+
|
| 177 |
+
This would instead return the new ExtensionArray dedicated for string
|
| 178 |
+
data. If you really need the new array to be backed by a NumPy array,
|
| 179 |
+
specify that in the dtype.
|
| 180 |
+
|
| 181 |
+
>>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
|
| 182 |
+
<NumpyExtensionArray>
|
| 183 |
+
['a', 'b']
|
| 184 |
+
Length: 2, dtype: str32
|
| 185 |
+
|
| 186 |
+
Finally, Pandas has arrays that mostly overlap with NumPy
|
| 187 |
+
|
| 188 |
+
* :class:`arrays.DatetimeArray`
|
| 189 |
+
* :class:`arrays.TimedeltaArray`
|
| 190 |
+
|
| 191 |
+
When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
|
| 192 |
+
passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
|
| 193 |
+
rather than a ``NumpyExtensionArray``. This is for symmetry with the case of
|
| 194 |
+
timezone-aware data, which NumPy does not natively support.
|
| 195 |
+
|
| 196 |
+
>>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
|
| 197 |
+
<DatetimeArray>
|
| 198 |
+
['2015-01-01 00:00:00', '2016-01-01 00:00:00']
|
| 199 |
+
Length: 2, dtype: datetime64[ns]
|
| 200 |
+
|
| 201 |
+
>>> pd.array(["1h", "2h"], dtype='timedelta64[ns]')
|
| 202 |
+
<TimedeltaArray>
|
| 203 |
+
['0 days 01:00:00', '0 days 02:00:00']
|
| 204 |
+
Length: 2, dtype: timedelta64[ns]
|
| 205 |
+
|
| 206 |
+
Examples
|
| 207 |
+
--------
|
| 208 |
+
If a dtype is not specified, pandas will infer the best dtype from the values.
|
| 209 |
+
See the description of `dtype` for the types pandas infers for.
|
| 210 |
+
|
| 211 |
+
>>> pd.array([1, 2])
|
| 212 |
+
<IntegerArray>
|
| 213 |
+
[1, 2]
|
| 214 |
+
Length: 2, dtype: Int64
|
| 215 |
+
|
| 216 |
+
>>> pd.array([1, 2, np.nan])
|
| 217 |
+
<IntegerArray>
|
| 218 |
+
[1, 2, <NA>]
|
| 219 |
+
Length: 3, dtype: Int64
|
| 220 |
+
|
| 221 |
+
>>> pd.array([1.1, 2.2])
|
| 222 |
+
<FloatingArray>
|
| 223 |
+
[1.1, 2.2]
|
| 224 |
+
Length: 2, dtype: Float64
|
| 225 |
+
|
| 226 |
+
>>> pd.array(["a", None, "c"])
|
| 227 |
+
<StringArray>
|
| 228 |
+
['a', <NA>, 'c']
|
| 229 |
+
Length: 3, dtype: string
|
| 230 |
+
|
| 231 |
+
>>> with pd.option_context("string_storage", "pyarrow"):
|
| 232 |
+
... arr = pd.array(["a", None, "c"])
|
| 233 |
+
...
|
| 234 |
+
>>> arr
|
| 235 |
+
<ArrowStringArray>
|
| 236 |
+
['a', <NA>, 'c']
|
| 237 |
+
Length: 3, dtype: string
|
| 238 |
+
|
| 239 |
+
>>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
|
| 240 |
+
<PeriodArray>
|
| 241 |
+
['2000-01-01', '2000-01-01']
|
| 242 |
+
Length: 2, dtype: period[D]
|
| 243 |
+
|
| 244 |
+
You can use the string alias for `dtype`
|
| 245 |
+
|
| 246 |
+
>>> pd.array(['a', 'b', 'a'], dtype='category')
|
| 247 |
+
['a', 'b', 'a']
|
| 248 |
+
Categories (2, object): ['a', 'b']
|
| 249 |
+
|
| 250 |
+
Or specify the actual dtype
|
| 251 |
+
|
| 252 |
+
>>> pd.array(['a', 'b', 'a'],
|
| 253 |
+
... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
|
| 254 |
+
['a', 'b', 'a']
|
| 255 |
+
Categories (3, object): ['a' < 'b' < 'c']
|
| 256 |
+
|
| 257 |
+
If pandas does not infer a dedicated extension type a
|
| 258 |
+
:class:`arrays.NumpyExtensionArray` is returned.
|
| 259 |
+
|
| 260 |
+
>>> pd.array([1 + 1j, 3 + 2j])
|
| 261 |
+
<NumpyExtensionArray>
|
| 262 |
+
[(1+1j), (3+2j)]
|
| 263 |
+
Length: 2, dtype: complex128
|
| 264 |
+
|
| 265 |
+
As mentioned in the "Notes" section, new extension types may be added
|
| 266 |
+
in the future (by pandas or 3rd party libraries), causing the return
|
| 267 |
+
value to no longer be a :class:`arrays.NumpyExtensionArray`. Specify the
|
| 268 |
+
`dtype` as a NumPy dtype if you need to ensure there's no future change in
|
| 269 |
+
behavior.
|
| 270 |
+
|
| 271 |
+
>>> pd.array([1, 2], dtype=np.dtype("int32"))
|
| 272 |
+
<NumpyExtensionArray>
|
| 273 |
+
[1, 2]
|
| 274 |
+
Length: 2, dtype: int32
|
| 275 |
+
|
| 276 |
+
`data` must be 1-dimensional. A ValueError is raised when the input
|
| 277 |
+
has the wrong dimensionality.
|
| 278 |
+
|
| 279 |
+
>>> pd.array(1)
|
| 280 |
+
Traceback (most recent call last):
|
| 281 |
+
...
|
| 282 |
+
ValueError: Cannot pass scalar '1' to 'pandas.array'.
|
| 283 |
+
"""
|
| 284 |
+
from pandas.core.arrays import (
|
| 285 |
+
BooleanArray,
|
| 286 |
+
DatetimeArray,
|
| 287 |
+
ExtensionArray,
|
| 288 |
+
FloatingArray,
|
| 289 |
+
IntegerArray,
|
| 290 |
+
IntervalArray,
|
| 291 |
+
NumpyExtensionArray,
|
| 292 |
+
PeriodArray,
|
| 293 |
+
TimedeltaArray,
|
| 294 |
+
)
|
| 295 |
+
from pandas.core.arrays.string_ import StringDtype
|
| 296 |
+
|
| 297 |
+
if lib.is_scalar(data):
|
| 298 |
+
msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
|
| 299 |
+
raise ValueError(msg)
|
| 300 |
+
elif isinstance(data, ABCDataFrame):
|
| 301 |
+
raise TypeError("Cannot pass DataFrame to 'pandas.array'")
|
| 302 |
+
|
| 303 |
+
if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)):
|
| 304 |
+
# Note: we exclude np.ndarray here, will do type inference on it
|
| 305 |
+
dtype = data.dtype
|
| 306 |
+
|
| 307 |
+
data = extract_array(data, extract_numpy=True)
|
| 308 |
+
|
| 309 |
+
# this returns None for not-found dtypes.
|
| 310 |
+
if dtype is not None:
|
| 311 |
+
dtype = pandas_dtype(dtype)
|
| 312 |
+
|
| 313 |
+
if isinstance(data, ExtensionArray) and (dtype is None or data.dtype == dtype):
|
| 314 |
+
# e.g. TimedeltaArray[s], avoid casting to NumpyExtensionArray
|
| 315 |
+
if copy:
|
| 316 |
+
return data.copy()
|
| 317 |
+
return data
|
| 318 |
+
|
| 319 |
+
if isinstance(dtype, ExtensionDtype):
|
| 320 |
+
cls = dtype.construct_array_type()
|
| 321 |
+
return cls._from_sequence(data, dtype=dtype, copy=copy)
|
| 322 |
+
|
| 323 |
+
if dtype is None:
|
| 324 |
+
inferred_dtype = lib.infer_dtype(data, skipna=True)
|
| 325 |
+
if inferred_dtype == "period":
|
| 326 |
+
period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data)
|
| 327 |
+
return PeriodArray._from_sequence(period_data, copy=copy)
|
| 328 |
+
|
| 329 |
+
elif inferred_dtype == "interval":
|
| 330 |
+
return IntervalArray(data, copy=copy)
|
| 331 |
+
|
| 332 |
+
elif inferred_dtype.startswith("datetime"):
|
| 333 |
+
# datetime, datetime64
|
| 334 |
+
try:
|
| 335 |
+
return DatetimeArray._from_sequence(data, copy=copy)
|
| 336 |
+
except ValueError:
|
| 337 |
+
# Mixture of timezones, fall back to NumpyExtensionArray
|
| 338 |
+
pass
|
| 339 |
+
|
| 340 |
+
elif inferred_dtype.startswith("timedelta"):
|
| 341 |
+
# timedelta, timedelta64
|
| 342 |
+
return TimedeltaArray._from_sequence(data, copy=copy)
|
| 343 |
+
|
| 344 |
+
elif inferred_dtype == "string":
|
| 345 |
+
# StringArray/ArrowStringArray depending on pd.options.mode.string_storage
|
| 346 |
+
dtype = StringDtype()
|
| 347 |
+
cls = dtype.construct_array_type()
|
| 348 |
+
return cls._from_sequence(data, dtype=dtype, copy=copy)
|
| 349 |
+
|
| 350 |
+
elif inferred_dtype == "integer":
|
| 351 |
+
return IntegerArray._from_sequence(data, copy=copy)
|
| 352 |
+
elif inferred_dtype == "empty" and not hasattr(data, "dtype") and not len(data):
|
| 353 |
+
return FloatingArray._from_sequence(data, copy=copy)
|
| 354 |
+
elif (
|
| 355 |
+
inferred_dtype in ("floating", "mixed-integer-float")
|
| 356 |
+
and getattr(data, "dtype", None) != np.float16
|
| 357 |
+
):
|
| 358 |
+
# GH#44715 Exclude np.float16 bc FloatingArray does not support it;
|
| 359 |
+
# we will fall back to NumpyExtensionArray.
|
| 360 |
+
return FloatingArray._from_sequence(data, copy=copy)
|
| 361 |
+
|
| 362 |
+
elif inferred_dtype == "boolean":
|
| 363 |
+
return BooleanArray._from_sequence(data, dtype="boolean", copy=copy)
|
| 364 |
+
|
| 365 |
+
# Pandas overrides NumPy for
|
| 366 |
+
# 1. datetime64[ns,us,ms,s]
|
| 367 |
+
# 2. timedelta64[ns,us,ms,s]
|
| 368 |
+
# so that a DatetimeArray is returned.
|
| 369 |
+
if lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype):
|
| 370 |
+
return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
|
| 371 |
+
if lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype):
|
| 372 |
+
return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)
|
| 373 |
+
|
| 374 |
+
elif lib.is_np_dtype(dtype, "mM"):
|
| 375 |
+
warnings.warn(
|
| 376 |
+
r"datetime64 and timedelta64 dtype resolutions other than "
|
| 377 |
+
r"'s', 'ms', 'us', and 'ns' are deprecated. "
|
| 378 |
+
r"In future releases passing unsupported resolutions will "
|
| 379 |
+
r"raise an exception.",
|
| 380 |
+
FutureWarning,
|
| 381 |
+
stacklevel=find_stack_level(),
|
| 382 |
+
)
|
| 383 |
+
|
| 384 |
+
return NumpyExtensionArray._from_sequence(data, dtype=dtype, copy=copy)
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
# _typ markers for the pandas objects that extract_array should unbox
# (i.e. the Series/Index family).
_typs = frozenset(
    {
        "index",
        "rangeindex",
        "multiindex",
        "datetimeindex",
        "timedeltaindex",
        "periodindex",
        "categoricalindex",
        "intervalindex",
        "series",
    }
)


@overload
def extract_array(
    obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ...
) -> ArrayLike:
    ...


@overload
def extract_array(
    obj: T, extract_numpy: bool = ..., extract_range: bool = ...
) -> T | ArrayLike:
    ...


def extract_array(
    obj: T, extract_numpy: bool = False, extract_range: bool = False
) -> T | ArrayLike:
    """
    Extract the ndarray or ExtensionArray from a Series or Index.

    Any other object is returned unchanged.

    Parameters
    ----------
    obj : object
        For Series / Index, the underlying ExtensionArray is unboxed.
    extract_numpy : bool, default False
        Whether to extract the ndarray from a NumpyExtensionArray.
    extract_range : bool, default False
        If we have a RangeIndex, return range._values if True
        (which is a materialized integer ndarray), otherwise return unchanged.

    Returns
    -------
    arr : object

    Examples
    --------
    >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
    ['a', 'b', 'c']
    Categories (3, object): ['a', 'b', 'c']

    Other objects like lists, arrays, and DataFrames are just passed through.

    >>> extract_array([1, 2, 3])
    [1, 2, 3]

    For an ndarray-backed Series / Index the ndarray is returned.

    >>> extract_array(pd.Series([1, 2, 3]))
    array([1, 2, 3])

    To extract all the way down to the ndarray, pass ``extract_numpy=True``.

    >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
    array([1, 2, 3])
    """
    kind = getattr(obj, "_typ", None)

    if kind not in _typs:
        # Not a Series/Index; optionally unwrap a NumpyExtensionArray.
        if extract_numpy and kind == "npy_extension":
            # i.e. isinstance(obj, ABCNumpyExtensionArray)
            # error: "T" has no attribute "to_numpy"
            return obj.to_numpy()  # type: ignore[attr-defined]
        return obj

    # Series / Index (i.e. isinstance(obj, (ABCIndex, ABCSeries))): unbox the
    # underlying values. A RangeIndex is only materialized when asked to.
    if kind == "rangeindex" and not extract_range:
        return obj
    # error: "T" has no attribute "_values"
    return obj._values  # type: ignore[attr-defined]
|
| 480 |
+
|
| 481 |
+
def ensure_wrapped_if_datetimelike(arr):
    """
    Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray.

    Anything that is not a datetimelike ndarray is returned unchanged.
    """
    if not isinstance(arr, np.ndarray):
        return arr

    kind = arr.dtype.kind
    if kind == "M":
        from pandas.core.arrays import DatetimeArray

        # Cast unsupported resolutions up to the nearest supported one.
        return DatetimeArray._from_sequence(arr, dtype=get_supported_dtype(arr.dtype))

    if kind == "m":
        from pandas.core.arrays import TimedeltaArray

        return TimedeltaArray._from_sequence(arr, dtype=get_supported_dtype(arr.dtype))

    return arr
| 500 |
+
|
| 501 |
+
def sanitize_masked_array(data: ma.MaskedArray) -> np.ndarray:
    """
    Convert numpy MaskedArray to ensure mask is softened.
    """
    mask = ma.getmaskarray(data)
    if not mask.any():
        # Nothing masked: just defensively copy.
        return data.copy()

    # Promote the dtype so the fill value (NaN/NaT-like) fits.
    promoted, fill_value = maybe_promote(data.dtype, np.nan)
    promoted = cast(np.dtype, promoted)
    result = ma.asarray(data.astype(promoted, copy=True))
    result.soften_mask()  # set hardmask False if it was True
    result[mask] = fill_value
    return result
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
def sanitize_array(
    data,
    index: Index | None,
    dtype: DtypeObj | None = None,
    copy: bool = False,
    *,
    allow_2d: bool = False,
) -> ArrayLike:
    """
    Sanitize input data to an ndarray or ExtensionArray, copy if specified,
    coerce to the dtype if specified.

    This is the workhorse behind Series/Index construction: it normalizes
    MaskedArrays, scalars, ranges, ExtensionArrays, ndarrays, array-likes,
    and generic iterables into a 1-D (unless ``allow_2d``) array.

    Parameters
    ----------
    data : Any
        The raw input being sanitized.
    index : Index or None, default None
        Used to broadcast scalar ``data`` to the right length; required when
        ``data`` is not list-like.
    dtype : np.dtype, ExtensionDtype, or None, default None
        Target dtype; None means infer.
    copy : bool, default False
        Whether the result must not share memory with ``data``.
    allow_2d : bool, default False
        If False, raise if we have a 2D Arraylike.

    Returns
    -------
    np.ndarray or ExtensionArray

    Raises
    ------
    ValueError
        If ``data`` is a scalar and ``index`` is None, or (via
        ``_sanitize_ndim``) if the data is not 1-dimensional.
    """
    # Remember the caller-supplied dtype: the pyarrow-string fast paths below
    # must only trigger when the user did not explicitly request a dtype.
    original_dtype = dtype
    if isinstance(data, ma.MaskedArray):
        data = sanitize_masked_array(data)

    if isinstance(dtype, NumpyEADtype):
        # Avoid ending up with a NumpyExtensionArray
        dtype = dtype.numpy_dtype

    # Track object-dtype Index inputs so we can skip string re-inference below.
    object_index = False
    if isinstance(data, ABCIndex) and data.dtype == object and dtype is None:
        object_index = True

    # extract ndarray or ExtensionArray, ensure we have no NumpyExtensionArray
    data = extract_array(data, extract_numpy=True, extract_range=True)

    if isinstance(data, np.ndarray) and data.ndim == 0:
        # 0-d ndarray: treat it as the scalar it wraps, preserving its dtype.
        if dtype is None:
            dtype = data.dtype
        data = lib.item_from_zerodim(data)
    elif isinstance(data, range):
        # GH#16804
        data = range_to_ndarray(data)
        # range_to_ndarray already produced a fresh array; no copy needed.
        copy = False

    if not is_list_like(data):
        # Scalar: broadcast to len(index).
        if index is None:
            raise ValueError("index must be specified when data is not list-like")
        if (
            isinstance(data, str)
            and using_pyarrow_string_dtype()
            and original_dtype is None
        ):
            from pandas.core.arrays.string_ import StringDtype

            dtype = StringDtype("pyarrow_numpy")
        data = construct_1d_arraylike_from_scalar(data, len(index), dtype)

        return data

    elif isinstance(data, ABCExtensionArray):
        # it is already ensured above this is not a NumpyExtensionArray
        # Until GH#49309 is fixed this check needs to come before the
        # ExtensionDtype check
        if dtype is not None:
            subarr = data.astype(dtype, copy=copy)
        elif copy:
            subarr = data.copy()
        else:
            subarr = data

    elif isinstance(dtype, ExtensionDtype):
        # create an extension array from its dtype
        _sanitize_non_ordered(data)
        cls = dtype.construct_array_type()
        subarr = cls._from_sequence(data, dtype=dtype, copy=copy)

    # GH#846
    elif isinstance(data, np.ndarray):
        if isinstance(data, np.matrix):
            # np.matrix -> plain 2D ndarray
            data = data.A

        if dtype is None:
            subarr = data
            if data.dtype == object:
                subarr = maybe_infer_to_datetimelike(data)
                if (
                    object_index
                    and using_pyarrow_string_dtype()
                    and is_string_dtype(subarr)
                ):
                    # Avoid inference when string option is set
                    subarr = data
            elif data.dtype.kind == "U" and using_pyarrow_string_dtype():
                from pandas.core.arrays.string_ import StringDtype

                dtype = StringDtype(storage="pyarrow_numpy")
                subarr = dtype.construct_array_type()._from_sequence(data, dtype=dtype)

            if subarr is data and copy:
                # No conversion happened above but the caller asked for a copy.
                subarr = subarr.copy()

        else:
            # we will try to copy by-definition here
            subarr = _try_cast(data, dtype, copy)

    elif hasattr(data, "__array__"):
        # e.g. dask array GH#38645
        if not copy:
            data = np.asarray(data)
        else:
            data = np.array(data, copy=copy)
        # Recurse with the materialized ndarray; copy already handled above.
        return sanitize_array(
            data,
            index=index,
            dtype=dtype,
            copy=False,
            allow_2d=allow_2d,
        )

    else:
        _sanitize_non_ordered(data)
        # materialize e.g. generators, convert e.g. tuples, abc.ValueView
        data = list(data)

        if len(data) == 0 and dtype is None:
            # We default to float64, matching numpy
            subarr = np.array([], dtype=np.float64)

        elif dtype is not None:
            subarr = _try_cast(data, dtype, copy)

        else:
            subarr = maybe_convert_platform(data)
            if subarr.dtype == object:
                subarr = cast(np.ndarray, subarr)
                subarr = maybe_infer_to_datetimelike(subarr)

    subarr = _sanitize_ndim(subarr, data, dtype, index, allow_2d=allow_2d)

    if isinstance(subarr, np.ndarray):
        # at this point we should have dtype be None or subarr.dtype == dtype
        dtype = cast(np.dtype, dtype)
        subarr = _sanitize_str_dtypes(subarr, data, dtype, copy)

    return subarr
|
| 667 |
+
|
| 668 |
+
|
| 669 |
+
def range_to_ndarray(rng: range) -> np.ndarray:
    """
    Cast a range object to ndarray.
    """
    # GH#30171 perf avoid realizing range as a list in np.array
    try:
        return np.arange(rng.start, rng.stop, rng.step, dtype="int64")
    except OverflowError:
        pass

    # GH#30173 handling for ranges that overflow int64: a strictly
    # non-negative range may still fit in uint64.
    non_negative = (rng.start >= 0 and rng.step > 0) or (rng.step < 0 <= rng.stop)
    if non_negative:
        try:
            return np.arange(rng.start, rng.stop, rng.step, dtype="uint64")
        except OverflowError:
            pass

    # Last resort: fall back to Python ints in an object array.
    return construct_1d_object_array_from_listlike(list(rng))
|
| 686 |
+
|
| 687 |
+
|
| 688 |
+
def _sanitize_non_ordered(data) -> None:
|
| 689 |
+
"""
|
| 690 |
+
Raise only for unordered sets, e.g., not for dict_keys
|
| 691 |
+
"""
|
| 692 |
+
if isinstance(data, (set, frozenset)):
|
| 693 |
+
raise TypeError(f"'{type(data).__name__}' type is unordered")
|
| 694 |
+
|
| 695 |
+
|
| 696 |
+
def _sanitize_ndim(
    result: ArrayLike,
    data,
    dtype: DtypeObj | None,
    index: Index | None,
    *,
    allow_2d: bool = False,
) -> ArrayLike:
    """
    Ensure we have a 1-dimensional result array.

    Parameters
    ----------
    result : np.ndarray or ExtensionArray
        The candidate result produced so far.
    data : Any
        The original input; used to rebuild a tuple-safe object array when
        ``result`` came out with too many dimensions.
    dtype : np.dtype, ExtensionDtype, or None
        Requested dtype, used only on the >1-D fallback path.
    index : Index or None
        If given, a length-1 ``result`` is repeated to ``len(index)``.
    allow_2d : bool, default False
        If True, a 2-D ndarray input is passed through unchanged.

    Raises
    ------
    ValueError
        If ``result`` is 0-dimensional, or if the data is a >1-D ndarray
        and ``allow_2d`` is False.
    """
    if getattr(result, "ndim", 0) == 0:
        raise ValueError("result should be arraylike with ndim > 0")

    if result.ndim == 1:
        # the result that we want
        result = _maybe_repeat(result, index)

    elif result.ndim > 1:
        if isinstance(data, np.ndarray):
            # Genuinely multi-dimensional ndarray input: allow or raise.
            if allow_2d:
                return result
            raise ValueError(
                f"Data must be 1-dimensional, got ndarray of shape {data.shape} instead"
            )
        if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype):
            # i.e. NumpyEADtype("O")

            # Rebuild from the original data so nested tuples stay as scalar
            # elements rather than being broadcast into extra dimensions.
            result = com.asarray_tuplesafe(data, dtype=np.dtype("object"))
            cls = dtype.construct_array_type()
            result = cls._from_sequence(result, dtype=dtype)
        else:
            # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type
            # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[str,
            # dtype[Any], None]"
            result = com.asarray_tuplesafe(data, dtype=dtype)  # type: ignore[arg-type]
    return result
|
| 733 |
+
|
| 734 |
+
|
| 735 |
+
def _sanitize_str_dtypes(
|
| 736 |
+
result: np.ndarray, data, dtype: np.dtype | None, copy: bool
|
| 737 |
+
) -> np.ndarray:
|
| 738 |
+
"""
|
| 739 |
+
Ensure we have a dtype that is supported by pandas.
|
| 740 |
+
"""
|
| 741 |
+
|
| 742 |
+
# This is to prevent mixed-type Series getting all casted to
|
| 743 |
+
# NumPy string type, e.g. NaN --> '-1#IND'.
|
| 744 |
+
if issubclass(result.dtype.type, str):
|
| 745 |
+
# GH#16605
|
| 746 |
+
# If not empty convert the data to dtype
|
| 747 |
+
# GH#19853: If data is a scalar, result has already the result
|
| 748 |
+
if not lib.is_scalar(data):
|
| 749 |
+
if not np.all(isna(data)):
|
| 750 |
+
data = np.asarray(data, dtype=dtype)
|
| 751 |
+
if not copy:
|
| 752 |
+
result = np.asarray(data, dtype=object)
|
| 753 |
+
else:
|
| 754 |
+
result = np.array(data, dtype=object, copy=copy)
|
| 755 |
+
return result
|
| 756 |
+
|
| 757 |
+
|
| 758 |
+
def _maybe_repeat(arr: ArrayLike, index: Index | None) -> ArrayLike:
|
| 759 |
+
"""
|
| 760 |
+
If we have a length-1 array and an index describing how long we expect
|
| 761 |
+
the result to be, repeat the array.
|
| 762 |
+
"""
|
| 763 |
+
if index is not None:
|
| 764 |
+
if 1 == len(arr) != len(index):
|
| 765 |
+
arr = arr.repeat(len(index))
|
| 766 |
+
return arr
|
| 767 |
+
|
| 768 |
+
|
| 769 |
+
def _try_cast(
    arr: list | np.ndarray,
    dtype: np.dtype,
    copy: bool,
) -> ArrayLike:
    """
    Convert input to numpy ndarray and optionally cast to a given dtype.

    Parameters
    ----------
    arr : ndarray or list
        Excludes: ExtensionArray, Series, Index.
    dtype : np.dtype
        The target numpy dtype; dispatch below is on its kind.
    copy : bool
        If False, don't copy the data if not needed.

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    is_ndarray = isinstance(arr, np.ndarray)

    if dtype == object:
        if not is_ndarray:
            # list input: build an object array without letting numpy
            # broadcast nested list-likes into extra dimensions.
            subarr = construct_1d_object_array_from_listlike(arr)
            return subarr
        # Wrap datetimelike ndarrays first so astype(object) yields
        # Timestamp/Timedelta objects rather than raw np.datetime64 values.
        return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy)

    elif dtype.kind == "U":
        # TODO: test cases with arr.dtype.kind in "mM"
        if is_ndarray:
            arr = cast(np.ndarray, arr)
            shape = arr.shape
            # ensure_string_array works on 1-D input; flatten and restore.
            if arr.ndim > 1:
                arr = arr.ravel()
        else:
            shape = (len(arr),)
        return lib.ensure_string_array(arr, convert_na_value=False, copy=copy).reshape(
            shape
        )

    elif dtype.kind in "mM":
        # datetime64 / timedelta64 targets get dedicated casting logic.
        return maybe_cast_to_datetime(arr, dtype)

    # GH#15832: Check if we are requesting a numeric dtype and
    # that we can convert the data to the requested dtype.
    elif dtype.kind in "iu":
        # this will raise if we have e.g. floats

        subarr = maybe_cast_to_integer_array(arr, dtype)
    elif not copy:
        subarr = np.asarray(arr, dtype=dtype)
    else:
        subarr = np.array(arr, dtype=dtype, copy=copy)

    return subarr
|