Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- parrot/share/terminfo/x/xerox +0 -0
- parrot/share/terminfo/x/xnuppc+b +0 -0
- parrot/share/terminfo/x/xnuppc-112x37 +0 -0
- parrot/share/terminfo/x/xterm+alt1049 +0 -0
- parrot/share/terminfo/x/xterm+nofkeys +0 -0
- parrot/share/terminfo/x/xterm+sl-alt +0 -0
- parrot/share/terminfo/x/xterm+sm+1006 +0 -0
- parrot/share/terminfo/x/xterm+tmux2 +0 -0
- parrot/share/terminfo/x/xterm-xfree86 +0 -0
- videollama2/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/channels.cpython-310.pyc +3 -0
- videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py +2348 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__init__.py +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/accessors.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/category.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimelike.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimes.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/extension.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/frozen.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/interval.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/period.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/range.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/accessors.py +643 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/api.py +388 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/base.py +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/category.py +513 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/datetimelike.py +843 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/datetimes.py +1127 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/extension.py +172 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/frozen.py +120 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/interval.py +1136 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/multi.py +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/period.py +614 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/range.py +1187 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexes/timedeltas.py +356 -0
- videollama2/lib/python3.10/site-packages/pandas/core/methods/__init__.py +0 -0
.gitattributes
CHANGED
|
@@ -976,3 +976,4 @@ parrot/lib/python3.10/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansMono
|
|
| 976 |
parrot/lib/ossl-modules/legacy.so filter=lfs diff=lfs merge=lfs -text
|
| 977 |
vllm/lib/python3.10/site-packages/cupy_backends/cuda/libs/cusparse.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 978 |
parrot/lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 976 |
parrot/lib/ossl-modules/legacy.so filter=lfs diff=lfs merge=lfs -text
|
| 977 |
vllm/lib/python3.10/site-packages/cupy_backends/cuda/libs/cusparse.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 978 |
parrot/lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 979 |
+
videollama2/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/channels.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
parrot/share/terminfo/x/xerox
ADDED
|
Binary file (429 Bytes). View file
|
|
|
parrot/share/terminfo/x/xnuppc+b
ADDED
|
Binary file (991 Bytes). View file
|
|
|
parrot/share/terminfo/x/xnuppc-112x37
ADDED
|
Binary file (1.22 kB). View file
|
|
|
parrot/share/terminfo/x/xterm+alt1049
ADDED
|
Binary file (144 Bytes). View file
|
|
|
parrot/share/terminfo/x/xterm+nofkeys
ADDED
|
Binary file (2.35 kB). View file
|
|
|
parrot/share/terminfo/x/xterm+sl-alt
ADDED
|
Binary file (376 Bytes). View file
|
|
|
parrot/share/terminfo/x/xterm+sm+1006
ADDED
|
Binary file (869 Bytes). View file
|
|
|
parrot/share/terminfo/x/xterm+tmux2
ADDED
|
Binary file (162 Bytes). View file
|
|
|
parrot/share/terminfo/x/xterm-xfree86
ADDED
|
Binary file (2.24 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/channels.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:400c2103ded88fbf6e3a9c86dd335f4c87529d068e4894fe19108a8a98c967c6
|
| 3 |
+
size 1002305
|
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (175 Bytes). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc
ADDED
|
Binary file (1.27 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc
ADDED
|
Binary file (6.7 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (18.5 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc
ADDED
|
Binary file (39 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc
ADDED
|
Binary file (42 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc
ADDED
|
Binary file (10.6 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc
ADDED
|
Binary file (62.5 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc
ADDED
|
Binary file (3.22 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc
ADDED
|
Binary file (9.54 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc
ADDED
|
Binary file (19.5 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py
ADDED
|
@@ -0,0 +1,2348 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Define extension dtypes.
|
| 3 |
+
"""
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
from datetime import (
|
| 7 |
+
date,
|
| 8 |
+
datetime,
|
| 9 |
+
time,
|
| 10 |
+
timedelta,
|
| 11 |
+
)
|
| 12 |
+
from decimal import Decimal
|
| 13 |
+
import re
|
| 14 |
+
from typing import (
|
| 15 |
+
TYPE_CHECKING,
|
| 16 |
+
Any,
|
| 17 |
+
cast,
|
| 18 |
+
)
|
| 19 |
+
import warnings
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
import pytz
|
| 23 |
+
|
| 24 |
+
from pandas._libs import (
|
| 25 |
+
lib,
|
| 26 |
+
missing as libmissing,
|
| 27 |
+
)
|
| 28 |
+
from pandas._libs.interval import Interval
|
| 29 |
+
from pandas._libs.properties import cache_readonly
|
| 30 |
+
from pandas._libs.tslibs import (
|
| 31 |
+
BaseOffset,
|
| 32 |
+
NaT,
|
| 33 |
+
NaTType,
|
| 34 |
+
Period,
|
| 35 |
+
Timedelta,
|
| 36 |
+
Timestamp,
|
| 37 |
+
timezones,
|
| 38 |
+
to_offset,
|
| 39 |
+
tz_compare,
|
| 40 |
+
)
|
| 41 |
+
from pandas._libs.tslibs.dtypes import (
|
| 42 |
+
PeriodDtypeBase,
|
| 43 |
+
abbrev_to_npy_unit,
|
| 44 |
+
)
|
| 45 |
+
from pandas._libs.tslibs.offsets import BDay
|
| 46 |
+
from pandas.compat import pa_version_under10p1
|
| 47 |
+
from pandas.errors import PerformanceWarning
|
| 48 |
+
from pandas.util._exceptions import find_stack_level
|
| 49 |
+
|
| 50 |
+
from pandas.core.dtypes.base import (
|
| 51 |
+
ExtensionDtype,
|
| 52 |
+
StorageExtensionDtype,
|
| 53 |
+
register_extension_dtype,
|
| 54 |
+
)
|
| 55 |
+
from pandas.core.dtypes.generic import (
|
| 56 |
+
ABCCategoricalIndex,
|
| 57 |
+
ABCIndex,
|
| 58 |
+
ABCRangeIndex,
|
| 59 |
+
)
|
| 60 |
+
from pandas.core.dtypes.inference import (
|
| 61 |
+
is_bool,
|
| 62 |
+
is_list_like,
|
| 63 |
+
)
|
| 64 |
+
|
| 65 |
+
from pandas.util import capitalize_first_letter
|
| 66 |
+
|
| 67 |
+
if not pa_version_under10p1:
|
| 68 |
+
import pyarrow as pa
|
| 69 |
+
|
| 70 |
+
if TYPE_CHECKING:
|
| 71 |
+
from collections.abc import MutableMapping
|
| 72 |
+
from datetime import tzinfo
|
| 73 |
+
|
| 74 |
+
import pyarrow as pa # noqa: TCH004
|
| 75 |
+
|
| 76 |
+
from pandas._typing import (
|
| 77 |
+
Dtype,
|
| 78 |
+
DtypeObj,
|
| 79 |
+
IntervalClosedType,
|
| 80 |
+
Ordered,
|
| 81 |
+
Self,
|
| 82 |
+
npt,
|
| 83 |
+
type_t,
|
| 84 |
+
)
|
| 85 |
+
|
| 86 |
+
from pandas import (
|
| 87 |
+
Categorical,
|
| 88 |
+
CategoricalIndex,
|
| 89 |
+
DatetimeIndex,
|
| 90 |
+
Index,
|
| 91 |
+
IntervalIndex,
|
| 92 |
+
PeriodIndex,
|
| 93 |
+
)
|
| 94 |
+
from pandas.core.arrays import (
|
| 95 |
+
BaseMaskedArray,
|
| 96 |
+
DatetimeArray,
|
| 97 |
+
IntervalArray,
|
| 98 |
+
NumpyExtensionArray,
|
| 99 |
+
PeriodArray,
|
| 100 |
+
SparseArray,
|
| 101 |
+
)
|
| 102 |
+
from pandas.core.arrays.arrow import ArrowExtensionArray
|
| 103 |
+
|
| 104 |
+
str_type = str
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class PandasExtensionDtype(ExtensionDtype):
|
| 108 |
+
"""
|
| 109 |
+
A np.dtype duck-typed class, suitable for holding a custom dtype.
|
| 110 |
+
|
| 111 |
+
THIS IS NOT A REAL NUMPY DTYPE
|
| 112 |
+
"""
|
| 113 |
+
|
| 114 |
+
type: Any
|
| 115 |
+
kind: Any
|
| 116 |
+
# The Any type annotations above are here only because mypy seems to have a
|
| 117 |
+
# problem dealing with multiple inheritance from PandasExtensionDtype
|
| 118 |
+
# and ExtensionDtype's @properties in the subclasses below. The kind and
|
| 119 |
+
# type variables in those subclasses are explicitly typed below.
|
| 120 |
+
subdtype = None
|
| 121 |
+
str: str_type
|
| 122 |
+
num = 100
|
| 123 |
+
shape: tuple[int, ...] = ()
|
| 124 |
+
itemsize = 8
|
| 125 |
+
base: DtypeObj | None = None
|
| 126 |
+
isbuiltin = 0
|
| 127 |
+
isnative = 0
|
| 128 |
+
_cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
|
| 129 |
+
|
| 130 |
+
def __repr__(self) -> str_type:
|
| 131 |
+
"""
|
| 132 |
+
Return a string representation for a particular object.
|
| 133 |
+
"""
|
| 134 |
+
return str(self)
|
| 135 |
+
|
| 136 |
+
def __hash__(self) -> int:
|
| 137 |
+
raise NotImplementedError("sub-classes should implement an __hash__ method")
|
| 138 |
+
|
| 139 |
+
def __getstate__(self) -> dict[str_type, Any]:
|
| 140 |
+
# pickle support; we don't want to pickle the cache
|
| 141 |
+
return {k: getattr(self, k, None) for k in self._metadata}
|
| 142 |
+
|
| 143 |
+
@classmethod
|
| 144 |
+
def reset_cache(cls) -> None:
|
| 145 |
+
"""clear the cache"""
|
| 146 |
+
cls._cache_dtypes = {}
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
class CategoricalDtypeType(type):
|
| 150 |
+
"""
|
| 151 |
+
the type of CategoricalDtype, this metaclass determines subclass ability
|
| 152 |
+
"""
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
@register_extension_dtype
|
| 156 |
+
class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
|
| 157 |
+
"""
|
| 158 |
+
Type for categorical data with the categories and orderedness.
|
| 159 |
+
|
| 160 |
+
Parameters
|
| 161 |
+
----------
|
| 162 |
+
categories : sequence, optional
|
| 163 |
+
Must be unique, and must not contain any nulls.
|
| 164 |
+
The categories are stored in an Index,
|
| 165 |
+
and if an index is provided the dtype of that index will be used.
|
| 166 |
+
ordered : bool or None, default False
|
| 167 |
+
Whether or not this categorical is treated as a ordered categorical.
|
| 168 |
+
None can be used to maintain the ordered value of existing categoricals when
|
| 169 |
+
used in operations that combine categoricals, e.g. astype, and will resolve to
|
| 170 |
+
False if there is no existing ordered to maintain.
|
| 171 |
+
|
| 172 |
+
Attributes
|
| 173 |
+
----------
|
| 174 |
+
categories
|
| 175 |
+
ordered
|
| 176 |
+
|
| 177 |
+
Methods
|
| 178 |
+
-------
|
| 179 |
+
None
|
| 180 |
+
|
| 181 |
+
See Also
|
| 182 |
+
--------
|
| 183 |
+
Categorical : Represent a categorical variable in classic R / S-plus fashion.
|
| 184 |
+
|
| 185 |
+
Notes
|
| 186 |
+
-----
|
| 187 |
+
This class is useful for specifying the type of a ``Categorical``
|
| 188 |
+
independent of the values. See :ref:`categorical.categoricaldtype`
|
| 189 |
+
for more.
|
| 190 |
+
|
| 191 |
+
Examples
|
| 192 |
+
--------
|
| 193 |
+
>>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True)
|
| 194 |
+
>>> pd.Series(['a', 'b', 'a', 'c'], dtype=t)
|
| 195 |
+
0 a
|
| 196 |
+
1 b
|
| 197 |
+
2 a
|
| 198 |
+
3 NaN
|
| 199 |
+
dtype: category
|
| 200 |
+
Categories (2, object): ['b' < 'a']
|
| 201 |
+
|
| 202 |
+
An empty CategoricalDtype with a specific dtype can be created
|
| 203 |
+
by providing an empty index. As follows,
|
| 204 |
+
|
| 205 |
+
>>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype
|
| 206 |
+
dtype('<M8[ns]')
|
| 207 |
+
"""
|
| 208 |
+
|
| 209 |
+
# TODO: Document public vs. private API
|
| 210 |
+
name = "category"
|
| 211 |
+
type: type[CategoricalDtypeType] = CategoricalDtypeType
|
| 212 |
+
kind: str_type = "O"
|
| 213 |
+
str = "|O08"
|
| 214 |
+
base = np.dtype("O")
|
| 215 |
+
_metadata = ("categories", "ordered")
|
| 216 |
+
_cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
|
| 217 |
+
_supports_2d = False
|
| 218 |
+
_can_fast_transpose = False
|
| 219 |
+
|
| 220 |
+
def __init__(self, categories=None, ordered: Ordered = False) -> None:
|
| 221 |
+
self._finalize(categories, ordered, fastpath=False)
|
| 222 |
+
|
| 223 |
+
@classmethod
|
| 224 |
+
def _from_fastpath(
|
| 225 |
+
cls, categories=None, ordered: bool | None = None
|
| 226 |
+
) -> CategoricalDtype:
|
| 227 |
+
self = cls.__new__(cls)
|
| 228 |
+
self._finalize(categories, ordered, fastpath=True)
|
| 229 |
+
return self
|
| 230 |
+
|
| 231 |
+
@classmethod
|
| 232 |
+
def _from_categorical_dtype(
|
| 233 |
+
cls, dtype: CategoricalDtype, categories=None, ordered: Ordered | None = None
|
| 234 |
+
) -> CategoricalDtype:
|
| 235 |
+
if categories is ordered is None:
|
| 236 |
+
return dtype
|
| 237 |
+
if categories is None:
|
| 238 |
+
categories = dtype.categories
|
| 239 |
+
if ordered is None:
|
| 240 |
+
ordered = dtype.ordered
|
| 241 |
+
return cls(categories, ordered)
|
| 242 |
+
|
| 243 |
+
@classmethod
|
| 244 |
+
def _from_values_or_dtype(
|
| 245 |
+
cls,
|
| 246 |
+
values=None,
|
| 247 |
+
categories=None,
|
| 248 |
+
ordered: bool | None = None,
|
| 249 |
+
dtype: Dtype | None = None,
|
| 250 |
+
) -> CategoricalDtype:
|
| 251 |
+
"""
|
| 252 |
+
Construct dtype from the input parameters used in :class:`Categorical`.
|
| 253 |
+
|
| 254 |
+
This constructor method specifically does not do the factorization
|
| 255 |
+
step, if that is needed to find the categories. This constructor may
|
| 256 |
+
therefore return ``CategoricalDtype(categories=None, ordered=None)``,
|
| 257 |
+
which may not be useful. Additional steps may therefore have to be
|
| 258 |
+
taken to create the final dtype.
|
| 259 |
+
|
| 260 |
+
The return dtype is specified from the inputs in this prioritized
|
| 261 |
+
order:
|
| 262 |
+
1. if dtype is a CategoricalDtype, return dtype
|
| 263 |
+
2. if dtype is the string 'category', create a CategoricalDtype from
|
| 264 |
+
the supplied categories and ordered parameters, and return that.
|
| 265 |
+
3. if values is a categorical, use value.dtype, but override it with
|
| 266 |
+
categories and ordered if either/both of those are not None.
|
| 267 |
+
4. if dtype is None and values is not a categorical, construct the
|
| 268 |
+
dtype from categories and ordered, even if either of those is None.
|
| 269 |
+
|
| 270 |
+
Parameters
|
| 271 |
+
----------
|
| 272 |
+
values : list-like, optional
|
| 273 |
+
The list-like must be 1-dimensional.
|
| 274 |
+
categories : list-like, optional
|
| 275 |
+
Categories for the CategoricalDtype.
|
| 276 |
+
ordered : bool, optional
|
| 277 |
+
Designating if the categories are ordered.
|
| 278 |
+
dtype : CategoricalDtype or the string "category", optional
|
| 279 |
+
If ``CategoricalDtype``, cannot be used together with
|
| 280 |
+
`categories` or `ordered`.
|
| 281 |
+
|
| 282 |
+
Returns
|
| 283 |
+
-------
|
| 284 |
+
CategoricalDtype
|
| 285 |
+
|
| 286 |
+
Examples
|
| 287 |
+
--------
|
| 288 |
+
>>> pd.CategoricalDtype._from_values_or_dtype()
|
| 289 |
+
CategoricalDtype(categories=None, ordered=None, categories_dtype=None)
|
| 290 |
+
>>> pd.CategoricalDtype._from_values_or_dtype(
|
| 291 |
+
... categories=['a', 'b'], ordered=True
|
| 292 |
+
... )
|
| 293 |
+
CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object)
|
| 294 |
+
>>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True)
|
| 295 |
+
>>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False)
|
| 296 |
+
>>> c = pd.Categorical([0, 1], dtype=dtype1)
|
| 297 |
+
>>> pd.CategoricalDtype._from_values_or_dtype(
|
| 298 |
+
... c, ['x', 'y'], ordered=True, dtype=dtype2
|
| 299 |
+
... )
|
| 300 |
+
Traceback (most recent call last):
|
| 301 |
+
...
|
| 302 |
+
ValueError: Cannot specify `categories` or `ordered` together with
|
| 303 |
+
`dtype`.
|
| 304 |
+
|
| 305 |
+
The supplied dtype takes precedence over values' dtype:
|
| 306 |
+
|
| 307 |
+
>>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2)
|
| 308 |
+
CategoricalDtype(categories=['x', 'y'], ordered=False, categories_dtype=object)
|
| 309 |
+
"""
|
| 310 |
+
|
| 311 |
+
if dtype is not None:
|
| 312 |
+
# The dtype argument takes precedence over values.dtype (if any)
|
| 313 |
+
if isinstance(dtype, str):
|
| 314 |
+
if dtype == "category":
|
| 315 |
+
if ordered is None and cls.is_dtype(values):
|
| 316 |
+
# GH#49309 preserve orderedness
|
| 317 |
+
ordered = values.dtype.ordered
|
| 318 |
+
|
| 319 |
+
dtype = CategoricalDtype(categories, ordered)
|
| 320 |
+
else:
|
| 321 |
+
raise ValueError(f"Unknown dtype {repr(dtype)}")
|
| 322 |
+
elif categories is not None or ordered is not None:
|
| 323 |
+
raise ValueError(
|
| 324 |
+
"Cannot specify `categories` or `ordered` together with `dtype`."
|
| 325 |
+
)
|
| 326 |
+
elif not isinstance(dtype, CategoricalDtype):
|
| 327 |
+
raise ValueError(f"Cannot not construct CategoricalDtype from {dtype}")
|
| 328 |
+
elif cls.is_dtype(values):
|
| 329 |
+
# If no "dtype" was passed, use the one from "values", but honor
|
| 330 |
+
# the "ordered" and "categories" arguments
|
| 331 |
+
dtype = values.dtype._from_categorical_dtype(
|
| 332 |
+
values.dtype, categories, ordered
|
| 333 |
+
)
|
| 334 |
+
else:
|
| 335 |
+
# If dtype=None and values is not categorical, create a new dtype.
|
| 336 |
+
# Note: This could potentially have categories=None and
|
| 337 |
+
# ordered=None.
|
| 338 |
+
dtype = CategoricalDtype(categories, ordered)
|
| 339 |
+
|
| 340 |
+
return cast(CategoricalDtype, dtype)
|
| 341 |
+
|
| 342 |
+
@classmethod
|
| 343 |
+
def construct_from_string(cls, string: str_type) -> CategoricalDtype:
|
| 344 |
+
"""
|
| 345 |
+
Construct a CategoricalDtype from a string.
|
| 346 |
+
|
| 347 |
+
Parameters
|
| 348 |
+
----------
|
| 349 |
+
string : str
|
| 350 |
+
Must be the string "category" in order to be successfully constructed.
|
| 351 |
+
|
| 352 |
+
Returns
|
| 353 |
+
-------
|
| 354 |
+
CategoricalDtype
|
| 355 |
+
Instance of the dtype.
|
| 356 |
+
|
| 357 |
+
Raises
|
| 358 |
+
------
|
| 359 |
+
TypeError
|
| 360 |
+
If a CategoricalDtype cannot be constructed from the input.
|
| 361 |
+
"""
|
| 362 |
+
if not isinstance(string, str):
|
| 363 |
+
raise TypeError(
|
| 364 |
+
f"'construct_from_string' expects a string, got {type(string)}"
|
| 365 |
+
)
|
| 366 |
+
if string != cls.name:
|
| 367 |
+
raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'")
|
| 368 |
+
|
| 369 |
+
# need ordered=None to ensure that operations specifying dtype="category" don't
|
| 370 |
+
# override the ordered value for existing categoricals
|
| 371 |
+
return cls(ordered=None)
|
| 372 |
+
|
| 373 |
+
def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None:
|
| 374 |
+
if ordered is not None:
|
| 375 |
+
self.validate_ordered(ordered)
|
| 376 |
+
|
| 377 |
+
if categories is not None:
|
| 378 |
+
categories = self.validate_categories(categories, fastpath=fastpath)
|
| 379 |
+
|
| 380 |
+
self._categories = categories
|
| 381 |
+
self._ordered = ordered
|
| 382 |
+
|
| 383 |
+
def __setstate__(self, state: MutableMapping[str_type, Any]) -> None:
|
| 384 |
+
# for pickle compat. __get_state__ is defined in the
|
| 385 |
+
# PandasExtensionDtype superclass and uses the public properties to
|
| 386 |
+
# pickle -> need to set the settable private ones here (see GH26067)
|
| 387 |
+
self._categories = state.pop("categories", None)
|
| 388 |
+
self._ordered = state.pop("ordered", False)
|
| 389 |
+
|
| 390 |
+
def __hash__(self) -> int:
|
| 391 |
+
# _hash_categories returns a uint64, so use the negative
|
| 392 |
+
# space for when we have unknown categories to avoid a conflict
|
| 393 |
+
if self.categories is None:
|
| 394 |
+
if self.ordered:
|
| 395 |
+
return -1
|
| 396 |
+
else:
|
| 397 |
+
return -2
|
| 398 |
+
# We *do* want to include the real self.ordered here
|
| 399 |
+
return int(self._hash_categories)
|
| 400 |
+
|
| 401 |
+
def __eq__(self, other: object) -> bool:
|
| 402 |
+
"""
|
| 403 |
+
Rules for CDT equality:
|
| 404 |
+
1) Any CDT is equal to the string 'category'
|
| 405 |
+
2) Any CDT is equal to itself
|
| 406 |
+
3) Any CDT is equal to a CDT with categories=None regardless of ordered
|
| 407 |
+
4) A CDT with ordered=True is only equal to another CDT with
|
| 408 |
+
ordered=True and identical categories in the same order
|
| 409 |
+
5) A CDT with ordered={False, None} is only equal to another CDT with
|
| 410 |
+
ordered={False, None} and identical categories, but same order is
|
| 411 |
+
not required. There is no distinction between False/None.
|
| 412 |
+
6) Any other comparison returns False
|
| 413 |
+
"""
|
| 414 |
+
if isinstance(other, str):
|
| 415 |
+
return other == self.name
|
| 416 |
+
elif other is self:
|
| 417 |
+
return True
|
| 418 |
+
elif not (hasattr(other, "ordered") and hasattr(other, "categories")):
|
| 419 |
+
return False
|
| 420 |
+
elif self.categories is None or other.categories is None:
|
| 421 |
+
# For non-fully-initialized dtypes, these are only equal to
|
| 422 |
+
# - the string "category" (handled above)
|
| 423 |
+
# - other CategoricalDtype with categories=None
|
| 424 |
+
return self.categories is other.categories
|
| 425 |
+
elif self.ordered or other.ordered:
|
| 426 |
+
# At least one has ordered=True; equal if both have ordered=True
|
| 427 |
+
# and the same values for categories in the same order.
|
| 428 |
+
return (self.ordered == other.ordered) and self.categories.equals(
|
| 429 |
+
other.categories
|
| 430 |
+
)
|
| 431 |
+
else:
|
| 432 |
+
# Neither has ordered=True; equal if both have the same categories,
|
| 433 |
+
# but same order is not necessary. There is no distinction between
|
| 434 |
+
# ordered=False and ordered=None: CDT(., False) and CDT(., None)
|
| 435 |
+
# will be equal if they have the same categories.
|
| 436 |
+
left = self.categories
|
| 437 |
+
right = other.categories
|
| 438 |
+
|
| 439 |
+
# GH#36280 the ordering of checks here is for performance
|
| 440 |
+
if not left.dtype == right.dtype:
|
| 441 |
+
return False
|
| 442 |
+
|
| 443 |
+
if len(left) != len(right):
|
| 444 |
+
return False
|
| 445 |
+
|
| 446 |
+
if self.categories.equals(other.categories):
|
| 447 |
+
# Check and see if they happen to be identical categories
|
| 448 |
+
return True
|
| 449 |
+
|
| 450 |
+
if left.dtype != object:
|
| 451 |
+
# Faster than calculating hash
|
| 452 |
+
indexer = left.get_indexer(right)
|
| 453 |
+
# Because left and right have the same length and are unique,
|
| 454 |
+
# `indexer` not having any -1s implies that there is a
|
| 455 |
+
# bijection between `left` and `right`.
|
| 456 |
+
return (indexer != -1).all()
|
| 457 |
+
|
| 458 |
+
# With object-dtype we need a comparison that identifies
|
| 459 |
+
# e.g. int(2) as distinct from float(2)
|
| 460 |
+
return set(left) == set(right)
|
| 461 |
+
|
| 462 |
+
def __repr__(self) -> str_type:
|
| 463 |
+
if self.categories is None:
|
| 464 |
+
data = "None"
|
| 465 |
+
dtype = "None"
|
| 466 |
+
else:
|
| 467 |
+
data = self.categories._format_data(name=type(self).__name__)
|
| 468 |
+
if isinstance(self.categories, ABCRangeIndex):
|
| 469 |
+
data = str(self.categories._range)
|
| 470 |
+
data = data.rstrip(", ")
|
| 471 |
+
dtype = self.categories.dtype
|
| 472 |
+
|
| 473 |
+
return (
|
| 474 |
+
f"CategoricalDtype(categories={data}, ordered={self.ordered}, "
|
| 475 |
+
f"categories_dtype={dtype})"
|
| 476 |
+
)
|
| 477 |
+
|
| 478 |
+
@cache_readonly
|
| 479 |
+
def _hash_categories(self) -> int:
|
| 480 |
+
from pandas.core.util.hashing import (
|
| 481 |
+
combine_hash_arrays,
|
| 482 |
+
hash_array,
|
| 483 |
+
hash_tuples,
|
| 484 |
+
)
|
| 485 |
+
|
| 486 |
+
categories = self.categories
|
| 487 |
+
ordered = self.ordered
|
| 488 |
+
|
| 489 |
+
if len(categories) and isinstance(categories[0], tuple):
|
| 490 |
+
# assumes if any individual category is a tuple, then all our. ATM
|
| 491 |
+
# I don't really want to support just some of the categories being
|
| 492 |
+
# tuples.
|
| 493 |
+
cat_list = list(categories) # breaks if a np.array of categories
|
| 494 |
+
cat_array = hash_tuples(cat_list)
|
| 495 |
+
else:
|
| 496 |
+
if categories.dtype == "O" and len({type(x) for x in categories}) != 1:
|
| 497 |
+
# TODO: hash_array doesn't handle mixed types. It casts
|
| 498 |
+
# everything to a str first, which means we treat
|
| 499 |
+
# {'1', '2'} the same as {'1', 2}
|
| 500 |
+
# find a better solution
|
| 501 |
+
hashed = hash((tuple(categories), ordered))
|
| 502 |
+
return hashed
|
| 503 |
+
|
| 504 |
+
if DatetimeTZDtype.is_dtype(categories.dtype):
|
| 505 |
+
# Avoid future warning.
|
| 506 |
+
categories = categories.view("datetime64[ns]")
|
| 507 |
+
|
| 508 |
+
cat_array = hash_array(np.asarray(categories), categorize=False)
|
| 509 |
+
if ordered:
|
| 510 |
+
cat_array = np.vstack(
|
| 511 |
+
[cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)]
|
| 512 |
+
)
|
| 513 |
+
else:
|
| 514 |
+
cat_array = np.array([cat_array])
|
| 515 |
+
combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array))
|
| 516 |
+
return np.bitwise_xor.reduce(combined_hashed)
|
| 517 |
+
|
| 518 |
+
@classmethod
|
| 519 |
+
def construct_array_type(cls) -> type_t[Categorical]:
|
| 520 |
+
"""
|
| 521 |
+
Return the array type associated with this dtype.
|
| 522 |
+
|
| 523 |
+
Returns
|
| 524 |
+
-------
|
| 525 |
+
type
|
| 526 |
+
"""
|
| 527 |
+
from pandas import Categorical
|
| 528 |
+
|
| 529 |
+
return Categorical
|
| 530 |
+
|
| 531 |
+
@staticmethod
|
| 532 |
+
def validate_ordered(ordered: Ordered) -> None:
|
| 533 |
+
"""
|
| 534 |
+
Validates that we have a valid ordered parameter. If
|
| 535 |
+
it is not a boolean, a TypeError will be raised.
|
| 536 |
+
|
| 537 |
+
Parameters
|
| 538 |
+
----------
|
| 539 |
+
ordered : object
|
| 540 |
+
The parameter to be verified.
|
| 541 |
+
|
| 542 |
+
Raises
|
| 543 |
+
------
|
| 544 |
+
TypeError
|
| 545 |
+
If 'ordered' is not a boolean.
|
| 546 |
+
"""
|
| 547 |
+
if not is_bool(ordered):
|
| 548 |
+
raise TypeError("'ordered' must either be 'True' or 'False'")
|
| 549 |
+
|
| 550 |
+
@staticmethod
|
| 551 |
+
def validate_categories(categories, fastpath: bool = False) -> Index:
|
| 552 |
+
"""
|
| 553 |
+
Validates that we have good categories
|
| 554 |
+
|
| 555 |
+
Parameters
|
| 556 |
+
----------
|
| 557 |
+
categories : array-like
|
| 558 |
+
fastpath : bool
|
| 559 |
+
Whether to skip nan and uniqueness checks
|
| 560 |
+
|
| 561 |
+
Returns
|
| 562 |
+
-------
|
| 563 |
+
categories : Index
|
| 564 |
+
"""
|
| 565 |
+
from pandas.core.indexes.base import Index
|
| 566 |
+
|
| 567 |
+
if not fastpath and not is_list_like(categories):
|
| 568 |
+
raise TypeError(
|
| 569 |
+
f"Parameter 'categories' must be list-like, was {repr(categories)}"
|
| 570 |
+
)
|
| 571 |
+
if not isinstance(categories, ABCIndex):
|
| 572 |
+
categories = Index._with_infer(categories, tupleize_cols=False)
|
| 573 |
+
|
| 574 |
+
if not fastpath:
|
| 575 |
+
if categories.hasnans:
|
| 576 |
+
raise ValueError("Categorical categories cannot be null")
|
| 577 |
+
|
| 578 |
+
if not categories.is_unique:
|
| 579 |
+
raise ValueError("Categorical categories must be unique")
|
| 580 |
+
|
| 581 |
+
if isinstance(categories, ABCCategoricalIndex):
|
| 582 |
+
categories = categories.categories
|
| 583 |
+
|
| 584 |
+
return categories
|
| 585 |
+
|
| 586 |
+
def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype:
|
| 587 |
+
"""
|
| 588 |
+
Returns a CategoricalDtype with categories and ordered taken from dtype
|
| 589 |
+
if specified, otherwise falling back to self if unspecified
|
| 590 |
+
|
| 591 |
+
Parameters
|
| 592 |
+
----------
|
| 593 |
+
dtype : CategoricalDtype
|
| 594 |
+
|
| 595 |
+
Returns
|
| 596 |
+
-------
|
| 597 |
+
new_dtype : CategoricalDtype
|
| 598 |
+
"""
|
| 599 |
+
if isinstance(dtype, str) and dtype == "category":
|
| 600 |
+
# dtype='category' should not change anything
|
| 601 |
+
return self
|
| 602 |
+
elif not self.is_dtype(dtype):
|
| 603 |
+
raise ValueError(
|
| 604 |
+
f"a CategoricalDtype must be passed to perform an update, "
|
| 605 |
+
f"got {repr(dtype)}"
|
| 606 |
+
)
|
| 607 |
+
else:
|
| 608 |
+
# from here on, dtype is a CategoricalDtype
|
| 609 |
+
dtype = cast(CategoricalDtype, dtype)
|
| 610 |
+
|
| 611 |
+
# update categories/ordered unless they've been explicitly passed as None
|
| 612 |
+
new_categories = (
|
| 613 |
+
dtype.categories if dtype.categories is not None else self.categories
|
| 614 |
+
)
|
| 615 |
+
new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered
|
| 616 |
+
|
| 617 |
+
return CategoricalDtype(new_categories, new_ordered)
|
| 618 |
+
|
| 619 |
+
@property
|
| 620 |
+
def categories(self) -> Index:
|
| 621 |
+
"""
|
| 622 |
+
An ``Index`` containing the unique categories allowed.
|
| 623 |
+
|
| 624 |
+
Examples
|
| 625 |
+
--------
|
| 626 |
+
>>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
|
| 627 |
+
>>> cat_type.categories
|
| 628 |
+
Index(['a', 'b'], dtype='object')
|
| 629 |
+
"""
|
| 630 |
+
return self._categories
|
| 631 |
+
|
| 632 |
+
@property
|
| 633 |
+
def ordered(self) -> Ordered:
|
| 634 |
+
"""
|
| 635 |
+
Whether the categories have an ordered relationship.
|
| 636 |
+
|
| 637 |
+
Examples
|
| 638 |
+
--------
|
| 639 |
+
>>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
|
| 640 |
+
>>> cat_type.ordered
|
| 641 |
+
True
|
| 642 |
+
|
| 643 |
+
>>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=False)
|
| 644 |
+
>>> cat_type.ordered
|
| 645 |
+
False
|
| 646 |
+
"""
|
| 647 |
+
return self._ordered
|
| 648 |
+
|
| 649 |
+
@property
|
| 650 |
+
def _is_boolean(self) -> bool:
|
| 651 |
+
from pandas.core.dtypes.common import is_bool_dtype
|
| 652 |
+
|
| 653 |
+
return is_bool_dtype(self.categories)
|
| 654 |
+
|
| 655 |
+
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
|
| 656 |
+
# check if we have all categorical dtype with identical categories
|
| 657 |
+
if all(isinstance(x, CategoricalDtype) for x in dtypes):
|
| 658 |
+
first = dtypes[0]
|
| 659 |
+
if all(first == other for other in dtypes[1:]):
|
| 660 |
+
return first
|
| 661 |
+
|
| 662 |
+
# special case non-initialized categorical
|
| 663 |
+
# TODO we should figure out the expected return value in general
|
| 664 |
+
non_init_cats = [
|
| 665 |
+
isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes
|
| 666 |
+
]
|
| 667 |
+
if all(non_init_cats):
|
| 668 |
+
return self
|
| 669 |
+
elif any(non_init_cats):
|
| 670 |
+
return None
|
| 671 |
+
|
| 672 |
+
# categorical is aware of Sparse -> extract sparse subdtypes
|
| 673 |
+
dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
|
| 674 |
+
# extract the categories' dtype
|
| 675 |
+
non_cat_dtypes = [
|
| 676 |
+
x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes
|
| 677 |
+
]
|
| 678 |
+
# TODO should categorical always give an answer?
|
| 679 |
+
from pandas.core.dtypes.cast import find_common_type
|
| 680 |
+
|
| 681 |
+
return find_common_type(non_cat_dtypes)
|
| 682 |
+
|
| 683 |
+
@cache_readonly
|
| 684 |
+
def index_class(self) -> type_t[CategoricalIndex]:
|
| 685 |
+
from pandas import CategoricalIndex
|
| 686 |
+
|
| 687 |
+
return CategoricalIndex
|
| 688 |
+
|
| 689 |
+
|
| 690 |
+
@register_extension_dtype
|
| 691 |
+
class DatetimeTZDtype(PandasExtensionDtype):
|
| 692 |
+
"""
|
| 693 |
+
An ExtensionDtype for timezone-aware datetime data.
|
| 694 |
+
|
| 695 |
+
**This is not an actual numpy dtype**, but a duck type.
|
| 696 |
+
|
| 697 |
+
Parameters
|
| 698 |
+
----------
|
| 699 |
+
unit : str, default "ns"
|
| 700 |
+
The precision of the datetime data. Currently limited
|
| 701 |
+
to ``"ns"``.
|
| 702 |
+
tz : str, int, or datetime.tzinfo
|
| 703 |
+
The timezone.
|
| 704 |
+
|
| 705 |
+
Attributes
|
| 706 |
+
----------
|
| 707 |
+
unit
|
| 708 |
+
tz
|
| 709 |
+
|
| 710 |
+
Methods
|
| 711 |
+
-------
|
| 712 |
+
None
|
| 713 |
+
|
| 714 |
+
Raises
|
| 715 |
+
------
|
| 716 |
+
ZoneInfoNotFoundError
|
| 717 |
+
When the requested timezone cannot be found.
|
| 718 |
+
|
| 719 |
+
Examples
|
| 720 |
+
--------
|
| 721 |
+
>>> from zoneinfo import ZoneInfo
|
| 722 |
+
>>> pd.DatetimeTZDtype(tz=ZoneInfo('UTC'))
|
| 723 |
+
datetime64[ns, UTC]
|
| 724 |
+
|
| 725 |
+
>>> pd.DatetimeTZDtype(tz=ZoneInfo('Europe/Paris'))
|
| 726 |
+
datetime64[ns, Europe/Paris]
|
| 727 |
+
"""
|
| 728 |
+
|
| 729 |
+
type: type[Timestamp] = Timestamp
|
| 730 |
+
kind: str_type = "M"
|
| 731 |
+
num = 101
|
| 732 |
+
_metadata = ("unit", "tz")
|
| 733 |
+
_match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
|
| 734 |
+
_cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
|
| 735 |
+
_supports_2d = True
|
| 736 |
+
_can_fast_transpose = True
|
| 737 |
+
|
| 738 |
+
@property
|
| 739 |
+
def na_value(self) -> NaTType:
|
| 740 |
+
return NaT
|
| 741 |
+
|
| 742 |
+
@cache_readonly
|
| 743 |
+
def base(self) -> DtypeObj: # type: ignore[override]
|
| 744 |
+
return np.dtype(f"M8[{self.unit}]")
|
| 745 |
+
|
| 746 |
+
# error: Signature of "str" incompatible with supertype "PandasExtensionDtype"
|
| 747 |
+
@cache_readonly
|
| 748 |
+
def str(self) -> str: # type: ignore[override]
|
| 749 |
+
return f"|M8[{self.unit}]"
|
| 750 |
+
|
| 751 |
+
def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None:
|
| 752 |
+
if isinstance(unit, DatetimeTZDtype):
|
| 753 |
+
# error: "str" has no attribute "tz"
|
| 754 |
+
unit, tz = unit.unit, unit.tz # type: ignore[attr-defined]
|
| 755 |
+
|
| 756 |
+
if unit != "ns":
|
| 757 |
+
if isinstance(unit, str) and tz is None:
|
| 758 |
+
# maybe a string like datetime64[ns, tz], which we support for
|
| 759 |
+
# now.
|
| 760 |
+
result = type(self).construct_from_string(unit)
|
| 761 |
+
unit = result.unit
|
| 762 |
+
tz = result.tz
|
| 763 |
+
msg = (
|
| 764 |
+
f"Passing a dtype alias like 'datetime64[ns, {tz}]' "
|
| 765 |
+
"to DatetimeTZDtype is no longer supported. Use "
|
| 766 |
+
"'DatetimeTZDtype.construct_from_string()' instead."
|
| 767 |
+
)
|
| 768 |
+
raise ValueError(msg)
|
| 769 |
+
if unit not in ["s", "ms", "us", "ns"]:
|
| 770 |
+
raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units")
|
| 771 |
+
|
| 772 |
+
if tz:
|
| 773 |
+
tz = timezones.maybe_get_tz(tz)
|
| 774 |
+
tz = timezones.tz_standardize(tz)
|
| 775 |
+
elif tz is not None:
|
| 776 |
+
raise pytz.UnknownTimeZoneError(tz)
|
| 777 |
+
if tz is None:
|
| 778 |
+
raise TypeError("A 'tz' is required.")
|
| 779 |
+
|
| 780 |
+
self._unit = unit
|
| 781 |
+
self._tz = tz
|
| 782 |
+
|
| 783 |
+
@cache_readonly
|
| 784 |
+
def _creso(self) -> int:
|
| 785 |
+
"""
|
| 786 |
+
The NPY_DATETIMEUNIT corresponding to this dtype's resolution.
|
| 787 |
+
"""
|
| 788 |
+
return abbrev_to_npy_unit(self.unit)
|
| 789 |
+
|
| 790 |
+
@property
|
| 791 |
+
def unit(self) -> str_type:
|
| 792 |
+
"""
|
| 793 |
+
The precision of the datetime data.
|
| 794 |
+
|
| 795 |
+
Examples
|
| 796 |
+
--------
|
| 797 |
+
>>> from zoneinfo import ZoneInfo
|
| 798 |
+
>>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
|
| 799 |
+
>>> dtype.unit
|
| 800 |
+
'ns'
|
| 801 |
+
"""
|
| 802 |
+
return self._unit
|
| 803 |
+
|
| 804 |
+
@property
|
| 805 |
+
def tz(self) -> tzinfo:
|
| 806 |
+
"""
|
| 807 |
+
The timezone.
|
| 808 |
+
|
| 809 |
+
Examples
|
| 810 |
+
--------
|
| 811 |
+
>>> from zoneinfo import ZoneInfo
|
| 812 |
+
>>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
|
| 813 |
+
>>> dtype.tz
|
| 814 |
+
zoneinfo.ZoneInfo(key='America/Los_Angeles')
|
| 815 |
+
"""
|
| 816 |
+
return self._tz
|
| 817 |
+
|
| 818 |
+
@classmethod
|
| 819 |
+
def construct_array_type(cls) -> type_t[DatetimeArray]:
|
| 820 |
+
"""
|
| 821 |
+
Return the array type associated with this dtype.
|
| 822 |
+
|
| 823 |
+
Returns
|
| 824 |
+
-------
|
| 825 |
+
type
|
| 826 |
+
"""
|
| 827 |
+
from pandas.core.arrays import DatetimeArray
|
| 828 |
+
|
| 829 |
+
return DatetimeArray
|
| 830 |
+
|
| 831 |
+
@classmethod
|
| 832 |
+
def construct_from_string(cls, string: str_type) -> DatetimeTZDtype:
|
| 833 |
+
"""
|
| 834 |
+
Construct a DatetimeTZDtype from a string.
|
| 835 |
+
|
| 836 |
+
Parameters
|
| 837 |
+
----------
|
| 838 |
+
string : str
|
| 839 |
+
The string alias for this DatetimeTZDtype.
|
| 840 |
+
Should be formatted like ``datetime64[ns, <tz>]``,
|
| 841 |
+
where ``<tz>`` is the timezone name.
|
| 842 |
+
|
| 843 |
+
Examples
|
| 844 |
+
--------
|
| 845 |
+
>>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]')
|
| 846 |
+
datetime64[ns, UTC]
|
| 847 |
+
"""
|
| 848 |
+
if not isinstance(string, str):
|
| 849 |
+
raise TypeError(
|
| 850 |
+
f"'construct_from_string' expects a string, got {type(string)}"
|
| 851 |
+
)
|
| 852 |
+
|
| 853 |
+
msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
|
| 854 |
+
match = cls._match.match(string)
|
| 855 |
+
if match:
|
| 856 |
+
d = match.groupdict()
|
| 857 |
+
try:
|
| 858 |
+
return cls(unit=d["unit"], tz=d["tz"])
|
| 859 |
+
except (KeyError, TypeError, ValueError) as err:
|
| 860 |
+
# KeyError if maybe_get_tz tries and fails to get a
|
| 861 |
+
# pytz timezone (actually pytz.UnknownTimeZoneError).
|
| 862 |
+
# TypeError if we pass a nonsense tz;
|
| 863 |
+
# ValueError if we pass a unit other than "ns"
|
| 864 |
+
raise TypeError(msg) from err
|
| 865 |
+
raise TypeError(msg)
|
| 866 |
+
|
| 867 |
+
def __str__(self) -> str_type:
|
| 868 |
+
return f"datetime64[{self.unit}, {self.tz}]"
|
| 869 |
+
|
| 870 |
+
@property
|
| 871 |
+
def name(self) -> str_type:
|
| 872 |
+
"""A string representation of the dtype."""
|
| 873 |
+
return str(self)
|
| 874 |
+
|
| 875 |
+
def __hash__(self) -> int:
|
| 876 |
+
# make myself hashable
|
| 877 |
+
# TODO: update this.
|
| 878 |
+
return hash(str(self))
|
| 879 |
+
|
| 880 |
+
def __eq__(self, other: object) -> bool:
|
| 881 |
+
if isinstance(other, str):
|
| 882 |
+
if other.startswith("M8["):
|
| 883 |
+
other = f"datetime64[{other[3:]}"
|
| 884 |
+
return other == self.name
|
| 885 |
+
|
| 886 |
+
return (
|
| 887 |
+
isinstance(other, DatetimeTZDtype)
|
| 888 |
+
and self.unit == other.unit
|
| 889 |
+
and tz_compare(self.tz, other.tz)
|
| 890 |
+
)
|
| 891 |
+
|
| 892 |
+
def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> DatetimeArray:
|
| 893 |
+
"""
|
| 894 |
+
Construct DatetimeArray from pyarrow Array/ChunkedArray.
|
| 895 |
+
|
| 896 |
+
Note: If the units in the pyarrow Array are the same as this
|
| 897 |
+
DatetimeDtype, then values corresponding to the integer representation
|
| 898 |
+
of ``NaT`` (e.g. one nanosecond before :attr:`pandas.Timestamp.min`)
|
| 899 |
+
are converted to ``NaT``, regardless of the null indicator in the
|
| 900 |
+
pyarrow array.
|
| 901 |
+
|
| 902 |
+
Parameters
|
| 903 |
+
----------
|
| 904 |
+
array : pyarrow.Array or pyarrow.ChunkedArray
|
| 905 |
+
The Arrow array to convert to DatetimeArray.
|
| 906 |
+
|
| 907 |
+
Returns
|
| 908 |
+
-------
|
| 909 |
+
extension array : DatetimeArray
|
| 910 |
+
"""
|
| 911 |
+
import pyarrow
|
| 912 |
+
|
| 913 |
+
from pandas.core.arrays import DatetimeArray
|
| 914 |
+
|
| 915 |
+
array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True)
|
| 916 |
+
|
| 917 |
+
if isinstance(array, pyarrow.Array):
|
| 918 |
+
np_arr = array.to_numpy(zero_copy_only=False)
|
| 919 |
+
else:
|
| 920 |
+
np_arr = array.to_numpy()
|
| 921 |
+
|
| 922 |
+
return DatetimeArray._simple_new(np_arr, dtype=self)
|
| 923 |
+
|
| 924 |
+
def __setstate__(self, state) -> None:
|
| 925 |
+
# for pickle compat. __get_state__ is defined in the
|
| 926 |
+
# PandasExtensionDtype superclass and uses the public properties to
|
| 927 |
+
# pickle -> need to set the settable private ones here (see GH26067)
|
| 928 |
+
self._tz = state["tz"]
|
| 929 |
+
self._unit = state["unit"]
|
| 930 |
+
|
| 931 |
+
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
|
| 932 |
+
if all(isinstance(t, DatetimeTZDtype) and t.tz == self.tz for t in dtypes):
|
| 933 |
+
np_dtype = np.max([cast(DatetimeTZDtype, t).base for t in [self, *dtypes]])
|
| 934 |
+
unit = np.datetime_data(np_dtype)[0]
|
| 935 |
+
return type(self)(unit=unit, tz=self.tz)
|
| 936 |
+
return super()._get_common_dtype(dtypes)
|
| 937 |
+
|
| 938 |
+
@cache_readonly
|
| 939 |
+
def index_class(self) -> type_t[DatetimeIndex]:
|
| 940 |
+
from pandas import DatetimeIndex
|
| 941 |
+
|
| 942 |
+
return DatetimeIndex
|
| 943 |
+
|
| 944 |
+
|
| 945 |
+
@register_extension_dtype
|
| 946 |
+
class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype):
|
| 947 |
+
"""
|
| 948 |
+
An ExtensionDtype for Period data.
|
| 949 |
+
|
| 950 |
+
**This is not an actual numpy dtype**, but a duck type.
|
| 951 |
+
|
| 952 |
+
Parameters
|
| 953 |
+
----------
|
| 954 |
+
freq : str or DateOffset
|
| 955 |
+
The frequency of this PeriodDtype.
|
| 956 |
+
|
| 957 |
+
Attributes
|
| 958 |
+
----------
|
| 959 |
+
freq
|
| 960 |
+
|
| 961 |
+
Methods
|
| 962 |
+
-------
|
| 963 |
+
None
|
| 964 |
+
|
| 965 |
+
Examples
|
| 966 |
+
--------
|
| 967 |
+
>>> pd.PeriodDtype(freq='D')
|
| 968 |
+
period[D]
|
| 969 |
+
|
| 970 |
+
>>> pd.PeriodDtype(freq=pd.offsets.MonthEnd())
|
| 971 |
+
period[M]
|
| 972 |
+
"""
|
| 973 |
+
|
| 974 |
+
type: type[Period] = Period
|
| 975 |
+
kind: str_type = "O"
|
| 976 |
+
str = "|O08"
|
| 977 |
+
base = np.dtype("O")
|
| 978 |
+
num = 102
|
| 979 |
+
_metadata = ("freq",)
|
| 980 |
+
_match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]")
|
| 981 |
+
# error: Incompatible types in assignment (expression has type
|
| 982 |
+
# "Dict[int, PandasExtensionDtype]", base class "PandasExtensionDtype"
|
| 983 |
+
# defined the type as "Dict[str, PandasExtensionDtype]") [assignment]
|
| 984 |
+
_cache_dtypes: dict[BaseOffset, int] = {} # type: ignore[assignment]
|
| 985 |
+
__hash__ = PeriodDtypeBase.__hash__
|
| 986 |
+
_freq: BaseOffset
|
| 987 |
+
_supports_2d = True
|
| 988 |
+
_can_fast_transpose = True
|
| 989 |
+
|
| 990 |
+
def __new__(cls, freq) -> PeriodDtype: # noqa: PYI034
|
| 991 |
+
"""
|
| 992 |
+
Parameters
|
| 993 |
+
----------
|
| 994 |
+
freq : PeriodDtype, BaseOffset, or string
|
| 995 |
+
"""
|
| 996 |
+
if isinstance(freq, PeriodDtype):
|
| 997 |
+
return freq
|
| 998 |
+
|
| 999 |
+
if not isinstance(freq, BaseOffset):
|
| 1000 |
+
freq = cls._parse_dtype_strict(freq)
|
| 1001 |
+
|
| 1002 |
+
if isinstance(freq, BDay):
|
| 1003 |
+
# GH#53446
|
| 1004 |
+
# TODO(3.0): enforcing this will close GH#10575
|
| 1005 |
+
warnings.warn(
|
| 1006 |
+
"PeriodDtype[B] is deprecated and will be removed in a future "
|
| 1007 |
+
"version. Use a DatetimeIndex with freq='B' instead",
|
| 1008 |
+
FutureWarning,
|
| 1009 |
+
stacklevel=find_stack_level(),
|
| 1010 |
+
)
|
| 1011 |
+
|
| 1012 |
+
try:
|
| 1013 |
+
dtype_code = cls._cache_dtypes[freq]
|
| 1014 |
+
except KeyError:
|
| 1015 |
+
dtype_code = freq._period_dtype_code
|
| 1016 |
+
cls._cache_dtypes[freq] = dtype_code
|
| 1017 |
+
u = PeriodDtypeBase.__new__(cls, dtype_code, freq.n)
|
| 1018 |
+
u._freq = freq
|
| 1019 |
+
return u
|
| 1020 |
+
|
| 1021 |
+
def __reduce__(self) -> tuple[type_t[Self], tuple[str_type]]:
|
| 1022 |
+
return type(self), (self.name,)
|
| 1023 |
+
|
| 1024 |
+
@property
|
| 1025 |
+
def freq(self) -> BaseOffset:
|
| 1026 |
+
"""
|
| 1027 |
+
The frequency object of this PeriodDtype.
|
| 1028 |
+
|
| 1029 |
+
Examples
|
| 1030 |
+
--------
|
| 1031 |
+
>>> dtype = pd.PeriodDtype(freq='D')
|
| 1032 |
+
>>> dtype.freq
|
| 1033 |
+
<Day>
|
| 1034 |
+
"""
|
| 1035 |
+
return self._freq
|
| 1036 |
+
|
| 1037 |
+
@classmethod
|
| 1038 |
+
def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset:
|
| 1039 |
+
if isinstance(freq, str): # note: freq is already of type str!
|
| 1040 |
+
if freq.startswith(("Period[", "period[")):
|
| 1041 |
+
m = cls._match.search(freq)
|
| 1042 |
+
if m is not None:
|
| 1043 |
+
freq = m.group("freq")
|
| 1044 |
+
|
| 1045 |
+
freq_offset = to_offset(freq, is_period=True)
|
| 1046 |
+
if freq_offset is not None:
|
| 1047 |
+
return freq_offset
|
| 1048 |
+
|
| 1049 |
+
raise TypeError(
|
| 1050 |
+
"PeriodDtype argument should be string or BaseOffset, "
|
| 1051 |
+
f"got {type(freq).__name__}"
|
| 1052 |
+
)
|
| 1053 |
+
|
| 1054 |
+
@classmethod
|
| 1055 |
+
def construct_from_string(cls, string: str_type) -> PeriodDtype:
|
| 1056 |
+
"""
|
| 1057 |
+
Strict construction from a string, raise a TypeError if not
|
| 1058 |
+
possible
|
| 1059 |
+
"""
|
| 1060 |
+
if (
|
| 1061 |
+
isinstance(string, str)
|
| 1062 |
+
and (string.startswith(("period[", "Period[")))
|
| 1063 |
+
or isinstance(string, BaseOffset)
|
| 1064 |
+
):
|
| 1065 |
+
# do not parse string like U as period[U]
|
| 1066 |
+
# avoid tuple to be regarded as freq
|
| 1067 |
+
try:
|
| 1068 |
+
return cls(freq=string)
|
| 1069 |
+
except ValueError:
|
| 1070 |
+
pass
|
| 1071 |
+
if isinstance(string, str):
|
| 1072 |
+
msg = f"Cannot construct a 'PeriodDtype' from '{string}'"
|
| 1073 |
+
else:
|
| 1074 |
+
msg = f"'construct_from_string' expects a string, got {type(string)}"
|
| 1075 |
+
raise TypeError(msg)
|
| 1076 |
+
|
| 1077 |
+
def __str__(self) -> str_type:
    # The string form is exactly the dtype's name, e.g. "period[D]".
    return self.name
@property
def name(self) -> str_type:
    """The canonical string name of this dtype, e.g. ``"period[D]"``."""
    return f"period[{self._freqstr}]"
@property
def na_value(self) -> NaTType:
    """``NaT`` is the missing-value marker for period data."""
    return NaT
def __eq__(self, other: object) -> bool:
    if isinstance(other, str):
        # Accept both the lowercase and capitalized spellings,
        # e.g. "period[D]" and "Period[D]".
        return other == self.name or other == capitalize_first_letter(self.name)
    return super().__eq__(other)
def __ne__(self, other: object) -> bool:
    # Spelled out explicitly because __eq__ is overridden above.
    return not self.__eq__(other)
@classmethod
def is_dtype(cls, dtype: object) -> bool:
    """
    Return a boolean if we if the passed type is an actual dtype that we
    can match (via string or type)
    """
    if isinstance(dtype, str):
        # PeriodDtype can be instantiated from freq string like "U",
        # but doesn't regard freq str like "U" as dtype.
        if not dtype.startswith(("period[", "Period[")):
            return False
        try:
            return cls._parse_dtype_strict(dtype) is not None
        except ValueError:
            return False
    return super().is_dtype(dtype)
@classmethod
def construct_array_type(cls) -> type_t[PeriodArray]:
    """
    Return the array type associated with this dtype.

    Returns
    -------
    type
    """
    # Imported locally to avoid a circular import at module load time.
    from pandas.core.arrays import PeriodArray

    return PeriodArray
def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> PeriodArray:
    """
    Construct PeriodArray from pyarrow Array/ChunkedArray.
    """
    import pyarrow

    from pandas.core.arrays import PeriodArray
    from pandas.core.arrays.arrow._arrow_utils import (
        pyarrow_array_to_numpy_and_mask,
    )

    # Normalize to a list of chunks so both Array and ChunkedArray are
    # handled by the same loop below.
    chunks = [array] if isinstance(array, pyarrow.Array) else array.chunks

    pieces = []
    for chunk in chunks:
        data, mask = pyarrow_array_to_numpy_and_mask(chunk, dtype=np.dtype(np.int64))
        parr = PeriodArray(data.copy(), dtype=self, copy=False)
        # error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray";
        # expected type "Union[int, Sequence[int], Sequence[bool], slice]"
        parr[~mask] = NaT  # type: ignore[index]
        pieces.append(parr)

    if not pieces:
        # No chunks at all -> empty PeriodArray of this dtype.
        return PeriodArray(np.array([], dtype="int64"), dtype=self, copy=False)
    return PeriodArray._concat_same_type(pieces)
@cache_readonly
def index_class(self) -> type_t[PeriodIndex]:
    # Local import avoids a circular dependency with the pandas top level.
    from pandas import PeriodIndex

    return PeriodIndex
@register_extension_dtype
class IntervalDtype(PandasExtensionDtype):
    """
    An ExtensionDtype for Interval data.

    **This is not an actual numpy dtype**, but a duck type.

    Parameters
    ----------
    subtype : str, np.dtype
        The dtype of the Interval bounds.

    Attributes
    ----------
    subtype

    Methods
    -------
    None

    Examples
    --------
    >>> pd.IntervalDtype(subtype='int64', closed='both')
    interval[int64, both]
    """

    name = "interval"
    kind: str_type = "O"
    str = "|O08"
    base = np.dtype("O")
    num = 103
    _metadata = (
        "subtype",
        "closed",
    )

    _match = re.compile(
        r"(I|i)nterval\[(?P<subtype>[^,]+(\[.+\])?)"
        r"(, (?P<closed>(right|left|both|neither)))?\]"
    )

    _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
    # Private storage; None means "partially initialized" (pickle compat).
    _subtype: None | np.dtype
    _closed: IntervalClosedType | None

    def __init__(self, subtype=None, closed: IntervalClosedType | None = None) -> None:
        from pandas.core.dtypes.common import (
            is_string_dtype,
            pandas_dtype,
        )

        if closed is not None and closed not in {"right", "left", "both", "neither"}:
            raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'")

        if isinstance(subtype, IntervalDtype):
            # Copy-construct; an explicit 'closed' must agree with the source.
            if closed is not None and closed != subtype.closed:
                raise ValueError(
                    "dtype.closed and 'closed' do not match. "
                    "Try IntervalDtype(dtype.subtype, closed) instead."
                )
            self._subtype = subtype._subtype
            self._closed = subtype._closed
            return

        if subtype is None or (
            isinstance(subtype, str) and subtype.lower() == "interval"
        ):
            # Empty constructor (generally for pickle compat) or a bare
            # "interval" string: leave the subtype unset.
            self._subtype = None
            self._closed = closed
            return

        if isinstance(subtype, str):
            match_obj = IntervalDtype._match.search(subtype)
            if match_obj is not None:
                groups = match_obj.groupdict()
                subtype = groups["subtype"]
                if groups.get("closed", None) is not None:
                    if closed is not None and closed != groups["closed"]:
                        raise ValueError(
                            "'closed' keyword does not match value "
                            "specified in dtype string"
                        )
                    closed = groups["closed"]  # type: ignore[assignment]

        try:
            subtype = pandas_dtype(subtype)
        except TypeError as err:
            raise TypeError("could not construct IntervalDtype") from err
        if CategoricalDtype.is_dtype(subtype) or is_string_dtype(subtype):
            # GH 19016
            raise TypeError(
                "category, object, and string subtypes are not supported "
                "for IntervalDtype"
            )
        self._subtype = subtype
        self._closed = closed

    @cache_readonly
    def _can_hold_na(self) -> bool:
        subtype = self._subtype
        if subtype is None:
            # partially-initialized
            raise NotImplementedError(
                "_can_hold_na is not defined for partially-initialized IntervalDtype"
            )
        # Integer bounds (signed or unsigned) cannot hold NaN.
        return subtype.kind not in "iu"

    @property
    def closed(self) -> IntervalClosedType:
        return self._closed  # type: ignore[return-value]

    @property
    def subtype(self):
        """
        The dtype of the Interval bounds.

        Examples
        --------
        >>> dtype = pd.IntervalDtype(subtype='int64', closed='both')
        >>> dtype.subtype
        dtype('int64')
        """
        return self._subtype

    @classmethod
    def construct_array_type(cls) -> type[IntervalArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays import IntervalArray

        return IntervalArray

    @classmethod
    def construct_from_string(cls, string: str_type) -> IntervalDtype:
        """
        attempt to construct this type from a string, raise a TypeError
        if its not possible
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )

        if string.lower() == "interval" or cls._match.search(string) is not None:
            return cls(string)

        raise TypeError(
            f"Cannot construct a 'IntervalDtype' from '{string}'.\n\n"
            "Incorrectly formatted string passed to constructor. "
            "Valid formats include Interval or Interval[dtype] "
            "where dtype is numeric, datetime, or timedelta"
        )

    @property
    def type(self) -> type[Interval]:
        return Interval

    def __str__(self) -> str_type:
        if self.subtype is None:
            return "interval"
        if self.closed is None:
            # Only partially initialized GH#38394
            return f"interval[{self.subtype}]"
        return f"interval[{self.subtype}, {self.closed}]"

    def __hash__(self) -> int:
        # make myself hashable
        return hash(str(self))

    def __eq__(self, other: object) -> bool:
        if isinstance(other, str):
            return other.lower() in (self.name.lower(), str(self).lower())
        if not isinstance(other, IntervalDtype):
            return False
        if self.subtype is None or other.subtype is None:
            # None should match any subtype
            return True
        if self.closed != other.closed:
            return False
        return self.subtype == other.subtype

    def __setstate__(self, state) -> None:
        # for pickle compat. __get_state__ is defined in the
        # PandasExtensionDtype superclass and uses the public properties to
        # pickle -> need to set the settable private ones here (see GH26067)
        self._subtype = state["subtype"]

        # backward-compat older pickles won't have "closed" key
        self._closed = state.pop("closed", None)

    @classmethod
    def is_dtype(cls, dtype: object) -> bool:
        """
        Return a boolean if we if the passed type is an actual dtype that we
        can match (via string or type)
        """
        if isinstance(dtype, str):
            if not dtype.lower().startswith("interval"):
                return False
            try:
                return cls.construct_from_string(dtype) is not None
            except (ValueError, TypeError):
                return False
        return super().is_dtype(dtype)

    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> IntervalArray:
        """
        Construct IntervalArray from pyarrow Array/ChunkedArray.
        """
        import pyarrow

        from pandas.core.arrays import IntervalArray

        chunks = [array] if isinstance(array, pyarrow.Array) else array.chunks

        pieces = []
        for chunk in chunks:
            if isinstance(chunk, pyarrow.ExtensionArray):
                # Unwrap the extension storage to reach the struct fields.
                chunk = chunk.storage
            left = np.asarray(chunk.field("left"), dtype=self.subtype)
            right = np.asarray(chunk.field("right"), dtype=self.subtype)
            pieces.append(IntervalArray.from_arrays(left, right, closed=self.closed))

        if not pieces:
            return IntervalArray.from_arrays(
                np.array([], dtype=self.subtype),
                np.array([], dtype=self.subtype),
                closed=self.closed,
            )
        return IntervalArray._concat_same_type(pieces)

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        if not all(isinstance(x, IntervalDtype) for x in dtypes):
            return None

        closed = cast("IntervalDtype", dtypes[0]).closed
        if not all(cast("IntervalDtype", x).closed == closed for x in dtypes):
            # Mixed closedness cannot be represented -> fall back to object.
            return np.dtype(object)

        from pandas.core.dtypes.cast import find_common_type

        common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes])
        if common == object:
            return np.dtype(object)
        return IntervalDtype(common, closed=closed)

    @cache_readonly
    def index_class(self) -> type_t[IntervalIndex]:
        from pandas import IntervalIndex

        return IntervalIndex
class NumpyEADtype(ExtensionDtype):
    """
    A Pandas ExtensionDtype for NumPy dtypes.

    This is mostly for internal compatibility, and is not especially
    useful on its own.

    Parameters
    ----------
    dtype : object
        Object to be converted to a NumPy data type object.

    See Also
    --------
    numpy.dtype
    """

    _metadata = ("_dtype",)
    _supports_2d = False
    _can_fast_transpose = False

    def __init__(self, dtype: npt.DTypeLike | NumpyEADtype | None) -> None:
        if isinstance(dtype, NumpyEADtype):
            # make constructor idempotent
            dtype = dtype.numpy_dtype
        self._dtype = np.dtype(dtype)

    def __repr__(self) -> str:
        return f"NumpyEADtype({self.name!r})"

    @property
    def numpy_dtype(self) -> np.dtype:
        """
        The NumPy dtype this NumpyEADtype wraps.
        """
        return self._dtype

    @property
    def name(self) -> str:
        """
        A bit-width name for this data-type.
        """
        return self._dtype.name

    @property
    def type(self) -> type[np.generic]:
        """
        The type object used to instantiate a scalar of this NumPy data-type.
        """
        return self._dtype.type

    @property
    def kind(self) -> str:
        """
        A character code (one of 'biufcmMOSUV') identifying the general kind of data.
        """
        return self._dtype.kind

    @property
    def itemsize(self) -> int:
        """
        The element size of this data-type object.
        """
        return self._dtype.itemsize

    @property
    def _is_numeric(self) -> bool:
        # exclude object, str, unicode, void.
        return self.kind in set("biufc")

    @property
    def _is_boolean(self) -> bool:
        return self.kind == "b"

    @classmethod
    def construct_from_string(cls, string: str) -> NumpyEADtype:
        try:
            dtype = np.dtype(string)
        except TypeError as err:
            if isinstance(string, str):
                msg = f"Cannot construct a 'NumpyEADtype' from '{string}'"
            else:
                msg = f"'construct_from_string' expects a string, got {type(string)}"
            raise TypeError(msg) from err
        return cls(dtype)

    @classmethod
    def construct_array_type(cls) -> type_t[NumpyExtensionArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays import NumpyExtensionArray

        return NumpyExtensionArray
class BaseMaskedDtype(ExtensionDtype):
    """
    Base class for dtypes for BaseMaskedArray subclasses.
    """

    base = None
    # Scalar numpy type of the subclass (e.g. np.int64); set by subclasses.
    type: type

    @property
    def na_value(self) -> libmissing.NAType:
        return libmissing.NA

    @cache_readonly
    def numpy_dtype(self) -> np.dtype:
        """Return an instance of our numpy dtype"""
        return np.dtype(self.type)

    @cache_readonly
    def kind(self) -> str:
        return self.numpy_dtype.kind

    @cache_readonly
    def itemsize(self) -> int:
        """Return the number of bytes in this dtype"""
        return self.numpy_dtype.itemsize

    @classmethod
    def construct_array_type(cls) -> type_t[BaseMaskedArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        raise NotImplementedError

    @classmethod
    def from_numpy_dtype(cls, dtype: np.dtype) -> BaseMaskedDtype:
        """
        Construct the MaskedDtype corresponding to the given numpy dtype.
        """
        numpy_kind = dtype.kind
        if numpy_kind == "b":
            from pandas.core.arrays.boolean import BooleanDtype

            return BooleanDtype()
        if numpy_kind in "iu":
            from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE

            return NUMPY_INT_TO_DTYPE[dtype]
        if numpy_kind == "f":
            from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE

            return NUMPY_FLOAT_TO_DTYPE[dtype]
        raise NotImplementedError(dtype)

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # We unwrap any masked dtypes, find the common dtype we would use
        # for that, then re-mask the result.
        from pandas.core.dtypes.cast import find_common_type

        unwrapped = [
            dtype.numpy_dtype if isinstance(dtype, BaseMaskedDtype) else dtype
            for dtype in dtypes
        ]
        new_dtype = find_common_type(unwrapped)
        if not isinstance(new_dtype, np.dtype):
            # If we ever support e.g. Masked[DatetimeArray] then this will change
            return None
        try:
            return type(self).from_numpy_dtype(new_dtype)
        except (KeyError, NotImplementedError):
            return None
@register_extension_dtype
|
| 1611 |
+
class SparseDtype(ExtensionDtype):
|
| 1612 |
+
"""
|
| 1613 |
+
Dtype for data stored in :class:`SparseArray`.
|
| 1614 |
+
|
| 1615 |
+
This dtype implements the pandas ExtensionDtype interface.
|
| 1616 |
+
|
| 1617 |
+
Parameters
|
| 1618 |
+
----------
|
| 1619 |
+
dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
|
| 1620 |
+
The dtype of the underlying array storing the non-fill value values.
|
| 1621 |
+
fill_value : scalar, optional
|
| 1622 |
+
The scalar value not stored in the SparseArray. By default, this
|
| 1623 |
+
depends on `dtype`.
|
| 1624 |
+
|
| 1625 |
+
=========== ==========
|
| 1626 |
+
dtype na_value
|
| 1627 |
+
=========== ==========
|
| 1628 |
+
float ``np.nan``
|
| 1629 |
+
int ``0``
|
| 1630 |
+
bool ``False``
|
| 1631 |
+
datetime64 ``pd.NaT``
|
| 1632 |
+
timedelta64 ``pd.NaT``
|
| 1633 |
+
=========== ==========
|
| 1634 |
+
|
| 1635 |
+
The default value may be overridden by specifying a `fill_value`.
|
| 1636 |
+
|
| 1637 |
+
Attributes
|
| 1638 |
+
----------
|
| 1639 |
+
None
|
| 1640 |
+
|
| 1641 |
+
Methods
|
| 1642 |
+
-------
|
| 1643 |
+
None
|
| 1644 |
+
|
| 1645 |
+
Examples
|
| 1646 |
+
--------
|
| 1647 |
+
>>> ser = pd.Series([1, 0, 0], dtype=pd.SparseDtype(dtype=int, fill_value=0))
|
| 1648 |
+
>>> ser
|
| 1649 |
+
0 1
|
| 1650 |
+
1 0
|
| 1651 |
+
2 0
|
| 1652 |
+
dtype: Sparse[int64, 0]
|
| 1653 |
+
>>> ser.sparse.density
|
| 1654 |
+
0.3333333333333333
|
| 1655 |
+
"""
|
| 1656 |
+
|
| 1657 |
+
_is_immutable = True
|
| 1658 |
+
|
| 1659 |
+
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
|
| 1660 |
+
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
|
| 1661 |
+
# Without is_na_fill_value in the comparison, those would be equal since
|
| 1662 |
+
# hash(nan) is (sometimes?) 0.
|
| 1663 |
+
_metadata = ("_dtype", "_fill_value", "_is_na_fill_value")
|
| 1664 |
+
|
| 1665 |
+
def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
|
| 1666 |
+
if isinstance(dtype, type(self)):
|
| 1667 |
+
if fill_value is None:
|
| 1668 |
+
fill_value = dtype.fill_value
|
| 1669 |
+
dtype = dtype.subtype
|
| 1670 |
+
|
| 1671 |
+
from pandas.core.dtypes.common import (
|
| 1672 |
+
is_string_dtype,
|
| 1673 |
+
pandas_dtype,
|
| 1674 |
+
)
|
| 1675 |
+
from pandas.core.dtypes.missing import na_value_for_dtype
|
| 1676 |
+
|
| 1677 |
+
dtype = pandas_dtype(dtype)
|
| 1678 |
+
if is_string_dtype(dtype):
|
| 1679 |
+
dtype = np.dtype("object")
|
| 1680 |
+
if not isinstance(dtype, np.dtype):
|
| 1681 |
+
# GH#53160
|
| 1682 |
+
raise TypeError("SparseDtype subtype must be a numpy dtype")
|
| 1683 |
+
|
| 1684 |
+
if fill_value is None:
|
| 1685 |
+
fill_value = na_value_for_dtype(dtype)
|
| 1686 |
+
|
| 1687 |
+
self._dtype = dtype
|
| 1688 |
+
self._fill_value = fill_value
|
| 1689 |
+
self._check_fill_value()
|
| 1690 |
+
|
| 1691 |
+
def __hash__(self) -> int:
|
| 1692 |
+
# Python3 doesn't inherit __hash__ when a base class overrides
|
| 1693 |
+
# __eq__, so we explicitly do it here.
|
| 1694 |
+
return super().__hash__()
|
| 1695 |
+
|
| 1696 |
+
def __eq__(self, other: object) -> bool:
|
| 1697 |
+
# We have to override __eq__ to handle NA values in _metadata.
|
| 1698 |
+
# The base class does simple == checks, which fail for NA.
|
| 1699 |
+
if isinstance(other, str):
|
| 1700 |
+
try:
|
| 1701 |
+
other = self.construct_from_string(other)
|
| 1702 |
+
except TypeError:
|
| 1703 |
+
return False
|
| 1704 |
+
|
| 1705 |
+
if isinstance(other, type(self)):
|
| 1706 |
+
subtype = self.subtype == other.subtype
|
| 1707 |
+
if self._is_na_fill_value:
|
| 1708 |
+
# this case is complicated by two things:
|
| 1709 |
+
# SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
|
| 1710 |
+
# SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
|
| 1711 |
+
# i.e. we want to treat any floating-point NaN as equal, but
|
| 1712 |
+
# not a floating-point NaN and a datetime NaT.
|
| 1713 |
+
fill_value = (
|
| 1714 |
+
other._is_na_fill_value
|
| 1715 |
+
and isinstance(self.fill_value, type(other.fill_value))
|
| 1716 |
+
or isinstance(other.fill_value, type(self.fill_value))
|
| 1717 |
+
)
|
| 1718 |
+
else:
|
| 1719 |
+
with warnings.catch_warnings():
|
| 1720 |
+
# Ignore spurious numpy warning
|
| 1721 |
+
warnings.filterwarnings(
|
| 1722 |
+
"ignore",
|
| 1723 |
+
"elementwise comparison failed",
|
| 1724 |
+
category=DeprecationWarning,
|
| 1725 |
+
)
|
| 1726 |
+
|
| 1727 |
+
fill_value = self.fill_value == other.fill_value
|
| 1728 |
+
|
| 1729 |
+
return subtype and fill_value
|
| 1730 |
+
return False
|
| 1731 |
+
|
| 1732 |
+
@property
|
| 1733 |
+
def fill_value(self):
|
| 1734 |
+
"""
|
| 1735 |
+
The fill value of the array.
|
| 1736 |
+
|
| 1737 |
+
Converting the SparseArray to a dense ndarray will fill the
|
| 1738 |
+
array with this value.
|
| 1739 |
+
|
| 1740 |
+
.. warning::
|
| 1741 |
+
|
| 1742 |
+
It's possible to end up with a SparseArray that has ``fill_value``
|
| 1743 |
+
values in ``sp_values``. This can occur, for example, when setting
|
| 1744 |
+
``SparseArray.fill_value`` directly.
|
| 1745 |
+
"""
|
| 1746 |
+
return self._fill_value
|
| 1747 |
+
|
| 1748 |
+
def _check_fill_value(self) -> None:
|
| 1749 |
+
if not lib.is_scalar(self._fill_value):
|
| 1750 |
+
raise ValueError(
|
| 1751 |
+
f"fill_value must be a scalar. Got {self._fill_value} instead"
|
| 1752 |
+
)
|
| 1753 |
+
|
| 1754 |
+
from pandas.core.dtypes.cast import can_hold_element
|
| 1755 |
+
from pandas.core.dtypes.missing import (
|
| 1756 |
+
is_valid_na_for_dtype,
|
| 1757 |
+
isna,
|
| 1758 |
+
)
|
| 1759 |
+
|
| 1760 |
+
from pandas.core.construction import ensure_wrapped_if_datetimelike
|
| 1761 |
+
|
| 1762 |
+
# GH#23124 require fill_value and subtype to match
|
| 1763 |
+
val = self._fill_value
|
| 1764 |
+
if isna(val):
|
| 1765 |
+
if not is_valid_na_for_dtype(val, self.subtype):
|
| 1766 |
+
warnings.warn(
|
| 1767 |
+
"Allowing arbitrary scalar fill_value in SparseDtype is "
|
| 1768 |
+
"deprecated. In a future version, the fill_value must be "
|
| 1769 |
+
"a valid value for the SparseDtype.subtype.",
|
| 1770 |
+
FutureWarning,
|
| 1771 |
+
stacklevel=find_stack_level(),
|
| 1772 |
+
)
|
| 1773 |
+
else:
|
| 1774 |
+
dummy = np.empty(0, dtype=self.subtype)
|
| 1775 |
+
dummy = ensure_wrapped_if_datetimelike(dummy)
|
| 1776 |
+
|
| 1777 |
+
if not can_hold_element(dummy, val):
|
| 1778 |
+
warnings.warn(
|
| 1779 |
+
"Allowing arbitrary scalar fill_value in SparseDtype is "
|
| 1780 |
+
"deprecated. In a future version, the fill_value must be "
|
| 1781 |
+
"a valid value for the SparseDtype.subtype.",
|
| 1782 |
+
FutureWarning,
|
| 1783 |
+
stacklevel=find_stack_level(),
|
| 1784 |
+
)
|
| 1785 |
+
|
| 1786 |
+
@property
|
| 1787 |
+
def _is_na_fill_value(self) -> bool:
|
| 1788 |
+
from pandas import isna
|
| 1789 |
+
|
| 1790 |
+
return isna(self.fill_value)
|
| 1791 |
+
|
| 1792 |
+
@property
|
| 1793 |
+
def _is_numeric(self) -> bool:
|
| 1794 |
+
return not self.subtype == object
|
| 1795 |
+
|
| 1796 |
+
@property
|
| 1797 |
+
def _is_boolean(self) -> bool:
|
| 1798 |
+
return self.subtype.kind == "b"
|
| 1799 |
+
|
| 1800 |
+
@property
|
| 1801 |
+
def kind(self) -> str:
|
| 1802 |
+
"""
|
| 1803 |
+
The sparse kind. Either 'integer', or 'block'.
|
| 1804 |
+
"""
|
| 1805 |
+
return self.subtype.kind
|
| 1806 |
+
|
| 1807 |
+
@property
|
| 1808 |
+
def type(self):
|
| 1809 |
+
return self.subtype.type
|
| 1810 |
+
|
| 1811 |
+
@property
|
| 1812 |
+
def subtype(self):
|
| 1813 |
+
return self._dtype
|
| 1814 |
+
|
| 1815 |
+
@property
|
| 1816 |
+
def name(self) -> str:
|
| 1817 |
+
return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]"
|
| 1818 |
+
|
| 1819 |
+
def __repr__(self) -> str:
|
| 1820 |
+
return self.name
|
| 1821 |
+
|
| 1822 |
+
@classmethod
|
| 1823 |
+
def construct_array_type(cls) -> type_t[SparseArray]:
|
| 1824 |
+
"""
|
| 1825 |
+
Return the array type associated with this dtype.
|
| 1826 |
+
|
| 1827 |
+
Returns
|
| 1828 |
+
-------
|
| 1829 |
+
type
|
| 1830 |
+
"""
|
| 1831 |
+
from pandas.core.arrays.sparse.array import SparseArray
|
| 1832 |
+
|
| 1833 |
+
return SparseArray
|
| 1834 |
+
|
| 1835 |
+
@classmethod
|
| 1836 |
+
def construct_from_string(cls, string: str) -> SparseDtype:
|
| 1837 |
+
"""
|
| 1838 |
+
Construct a SparseDtype from a string form.
|
| 1839 |
+
|
| 1840 |
+
Parameters
|
| 1841 |
+
----------
|
| 1842 |
+
string : str
|
| 1843 |
+
Can take the following forms.
|
| 1844 |
+
|
| 1845 |
+
string dtype
|
| 1846 |
+
================ ============================
|
| 1847 |
+
'int' SparseDtype[np.int64, 0]
|
| 1848 |
+
'Sparse' SparseDtype[np.float64, nan]
|
| 1849 |
+
'Sparse[int]' SparseDtype[np.int64, 0]
|
| 1850 |
+
'Sparse[int, 0]' SparseDtype[np.int64, 0]
|
| 1851 |
+
================ ============================
|
| 1852 |
+
|
| 1853 |
+
It is not possible to specify non-default fill values
|
| 1854 |
+
with a string. An argument like ``'Sparse[int, 1]'``
|
| 1855 |
+
will raise a ``TypeError`` because the default fill value
|
| 1856 |
+
for integers is 0.
|
| 1857 |
+
|
| 1858 |
+
Returns
|
| 1859 |
+
-------
|
| 1860 |
+
SparseDtype
|
| 1861 |
+
"""
|
| 1862 |
+
if not isinstance(string, str):
|
| 1863 |
+
raise TypeError(
|
| 1864 |
+
f"'construct_from_string' expects a string, got {type(string)}"
|
| 1865 |
+
)
|
| 1866 |
+
msg = f"Cannot construct a 'SparseDtype' from '{string}'"
|
| 1867 |
+
if string.startswith("Sparse"):
|
| 1868 |
+
try:
|
| 1869 |
+
sub_type, has_fill_value = cls._parse_subtype(string)
|
| 1870 |
+
except ValueError as err:
|
| 1871 |
+
raise TypeError(msg) from err
|
| 1872 |
+
else:
|
| 1873 |
+
result = SparseDtype(sub_type)
|
| 1874 |
+
msg = (
|
| 1875 |
+
f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt "
|
| 1876 |
+
"looks like the fill_value in the string is not "
|
| 1877 |
+
"the default for the dtype. Non-default fill_values "
|
| 1878 |
+
"are not supported. Use the 'SparseDtype()' "
|
| 1879 |
+
"constructor instead."
|
| 1880 |
+
)
|
| 1881 |
+
if has_fill_value and str(result) != string:
|
| 1882 |
+
raise TypeError(msg)
|
| 1883 |
+
return result
|
| 1884 |
+
else:
|
| 1885 |
+
raise TypeError(msg)
|
| 1886 |
+
|
| 1887 |
+
@staticmethod
|
| 1888 |
+
def _parse_subtype(dtype: str) -> tuple[str, bool]:
|
| 1889 |
+
"""
|
| 1890 |
+
Parse a string to get the subtype
|
| 1891 |
+
|
| 1892 |
+
Parameters
|
| 1893 |
+
----------
|
| 1894 |
+
dtype : str
|
| 1895 |
+
A string like
|
| 1896 |
+
|
| 1897 |
+
* Sparse[subtype]
|
| 1898 |
+
* Sparse[subtype, fill_value]
|
| 1899 |
+
|
| 1900 |
+
Returns
|
| 1901 |
+
-------
|
| 1902 |
+
subtype : str
|
| 1903 |
+
|
| 1904 |
+
Raises
|
| 1905 |
+
------
|
| 1906 |
+
ValueError
|
| 1907 |
+
When the subtype cannot be extracted.
|
| 1908 |
+
"""
|
| 1909 |
+
xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
|
| 1910 |
+
m = xpr.match(dtype)
|
| 1911 |
+
has_fill_value = False
|
| 1912 |
+
if m:
|
| 1913 |
+
subtype = m.groupdict()["subtype"]
|
| 1914 |
+
has_fill_value = bool(m.groupdict()["fill_value"])
|
| 1915 |
+
elif dtype == "Sparse":
|
| 1916 |
+
subtype = "float64"
|
| 1917 |
+
else:
|
| 1918 |
+
raise ValueError(f"Cannot parse {dtype}")
|
| 1919 |
+
return subtype, has_fill_value
|
| 1920 |
+
|
| 1921 |
+
@classmethod
|
| 1922 |
+
def is_dtype(cls, dtype: object) -> bool:
|
| 1923 |
+
dtype = getattr(dtype, "dtype", dtype)
|
| 1924 |
+
if isinstance(dtype, str) and dtype.startswith("Sparse"):
|
| 1925 |
+
sub_type, _ = cls._parse_subtype(dtype)
|
| 1926 |
+
dtype = np.dtype(sub_type)
|
| 1927 |
+
elif isinstance(dtype, cls):
|
| 1928 |
+
return True
|
| 1929 |
+
return isinstance(dtype, np.dtype) or dtype == "Sparse"
|
| 1930 |
+
|
| 1931 |
+
def update_dtype(self, dtype) -> SparseDtype:
    """
    Convert the SparseDtype to a new dtype.

    This takes care of converting the ``fill_value``.

    Parameters
    ----------
    dtype : Union[str, numpy.dtype, SparseDtype]
        The new dtype to use.

        * For a SparseDtype, it is simply returned
        * For a NumPy dtype (or str), the current fill value
          is converted to the new dtype, and a SparseDtype
          with `dtype` and the new fill value is returned.

    Returns
    -------
    SparseDtype
        A new SparseDtype with the correct `dtype` and fill value
        for that `dtype`.

    Raises
    ------
    ValueError
        When the current fill value cannot be converted to the
        new `dtype` (e.g. trying to convert ``np.nan`` to an
        integer dtype).


    Examples
    --------
    >>> SparseDtype(int, 0).update_dtype(float)
    Sparse[float64, 0.0]

    >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
    Sparse[float64, nan]
    """
    from pandas.core.dtypes.astype import astype_array
    from pandas.core.dtypes.common import pandas_dtype

    sparse_cls = type(self)
    target = pandas_dtype(dtype)

    if isinstance(target, sparse_cls):
        # Already a SparseDtype: nothing to convert.
        return target

    if not isinstance(target, np.dtype):
        raise TypeError("sparse arrays of extension dtypes not supported")

    # Cast the fill value by round-tripping through a 1d array rather than
    # calling .item() on a 0d array, which would cast dt64 values to int.
    fill_arr = astype_array(np.atleast_1d(np.array(self.fill_value)), target)
    return sparse_cls(target, fill_value=fill_arr[0])
|
| 1986 |
+
|
| 1987 |
+
@property
def _subtype_with_str(self):
    """
    Whether the SparseDtype's subtype should be considered ``str``.

    Typically, pandas will store string data in an object-dtype array.
    When converting values to a dtype, e.g. in ``.astype``, we need to
    be more specific, we need the actual underlying type.

    Returns
    -------
    >>> SparseDtype(int, 1)._subtype_with_str
    dtype('int64')

    >>> SparseDtype(object, 1)._subtype_with_str
    dtype('O')

    >>> dtype = SparseDtype(str, '')
    >>> dtype.subtype
    dtype('O')

    >>> dtype._subtype_with_str
    <class 'str'>
    """
    fv = self.fill_value
    # A str fill value signals that the data are genuinely strings, so
    # report ``str`` itself instead of the stored object subtype.
    return type(fv) if isinstance(fv, str) else self.subtype
|
| 2014 |
+
|
| 2015 |
+
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
    """Find the dtype to use when concatenating arrays with these dtypes.

    Only SparseDtypes and plain numpy dtypes are handled; any other
    extension dtype makes this return None so the caller falls back.
    """
    # TODO for now only handle SparseDtypes and numpy dtypes => extend
    # with other compatible extension dtypes
    from pandas.core.dtypes.cast import np_find_common_type

    for entry in dtypes:
        if isinstance(entry, ExtensionDtype) and not isinstance(entry, SparseDtype):
            return None

    fills = [entry.fill_value for entry in dtypes if isinstance(entry, SparseDtype)]
    chosen_fill = fills[0]

    from pandas import isna

    # np.nan isn't a singleton, so we may end up with multiple
    # NaNs here, so we ignore the all NA case too.
    if len(set(fills)) != 1 and not isna(fills).all():
        warnings.warn(
            "Concatenating sparse arrays with multiple fill "
            f"values: '{fills}'. Picking the first and "
            "converting the rest.",
            PerformanceWarning,
            stacklevel=find_stack_level(),
        )

    subtypes = (
        entry.subtype if isinstance(entry, SparseDtype) else entry for entry in dtypes
    )
    return SparseDtype(np_find_common_type(*subtypes), fill_value=chosen_fill)
|
| 2044 |
+
|
| 2045 |
+
|
| 2046 |
+
@register_extension_dtype
class ArrowDtype(StorageExtensionDtype):
    """
    An ExtensionDtype for PyArrow data types.

    .. warning::

       ArrowDtype is considered experimental. The implementation and
       parts of the API may change without warning.

    While most ``dtype`` arguments can accept the "string"
    constructor, e.g. ``"int64[pyarrow]"``, ArrowDtype is useful
    if the data type contains parameters like ``pyarrow.timestamp``.

    Parameters
    ----------
    pyarrow_dtype : pa.DataType
        An instance of a `pyarrow.DataType <https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions>`__.

    Attributes
    ----------
    pyarrow_dtype

    Methods
    -------
    None

    Returns
    -------
    ArrowDtype

    Examples
    --------
    >>> import pyarrow as pa
    >>> pd.ArrowDtype(pa.int64())
    int64[pyarrow]

    Types with parameters must be constructed with ArrowDtype.

    >>> pd.ArrowDtype(pa.timestamp("s", tz="America/New_York"))
    timestamp[s, tz=America/New_York][pyarrow]
    >>> pd.ArrowDtype(pa.list_(pa.int64()))
    list<item: int64>[pyarrow]
    """

    # "storage" comes from StorageExtensionDtype; "pyarrow_dtype" is the
    # attribute that actually parametrizes (and distinguishes) instances.
    _metadata = ("storage", "pyarrow_dtype")  # type: ignore[assignment]

    def __init__(self, pyarrow_dtype: pa.DataType) -> None:
        # Storage is always "pyarrow" for this dtype.
        super().__init__("pyarrow")
        if pa_version_under10p1:
            raise ImportError("pyarrow>=10.0.1 is required for ArrowDtype")
        if not isinstance(pyarrow_dtype, pa.DataType):
            raise ValueError(
                f"pyarrow_dtype ({pyarrow_dtype}) must be an instance "
                f"of a pyarrow.DataType. Got {type(pyarrow_dtype)} instead."
            )
        self.pyarrow_dtype = pyarrow_dtype

    def __repr__(self) -> str:
        # e.g. "int64[pyarrow]"
        return self.name

    def __hash__(self) -> int:
        # make myself hashable
        return hash(str(self))

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, type(self)):
            # Fall back to the base class' string-comparison semantics.
            return super().__eq__(other)
        return self.pyarrow_dtype == other.pyarrow_dtype

    @property
    def type(self):
        """
        Returns associated scalar type.
        """
        pa_type = self.pyarrow_dtype
        if pa.types.is_integer(pa_type):
            return int
        elif pa.types.is_floating(pa_type):
            return float
        elif pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type):
            return str
        elif (
            pa.types.is_binary(pa_type)
            or pa.types.is_fixed_size_binary(pa_type)
            or pa.types.is_large_binary(pa_type)
        ):
            return bytes
        elif pa.types.is_boolean(pa_type):
            return bool
        elif pa.types.is_duration(pa_type):
            # ns-resolution maps to pandas' own scalar, otherwise stdlib.
            if pa_type.unit == "ns":
                return Timedelta
            else:
                return timedelta
        elif pa.types.is_timestamp(pa_type):
            if pa_type.unit == "ns":
                return Timestamp
            else:
                return datetime
        elif pa.types.is_date(pa_type):
            return date
        elif pa.types.is_time(pa_type):
            return time
        elif pa.types.is_decimal(pa_type):
            return Decimal
        elif pa.types.is_dictionary(pa_type):
            # TODO: Potentially change this & CategoricalDtype.type to
            #  something more representative of the scalar
            return CategoricalDtypeType
        elif pa.types.is_list(pa_type) or pa.types.is_large_list(pa_type):
            return list
        elif pa.types.is_fixed_size_list(pa_type):
            return list
        elif pa.types.is_map(pa_type):
            return list
        elif pa.types.is_struct(pa_type):
            return dict
        elif pa.types.is_null(pa_type):
            # TODO: None? pd.NA? pa.null?
            return type(pa_type)
        elif isinstance(pa_type, pa.ExtensionType):
            # Recurse into the storage type of pyarrow extension types.
            return type(self)(pa_type.storage_type).type
        raise NotImplementedError(pa_type)

    @property
    def name(self) -> str:  # type: ignore[override]
        """
        A string identifying the data type.
        """
        return f"{str(self.pyarrow_dtype)}[{self.storage}]"

    @cache_readonly
    def numpy_dtype(self) -> np.dtype:
        """Return an instance of the related numpy dtype"""
        if pa.types.is_timestamp(self.pyarrow_dtype):
            # pa.timestamp(unit).to_pandas_dtype() returns ns units
            # regardless of the pyarrow timestamp units.
            # This can be removed if/when pyarrow addresses it:
            # https://github.com/apache/arrow/issues/34462
            return np.dtype(f"datetime64[{self.pyarrow_dtype.unit}]")
        if pa.types.is_duration(self.pyarrow_dtype):
            # pa.duration(unit).to_pandas_dtype() returns ns units
            # regardless of the pyarrow duration units
            # This can be removed if/when pyarrow addresses it:
            # https://github.com/apache/arrow/issues/34462
            return np.dtype(f"timedelta64[{self.pyarrow_dtype.unit}]")
        if pa.types.is_string(self.pyarrow_dtype) or pa.types.is_large_string(
            self.pyarrow_dtype
        ):
            # pa.string().to_pandas_dtype() = object which we don't want
            return np.dtype(str)
        try:
            return np.dtype(self.pyarrow_dtype.to_pandas_dtype())
        except (NotImplementedError, TypeError):
            # Types numpy cannot represent fall back to object dtype.
            return np.dtype(object)

    @cache_readonly
    def kind(self) -> str:
        # Single-character numpy-style kind code.
        if pa.types.is_timestamp(self.pyarrow_dtype):
            # To mirror DatetimeTZDtype
            return "M"
        return self.numpy_dtype.kind

    @cache_readonly
    def itemsize(self) -> int:
        """Return the number of bytes in this dtype"""
        return self.numpy_dtype.itemsize

    @classmethod
    def construct_array_type(cls) -> type_t[ArrowExtensionArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays.arrow import ArrowExtensionArray

        return ArrowExtensionArray

    @classmethod
    def construct_from_string(cls, string: str) -> ArrowDtype:
        """
        Construct this type from a string.

        Parameters
        ----------
        string : str
            string should follow the format f"{pyarrow_type}[pyarrow]"
            e.g. int64[pyarrow]
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )
        if not string.endswith("[pyarrow]"):
            raise TypeError(f"'{string}' must end with '[pyarrow]'")
        if string == "string[pyarrow]":
            # Ensure Registry.find skips ArrowDtype to use StringDtype instead
            raise TypeError("string[pyarrow] should be constructed by StringDtype")

        base_type = string[:-9]  # get rid of "[pyarrow]"
        try:
            pa_dtype = pa.type_for_alias(base_type)
        except ValueError as err:
            # Parametrized types (e.g. "timestamp[s, tz=...]") are not
            # resolvable via type_for_alias; detect the bracketed params.
            has_parameters = re.search(r"[\[\(].*[\]\)]", base_type)
            if has_parameters:
                # Fallback to try common temporal types
                try:
                    return cls._parse_temporal_dtype_string(base_type)
                except (NotImplementedError, ValueError):
                    # Fall through to raise with nice exception message below
                    pass

                raise NotImplementedError(
                    "Passing pyarrow type specific parameters "
                    f"({has_parameters.group()}) in the string is not supported. "
                    "Please construct an ArrowDtype object with a pyarrow_dtype "
                    "instance with specific parameters."
                ) from err
            raise TypeError(f"'{base_type}' is not a valid pyarrow data type.") from err
        return cls(pa_dtype)

    # TODO(arrow#33642): This can be removed once supported by pyarrow
    @classmethod
    def _parse_temporal_dtype_string(cls, string: str) -> ArrowDtype:
        """
        Construct a temporal ArrowDtype from string.
        """
        # we assume
        #  1) "[pyarrow]" has already been stripped from the end of our string.
        #  2) we know "[" is present
        head, tail = string.split("[", 1)

        if not tail.endswith("]"):
            raise ValueError
        tail = tail[:-1]

        if head == "timestamp":
            assert "," in tail  # otherwise type_for_alias should work
            unit, tz = tail.split(",", 1)
            unit = unit.strip()
            tz = tz.strip()
            if tz.startswith("tz="):
                tz = tz[3:]

            pa_type = pa.timestamp(unit, tz=tz)
            dtype = cls(pa_type)
            return dtype

        raise NotImplementedError(string)

    @property
    def _is_numeric(self) -> bool:
        """
        Whether columns with this dtype should be considered numeric.
        """
        # TODO: pa.types.is_boolean?
        return (
            pa.types.is_integer(self.pyarrow_dtype)
            or pa.types.is_floating(self.pyarrow_dtype)
            or pa.types.is_decimal(self.pyarrow_dtype)
        )

    @property
    def _is_boolean(self) -> bool:
        """
        Whether this dtype should be considered boolean.
        """
        return pa.types.is_boolean(self.pyarrow_dtype)

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # We unwrap any masked dtypes, find the common dtype we would use
        #  for that, then re-mask the result.
        # Mirrors BaseMaskedDtype
        from pandas.core.dtypes.cast import find_common_type

        null_dtype = type(self)(pa.null())

        new_dtype = find_common_type(
            [
                dtype.numpy_dtype if isinstance(dtype, ArrowDtype) else dtype
                for dtype in dtypes
                if dtype != null_dtype
            ]
        )
        if not isinstance(new_dtype, np.dtype):
            # Only numpy-backed common dtypes can be mapped back to pyarrow.
            return None
        try:
            pa_dtype = pa.from_numpy_dtype(new_dtype)
            return type(self)(pa_dtype)
        except NotImplementedError:
            return None

    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray):
        """
        Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.
        """
        array_class = self.construct_array_type()
        # safe=True raises on lossy casts rather than silently truncating.
        arr = array.cast(self.pyarrow_dtype, safe=True)
        return array_class(arr)
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__init__.py
ADDED
|
File without changes
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (176 Bytes). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/accessors.cpython-310.pyc
ADDED
|
Binary file (17.3 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc
ADDED
|
Binary file (10.6 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/category.cpython-310.pyc
ADDED
|
Binary file (14.9 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimelike.cpython-310.pyc
ADDED
|
Binary file (21.5 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimes.cpython-310.pyc
ADDED
|
Binary file (32.7 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/extension.cpython-310.pyc
ADDED
|
Binary file (5.19 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/frozen.cpython-310.pyc
ADDED
|
Binary file (4.09 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/interval.cpython-310.pyc
ADDED
|
Binary file (28.9 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/period.cpython-310.pyc
ADDED
|
Binary file (16.4 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/range.cpython-310.pyc
ADDED
|
Binary file (29.3 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc
ADDED
|
Binary file (10 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/accessors.py
ADDED
|
@@ -0,0 +1,643 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
datetimelike delegation
|
| 3 |
+
"""
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
from typing import (
|
| 7 |
+
TYPE_CHECKING,
|
| 8 |
+
cast,
|
| 9 |
+
)
|
| 10 |
+
import warnings
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
|
| 14 |
+
from pandas._libs import lib
|
| 15 |
+
from pandas.util._exceptions import find_stack_level
|
| 16 |
+
|
| 17 |
+
from pandas.core.dtypes.common import (
|
| 18 |
+
is_integer_dtype,
|
| 19 |
+
is_list_like,
|
| 20 |
+
)
|
| 21 |
+
from pandas.core.dtypes.dtypes import (
|
| 22 |
+
ArrowDtype,
|
| 23 |
+
CategoricalDtype,
|
| 24 |
+
DatetimeTZDtype,
|
| 25 |
+
PeriodDtype,
|
| 26 |
+
)
|
| 27 |
+
from pandas.core.dtypes.generic import ABCSeries
|
| 28 |
+
|
| 29 |
+
from pandas.core.accessor import (
|
| 30 |
+
PandasDelegate,
|
| 31 |
+
delegate_names,
|
| 32 |
+
)
|
| 33 |
+
from pandas.core.arrays import (
|
| 34 |
+
DatetimeArray,
|
| 35 |
+
PeriodArray,
|
| 36 |
+
TimedeltaArray,
|
| 37 |
+
)
|
| 38 |
+
from pandas.core.arrays.arrow.array import ArrowExtensionArray
|
| 39 |
+
from pandas.core.base import (
|
| 40 |
+
NoNewAttributesMixin,
|
| 41 |
+
PandasObject,
|
| 42 |
+
)
|
| 43 |
+
from pandas.core.indexes.datetimes import DatetimeIndex
|
| 44 |
+
from pandas.core.indexes.timedeltas import TimedeltaIndex
|
| 45 |
+
|
| 46 |
+
if TYPE_CHECKING:
|
| 47 |
+
from pandas import (
|
| 48 |
+
DataFrame,
|
| 49 |
+
Series,
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):
    """
    Base class for the ``.dt`` accessor namespaces.

    Wraps a Series and forwards datetimelike attribute/method access to
    an Index/Array built from the Series values.
    """

    # Hide these instance attributes from dir()/tab completion.
    _hidden_attrs = PandasObject._hidden_attrs | {
        "orig",
        "name",
    }

    def __init__(self, data: Series, orig) -> None:
        if not isinstance(data, ABCSeries):
            raise TypeError(
                f"cannot convert an object of type {type(data)} to a datetimelike index"
            )

        self._parent = data
        # ``orig`` holds the original Series (if any) so results can be
        # re-indexed against it; may be None.
        self.orig = orig
        self.name = getattr(data, "name", None)
        self._freeze()

    def _get_values(self):
        """Build the datetimelike Index/Array this accessor delegates to."""
        data = self._parent
        if lib.is_np_dtype(data.dtype, "M"):
            return DatetimeIndex(data, copy=False, name=self.name)

        elif isinstance(data.dtype, DatetimeTZDtype):
            return DatetimeIndex(data, copy=False, name=self.name)

        elif lib.is_np_dtype(data.dtype, "m"):
            return TimedeltaIndex(data, copy=False, name=self.name)

        elif isinstance(data.dtype, PeriodDtype):
            return PeriodArray(data, copy=False)

        raise TypeError(
            f"cannot convert an object of type {type(data)} to a datetimelike index"
        )

    def _delegate_property_get(self, name: str):
        """Fetch property ``name`` from the delegate, wrapping list-likes in a Series."""
        from pandas import Series

        values = self._get_values()

        result = getattr(values, name)

        # maybe need to upcast (ints)
        if isinstance(result, np.ndarray):
            if is_integer_dtype(result):
                result = result.astype("int64")
        elif not is_list_like(result):
            # Scalar results pass through unchanged.
            return result

        result = np.asarray(result)

        if self.orig is not None:
            index = self.orig.index
        else:
            index = self._parent.index
        # return the result as a Series
        result = Series(result, index=index, name=self.name).__finalize__(self._parent)

        # setting this object will show a SettingWithCopyWarning/Error
        result._is_copy = (
            "modifications to a property of a datetimelike "
            "object are not supported and are discarded. "
            "Change values on the original."
        )

        return result

    def _delegate_property_set(self, name: str, value, *args, **kwargs):
        # Datetimelike accessor properties are read-only by design.
        raise ValueError(
            "modifications to a property of a datetimelike object are not supported. "
            "Change values on the original."
        )

    def _delegate_method(self, name: str, *args, **kwargs):
        """Call method ``name`` on the delegate, wrapping list-like results in a Series."""
        from pandas import Series

        values = self._get_values()

        method = getattr(values, name)
        result = method(*args, **kwargs)

        if not is_list_like(result):
            return result

        result = Series(result, index=self._parent.index, name=self.name).__finalize__(
            self._parent
        )

        # setting this object will show a SettingWithCopyWarning/Error
        result._is_copy = (
            "modifications to a method of a datetimelike "
            "object are not supported and are discarded. "
            "Change values on the original."
        )

        return result
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
@delegate_names(
    delegate=ArrowExtensionArray,
    accessors=TimedeltaArray._datetimelike_ops,
    typ="property",
    accessor_mapping=lambda x: f"_dt_{x}",
    raise_on_missing=False,
)
@delegate_names(
    delegate=ArrowExtensionArray,
    accessors=TimedeltaArray._datetimelike_methods,
    typ="method",
    accessor_mapping=lambda x: f"_dt_{x}",
    raise_on_missing=False,
)
@delegate_names(
    delegate=ArrowExtensionArray,
    accessors=DatetimeArray._datetimelike_ops,
    typ="property",
    accessor_mapping=lambda x: f"_dt_{x}",
    raise_on_missing=False,
)
@delegate_names(
    delegate=ArrowExtensionArray,
    accessors=DatetimeArray._datetimelike_methods,
    typ="method",
    accessor_mapping=lambda x: f"_dt_{x}",
    raise_on_missing=False,
)
class ArrowTemporalProperties(PandasDelegate, PandasObject, NoNewAttributesMixin):
    """
    ``.dt`` accessor for Series backed by ArrowDtype temporal types.

    Delegates to the ``_dt_*`` properties/methods on ArrowExtensionArray
    (wired up by the ``delegate_names`` decorators above).
    """

    def __init__(self, data: Series, orig) -> None:
        if not isinstance(data, ABCSeries):
            raise TypeError(
                f"cannot convert an object of type {type(data)} to a datetimelike index"
            )

        self._parent = data
        # Original Series (if any) whose index should be used for results.
        self._orig = orig
        self._freeze()

    def _delegate_property_get(self, name: str):
        # Delegated accessors live as "_dt_<name>" on the array; a missing
        # attribute means the underlying pyarrow type doesn't support it.
        if not hasattr(self._parent.array, f"_dt_{name}"):
            raise NotImplementedError(
                f"dt.{name} is not supported for {self._parent.dtype}"
            )
        result = getattr(self._parent.array, f"_dt_{name}")

        if not is_list_like(result):
            return result

        if self._orig is not None:
            index = self._orig.index
        else:
            index = self._parent.index
        # return the result as a Series, which is by definition a copy
        result = type(self._parent)(
            result, index=index, name=self._parent.name
        ).__finalize__(self._parent)

        return result

    def _delegate_method(self, name: str, *args, **kwargs):
        if not hasattr(self._parent.array, f"_dt_{name}"):
            raise NotImplementedError(
                f"dt.{name} is not supported for {self._parent.dtype}"
            )

        result = getattr(self._parent.array, f"_dt_{name}")(*args, **kwargs)

        if self._orig is not None:
            index = self._orig.index
        else:
            index = self._parent.index
        # return the result as a Series, which is by definition a copy
        result = type(self._parent)(
            result, index=index, name=self._parent.name
        ).__finalize__(self._parent)

        return result

    def to_pytimedelta(self):
        # Return the values as an array of datetime.timedelta objects.
        return cast(ArrowExtensionArray, self._parent.array)._dt_to_pytimedelta()

    def to_pydatetime(self):
        # GH#20306
        warnings.warn(
            f"The behavior of {type(self).__name__}.to_pydatetime is deprecated, "
            "in a future version this will return a Series containing python "
            "datetime objects instead of an ndarray. To retain the old behavior, "
            "call `np.array` on the result",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return cast(ArrowExtensionArray, self._parent.array)._dt_to_pydatetime()

    def isocalendar(self) -> DataFrame:
        """Return a DataFrame with ISO year/week/day columns."""
        from pandas import DataFrame

        result = (
            cast(ArrowExtensionArray, self._parent.array)
            ._dt_isocalendar()
            ._pa_array.combine_chunks()
        )
        iso_calendar_df = DataFrame(
            {
                col: type(self._parent.array)(result.field(i))  # type: ignore[call-arg]
                for i, col in enumerate(["year", "week", "day"])
            }
        )
        return iso_calendar_df

    @property
    def components(self) -> DataFrame:
        """Return a DataFrame of timedelta components (days ... nanoseconds)."""
        from pandas import DataFrame

        components_df = DataFrame(
            {
                col: getattr(self._parent.array, f"_dt_{col}")
                for col in [
                    "days",
                    "hours",
                    "minutes",
                    "seconds",
                    "milliseconds",
                    "microseconds",
                    "nanoseconds",
                ]
            }
        )
        return components_df
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
@delegate_names(
|
| 283 |
+
delegate=DatetimeArray,
|
| 284 |
+
accessors=DatetimeArray._datetimelike_ops + ["unit"],
|
| 285 |
+
typ="property",
|
| 286 |
+
)
|
| 287 |
+
@delegate_names(
|
| 288 |
+
delegate=DatetimeArray,
|
| 289 |
+
accessors=DatetimeArray._datetimelike_methods + ["as_unit"],
|
| 290 |
+
typ="method",
|
| 291 |
+
)
|
| 292 |
+
class DatetimeProperties(Properties):
|
| 293 |
+
"""
|
| 294 |
+
Accessor object for datetimelike properties of the Series values.
|
| 295 |
+
|
| 296 |
+
Examples
|
| 297 |
+
--------
|
| 298 |
+
>>> seconds_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="s"))
|
| 299 |
+
>>> seconds_series
|
| 300 |
+
0 2000-01-01 00:00:00
|
| 301 |
+
1 2000-01-01 00:00:01
|
| 302 |
+
2 2000-01-01 00:00:02
|
| 303 |
+
dtype: datetime64[ns]
|
| 304 |
+
>>> seconds_series.dt.second
|
| 305 |
+
0 0
|
| 306 |
+
1 1
|
| 307 |
+
2 2
|
| 308 |
+
dtype: int32
|
| 309 |
+
|
| 310 |
+
>>> hours_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="h"))
|
| 311 |
+
>>> hours_series
|
| 312 |
+
0 2000-01-01 00:00:00
|
| 313 |
+
1 2000-01-01 01:00:00
|
| 314 |
+
2 2000-01-01 02:00:00
|
| 315 |
+
dtype: datetime64[ns]
|
| 316 |
+
>>> hours_series.dt.hour
|
| 317 |
+
0 0
|
| 318 |
+
1 1
|
| 319 |
+
2 2
|
| 320 |
+
dtype: int32
|
| 321 |
+
|
| 322 |
+
>>> quarters_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="QE"))
|
| 323 |
+
>>> quarters_series
|
| 324 |
+
0 2000-03-31
|
| 325 |
+
1 2000-06-30
|
| 326 |
+
2 2000-09-30
|
| 327 |
+
dtype: datetime64[ns]
|
| 328 |
+
>>> quarters_series.dt.quarter
|
| 329 |
+
0 1
|
| 330 |
+
1 2
|
| 331 |
+
2 3
|
| 332 |
+
dtype: int32
|
| 333 |
+
|
| 334 |
+
Returns a Series indexed like the original Series.
|
| 335 |
+
Raises TypeError if the Series does not contain datetimelike values.
|
| 336 |
+
"""
|
| 337 |
+
|
| 338 |
+
def to_pydatetime(self) -> np.ndarray:
|
| 339 |
+
"""
|
| 340 |
+
Return the data as an array of :class:`datetime.datetime` objects.
|
| 341 |
+
|
| 342 |
+
.. deprecated:: 2.1.0
|
| 343 |
+
|
| 344 |
+
The current behavior of dt.to_pydatetime is deprecated.
|
| 345 |
+
In a future version this will return a Series containing python
|
| 346 |
+
datetime objects instead of a ndarray.
|
| 347 |
+
|
| 348 |
+
Timezone information is retained if present.
|
| 349 |
+
|
| 350 |
+
.. warning::
|
| 351 |
+
|
| 352 |
+
Python's datetime uses microsecond resolution, which is lower than
|
| 353 |
+
pandas (nanosecond). The values are truncated.
|
| 354 |
+
|
| 355 |
+
Returns
|
| 356 |
+
-------
|
| 357 |
+
numpy.ndarray
|
| 358 |
+
Object dtype array containing native Python datetime objects.
|
| 359 |
+
|
| 360 |
+
See Also
|
| 361 |
+
--------
|
| 362 |
+
datetime.datetime : Standard library value for a datetime.
|
| 363 |
+
|
| 364 |
+
Examples
|
| 365 |
+
--------
|
| 366 |
+
>>> s = pd.Series(pd.date_range('20180310', periods=2))
|
| 367 |
+
>>> s
|
| 368 |
+
0 2018-03-10
|
| 369 |
+
1 2018-03-11
|
| 370 |
+
dtype: datetime64[ns]
|
| 371 |
+
|
| 372 |
+
>>> s.dt.to_pydatetime()
|
| 373 |
+
array([datetime.datetime(2018, 3, 10, 0, 0),
|
| 374 |
+
datetime.datetime(2018, 3, 11, 0, 0)], dtype=object)
|
| 375 |
+
|
| 376 |
+
pandas' nanosecond precision is truncated to microseconds.
|
| 377 |
+
|
| 378 |
+
>>> s = pd.Series(pd.date_range('20180310', periods=2, freq='ns'))
|
| 379 |
+
>>> s
|
| 380 |
+
0 2018-03-10 00:00:00.000000000
|
| 381 |
+
1 2018-03-10 00:00:00.000000001
|
| 382 |
+
dtype: datetime64[ns]
|
| 383 |
+
|
| 384 |
+
>>> s.dt.to_pydatetime()
|
| 385 |
+
array([datetime.datetime(2018, 3, 10, 0, 0),
|
| 386 |
+
datetime.datetime(2018, 3, 10, 0, 0)], dtype=object)
|
| 387 |
+
"""
|
| 388 |
+
# GH#20306
|
| 389 |
+
warnings.warn(
|
| 390 |
+
f"The behavior of {type(self).__name__}.to_pydatetime is deprecated, "
|
| 391 |
+
"in a future version this will return a Series containing python "
|
| 392 |
+
"datetime objects instead of an ndarray. To retain the old behavior, "
|
| 393 |
+
"call `np.array` on the result",
|
| 394 |
+
FutureWarning,
|
| 395 |
+
stacklevel=find_stack_level(),
|
| 396 |
+
)
|
| 397 |
+
return self._get_values().to_pydatetime()
|
| 398 |
+
|
| 399 |
+
@property
|
| 400 |
+
def freq(self):
|
| 401 |
+
return self._get_values().inferred_freq
|
| 402 |
+
|
| 403 |
+
def isocalendar(self) -> DataFrame:
|
| 404 |
+
"""
|
| 405 |
+
Calculate year, week, and day according to the ISO 8601 standard.
|
| 406 |
+
|
| 407 |
+
Returns
|
| 408 |
+
-------
|
| 409 |
+
DataFrame
|
| 410 |
+
With columns year, week and day.
|
| 411 |
+
|
| 412 |
+
See Also
|
| 413 |
+
--------
|
| 414 |
+
Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
|
| 415 |
+
week number, and weekday for the given Timestamp object.
|
| 416 |
+
datetime.date.isocalendar : Return a named tuple object with
|
| 417 |
+
three components: year, week and weekday.
|
| 418 |
+
|
| 419 |
+
Examples
|
| 420 |
+
--------
|
| 421 |
+
>>> ser = pd.to_datetime(pd.Series(["2010-01-01", pd.NaT]))
|
| 422 |
+
>>> ser.dt.isocalendar()
|
| 423 |
+
year week day
|
| 424 |
+
0 2009 53 5
|
| 425 |
+
1 <NA> <NA> <NA>
|
| 426 |
+
>>> ser.dt.isocalendar().week
|
| 427 |
+
0 53
|
| 428 |
+
1 <NA>
|
| 429 |
+
Name: week, dtype: UInt32
|
| 430 |
+
"""
|
| 431 |
+
return self._get_values().isocalendar().set_index(self._parent.index)
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
@delegate_names(
|
| 435 |
+
delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_ops, typ="property"
|
| 436 |
+
)
|
| 437 |
+
@delegate_names(
|
| 438 |
+
delegate=TimedeltaArray,
|
| 439 |
+
accessors=TimedeltaArray._datetimelike_methods,
|
| 440 |
+
typ="method",
|
| 441 |
+
)
|
| 442 |
+
class TimedeltaProperties(Properties):
|
| 443 |
+
"""
|
| 444 |
+
Accessor object for datetimelike properties of the Series values.
|
| 445 |
+
|
| 446 |
+
Returns a Series indexed like the original Series.
|
| 447 |
+
Raises TypeError if the Series does not contain datetimelike values.
|
| 448 |
+
|
| 449 |
+
Examples
|
| 450 |
+
--------
|
| 451 |
+
>>> seconds_series = pd.Series(
|
| 452 |
+
... pd.timedelta_range(start="1 second", periods=3, freq="s")
|
| 453 |
+
... )
|
| 454 |
+
>>> seconds_series
|
| 455 |
+
0 0 days 00:00:01
|
| 456 |
+
1 0 days 00:00:02
|
| 457 |
+
2 0 days 00:00:03
|
| 458 |
+
dtype: timedelta64[ns]
|
| 459 |
+
>>> seconds_series.dt.seconds
|
| 460 |
+
0 1
|
| 461 |
+
1 2
|
| 462 |
+
2 3
|
| 463 |
+
dtype: int32
|
| 464 |
+
"""
|
| 465 |
+
|
| 466 |
+
def to_pytimedelta(self) -> np.ndarray:
|
| 467 |
+
"""
|
| 468 |
+
Return an array of native :class:`datetime.timedelta` objects.
|
| 469 |
+
|
| 470 |
+
Python's standard `datetime` library uses a different representation
|
| 471 |
+
timedelta's. This method converts a Series of pandas Timedeltas
|
| 472 |
+
to `datetime.timedelta` format with the same length as the original
|
| 473 |
+
Series.
|
| 474 |
+
|
| 475 |
+
Returns
|
| 476 |
+
-------
|
| 477 |
+
numpy.ndarray
|
| 478 |
+
Array of 1D containing data with `datetime.timedelta` type.
|
| 479 |
+
|
| 480 |
+
See Also
|
| 481 |
+
--------
|
| 482 |
+
datetime.timedelta : A duration expressing the difference
|
| 483 |
+
between two date, time, or datetime.
|
| 484 |
+
|
| 485 |
+
Examples
|
| 486 |
+
--------
|
| 487 |
+
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="d"))
|
| 488 |
+
>>> s
|
| 489 |
+
0 0 days
|
| 490 |
+
1 1 days
|
| 491 |
+
2 2 days
|
| 492 |
+
3 3 days
|
| 493 |
+
4 4 days
|
| 494 |
+
dtype: timedelta64[ns]
|
| 495 |
+
|
| 496 |
+
>>> s.dt.to_pytimedelta()
|
| 497 |
+
array([datetime.timedelta(0), datetime.timedelta(days=1),
|
| 498 |
+
datetime.timedelta(days=2), datetime.timedelta(days=3),
|
| 499 |
+
datetime.timedelta(days=4)], dtype=object)
|
| 500 |
+
"""
|
| 501 |
+
return self._get_values().to_pytimedelta()
|
| 502 |
+
|
| 503 |
+
@property
|
| 504 |
+
def components(self):
|
| 505 |
+
"""
|
| 506 |
+
Return a Dataframe of the components of the Timedeltas.
|
| 507 |
+
|
| 508 |
+
Returns
|
| 509 |
+
-------
|
| 510 |
+
DataFrame
|
| 511 |
+
|
| 512 |
+
Examples
|
| 513 |
+
--------
|
| 514 |
+
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='s'))
|
| 515 |
+
>>> s
|
| 516 |
+
0 0 days 00:00:00
|
| 517 |
+
1 0 days 00:00:01
|
| 518 |
+
2 0 days 00:00:02
|
| 519 |
+
3 0 days 00:00:03
|
| 520 |
+
4 0 days 00:00:04
|
| 521 |
+
dtype: timedelta64[ns]
|
| 522 |
+
>>> s.dt.components
|
| 523 |
+
days hours minutes seconds milliseconds microseconds nanoseconds
|
| 524 |
+
0 0 0 0 0 0 0 0
|
| 525 |
+
1 0 0 0 1 0 0 0
|
| 526 |
+
2 0 0 0 2 0 0 0
|
| 527 |
+
3 0 0 0 3 0 0 0
|
| 528 |
+
4 0 0 0 4 0 0 0
|
| 529 |
+
"""
|
| 530 |
+
return (
|
| 531 |
+
self._get_values()
|
| 532 |
+
.components.set_index(self._parent.index)
|
| 533 |
+
.__finalize__(self._parent)
|
| 534 |
+
)
|
| 535 |
+
|
| 536 |
+
@property
|
| 537 |
+
def freq(self):
|
| 538 |
+
return self._get_values().inferred_freq
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
@delegate_names(
|
| 542 |
+
delegate=PeriodArray, accessors=PeriodArray._datetimelike_ops, typ="property"
|
| 543 |
+
)
|
| 544 |
+
@delegate_names(
|
| 545 |
+
delegate=PeriodArray, accessors=PeriodArray._datetimelike_methods, typ="method"
|
| 546 |
+
)
|
| 547 |
+
class PeriodProperties(Properties):
|
| 548 |
+
"""
|
| 549 |
+
Accessor object for datetimelike properties of the Series values.
|
| 550 |
+
|
| 551 |
+
Returns a Series indexed like the original Series.
|
| 552 |
+
Raises TypeError if the Series does not contain datetimelike values.
|
| 553 |
+
|
| 554 |
+
Examples
|
| 555 |
+
--------
|
| 556 |
+
>>> seconds_series = pd.Series(
|
| 557 |
+
... pd.period_range(
|
| 558 |
+
... start="2000-01-01 00:00:00", end="2000-01-01 00:00:03", freq="s"
|
| 559 |
+
... )
|
| 560 |
+
... )
|
| 561 |
+
>>> seconds_series
|
| 562 |
+
0 2000-01-01 00:00:00
|
| 563 |
+
1 2000-01-01 00:00:01
|
| 564 |
+
2 2000-01-01 00:00:02
|
| 565 |
+
3 2000-01-01 00:00:03
|
| 566 |
+
dtype: period[s]
|
| 567 |
+
>>> seconds_series.dt.second
|
| 568 |
+
0 0
|
| 569 |
+
1 1
|
| 570 |
+
2 2
|
| 571 |
+
3 3
|
| 572 |
+
dtype: int64
|
| 573 |
+
|
| 574 |
+
>>> hours_series = pd.Series(
|
| 575 |
+
... pd.period_range(start="2000-01-01 00:00", end="2000-01-01 03:00", freq="h")
|
| 576 |
+
... )
|
| 577 |
+
>>> hours_series
|
| 578 |
+
0 2000-01-01 00:00
|
| 579 |
+
1 2000-01-01 01:00
|
| 580 |
+
2 2000-01-01 02:00
|
| 581 |
+
3 2000-01-01 03:00
|
| 582 |
+
dtype: period[h]
|
| 583 |
+
>>> hours_series.dt.hour
|
| 584 |
+
0 0
|
| 585 |
+
1 1
|
| 586 |
+
2 2
|
| 587 |
+
3 3
|
| 588 |
+
dtype: int64
|
| 589 |
+
|
| 590 |
+
>>> quarters_series = pd.Series(
|
| 591 |
+
... pd.period_range(start="2000-01-01", end="2000-12-31", freq="Q-DEC")
|
| 592 |
+
... )
|
| 593 |
+
>>> quarters_series
|
| 594 |
+
0 2000Q1
|
| 595 |
+
1 2000Q2
|
| 596 |
+
2 2000Q3
|
| 597 |
+
3 2000Q4
|
| 598 |
+
dtype: period[Q-DEC]
|
| 599 |
+
>>> quarters_series.dt.quarter
|
| 600 |
+
0 1
|
| 601 |
+
1 2
|
| 602 |
+
2 3
|
| 603 |
+
3 4
|
| 604 |
+
dtype: int64
|
| 605 |
+
"""
|
| 606 |
+
|
| 607 |
+
|
| 608 |
+
class CombinedDatetimelikeProperties(
|
| 609 |
+
DatetimeProperties, TimedeltaProperties, PeriodProperties
|
| 610 |
+
):
|
| 611 |
+
def __new__(cls, data: Series): # pyright: ignore[reportInconsistentConstructor]
|
| 612 |
+
# CombinedDatetimelikeProperties isn't really instantiated. Instead
|
| 613 |
+
# we need to choose which parent (datetime or timedelta) is
|
| 614 |
+
# appropriate. Since we're checking the dtypes anyway, we'll just
|
| 615 |
+
# do all the validation here.
|
| 616 |
+
|
| 617 |
+
if not isinstance(data, ABCSeries):
|
| 618 |
+
raise TypeError(
|
| 619 |
+
f"cannot convert an object of type {type(data)} to a datetimelike index"
|
| 620 |
+
)
|
| 621 |
+
|
| 622 |
+
orig = data if isinstance(data.dtype, CategoricalDtype) else None
|
| 623 |
+
if orig is not None:
|
| 624 |
+
data = data._constructor(
|
| 625 |
+
orig.array,
|
| 626 |
+
name=orig.name,
|
| 627 |
+
copy=False,
|
| 628 |
+
dtype=orig._values.categories.dtype,
|
| 629 |
+
index=orig.index,
|
| 630 |
+
)
|
| 631 |
+
|
| 632 |
+
if isinstance(data.dtype, ArrowDtype) and data.dtype.kind in "Mm":
|
| 633 |
+
return ArrowTemporalProperties(data, orig)
|
| 634 |
+
if lib.is_np_dtype(data.dtype, "M"):
|
| 635 |
+
return DatetimeProperties(data, orig)
|
| 636 |
+
elif isinstance(data.dtype, DatetimeTZDtype):
|
| 637 |
+
return DatetimeProperties(data, orig)
|
| 638 |
+
elif lib.is_np_dtype(data.dtype, "m"):
|
| 639 |
+
return TimedeltaProperties(data, orig)
|
| 640 |
+
elif isinstance(data.dtype, PeriodDtype):
|
| 641 |
+
return PeriodProperties(data, orig)
|
| 642 |
+
|
| 643 |
+
raise AttributeError("Can only use .dt accessor with datetimelike values")
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/api.py
ADDED
|
@@ -0,0 +1,388 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import textwrap
|
| 4 |
+
from typing import (
|
| 5 |
+
TYPE_CHECKING,
|
| 6 |
+
cast,
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from pandas._libs import (
|
| 12 |
+
NaT,
|
| 13 |
+
lib,
|
| 14 |
+
)
|
| 15 |
+
from pandas.errors import InvalidIndexError
|
| 16 |
+
|
| 17 |
+
from pandas.core.dtypes.cast import find_common_type
|
| 18 |
+
|
| 19 |
+
from pandas.core.algorithms import safe_sort
|
| 20 |
+
from pandas.core.indexes.base import (
|
| 21 |
+
Index,
|
| 22 |
+
_new_Index,
|
| 23 |
+
ensure_index,
|
| 24 |
+
ensure_index_from_sequences,
|
| 25 |
+
get_unanimous_names,
|
| 26 |
+
)
|
| 27 |
+
from pandas.core.indexes.category import CategoricalIndex
|
| 28 |
+
from pandas.core.indexes.datetimes import DatetimeIndex
|
| 29 |
+
from pandas.core.indexes.interval import IntervalIndex
|
| 30 |
+
from pandas.core.indexes.multi import MultiIndex
|
| 31 |
+
from pandas.core.indexes.period import PeriodIndex
|
| 32 |
+
from pandas.core.indexes.range import RangeIndex
|
| 33 |
+
from pandas.core.indexes.timedeltas import TimedeltaIndex
|
| 34 |
+
|
| 35 |
+
if TYPE_CHECKING:
|
| 36 |
+
from pandas._typing import Axis
|
| 37 |
+
_sort_msg = textwrap.dedent(
|
| 38 |
+
"""\
|
| 39 |
+
Sorting because non-concatenation axis is not aligned. A future version
|
| 40 |
+
of pandas will change to not sort by default.
|
| 41 |
+
|
| 42 |
+
To accept the future behavior, pass 'sort=False'.
|
| 43 |
+
|
| 44 |
+
To retain the current behavior and silence the warning, pass 'sort=True'.
|
| 45 |
+
"""
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
__all__ = [
|
| 50 |
+
"Index",
|
| 51 |
+
"MultiIndex",
|
| 52 |
+
"CategoricalIndex",
|
| 53 |
+
"IntervalIndex",
|
| 54 |
+
"RangeIndex",
|
| 55 |
+
"InvalidIndexError",
|
| 56 |
+
"TimedeltaIndex",
|
| 57 |
+
"PeriodIndex",
|
| 58 |
+
"DatetimeIndex",
|
| 59 |
+
"_new_Index",
|
| 60 |
+
"NaT",
|
| 61 |
+
"ensure_index",
|
| 62 |
+
"ensure_index_from_sequences",
|
| 63 |
+
"get_objs_combined_axis",
|
| 64 |
+
"union_indexes",
|
| 65 |
+
"get_unanimous_names",
|
| 66 |
+
"all_indexes_same",
|
| 67 |
+
"default_index",
|
| 68 |
+
"safe_sort_index",
|
| 69 |
+
]
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def get_objs_combined_axis(
|
| 73 |
+
objs,
|
| 74 |
+
intersect: bool = False,
|
| 75 |
+
axis: Axis = 0,
|
| 76 |
+
sort: bool = True,
|
| 77 |
+
copy: bool = False,
|
| 78 |
+
) -> Index:
|
| 79 |
+
"""
|
| 80 |
+
Extract combined index: return intersection or union (depending on the
|
| 81 |
+
value of "intersect") of indexes on given axis, or None if all objects
|
| 82 |
+
lack indexes (e.g. they are numpy arrays).
|
| 83 |
+
|
| 84 |
+
Parameters
|
| 85 |
+
----------
|
| 86 |
+
objs : list
|
| 87 |
+
Series or DataFrame objects, may be mix of the two.
|
| 88 |
+
intersect : bool, default False
|
| 89 |
+
If True, calculate the intersection between indexes. Otherwise,
|
| 90 |
+
calculate the union.
|
| 91 |
+
axis : {0 or 'index', 1 or 'outer'}, default 0
|
| 92 |
+
The axis to extract indexes from.
|
| 93 |
+
sort : bool, default True
|
| 94 |
+
Whether the result index should come out sorted or not.
|
| 95 |
+
copy : bool, default False
|
| 96 |
+
If True, return a copy of the combined index.
|
| 97 |
+
|
| 98 |
+
Returns
|
| 99 |
+
-------
|
| 100 |
+
Index
|
| 101 |
+
"""
|
| 102 |
+
obs_idxes = [obj._get_axis(axis) for obj in objs]
|
| 103 |
+
return _get_combined_index(obs_idxes, intersect=intersect, sort=sort, copy=copy)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def _get_distinct_objs(objs: list[Index]) -> list[Index]:
|
| 107 |
+
"""
|
| 108 |
+
Return a list with distinct elements of "objs" (different ids).
|
| 109 |
+
Preserves order.
|
| 110 |
+
"""
|
| 111 |
+
ids: set[int] = set()
|
| 112 |
+
res = []
|
| 113 |
+
for obj in objs:
|
| 114 |
+
if id(obj) not in ids:
|
| 115 |
+
ids.add(id(obj))
|
| 116 |
+
res.append(obj)
|
| 117 |
+
return res
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def _get_combined_index(
|
| 121 |
+
indexes: list[Index],
|
| 122 |
+
intersect: bool = False,
|
| 123 |
+
sort: bool = False,
|
| 124 |
+
copy: bool = False,
|
| 125 |
+
) -> Index:
|
| 126 |
+
"""
|
| 127 |
+
Return the union or intersection of indexes.
|
| 128 |
+
|
| 129 |
+
Parameters
|
| 130 |
+
----------
|
| 131 |
+
indexes : list of Index or list objects
|
| 132 |
+
When intersect=True, do not accept list of lists.
|
| 133 |
+
intersect : bool, default False
|
| 134 |
+
If True, calculate the intersection between indexes. Otherwise,
|
| 135 |
+
calculate the union.
|
| 136 |
+
sort : bool, default False
|
| 137 |
+
Whether the result index should come out sorted or not.
|
| 138 |
+
copy : bool, default False
|
| 139 |
+
If True, return a copy of the combined index.
|
| 140 |
+
|
| 141 |
+
Returns
|
| 142 |
+
-------
|
| 143 |
+
Index
|
| 144 |
+
"""
|
| 145 |
+
# TODO: handle index names!
|
| 146 |
+
indexes = _get_distinct_objs(indexes)
|
| 147 |
+
if len(indexes) == 0:
|
| 148 |
+
index = Index([])
|
| 149 |
+
elif len(indexes) == 1:
|
| 150 |
+
index = indexes[0]
|
| 151 |
+
elif intersect:
|
| 152 |
+
index = indexes[0]
|
| 153 |
+
for other in indexes[1:]:
|
| 154 |
+
index = index.intersection(other)
|
| 155 |
+
else:
|
| 156 |
+
index = union_indexes(indexes, sort=False)
|
| 157 |
+
index = ensure_index(index)
|
| 158 |
+
|
| 159 |
+
if sort:
|
| 160 |
+
index = safe_sort_index(index)
|
| 161 |
+
# GH 29879
|
| 162 |
+
if copy:
|
| 163 |
+
index = index.copy()
|
| 164 |
+
|
| 165 |
+
return index
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def safe_sort_index(index: Index) -> Index:
|
| 169 |
+
"""
|
| 170 |
+
Returns the sorted index
|
| 171 |
+
|
| 172 |
+
We keep the dtypes and the name attributes.
|
| 173 |
+
|
| 174 |
+
Parameters
|
| 175 |
+
----------
|
| 176 |
+
index : an Index
|
| 177 |
+
|
| 178 |
+
Returns
|
| 179 |
+
-------
|
| 180 |
+
Index
|
| 181 |
+
"""
|
| 182 |
+
if index.is_monotonic_increasing:
|
| 183 |
+
return index
|
| 184 |
+
|
| 185 |
+
try:
|
| 186 |
+
array_sorted = safe_sort(index)
|
| 187 |
+
except TypeError:
|
| 188 |
+
pass
|
| 189 |
+
else:
|
| 190 |
+
if isinstance(array_sorted, Index):
|
| 191 |
+
return array_sorted
|
| 192 |
+
|
| 193 |
+
array_sorted = cast(np.ndarray, array_sorted)
|
| 194 |
+
if isinstance(index, MultiIndex):
|
| 195 |
+
index = MultiIndex.from_tuples(array_sorted, names=index.names)
|
| 196 |
+
else:
|
| 197 |
+
index = Index(array_sorted, name=index.name, dtype=index.dtype)
|
| 198 |
+
|
| 199 |
+
return index
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def union_indexes(indexes, sort: bool | None = True) -> Index:
|
| 203 |
+
"""
|
| 204 |
+
Return the union of indexes.
|
| 205 |
+
|
| 206 |
+
The behavior of sort and names is not consistent.
|
| 207 |
+
|
| 208 |
+
Parameters
|
| 209 |
+
----------
|
| 210 |
+
indexes : list of Index or list objects
|
| 211 |
+
sort : bool, default True
|
| 212 |
+
Whether the result index should come out sorted or not.
|
| 213 |
+
|
| 214 |
+
Returns
|
| 215 |
+
-------
|
| 216 |
+
Index
|
| 217 |
+
"""
|
| 218 |
+
if len(indexes) == 0:
|
| 219 |
+
raise AssertionError("Must have at least 1 Index to union")
|
| 220 |
+
if len(indexes) == 1:
|
| 221 |
+
result = indexes[0]
|
| 222 |
+
if isinstance(result, list):
|
| 223 |
+
if not sort:
|
| 224 |
+
result = Index(result)
|
| 225 |
+
else:
|
| 226 |
+
result = Index(sorted(result))
|
| 227 |
+
return result
|
| 228 |
+
|
| 229 |
+
indexes, kind = _sanitize_and_check(indexes)
|
| 230 |
+
|
| 231 |
+
def _unique_indices(inds, dtype) -> Index:
|
| 232 |
+
"""
|
| 233 |
+
Concatenate indices and remove duplicates.
|
| 234 |
+
|
| 235 |
+
Parameters
|
| 236 |
+
----------
|
| 237 |
+
inds : list of Index or list objects
|
| 238 |
+
dtype : dtype to set for the resulting Index
|
| 239 |
+
|
| 240 |
+
Returns
|
| 241 |
+
-------
|
| 242 |
+
Index
|
| 243 |
+
"""
|
| 244 |
+
if all(isinstance(ind, Index) for ind in inds):
|
| 245 |
+
inds = [ind.astype(dtype, copy=False) for ind in inds]
|
| 246 |
+
result = inds[0].unique()
|
| 247 |
+
other = inds[1].append(inds[2:])
|
| 248 |
+
diff = other[result.get_indexer_for(other) == -1]
|
| 249 |
+
if len(diff):
|
| 250 |
+
result = result.append(diff.unique())
|
| 251 |
+
if sort:
|
| 252 |
+
result = result.sort_values()
|
| 253 |
+
return result
|
| 254 |
+
|
| 255 |
+
def conv(i):
|
| 256 |
+
if isinstance(i, Index):
|
| 257 |
+
i = i.tolist()
|
| 258 |
+
return i
|
| 259 |
+
|
| 260 |
+
return Index(
|
| 261 |
+
lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort),
|
| 262 |
+
dtype=dtype,
|
| 263 |
+
)
|
| 264 |
+
|
| 265 |
+
def _find_common_index_dtype(inds):
|
| 266 |
+
"""
|
| 267 |
+
Finds a common type for the indexes to pass through to resulting index.
|
| 268 |
+
|
| 269 |
+
Parameters
|
| 270 |
+
----------
|
| 271 |
+
inds: list of Index or list objects
|
| 272 |
+
|
| 273 |
+
Returns
|
| 274 |
+
-------
|
| 275 |
+
The common type or None if no indexes were given
|
| 276 |
+
"""
|
| 277 |
+
dtypes = [idx.dtype for idx in indexes if isinstance(idx, Index)]
|
| 278 |
+
if dtypes:
|
| 279 |
+
dtype = find_common_type(dtypes)
|
| 280 |
+
else:
|
| 281 |
+
dtype = None
|
| 282 |
+
|
| 283 |
+
return dtype
|
| 284 |
+
|
| 285 |
+
if kind == "special":
|
| 286 |
+
result = indexes[0]
|
| 287 |
+
|
| 288 |
+
dtis = [x for x in indexes if isinstance(x, DatetimeIndex)]
|
| 289 |
+
dti_tzs = [x for x in dtis if x.tz is not None]
|
| 290 |
+
if len(dti_tzs) not in [0, len(dtis)]:
|
| 291 |
+
# TODO: this behavior is not tested (so may not be desired),
|
| 292 |
+
# but is kept in order to keep behavior the same when
|
| 293 |
+
# deprecating union_many
|
| 294 |
+
# test_frame_from_dict_with_mixed_indexes
|
| 295 |
+
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")
|
| 296 |
+
|
| 297 |
+
if len(dtis) == len(indexes):
|
| 298 |
+
sort = True
|
| 299 |
+
result = indexes[0]
|
| 300 |
+
|
| 301 |
+
elif len(dtis) > 1:
|
| 302 |
+
# If we have mixed timezones, our casting behavior may depend on
|
| 303 |
+
# the order of indexes, which we don't want.
|
| 304 |
+
sort = False
|
| 305 |
+
|
| 306 |
+
# TODO: what about Categorical[dt64]?
|
| 307 |
+
# test_frame_from_dict_with_mixed_indexes
|
| 308 |
+
indexes = [x.astype(object, copy=False) for x in indexes]
|
| 309 |
+
result = indexes[0]
|
| 310 |
+
|
| 311 |
+
for other in indexes[1:]:
|
| 312 |
+
result = result.union(other, sort=None if sort else False)
|
| 313 |
+
return result
|
| 314 |
+
|
| 315 |
+
elif kind == "array":
|
| 316 |
+
dtype = _find_common_index_dtype(indexes)
|
| 317 |
+
index = indexes[0]
|
| 318 |
+
if not all(index.equals(other) for other in indexes[1:]):
|
| 319 |
+
index = _unique_indices(indexes, dtype)
|
| 320 |
+
|
| 321 |
+
name = get_unanimous_names(*indexes)[0]
|
| 322 |
+
if name != index.name:
|
| 323 |
+
index = index.rename(name)
|
| 324 |
+
return index
|
| 325 |
+
else: # kind='list'
|
| 326 |
+
dtype = _find_common_index_dtype(indexes)
|
| 327 |
+
return _unique_indices(indexes, dtype)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def _sanitize_and_check(indexes):
|
| 331 |
+
"""
|
| 332 |
+
Verify the type of indexes and convert lists to Index.
|
| 333 |
+
|
| 334 |
+
Cases:
|
| 335 |
+
|
| 336 |
+
- [list, list, ...]: Return ([list, list, ...], 'list')
|
| 337 |
+
- [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
|
| 338 |
+
Lists are sorted and converted to Index.
|
| 339 |
+
- [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
|
| 340 |
+
TYPE = 'special' if at least one special type, 'array' otherwise.
|
| 341 |
+
|
| 342 |
+
Parameters
|
| 343 |
+
----------
|
| 344 |
+
indexes : list of Index or list objects
|
| 345 |
+
|
| 346 |
+
Returns
|
| 347 |
+
-------
|
| 348 |
+
sanitized_indexes : list of Index or list objects
|
| 349 |
+
type : {'list', 'array', 'special'}
|
| 350 |
+
"""
|
| 351 |
+
kinds = list({type(index) for index in indexes})
|
| 352 |
+
|
| 353 |
+
if list in kinds:
|
| 354 |
+
if len(kinds) > 1:
|
| 355 |
+
indexes = [
|
| 356 |
+
Index(list(x)) if not isinstance(x, Index) else x for x in indexes
|
| 357 |
+
]
|
| 358 |
+
kinds.remove(list)
|
| 359 |
+
else:
|
| 360 |
+
return indexes, "list"
|
| 361 |
+
|
| 362 |
+
if len(kinds) > 1 or Index not in kinds:
|
| 363 |
+
return indexes, "special"
|
| 364 |
+
else:
|
| 365 |
+
return indexes, "array"
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
def all_indexes_same(indexes) -> bool:
|
| 369 |
+
"""
|
| 370 |
+
Determine if all indexes contain the same elements.
|
| 371 |
+
|
| 372 |
+
Parameters
|
| 373 |
+
----------
|
| 374 |
+
indexes : iterable of Index objects
|
| 375 |
+
|
| 376 |
+
Returns
|
| 377 |
+
-------
|
| 378 |
+
bool
|
| 379 |
+
True if all indexes contain the same elements, False otherwise.
|
| 380 |
+
"""
|
| 381 |
+
itr = iter(indexes)
|
| 382 |
+
first = next(itr)
|
| 383 |
+
return all(first.equals(index) for index in itr)
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def default_index(n: int) -> RangeIndex:
|
| 387 |
+
rng = range(n)
|
| 388 |
+
return RangeIndex._simple_new(rng, name=None)
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/base.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/category.py
ADDED
|
@@ -0,0 +1,513 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import (
|
| 4 |
+
TYPE_CHECKING,
|
| 5 |
+
Any,
|
| 6 |
+
Literal,
|
| 7 |
+
cast,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
from pandas._libs import index as libindex
|
| 13 |
+
from pandas.util._decorators import (
|
| 14 |
+
cache_readonly,
|
| 15 |
+
doc,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
from pandas.core.dtypes.common import is_scalar
|
| 19 |
+
from pandas.core.dtypes.concat import concat_compat
|
| 20 |
+
from pandas.core.dtypes.dtypes import CategoricalDtype
|
| 21 |
+
from pandas.core.dtypes.missing import (
|
| 22 |
+
is_valid_na_for_dtype,
|
| 23 |
+
isna,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
from pandas.core.arrays.categorical import (
|
| 27 |
+
Categorical,
|
| 28 |
+
contains,
|
| 29 |
+
)
|
| 30 |
+
from pandas.core.construction import extract_array
|
| 31 |
+
from pandas.core.indexes.base import (
|
| 32 |
+
Index,
|
| 33 |
+
maybe_extract_name,
|
| 34 |
+
)
|
| 35 |
+
from pandas.core.indexes.extension import (
|
| 36 |
+
NDArrayBackedExtensionIndex,
|
| 37 |
+
inherit_names,
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
if TYPE_CHECKING:
|
| 41 |
+
from collections.abc import Hashable
|
| 42 |
+
|
| 43 |
+
from pandas._typing import (
|
| 44 |
+
Dtype,
|
| 45 |
+
DtypeObj,
|
| 46 |
+
Self,
|
| 47 |
+
npt,
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@inherit_names(
|
| 52 |
+
[
|
| 53 |
+
"argsort",
|
| 54 |
+
"tolist",
|
| 55 |
+
"codes",
|
| 56 |
+
"categories",
|
| 57 |
+
"ordered",
|
| 58 |
+
"_reverse_indexer",
|
| 59 |
+
"searchsorted",
|
| 60 |
+
"min",
|
| 61 |
+
"max",
|
| 62 |
+
],
|
| 63 |
+
Categorical,
|
| 64 |
+
)
|
| 65 |
+
@inherit_names(
|
| 66 |
+
[
|
| 67 |
+
"rename_categories",
|
| 68 |
+
"reorder_categories",
|
| 69 |
+
"add_categories",
|
| 70 |
+
"remove_categories",
|
| 71 |
+
"remove_unused_categories",
|
| 72 |
+
"set_categories",
|
| 73 |
+
"as_ordered",
|
| 74 |
+
"as_unordered",
|
| 75 |
+
],
|
| 76 |
+
Categorical,
|
| 77 |
+
wrap=True,
|
| 78 |
+
)
|
| 79 |
+
class CategoricalIndex(NDArrayBackedExtensionIndex):
|
| 80 |
+
"""
|
| 81 |
+
Index based on an underlying :class:`Categorical`.
|
| 82 |
+
|
| 83 |
+
CategoricalIndex, like Categorical, can only take on a limited,
|
| 84 |
+
and usually fixed, number of possible values (`categories`). Also,
|
| 85 |
+
like Categorical, it might have an order, but numerical operations
|
| 86 |
+
(additions, divisions, ...) are not possible.
|
| 87 |
+
|
| 88 |
+
Parameters
|
| 89 |
+
----------
|
| 90 |
+
data : array-like (1-dimensional)
|
| 91 |
+
The values of the categorical. If `categories` are given, values not in
|
| 92 |
+
`categories` will be replaced with NaN.
|
| 93 |
+
categories : index-like, optional
|
| 94 |
+
The categories for the categorical. Items need to be unique.
|
| 95 |
+
If the categories are not given here (and also not in `dtype`), they
|
| 96 |
+
will be inferred from the `data`.
|
| 97 |
+
ordered : bool, optional
|
| 98 |
+
Whether or not this categorical is treated as an ordered
|
| 99 |
+
categorical. If not given here or in `dtype`, the resulting
|
| 100 |
+
categorical will be unordered.
|
| 101 |
+
dtype : CategoricalDtype or "category", optional
|
| 102 |
+
If :class:`CategoricalDtype`, cannot be used together with
|
| 103 |
+
`categories` or `ordered`.
|
| 104 |
+
copy : bool, default False
|
| 105 |
+
Make a copy of input ndarray.
|
| 106 |
+
name : object, optional
|
| 107 |
+
Name to be stored in the index.
|
| 108 |
+
|
| 109 |
+
Attributes
|
| 110 |
+
----------
|
| 111 |
+
codes
|
| 112 |
+
categories
|
| 113 |
+
ordered
|
| 114 |
+
|
| 115 |
+
Methods
|
| 116 |
+
-------
|
| 117 |
+
rename_categories
|
| 118 |
+
reorder_categories
|
| 119 |
+
add_categories
|
| 120 |
+
remove_categories
|
| 121 |
+
remove_unused_categories
|
| 122 |
+
set_categories
|
| 123 |
+
as_ordered
|
| 124 |
+
as_unordered
|
| 125 |
+
map
|
| 126 |
+
|
| 127 |
+
Raises
|
| 128 |
+
------
|
| 129 |
+
ValueError
|
| 130 |
+
If the categories do not validate.
|
| 131 |
+
TypeError
|
| 132 |
+
If an explicit ``ordered=True`` is given but no `categories` and the
|
| 133 |
+
`values` are not sortable.
|
| 134 |
+
|
| 135 |
+
See Also
|
| 136 |
+
--------
|
| 137 |
+
Index : The base pandas Index type.
|
| 138 |
+
Categorical : A categorical array.
|
| 139 |
+
CategoricalDtype : Type for categorical data.
|
| 140 |
+
|
| 141 |
+
Notes
|
| 142 |
+
-----
|
| 143 |
+
See the `user guide
|
| 144 |
+
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`__
|
| 145 |
+
for more.
|
| 146 |
+
|
| 147 |
+
Examples
|
| 148 |
+
--------
|
| 149 |
+
>>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"])
|
| 150 |
+
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
|
| 151 |
+
categories=['a', 'b', 'c'], ordered=False, dtype='category')
|
| 152 |
+
|
| 153 |
+
``CategoricalIndex`` can also be instantiated from a ``Categorical``:
|
| 154 |
+
|
| 155 |
+
>>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"])
|
| 156 |
+
>>> pd.CategoricalIndex(c)
|
| 157 |
+
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
|
| 158 |
+
categories=['a', 'b', 'c'], ordered=False, dtype='category')
|
| 159 |
+
|
| 160 |
+
Ordered ``CategoricalIndex`` can have a min and max value.
|
| 161 |
+
|
| 162 |
+
>>> ci = pd.CategoricalIndex(
|
| 163 |
+
... ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"]
|
| 164 |
+
... )
|
| 165 |
+
>>> ci
|
| 166 |
+
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
|
| 167 |
+
categories=['c', 'b', 'a'], ordered=True, dtype='category')
|
| 168 |
+
>>> ci.min()
|
| 169 |
+
'c'
|
| 170 |
+
"""
|
| 171 |
+
|
| 172 |
+
_typ = "categoricalindex"
|
| 173 |
+
_data_cls = Categorical
|
| 174 |
+
|
| 175 |
+
@property
|
| 176 |
+
def _can_hold_strings(self):
|
| 177 |
+
return self.categories._can_hold_strings
|
| 178 |
+
|
| 179 |
+
@cache_readonly
|
| 180 |
+
def _should_fallback_to_positional(self) -> bool:
|
| 181 |
+
return self.categories._should_fallback_to_positional
|
| 182 |
+
|
| 183 |
+
codes: np.ndarray
|
| 184 |
+
categories: Index
|
| 185 |
+
ordered: bool | None
|
| 186 |
+
_data: Categorical
|
| 187 |
+
_values: Categorical
|
| 188 |
+
|
| 189 |
+
@property
|
| 190 |
+
def _engine_type(self) -> type[libindex.IndexEngine]:
|
| 191 |
+
# self.codes can have dtype int8, int16, int32 or int64, so we need
|
| 192 |
+
# to return the corresponding engine type (libindex.Int8Engine, etc.).
|
| 193 |
+
return {
|
| 194 |
+
np.int8: libindex.Int8Engine,
|
| 195 |
+
np.int16: libindex.Int16Engine,
|
| 196 |
+
np.int32: libindex.Int32Engine,
|
| 197 |
+
np.int64: libindex.Int64Engine,
|
| 198 |
+
}[self.codes.dtype.type]
|
| 199 |
+
|
| 200 |
+
# --------------------------------------------------------------------
|
| 201 |
+
# Constructors
|
| 202 |
+
|
| 203 |
+
def __new__(
|
| 204 |
+
cls,
|
| 205 |
+
data=None,
|
| 206 |
+
categories=None,
|
| 207 |
+
ordered=None,
|
| 208 |
+
dtype: Dtype | None = None,
|
| 209 |
+
copy: bool = False,
|
| 210 |
+
name: Hashable | None = None,
|
| 211 |
+
) -> Self:
|
| 212 |
+
name = maybe_extract_name(name, data, cls)
|
| 213 |
+
|
| 214 |
+
if is_scalar(data):
|
| 215 |
+
# GH#38944 include None here, which pre-2.0 subbed in []
|
| 216 |
+
cls._raise_scalar_data_error(data)
|
| 217 |
+
|
| 218 |
+
data = Categorical(
|
| 219 |
+
data, categories=categories, ordered=ordered, dtype=dtype, copy=copy
|
| 220 |
+
)
|
| 221 |
+
|
| 222 |
+
return cls._simple_new(data, name=name)
|
| 223 |
+
|
| 224 |
+
# --------------------------------------------------------------------
|
| 225 |
+
|
| 226 |
+
def _is_dtype_compat(self, other: Index) -> Categorical:
|
| 227 |
+
"""
|
| 228 |
+
*this is an internal non-public method*
|
| 229 |
+
|
| 230 |
+
provide a comparison between the dtype of self and other (coercing if
|
| 231 |
+
needed)
|
| 232 |
+
|
| 233 |
+
Parameters
|
| 234 |
+
----------
|
| 235 |
+
other : Index
|
| 236 |
+
|
| 237 |
+
Returns
|
| 238 |
+
-------
|
| 239 |
+
Categorical
|
| 240 |
+
|
| 241 |
+
Raises
|
| 242 |
+
------
|
| 243 |
+
TypeError if the dtypes are not compatible
|
| 244 |
+
"""
|
| 245 |
+
if isinstance(other.dtype, CategoricalDtype):
|
| 246 |
+
cat = extract_array(other)
|
| 247 |
+
cat = cast(Categorical, cat)
|
| 248 |
+
if not cat._categories_match_up_to_permutation(self._values):
|
| 249 |
+
raise TypeError(
|
| 250 |
+
"categories must match existing categories when appending"
|
| 251 |
+
)
|
| 252 |
+
|
| 253 |
+
elif other._is_multi:
|
| 254 |
+
# preempt raising NotImplementedError in isna call
|
| 255 |
+
raise TypeError("MultiIndex is not dtype-compatible with CategoricalIndex")
|
| 256 |
+
else:
|
| 257 |
+
values = other
|
| 258 |
+
|
| 259 |
+
cat = Categorical(other, dtype=self.dtype)
|
| 260 |
+
other = CategoricalIndex(cat)
|
| 261 |
+
if not other.isin(values).all():
|
| 262 |
+
raise TypeError(
|
| 263 |
+
"cannot append a non-category item to a CategoricalIndex"
|
| 264 |
+
)
|
| 265 |
+
cat = other._values
|
| 266 |
+
|
| 267 |
+
if not ((cat == values) | (isna(cat) & isna(values))).all():
|
| 268 |
+
# GH#37667 see test_equals_non_category
|
| 269 |
+
raise TypeError(
|
| 270 |
+
"categories must match existing categories when appending"
|
| 271 |
+
)
|
| 272 |
+
|
| 273 |
+
return cat
|
| 274 |
+
|
| 275 |
+
def equals(self, other: object) -> bool:
|
| 276 |
+
"""
|
| 277 |
+
Determine if two CategoricalIndex objects contain the same elements.
|
| 278 |
+
|
| 279 |
+
Returns
|
| 280 |
+
-------
|
| 281 |
+
bool
|
| 282 |
+
``True`` if two :class:`pandas.CategoricalIndex` objects have equal
|
| 283 |
+
elements, ``False`` otherwise.
|
| 284 |
+
|
| 285 |
+
Examples
|
| 286 |
+
--------
|
| 287 |
+
>>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'])
|
| 288 |
+
>>> ci2 = pd.CategoricalIndex(pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']))
|
| 289 |
+
>>> ci.equals(ci2)
|
| 290 |
+
True
|
| 291 |
+
|
| 292 |
+
The order of elements matters.
|
| 293 |
+
|
| 294 |
+
>>> ci3 = pd.CategoricalIndex(['c', 'b', 'a', 'a', 'b', 'c'])
|
| 295 |
+
>>> ci.equals(ci3)
|
| 296 |
+
False
|
| 297 |
+
|
| 298 |
+
The orderedness also matters.
|
| 299 |
+
|
| 300 |
+
>>> ci4 = ci.as_ordered()
|
| 301 |
+
>>> ci.equals(ci4)
|
| 302 |
+
False
|
| 303 |
+
|
| 304 |
+
The categories matter, but the order of the categories matters only when
|
| 305 |
+
``ordered=True``.
|
| 306 |
+
|
| 307 |
+
>>> ci5 = ci.set_categories(['a', 'b', 'c', 'd'])
|
| 308 |
+
>>> ci.equals(ci5)
|
| 309 |
+
False
|
| 310 |
+
|
| 311 |
+
>>> ci6 = ci.set_categories(['b', 'c', 'a'])
|
| 312 |
+
>>> ci.equals(ci6)
|
| 313 |
+
True
|
| 314 |
+
>>> ci_ordered = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
|
| 315 |
+
... ordered=True)
|
| 316 |
+
>>> ci2_ordered = ci_ordered.set_categories(['b', 'c', 'a'])
|
| 317 |
+
>>> ci_ordered.equals(ci2_ordered)
|
| 318 |
+
False
|
| 319 |
+
"""
|
| 320 |
+
if self.is_(other):
|
| 321 |
+
return True
|
| 322 |
+
|
| 323 |
+
if not isinstance(other, Index):
|
| 324 |
+
return False
|
| 325 |
+
|
| 326 |
+
try:
|
| 327 |
+
other = self._is_dtype_compat(other)
|
| 328 |
+
except (TypeError, ValueError):
|
| 329 |
+
return False
|
| 330 |
+
|
| 331 |
+
return self._data.equals(other)
|
| 332 |
+
|
| 333 |
+
# --------------------------------------------------------------------
|
| 334 |
+
# Rendering Methods
|
| 335 |
+
|
| 336 |
+
@property
|
| 337 |
+
def _formatter_func(self):
|
| 338 |
+
return self.categories._formatter_func
|
| 339 |
+
|
| 340 |
+
def _format_attrs(self):
|
| 341 |
+
"""
|
| 342 |
+
Return a list of tuples of the (attr,formatted_value)
|
| 343 |
+
"""
|
| 344 |
+
attrs: list[tuple[str, str | int | bool | None]]
|
| 345 |
+
|
| 346 |
+
attrs = [
|
| 347 |
+
(
|
| 348 |
+
"categories",
|
| 349 |
+
f"[{', '.join(self._data._repr_categories())}]",
|
| 350 |
+
),
|
| 351 |
+
("ordered", self.ordered),
|
| 352 |
+
]
|
| 353 |
+
extra = super()._format_attrs()
|
| 354 |
+
return attrs + extra
|
| 355 |
+
|
| 356 |
+
# --------------------------------------------------------------------
|
| 357 |
+
|
| 358 |
+
@property
|
| 359 |
+
def inferred_type(self) -> str:
|
| 360 |
+
return "categorical"
|
| 361 |
+
|
| 362 |
+
@doc(Index.__contains__)
|
| 363 |
+
def __contains__(self, key: Any) -> bool:
|
| 364 |
+
# if key is a NaN, check if any NaN is in self.
|
| 365 |
+
if is_valid_na_for_dtype(key, self.categories.dtype):
|
| 366 |
+
return self.hasnans
|
| 367 |
+
|
| 368 |
+
return contains(self, key, container=self._engine)
|
| 369 |
+
|
| 370 |
+
def reindex(
|
| 371 |
+
self, target, method=None, level=None, limit: int | None = None, tolerance=None
|
| 372 |
+
) -> tuple[Index, npt.NDArray[np.intp] | None]:
|
| 373 |
+
"""
|
| 374 |
+
Create index with target's values (move/add/delete values as necessary)
|
| 375 |
+
|
| 376 |
+
Returns
|
| 377 |
+
-------
|
| 378 |
+
new_index : pd.Index
|
| 379 |
+
Resulting index
|
| 380 |
+
indexer : np.ndarray[np.intp] or None
|
| 381 |
+
Indices of output values in original index
|
| 382 |
+
|
| 383 |
+
"""
|
| 384 |
+
if method is not None:
|
| 385 |
+
raise NotImplementedError(
|
| 386 |
+
"argument method is not implemented for CategoricalIndex.reindex"
|
| 387 |
+
)
|
| 388 |
+
if level is not None:
|
| 389 |
+
raise NotImplementedError(
|
| 390 |
+
"argument level is not implemented for CategoricalIndex.reindex"
|
| 391 |
+
)
|
| 392 |
+
if limit is not None:
|
| 393 |
+
raise NotImplementedError(
|
| 394 |
+
"argument limit is not implemented for CategoricalIndex.reindex"
|
| 395 |
+
)
|
| 396 |
+
return super().reindex(target)
|
| 397 |
+
|
| 398 |
+
# --------------------------------------------------------------------
|
| 399 |
+
# Indexing Methods
|
| 400 |
+
|
| 401 |
+
def _maybe_cast_indexer(self, key) -> int:
|
| 402 |
+
# GH#41933: we have to do this instead of self._data._validate_scalar
|
| 403 |
+
# because this will correctly get partial-indexing on Interval categories
|
| 404 |
+
try:
|
| 405 |
+
return self._data._unbox_scalar(key)
|
| 406 |
+
except KeyError:
|
| 407 |
+
if is_valid_na_for_dtype(key, self.categories.dtype):
|
| 408 |
+
return -1
|
| 409 |
+
raise
|
| 410 |
+
|
| 411 |
+
def _maybe_cast_listlike_indexer(self, values) -> CategoricalIndex:
|
| 412 |
+
if isinstance(values, CategoricalIndex):
|
| 413 |
+
values = values._data
|
| 414 |
+
if isinstance(values, Categorical):
|
| 415 |
+
# Indexing on codes is more efficient if categories are the same,
|
| 416 |
+
# so we can apply some optimizations based on the degree of
|
| 417 |
+
# dtype-matching.
|
| 418 |
+
cat = self._data._encode_with_my_categories(values)
|
| 419 |
+
codes = cat._codes
|
| 420 |
+
else:
|
| 421 |
+
codes = self.categories.get_indexer(values)
|
| 422 |
+
codes = codes.astype(self.codes.dtype, copy=False)
|
| 423 |
+
cat = self._data._from_backing_data(codes)
|
| 424 |
+
return type(self)._simple_new(cat)
|
| 425 |
+
|
| 426 |
+
# --------------------------------------------------------------------
|
| 427 |
+
|
| 428 |
+
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
|
| 429 |
+
return self.categories._is_comparable_dtype(dtype)
|
| 430 |
+
|
| 431 |
+
def map(self, mapper, na_action: Literal["ignore"] | None = None):
|
| 432 |
+
"""
|
| 433 |
+
Map values using input an input mapping or function.
|
| 434 |
+
|
| 435 |
+
Maps the values (their categories, not the codes) of the index to new
|
| 436 |
+
categories. If the mapping correspondence is one-to-one the result is a
|
| 437 |
+
:class:`~pandas.CategoricalIndex` which has the same order property as
|
| 438 |
+
the original, otherwise an :class:`~pandas.Index` is returned.
|
| 439 |
+
|
| 440 |
+
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
|
| 441 |
+
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
|
| 442 |
+
will be returned.
|
| 443 |
+
|
| 444 |
+
Parameters
|
| 445 |
+
----------
|
| 446 |
+
mapper : function, dict, or Series
|
| 447 |
+
Mapping correspondence.
|
| 448 |
+
|
| 449 |
+
Returns
|
| 450 |
+
-------
|
| 451 |
+
pandas.CategoricalIndex or pandas.Index
|
| 452 |
+
Mapped index.
|
| 453 |
+
|
| 454 |
+
See Also
|
| 455 |
+
--------
|
| 456 |
+
Index.map : Apply a mapping correspondence on an
|
| 457 |
+
:class:`~pandas.Index`.
|
| 458 |
+
Series.map : Apply a mapping correspondence on a
|
| 459 |
+
:class:`~pandas.Series`.
|
| 460 |
+
Series.apply : Apply more complex functions on a
|
| 461 |
+
:class:`~pandas.Series`.
|
| 462 |
+
|
| 463 |
+
Examples
|
| 464 |
+
--------
|
| 465 |
+
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
|
| 466 |
+
>>> idx
|
| 467 |
+
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
|
| 468 |
+
ordered=False, dtype='category')
|
| 469 |
+
>>> idx.map(lambda x: x.upper())
|
| 470 |
+
CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
|
| 471 |
+
ordered=False, dtype='category')
|
| 472 |
+
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
|
| 473 |
+
CategoricalIndex(['first', 'second', 'third'], categories=['first',
|
| 474 |
+
'second', 'third'], ordered=False, dtype='category')
|
| 475 |
+
|
| 476 |
+
If the mapping is one-to-one the ordering of the categories is
|
| 477 |
+
preserved:
|
| 478 |
+
|
| 479 |
+
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
|
| 480 |
+
>>> idx
|
| 481 |
+
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
|
| 482 |
+
ordered=True, dtype='category')
|
| 483 |
+
>>> idx.map({'a': 3, 'b': 2, 'c': 1})
|
| 484 |
+
CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
|
| 485 |
+
dtype='category')
|
| 486 |
+
|
| 487 |
+
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
|
| 488 |
+
|
| 489 |
+
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
|
| 490 |
+
Index(['first', 'second', 'first'], dtype='object')
|
| 491 |
+
|
| 492 |
+
If a `dict` is used, all unmapped categories are mapped to `NaN` and
|
| 493 |
+
the result is an :class:`~pandas.Index`:
|
| 494 |
+
|
| 495 |
+
>>> idx.map({'a': 'first', 'b': 'second'})
|
| 496 |
+
Index(['first', 'second', nan], dtype='object')
|
| 497 |
+
"""
|
| 498 |
+
mapped = self._values.map(mapper, na_action=na_action)
|
| 499 |
+
return Index(mapped, name=self.name)
|
| 500 |
+
|
| 501 |
+
def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
|
| 502 |
+
# if calling index is category, don't check dtype of others
|
| 503 |
+
try:
|
| 504 |
+
cat = Categorical._concat_same_type(
|
| 505 |
+
[self._is_dtype_compat(c) for c in to_concat]
|
| 506 |
+
)
|
| 507 |
+
except TypeError:
|
| 508 |
+
# not all to_concat elements are among our categories (or NA)
|
| 509 |
+
|
| 510 |
+
res = concat_compat([x._values for x in to_concat])
|
| 511 |
+
return Index(res, name=name)
|
| 512 |
+
else:
|
| 513 |
+
return type(self)._simple_new(cat, name=name)
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/datetimelike.py
ADDED
|
@@ -0,0 +1,843 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Base and utility classes for tseries type pandas objects.
|
| 3 |
+
"""
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
from abc import (
|
| 7 |
+
ABC,
|
| 8 |
+
abstractmethod,
|
| 9 |
+
)
|
| 10 |
+
from typing import (
|
| 11 |
+
TYPE_CHECKING,
|
| 12 |
+
Any,
|
| 13 |
+
Callable,
|
| 14 |
+
cast,
|
| 15 |
+
final,
|
| 16 |
+
)
|
| 17 |
+
import warnings
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
|
| 21 |
+
from pandas._config import using_copy_on_write
|
| 22 |
+
|
| 23 |
+
from pandas._libs import (
|
| 24 |
+
NaT,
|
| 25 |
+
Timedelta,
|
| 26 |
+
lib,
|
| 27 |
+
)
|
| 28 |
+
from pandas._libs.tslibs import (
|
| 29 |
+
BaseOffset,
|
| 30 |
+
Resolution,
|
| 31 |
+
Tick,
|
| 32 |
+
parsing,
|
| 33 |
+
to_offset,
|
| 34 |
+
)
|
| 35 |
+
from pandas._libs.tslibs.dtypes import freq_to_period_freqstr
|
| 36 |
+
from pandas.compat.numpy import function as nv
|
| 37 |
+
from pandas.errors import (
|
| 38 |
+
InvalidIndexError,
|
| 39 |
+
NullFrequencyError,
|
| 40 |
+
)
|
| 41 |
+
from pandas.util._decorators import (
|
| 42 |
+
Appender,
|
| 43 |
+
cache_readonly,
|
| 44 |
+
doc,
|
| 45 |
+
)
|
| 46 |
+
from pandas.util._exceptions import find_stack_level
|
| 47 |
+
|
| 48 |
+
from pandas.core.dtypes.common import (
|
| 49 |
+
is_integer,
|
| 50 |
+
is_list_like,
|
| 51 |
+
)
|
| 52 |
+
from pandas.core.dtypes.concat import concat_compat
|
| 53 |
+
from pandas.core.dtypes.dtypes import CategoricalDtype
|
| 54 |
+
|
| 55 |
+
from pandas.core.arrays import (
|
| 56 |
+
DatetimeArray,
|
| 57 |
+
ExtensionArray,
|
| 58 |
+
PeriodArray,
|
| 59 |
+
TimedeltaArray,
|
| 60 |
+
)
|
| 61 |
+
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
|
| 62 |
+
import pandas.core.common as com
|
| 63 |
+
import pandas.core.indexes.base as ibase
|
| 64 |
+
from pandas.core.indexes.base import (
|
| 65 |
+
Index,
|
| 66 |
+
_index_shared_docs,
|
| 67 |
+
)
|
| 68 |
+
from pandas.core.indexes.extension import NDArrayBackedExtensionIndex
|
| 69 |
+
from pandas.core.indexes.range import RangeIndex
|
| 70 |
+
from pandas.core.tools.timedeltas import to_timedelta
|
| 71 |
+
|
| 72 |
+
if TYPE_CHECKING:
|
| 73 |
+
from collections.abc import Sequence
|
| 74 |
+
from datetime import datetime
|
| 75 |
+
|
| 76 |
+
from pandas._typing import (
|
| 77 |
+
Axis,
|
| 78 |
+
Self,
|
| 79 |
+
npt,
|
| 80 |
+
)
|
| 81 |
+
|
| 82 |
+
from pandas import CategoricalIndex
|
| 83 |
+
|
| 84 |
+
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex, ABC):
|
| 88 |
+
"""
|
| 89 |
+
Common ops mixin to support a unified interface datetimelike Index.
|
| 90 |
+
"""
|
| 91 |
+
|
| 92 |
+
_can_hold_strings = False
|
| 93 |
+
_data: DatetimeArray | TimedeltaArray | PeriodArray
|
| 94 |
+
|
| 95 |
+
@doc(DatetimeLikeArrayMixin.mean)
|
| 96 |
+
def mean(self, *, skipna: bool = True, axis: int | None = 0):
|
| 97 |
+
return self._data.mean(skipna=skipna, axis=axis)
|
| 98 |
+
|
| 99 |
+
@property
|
| 100 |
+
def freq(self) -> BaseOffset | None:
|
| 101 |
+
return self._data.freq
|
| 102 |
+
|
| 103 |
+
@freq.setter
|
| 104 |
+
def freq(self, value) -> None:
|
| 105 |
+
# error: Property "freq" defined in "PeriodArray" is read-only [misc]
|
| 106 |
+
self._data.freq = value # type: ignore[misc]
|
| 107 |
+
|
| 108 |
+
@property
|
| 109 |
+
def asi8(self) -> npt.NDArray[np.int64]:
|
| 110 |
+
return self._data.asi8
|
| 111 |
+
|
| 112 |
+
@property
|
| 113 |
+
@doc(DatetimeLikeArrayMixin.freqstr)
|
| 114 |
+
def freqstr(self) -> str:
|
| 115 |
+
from pandas import PeriodIndex
|
| 116 |
+
|
| 117 |
+
if self._data.freqstr is not None and isinstance(
|
| 118 |
+
self._data, (PeriodArray, PeriodIndex)
|
| 119 |
+
):
|
| 120 |
+
freq = freq_to_period_freqstr(self._data.freq.n, self._data.freq.name)
|
| 121 |
+
return freq
|
| 122 |
+
else:
|
| 123 |
+
return self._data.freqstr # type: ignore[return-value]
|
| 124 |
+
|
| 125 |
+
@cache_readonly
|
| 126 |
+
@abstractmethod
|
| 127 |
+
def _resolution_obj(self) -> Resolution:
|
| 128 |
+
...
|
| 129 |
+
|
| 130 |
+
@cache_readonly
|
| 131 |
+
@doc(DatetimeLikeArrayMixin.resolution)
|
| 132 |
+
def resolution(self) -> str:
|
| 133 |
+
return self._data.resolution
|
| 134 |
+
|
| 135 |
+
# ------------------------------------------------------------------------
|
| 136 |
+
|
| 137 |
+
@cache_readonly
|
| 138 |
+
def hasnans(self) -> bool:
|
| 139 |
+
return self._data._hasna
|
| 140 |
+
|
| 141 |
+
def equals(self, other: Any) -> bool:
|
| 142 |
+
"""
|
| 143 |
+
Determines if two Index objects contain the same elements.
|
| 144 |
+
"""
|
| 145 |
+
if self.is_(other):
|
| 146 |
+
return True
|
| 147 |
+
|
| 148 |
+
if not isinstance(other, Index):
|
| 149 |
+
return False
|
| 150 |
+
elif other.dtype.kind in "iufc":
|
| 151 |
+
return False
|
| 152 |
+
elif not isinstance(other, type(self)):
|
| 153 |
+
should_try = False
|
| 154 |
+
inferable = self._data._infer_matches
|
| 155 |
+
if other.dtype == object:
|
| 156 |
+
should_try = other.inferred_type in inferable
|
| 157 |
+
elif isinstance(other.dtype, CategoricalDtype):
|
| 158 |
+
other = cast("CategoricalIndex", other)
|
| 159 |
+
should_try = other.categories.inferred_type in inferable
|
| 160 |
+
|
| 161 |
+
if should_try:
|
| 162 |
+
try:
|
| 163 |
+
other = type(self)(other)
|
| 164 |
+
except (ValueError, TypeError, OverflowError):
|
| 165 |
+
# e.g.
|
| 166 |
+
# ValueError -> cannot parse str entry, or OutOfBoundsDatetime
|
| 167 |
+
# TypeError -> trying to convert IntervalIndex to DatetimeIndex
|
| 168 |
+
# OverflowError -> Index([very_large_timedeltas])
|
| 169 |
+
return False
|
| 170 |
+
|
| 171 |
+
if self.dtype != other.dtype:
|
| 172 |
+
# have different timezone
|
| 173 |
+
return False
|
| 174 |
+
|
| 175 |
+
return np.array_equal(self.asi8, other.asi8)
|
| 176 |
+
|
| 177 |
+
@Appender(Index.__contains__.__doc__)
|
| 178 |
+
def __contains__(self, key: Any) -> bool:
|
| 179 |
+
hash(key)
|
| 180 |
+
try:
|
| 181 |
+
self.get_loc(key)
|
| 182 |
+
except (KeyError, TypeError, ValueError, InvalidIndexError):
|
| 183 |
+
return False
|
| 184 |
+
return True
|
| 185 |
+
|
| 186 |
+
def _convert_tolerance(self, tolerance, target):
|
| 187 |
+
tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
|
| 188 |
+
return super()._convert_tolerance(tolerance, target)
|
| 189 |
+
|
| 190 |
+
# --------------------------------------------------------------------
|
| 191 |
+
# Rendering Methods
|
| 192 |
+
_default_na_rep = "NaT"
|
| 193 |
+
|
| 194 |
+
def format(
|
| 195 |
+
self,
|
| 196 |
+
name: bool = False,
|
| 197 |
+
formatter: Callable | None = None,
|
| 198 |
+
na_rep: str = "NaT",
|
| 199 |
+
date_format: str | None = None,
|
| 200 |
+
) -> list[str]:
|
| 201 |
+
"""
|
| 202 |
+
Render a string representation of the Index.
|
| 203 |
+
"""
|
| 204 |
+
warnings.warn(
|
| 205 |
+
# GH#55413
|
| 206 |
+
f"{type(self).__name__}.format is deprecated and will be removed "
|
| 207 |
+
"in a future version. Convert using index.astype(str) or "
|
| 208 |
+
"index.map(formatter) instead.",
|
| 209 |
+
FutureWarning,
|
| 210 |
+
stacklevel=find_stack_level(),
|
| 211 |
+
)
|
| 212 |
+
header = []
|
| 213 |
+
if name:
|
| 214 |
+
header.append(
|
| 215 |
+
ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
|
| 216 |
+
if self.name is not None
|
| 217 |
+
else ""
|
| 218 |
+
)
|
| 219 |
+
|
| 220 |
+
if formatter is not None:
|
| 221 |
+
return header + list(self.map(formatter))
|
| 222 |
+
|
| 223 |
+
return self._format_with_header(
|
| 224 |
+
header=header, na_rep=na_rep, date_format=date_format
|
| 225 |
+
)
|
| 226 |
+
|
| 227 |
+
def _format_with_header(
|
| 228 |
+
self, *, header: list[str], na_rep: str, date_format: str | None = None
|
| 229 |
+
) -> list[str]:
|
| 230 |
+
# TODO: not reached in tests 2023-10-11
|
| 231 |
+
# matches base class except for whitespace padding and date_format
|
| 232 |
+
return header + list(
|
| 233 |
+
self._get_values_for_csv(na_rep=na_rep, date_format=date_format)
|
| 234 |
+
)
|
| 235 |
+
|
| 236 |
+
@property
|
| 237 |
+
def _formatter_func(self):
|
| 238 |
+
return self._data._formatter()
|
| 239 |
+
|
| 240 |
+
def _format_attrs(self):
|
| 241 |
+
"""
|
| 242 |
+
Return a list of tuples of the (attr,formatted_value).
|
| 243 |
+
"""
|
| 244 |
+
attrs = super()._format_attrs()
|
| 245 |
+
for attrib in self._attributes:
|
| 246 |
+
# iterating over _attributes prevents us from doing this for PeriodIndex
|
| 247 |
+
if attrib == "freq":
|
| 248 |
+
freq = self.freqstr
|
| 249 |
+
if freq is not None:
|
| 250 |
+
freq = repr(freq) # e.g. D -> 'D'
|
| 251 |
+
attrs.append(("freq", freq))
|
| 252 |
+
return attrs
|
| 253 |
+
|
| 254 |
+
@Appender(Index._summary.__doc__)
|
| 255 |
+
def _summary(self, name=None) -> str:
|
| 256 |
+
result = super()._summary(name=name)
|
| 257 |
+
if self.freq:
|
| 258 |
+
result += f"\nFreq: {self.freqstr}"
|
| 259 |
+
|
| 260 |
+
return result
|
| 261 |
+
|
| 262 |
+
# --------------------------------------------------------------------
|
| 263 |
+
# Indexing Methods
|
| 264 |
+
|
| 265 |
+
@final
|
| 266 |
+
def _can_partial_date_slice(self, reso: Resolution) -> bool:
|
| 267 |
+
# e.g. test_getitem_setitem_periodindex
|
| 268 |
+
# History of conversation GH#3452, GH#3931, GH#2369, GH#14826
|
| 269 |
+
return reso > self._resolution_obj
|
| 270 |
+
# NB: for DTI/PI, not TDI
|
| 271 |
+
|
| 272 |
+
def _parsed_string_to_bounds(self, reso: Resolution, parsed):
|
| 273 |
+
raise NotImplementedError
|
| 274 |
+
|
| 275 |
+
def _parse_with_reso(self, label: str):
|
| 276 |
+
# overridden by TimedeltaIndex
|
| 277 |
+
try:
|
| 278 |
+
if self.freq is None or hasattr(self.freq, "rule_code"):
|
| 279 |
+
freq = self.freq
|
| 280 |
+
except NotImplementedError:
|
| 281 |
+
freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
|
| 282 |
+
|
| 283 |
+
freqstr: str | None
|
| 284 |
+
if freq is not None and not isinstance(freq, str):
|
| 285 |
+
freqstr = freq.rule_code
|
| 286 |
+
else:
|
| 287 |
+
freqstr = freq
|
| 288 |
+
|
| 289 |
+
if isinstance(label, np.str_):
|
| 290 |
+
# GH#45580
|
| 291 |
+
label = str(label)
|
| 292 |
+
|
| 293 |
+
parsed, reso_str = parsing.parse_datetime_string_with_reso(label, freqstr)
|
| 294 |
+
reso = Resolution.from_attrname(reso_str)
|
| 295 |
+
return parsed, reso
|
| 296 |
+
|
| 297 |
+
def _get_string_slice(self, key: str):
|
| 298 |
+
# overridden by TimedeltaIndex
|
| 299 |
+
parsed, reso = self._parse_with_reso(key)
|
| 300 |
+
try:
|
| 301 |
+
return self._partial_date_slice(reso, parsed)
|
| 302 |
+
except KeyError as err:
|
| 303 |
+
raise KeyError(key) from err
|
| 304 |
+
|
| 305 |
+
@final
|
| 306 |
+
def _partial_date_slice(
|
| 307 |
+
self,
|
| 308 |
+
reso: Resolution,
|
| 309 |
+
parsed: datetime,
|
| 310 |
+
) -> slice | npt.NDArray[np.intp]:
|
| 311 |
+
"""
|
| 312 |
+
Parameters
|
| 313 |
+
----------
|
| 314 |
+
reso : Resolution
|
| 315 |
+
parsed : datetime
|
| 316 |
+
|
| 317 |
+
Returns
|
| 318 |
+
-------
|
| 319 |
+
slice or ndarray[intp]
|
| 320 |
+
"""
|
| 321 |
+
if not self._can_partial_date_slice(reso):
|
| 322 |
+
raise ValueError
|
| 323 |
+
|
| 324 |
+
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
|
| 325 |
+
vals = self._data._ndarray
|
| 326 |
+
unbox = self._data._unbox
|
| 327 |
+
|
| 328 |
+
if self.is_monotonic_increasing:
|
| 329 |
+
if len(self) and (
|
| 330 |
+
(t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
|
| 331 |
+
):
|
| 332 |
+
# we are out of range
|
| 333 |
+
raise KeyError
|
| 334 |
+
|
| 335 |
+
# TODO: does this depend on being monotonic _increasing_?
|
| 336 |
+
|
| 337 |
+
# a monotonic (sorted) series can be sliced
|
| 338 |
+
left = vals.searchsorted(unbox(t1), side="left")
|
| 339 |
+
right = vals.searchsorted(unbox(t2), side="right")
|
| 340 |
+
return slice(left, right)
|
| 341 |
+
|
| 342 |
+
else:
|
| 343 |
+
lhs_mask = vals >= unbox(t1)
|
| 344 |
+
rhs_mask = vals <= unbox(t2)
|
| 345 |
+
|
| 346 |
+
# try to find the dates
|
| 347 |
+
return (lhs_mask & rhs_mask).nonzero()[0]
|
| 348 |
+
|
| 349 |
+
def _maybe_cast_slice_bound(self, label, side: str):
|
| 350 |
+
"""
|
| 351 |
+
If label is a string, cast it to scalar type according to resolution.
|
| 352 |
+
|
| 353 |
+
Parameters
|
| 354 |
+
----------
|
| 355 |
+
label : object
|
| 356 |
+
side : {'left', 'right'}
|
| 357 |
+
|
| 358 |
+
Returns
|
| 359 |
+
-------
|
| 360 |
+
label : object
|
| 361 |
+
|
| 362 |
+
Notes
|
| 363 |
+
-----
|
| 364 |
+
Value of `side` parameter should be validated in caller.
|
| 365 |
+
"""
|
| 366 |
+
if isinstance(label, str):
|
| 367 |
+
try:
|
| 368 |
+
parsed, reso = self._parse_with_reso(label)
|
| 369 |
+
except ValueError as err:
|
| 370 |
+
# DTI -> parsing.DateParseError
|
| 371 |
+
# TDI -> 'unit abbreviation w/o a number'
|
| 372 |
+
# PI -> string cannot be parsed as datetime-like
|
| 373 |
+
self._raise_invalid_indexer("slice", label, err)
|
| 374 |
+
|
| 375 |
+
lower, upper = self._parsed_string_to_bounds(reso, parsed)
|
| 376 |
+
return lower if side == "left" else upper
|
| 377 |
+
elif not isinstance(label, self._data._recognized_scalars):
|
| 378 |
+
self._raise_invalid_indexer("slice", label)
|
| 379 |
+
|
| 380 |
+
return label
|
| 381 |
+
|
| 382 |
+
# --------------------------------------------------------------------
|
| 383 |
+
# Arithmetic Methods
|
| 384 |
+
|
| 385 |
+
def shift(self, periods: int = 1, freq=None) -> Self:
|
| 386 |
+
"""
|
| 387 |
+
Shift index by desired number of time frequency increments.
|
| 388 |
+
|
| 389 |
+
This method is for shifting the values of datetime-like indexes
|
| 390 |
+
by a specified time increment a given number of times.
|
| 391 |
+
|
| 392 |
+
Parameters
|
| 393 |
+
----------
|
| 394 |
+
periods : int, default 1
|
| 395 |
+
Number of periods (or increments) to shift by,
|
| 396 |
+
can be positive or negative.
|
| 397 |
+
freq : pandas.DateOffset, pandas.Timedelta or string, optional
|
| 398 |
+
Frequency increment to shift by.
|
| 399 |
+
If None, the index is shifted by its own `freq` attribute.
|
| 400 |
+
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
|
| 401 |
+
|
| 402 |
+
Returns
|
| 403 |
+
-------
|
| 404 |
+
pandas.DatetimeIndex
|
| 405 |
+
Shifted index.
|
| 406 |
+
|
| 407 |
+
See Also
|
| 408 |
+
--------
|
| 409 |
+
Index.shift : Shift values of Index.
|
| 410 |
+
PeriodIndex.shift : Shift values of PeriodIndex.
|
| 411 |
+
"""
|
| 412 |
+
raise NotImplementedError
|
| 413 |
+
|
| 414 |
+
# --------------------------------------------------------------------
|
| 415 |
+
|
| 416 |
+
@doc(Index._maybe_cast_listlike_indexer)
|
| 417 |
+
def _maybe_cast_listlike_indexer(self, keyarr):
|
| 418 |
+
try:
|
| 419 |
+
res = self._data._validate_listlike(keyarr, allow_object=True)
|
| 420 |
+
except (ValueError, TypeError):
|
| 421 |
+
if not isinstance(keyarr, ExtensionArray):
|
| 422 |
+
# e.g. we don't want to cast DTA to ndarray[object]
|
| 423 |
+
res = com.asarray_tuplesafe(keyarr)
|
| 424 |
+
# TODO: com.asarray_tuplesafe shouldn't cast e.g. DatetimeArray
|
| 425 |
+
else:
|
| 426 |
+
res = keyarr
|
| 427 |
+
return Index(res, dtype=res.dtype)
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, ABC):
|
| 431 |
+
"""
|
| 432 |
+
Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
|
| 433 |
+
but not PeriodIndex
|
| 434 |
+
"""
|
| 435 |
+
|
| 436 |
+
_data: DatetimeArray | TimedeltaArray
|
| 437 |
+
_comparables = ["name", "freq"]
|
| 438 |
+
_attributes = ["name", "freq"]
|
| 439 |
+
|
| 440 |
+
# Compat for frequency inference, see GH#23789
|
| 441 |
+
_is_monotonic_increasing = Index.is_monotonic_increasing
|
| 442 |
+
_is_monotonic_decreasing = Index.is_monotonic_decreasing
|
| 443 |
+
_is_unique = Index.is_unique
|
| 444 |
+
|
| 445 |
+
@property
|
| 446 |
+
def unit(self) -> str:
|
| 447 |
+
return self._data.unit
|
| 448 |
+
|
| 449 |
+
def as_unit(self, unit: str) -> Self:
|
| 450 |
+
"""
|
| 451 |
+
Convert to a dtype with the given unit resolution.
|
| 452 |
+
|
| 453 |
+
Parameters
|
| 454 |
+
----------
|
| 455 |
+
unit : {'s', 'ms', 'us', 'ns'}
|
| 456 |
+
|
| 457 |
+
Returns
|
| 458 |
+
-------
|
| 459 |
+
same type as self
|
| 460 |
+
|
| 461 |
+
Examples
|
| 462 |
+
--------
|
| 463 |
+
For :class:`pandas.DatetimeIndex`:
|
| 464 |
+
|
| 465 |
+
>>> idx = pd.DatetimeIndex(['2020-01-02 01:02:03.004005006'])
|
| 466 |
+
>>> idx
|
| 467 |
+
DatetimeIndex(['2020-01-02 01:02:03.004005006'],
|
| 468 |
+
dtype='datetime64[ns]', freq=None)
|
| 469 |
+
>>> idx.as_unit('s')
|
| 470 |
+
DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None)
|
| 471 |
+
|
| 472 |
+
For :class:`pandas.TimedeltaIndex`:
|
| 473 |
+
|
| 474 |
+
>>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns'])
|
| 475 |
+
>>> tdelta_idx
|
| 476 |
+
TimedeltaIndex(['1 days 00:03:00.000002042'],
|
| 477 |
+
dtype='timedelta64[ns]', freq=None)
|
| 478 |
+
>>> tdelta_idx.as_unit('s')
|
| 479 |
+
TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)
|
| 480 |
+
"""
|
| 481 |
+
arr = self._data.as_unit(unit)
|
| 482 |
+
return type(self)._simple_new(arr, name=self.name)
|
| 483 |
+
|
| 484 |
+
def _with_freq(self, freq):
|
| 485 |
+
arr = self._data._with_freq(freq)
|
| 486 |
+
return type(self)._simple_new(arr, name=self._name)
|
| 487 |
+
|
| 488 |
+
@property
|
| 489 |
+
def values(self) -> np.ndarray:
|
| 490 |
+
# NB: For Datetime64TZ this is lossy
|
| 491 |
+
data = self._data._ndarray
|
| 492 |
+
if using_copy_on_write():
|
| 493 |
+
data = data.view()
|
| 494 |
+
data.flags.writeable = False
|
| 495 |
+
return data
|
| 496 |
+
|
| 497 |
+
@doc(DatetimeIndexOpsMixin.shift)
|
| 498 |
+
def shift(self, periods: int = 1, freq=None) -> Self:
|
| 499 |
+
if freq is not None and freq != self.freq:
|
| 500 |
+
if isinstance(freq, str):
|
| 501 |
+
freq = to_offset(freq)
|
| 502 |
+
offset = periods * freq
|
| 503 |
+
return self + offset
|
| 504 |
+
|
| 505 |
+
if periods == 0 or len(self) == 0:
|
| 506 |
+
# GH#14811 empty case
|
| 507 |
+
return self.copy()
|
| 508 |
+
|
| 509 |
+
if self.freq is None:
|
| 510 |
+
raise NullFrequencyError("Cannot shift with no freq")
|
| 511 |
+
|
| 512 |
+
start = self[0] + periods * self.freq
|
| 513 |
+
end = self[-1] + periods * self.freq
|
| 514 |
+
|
| 515 |
+
# Note: in the DatetimeTZ case, _generate_range will infer the
|
| 516 |
+
# appropriate timezone from `start` and `end`, so tz does not need
|
| 517 |
+
# to be passed explicitly.
|
| 518 |
+
result = self._data._generate_range(
|
| 519 |
+
start=start, end=end, periods=None, freq=self.freq, unit=self.unit
|
| 520 |
+
)
|
| 521 |
+
return type(self)._simple_new(result, name=self.name)
|
| 522 |
+
|
| 523 |
+
@cache_readonly
|
| 524 |
+
@doc(DatetimeLikeArrayMixin.inferred_freq)
|
| 525 |
+
def inferred_freq(self) -> str | None:
|
| 526 |
+
return self._data.inferred_freq
|
| 527 |
+
|
| 528 |
+
# --------------------------------------------------------------------
|
| 529 |
+
# Set Operation Methods
|
| 530 |
+
|
| 531 |
+
@cache_readonly
|
| 532 |
+
def _as_range_index(self) -> RangeIndex:
|
| 533 |
+
# Convert our i8 representations to RangeIndex
|
| 534 |
+
# Caller is responsible for checking isinstance(self.freq, Tick)
|
| 535 |
+
freq = cast(Tick, self.freq)
|
| 536 |
+
tick = Timedelta(freq).as_unit("ns")._value
|
| 537 |
+
rng = range(self[0]._value, self[-1]._value + tick, tick)
|
| 538 |
+
return RangeIndex(rng)
|
| 539 |
+
|
| 540 |
+
def _can_range_setop(self, other) -> bool:
|
| 541 |
+
return isinstance(self.freq, Tick) and isinstance(other.freq, Tick)
|
| 542 |
+
|
| 543 |
+
def _wrap_range_setop(self, other, res_i8) -> Self:
|
| 544 |
+
new_freq = None
|
| 545 |
+
if not len(res_i8):
|
| 546 |
+
# RangeIndex defaults to step=1, which we don't want.
|
| 547 |
+
new_freq = self.freq
|
| 548 |
+
elif isinstance(res_i8, RangeIndex):
|
| 549 |
+
new_freq = to_offset(Timedelta(res_i8.step))
|
| 550 |
+
|
| 551 |
+
# TODO(GH#41493): we cannot just do
|
| 552 |
+
# type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq)
|
| 553 |
+
# because test_setops_preserve_freq fails with _validate_frequency raising.
|
| 554 |
+
# This raising is incorrect, as 'on_freq' is incorrect. This will
|
| 555 |
+
# be fixed by GH#41493
|
| 556 |
+
res_values = res_i8.values.view(self._data._ndarray.dtype)
|
| 557 |
+
result = type(self._data)._simple_new(
|
| 558 |
+
# error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
|
| 559 |
+
# incompatible type "Union[dtype[Any], ExtensionDtype]"; expected
|
| 560 |
+
# "Union[dtype[datetime64], DatetimeTZDtype]"
|
| 561 |
+
res_values,
|
| 562 |
+
dtype=self.dtype, # type: ignore[arg-type]
|
| 563 |
+
freq=new_freq, # type: ignore[arg-type]
|
| 564 |
+
)
|
| 565 |
+
return cast("Self", self._wrap_setop_result(other, result))
|
| 566 |
+
|
| 567 |
+
def _range_intersect(self, other, sort) -> Self:
|
| 568 |
+
# Dispatch to RangeIndex intersection logic.
|
| 569 |
+
left = self._as_range_index
|
| 570 |
+
right = other._as_range_index
|
| 571 |
+
res_i8 = left.intersection(right, sort=sort)
|
| 572 |
+
return self._wrap_range_setop(other, res_i8)
|
| 573 |
+
|
| 574 |
+
def _range_union(self, other, sort) -> Self:
|
| 575 |
+
# Dispatch to RangeIndex union logic.
|
| 576 |
+
left = self._as_range_index
|
| 577 |
+
right = other._as_range_index
|
| 578 |
+
res_i8 = left.union(right, sort=sort)
|
| 579 |
+
return self._wrap_range_setop(other, res_i8)
|
| 580 |
+
|
| 581 |
+
def _intersection(self, other: Index, sort: bool = False) -> Index:
|
| 582 |
+
"""
|
| 583 |
+
intersection specialized to the case with matching dtypes and both non-empty.
|
| 584 |
+
"""
|
| 585 |
+
other = cast("DatetimeTimedeltaMixin", other)
|
| 586 |
+
|
| 587 |
+
if self._can_range_setop(other):
|
| 588 |
+
return self._range_intersect(other, sort=sort)
|
| 589 |
+
|
| 590 |
+
if not self._can_fast_intersect(other):
|
| 591 |
+
result = Index._intersection(self, other, sort=sort)
|
| 592 |
+
# We need to invalidate the freq because Index._intersection
|
| 593 |
+
# uses _shallow_copy on a view of self._data, which will preserve
|
| 594 |
+
# self.freq if we're not careful.
|
| 595 |
+
# At this point we should have result.dtype == self.dtype
|
| 596 |
+
# and type(result) is type(self._data)
|
| 597 |
+
result = self._wrap_setop_result(other, result)
|
| 598 |
+
return result._with_freq(None)._with_freq("infer")
|
| 599 |
+
|
| 600 |
+
else:
|
| 601 |
+
return self._fast_intersect(other, sort)
|
| 602 |
+
|
| 603 |
+
def _fast_intersect(self, other, sort):
|
| 604 |
+
# to make our life easier, "sort" the two ranges
|
| 605 |
+
if self[0] <= other[0]:
|
| 606 |
+
left, right = self, other
|
| 607 |
+
else:
|
| 608 |
+
left, right = other, self
|
| 609 |
+
|
| 610 |
+
# after sorting, the intersection always starts with the right index
|
| 611 |
+
# and ends with the index of which the last elements is smallest
|
| 612 |
+
end = min(left[-1], right[-1])
|
| 613 |
+
start = right[0]
|
| 614 |
+
|
| 615 |
+
if end < start:
|
| 616 |
+
result = self[:0]
|
| 617 |
+
else:
|
| 618 |
+
lslice = slice(*left.slice_locs(start, end))
|
| 619 |
+
result = left._values[lslice]
|
| 620 |
+
|
| 621 |
+
return result
|
| 622 |
+
|
| 623 |
+
def _can_fast_intersect(self, other: Self) -> bool:
|
| 624 |
+
# Note: we only get here with len(self) > 0 and len(other) > 0
|
| 625 |
+
if self.freq is None:
|
| 626 |
+
return False
|
| 627 |
+
|
| 628 |
+
elif other.freq != self.freq:
|
| 629 |
+
return False
|
| 630 |
+
|
| 631 |
+
elif not self.is_monotonic_increasing:
|
| 632 |
+
# Because freq is not None, we must then be monotonic decreasing
|
| 633 |
+
return False
|
| 634 |
+
|
| 635 |
+
# this along with matching freqs ensure that we "line up",
|
| 636 |
+
# so intersection will preserve freq
|
| 637 |
+
# Note we are assuming away Ticks, as those go through _range_intersect
|
| 638 |
+
# GH#42104
|
| 639 |
+
return self.freq.n == 1
|
| 640 |
+
|
| 641 |
+
def _can_fast_union(self, other: Self) -> bool:
|
| 642 |
+
# Assumes that type(self) == type(other), as per the annotation
|
| 643 |
+
# The ability to fast_union also implies that `freq` should be
|
| 644 |
+
# retained on union.
|
| 645 |
+
freq = self.freq
|
| 646 |
+
|
| 647 |
+
if freq is None or freq != other.freq:
|
| 648 |
+
return False
|
| 649 |
+
|
| 650 |
+
if not self.is_monotonic_increasing:
|
| 651 |
+
# Because freq is not None, we must then be monotonic decreasing
|
| 652 |
+
# TODO: do union on the reversed indexes?
|
| 653 |
+
return False
|
| 654 |
+
|
| 655 |
+
if len(self) == 0 or len(other) == 0:
|
| 656 |
+
# only reached via union_many
|
| 657 |
+
return True
|
| 658 |
+
|
| 659 |
+
# to make our life easier, "sort" the two ranges
|
| 660 |
+
if self[0] <= other[0]:
|
| 661 |
+
left, right = self, other
|
| 662 |
+
else:
|
| 663 |
+
left, right = other, self
|
| 664 |
+
|
| 665 |
+
right_start = right[0]
|
| 666 |
+
left_end = left[-1]
|
| 667 |
+
|
| 668 |
+
# Only need to "adjoin", not overlap
|
| 669 |
+
return (right_start == left_end + freq) or right_start in left
|
| 670 |
+
|
| 671 |
+
def _fast_union(self, other: Self, sort=None) -> Self:
|
| 672 |
+
# Caller is responsible for ensuring self and other are non-empty
|
| 673 |
+
|
| 674 |
+
# to make our life easier, "sort" the two ranges
|
| 675 |
+
if self[0] <= other[0]:
|
| 676 |
+
left, right = self, other
|
| 677 |
+
elif sort is False:
|
| 678 |
+
# TDIs are not in the "correct" order and we don't want
|
| 679 |
+
# to sort but want to remove overlaps
|
| 680 |
+
left, right = self, other
|
| 681 |
+
left_start = left[0]
|
| 682 |
+
loc = right.searchsorted(left_start, side="left")
|
| 683 |
+
right_chunk = right._values[:loc]
|
| 684 |
+
dates = concat_compat((left._values, right_chunk))
|
| 685 |
+
result = type(self)._simple_new(dates, name=self.name)
|
| 686 |
+
return result
|
| 687 |
+
else:
|
| 688 |
+
left, right = other, self
|
| 689 |
+
|
| 690 |
+
left_end = left[-1]
|
| 691 |
+
right_end = right[-1]
|
| 692 |
+
|
| 693 |
+
# concatenate
|
| 694 |
+
if left_end < right_end:
|
| 695 |
+
loc = right.searchsorted(left_end, side="right")
|
| 696 |
+
right_chunk = right._values[loc:]
|
| 697 |
+
dates = concat_compat([left._values, right_chunk])
|
| 698 |
+
# The can_fast_union check ensures that the result.freq
|
| 699 |
+
# should match self.freq
|
| 700 |
+
assert isinstance(dates, type(self._data))
|
| 701 |
+
# error: Item "ExtensionArray" of "ExtensionArray |
|
| 702 |
+
# ndarray[Any, Any]" has no attribute "_freq"
|
| 703 |
+
assert dates._freq == self.freq # type: ignore[union-attr]
|
| 704 |
+
result = type(self)._simple_new(dates)
|
| 705 |
+
return result
|
| 706 |
+
else:
|
| 707 |
+
return left
|
| 708 |
+
|
| 709 |
+
def _union(self, other, sort):
|
| 710 |
+
# We are called by `union`, which is responsible for this validation
|
| 711 |
+
assert isinstance(other, type(self))
|
| 712 |
+
assert self.dtype == other.dtype
|
| 713 |
+
|
| 714 |
+
if self._can_range_setop(other):
|
| 715 |
+
return self._range_union(other, sort=sort)
|
| 716 |
+
|
| 717 |
+
if self._can_fast_union(other):
|
| 718 |
+
result = self._fast_union(other, sort=sort)
|
| 719 |
+
# in the case with sort=None, the _can_fast_union check ensures
|
| 720 |
+
# that result.freq == self.freq
|
| 721 |
+
return result
|
| 722 |
+
else:
|
| 723 |
+
return super()._union(other, sort)._with_freq("infer")
|
| 724 |
+
|
| 725 |
+
# --------------------------------------------------------------------
|
| 726 |
+
# Join Methods
|
| 727 |
+
|
| 728 |
+
def _get_join_freq(self, other):
|
| 729 |
+
"""
|
| 730 |
+
Get the freq to attach to the result of a join operation.
|
| 731 |
+
"""
|
| 732 |
+
freq = None
|
| 733 |
+
if self._can_fast_union(other):
|
| 734 |
+
freq = self.freq
|
| 735 |
+
return freq
|
| 736 |
+
|
| 737 |
+
def _wrap_joined_index(
|
| 738 |
+
self, joined, other, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp]
|
| 739 |
+
):
|
| 740 |
+
assert other.dtype == self.dtype, (other.dtype, self.dtype)
|
| 741 |
+
result = super()._wrap_joined_index(joined, other, lidx, ridx)
|
| 742 |
+
result._data._freq = self._get_join_freq(other)
|
| 743 |
+
return result
|
| 744 |
+
|
| 745 |
+
def _get_engine_target(self) -> np.ndarray:
|
| 746 |
+
# engine methods and libjoin methods need dt64/td64 values cast to i8
|
| 747 |
+
return self._data._ndarray.view("i8")
|
| 748 |
+
|
| 749 |
+
def _from_join_target(self, result: np.ndarray):
|
| 750 |
+
# view e.g. i8 back to M8[ns]
|
| 751 |
+
result = result.view(self._data._ndarray.dtype)
|
| 752 |
+
return self._data._from_backing_data(result)
|
| 753 |
+
|
| 754 |
+
# --------------------------------------------------------------------
|
| 755 |
+
# List-like Methods
|
| 756 |
+
|
| 757 |
+
def _get_delete_freq(self, loc: int | slice | Sequence[int]):
|
| 758 |
+
"""
|
| 759 |
+
Find the `freq` for self.delete(loc).
|
| 760 |
+
"""
|
| 761 |
+
freq = None
|
| 762 |
+
if self.freq is not None:
|
| 763 |
+
if is_integer(loc):
|
| 764 |
+
if loc in (0, -len(self), -1, len(self) - 1):
|
| 765 |
+
freq = self.freq
|
| 766 |
+
else:
|
| 767 |
+
if is_list_like(loc):
|
| 768 |
+
# error: Incompatible types in assignment (expression has
|
| 769 |
+
# type "Union[slice, ndarray]", variable has type
|
| 770 |
+
# "Union[int, slice, Sequence[int]]")
|
| 771 |
+
loc = lib.maybe_indices_to_slice( # type: ignore[assignment]
|
| 772 |
+
np.asarray(loc, dtype=np.intp), len(self)
|
| 773 |
+
)
|
| 774 |
+
if isinstance(loc, slice) and loc.step in (1, None):
|
| 775 |
+
if loc.start in (0, None) or loc.stop in (len(self), None):
|
| 776 |
+
freq = self.freq
|
| 777 |
+
return freq
|
| 778 |
+
|
| 779 |
+
def _get_insert_freq(self, loc: int, item):
|
| 780 |
+
"""
|
| 781 |
+
Find the `freq` for self.insert(loc, item).
|
| 782 |
+
"""
|
| 783 |
+
value = self._data._validate_scalar(item)
|
| 784 |
+
item = self._data._box_func(value)
|
| 785 |
+
|
| 786 |
+
freq = None
|
| 787 |
+
if self.freq is not None:
|
| 788 |
+
# freq can be preserved on edge cases
|
| 789 |
+
if self.size:
|
| 790 |
+
if item is NaT:
|
| 791 |
+
pass
|
| 792 |
+
elif loc in (0, -len(self)) and item + self.freq == self[0]:
|
| 793 |
+
freq = self.freq
|
| 794 |
+
elif (loc == len(self)) and item - self.freq == self[-1]:
|
| 795 |
+
freq = self.freq
|
| 796 |
+
else:
|
| 797 |
+
# Adding a single item to an empty index may preserve freq
|
| 798 |
+
if isinstance(self.freq, Tick):
|
| 799 |
+
# all TimedeltaIndex cases go through here; is_on_offset
|
| 800 |
+
# would raise TypeError
|
| 801 |
+
freq = self.freq
|
| 802 |
+
elif self.freq.is_on_offset(item):
|
| 803 |
+
freq = self.freq
|
| 804 |
+
return freq
|
| 805 |
+
|
| 806 |
+
@doc(NDArrayBackedExtensionIndex.delete)
|
| 807 |
+
def delete(self, loc) -> Self:
|
| 808 |
+
result = super().delete(loc)
|
| 809 |
+
result._data._freq = self._get_delete_freq(loc)
|
| 810 |
+
return result
|
| 811 |
+
|
| 812 |
+
@doc(NDArrayBackedExtensionIndex.insert)
|
| 813 |
+
def insert(self, loc: int, item):
|
| 814 |
+
result = super().insert(loc, item)
|
| 815 |
+
if isinstance(result, type(self)):
|
| 816 |
+
# i.e. parent class method did not cast
|
| 817 |
+
result._data._freq = self._get_insert_freq(loc, item)
|
| 818 |
+
return result
|
| 819 |
+
|
| 820 |
+
# --------------------------------------------------------------------
|
| 821 |
+
# NDArray-Like Methods
|
| 822 |
+
|
| 823 |
+
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
|
| 824 |
+
def take(
|
| 825 |
+
self,
|
| 826 |
+
indices,
|
| 827 |
+
axis: Axis = 0,
|
| 828 |
+
allow_fill: bool = True,
|
| 829 |
+
fill_value=None,
|
| 830 |
+
**kwargs,
|
| 831 |
+
) -> Self:
|
| 832 |
+
nv.validate_take((), kwargs)
|
| 833 |
+
indices = np.asarray(indices, dtype=np.intp)
|
| 834 |
+
|
| 835 |
+
result = NDArrayBackedExtensionIndex.take(
|
| 836 |
+
self, indices, axis, allow_fill, fill_value, **kwargs
|
| 837 |
+
)
|
| 838 |
+
|
| 839 |
+
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
|
| 840 |
+
if isinstance(maybe_slice, slice):
|
| 841 |
+
freq = self._data._get_getitem_freq(maybe_slice)
|
| 842 |
+
result._data._freq = freq
|
| 843 |
+
return result
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/datetimes.py
ADDED
|
@@ -0,0 +1,1127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import datetime as dt
|
| 4 |
+
import operator
|
| 5 |
+
from typing import TYPE_CHECKING
|
| 6 |
+
import warnings
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import pytz
|
| 10 |
+
|
| 11 |
+
from pandas._libs import (
|
| 12 |
+
NaT,
|
| 13 |
+
Period,
|
| 14 |
+
Timestamp,
|
| 15 |
+
index as libindex,
|
| 16 |
+
lib,
|
| 17 |
+
)
|
| 18 |
+
from pandas._libs.tslibs import (
|
| 19 |
+
Resolution,
|
| 20 |
+
Tick,
|
| 21 |
+
Timedelta,
|
| 22 |
+
periods_per_day,
|
| 23 |
+
timezones,
|
| 24 |
+
to_offset,
|
| 25 |
+
)
|
| 26 |
+
from pandas._libs.tslibs.offsets import prefix_mapping
|
| 27 |
+
from pandas.util._decorators import (
|
| 28 |
+
cache_readonly,
|
| 29 |
+
doc,
|
| 30 |
+
)
|
| 31 |
+
from pandas.util._exceptions import find_stack_level
|
| 32 |
+
|
| 33 |
+
from pandas.core.dtypes.common import is_scalar
|
| 34 |
+
from pandas.core.dtypes.dtypes import DatetimeTZDtype
|
| 35 |
+
from pandas.core.dtypes.generic import ABCSeries
|
| 36 |
+
from pandas.core.dtypes.missing import is_valid_na_for_dtype
|
| 37 |
+
|
| 38 |
+
from pandas.core.arrays.datetimes import (
|
| 39 |
+
DatetimeArray,
|
| 40 |
+
tz_to_dtype,
|
| 41 |
+
)
|
| 42 |
+
import pandas.core.common as com
|
| 43 |
+
from pandas.core.indexes.base import (
|
| 44 |
+
Index,
|
| 45 |
+
maybe_extract_name,
|
| 46 |
+
)
|
| 47 |
+
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
|
| 48 |
+
from pandas.core.indexes.extension import inherit_names
|
| 49 |
+
from pandas.core.tools.times import to_time
|
| 50 |
+
|
| 51 |
+
if TYPE_CHECKING:
|
| 52 |
+
from collections.abc import Hashable
|
| 53 |
+
|
| 54 |
+
from pandas._typing import (
|
| 55 |
+
Dtype,
|
| 56 |
+
DtypeObj,
|
| 57 |
+
Frequency,
|
| 58 |
+
IntervalClosedType,
|
| 59 |
+
Self,
|
| 60 |
+
TimeAmbiguous,
|
| 61 |
+
TimeNonexistent,
|
| 62 |
+
npt,
|
| 63 |
+
)
|
| 64 |
+
|
| 65 |
+
from pandas.core.api import (
|
| 66 |
+
DataFrame,
|
| 67 |
+
PeriodIndex,
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
from pandas._libs.tslibs.dtypes import OFFSET_TO_PERIOD_FREQSTR
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _new_DatetimeIndex(cls, d):
    """
    This is called upon unpickling, rather than the default which doesn't
    have arguments and breaks __new__

    Parameters
    ----------
    cls : type
        The DatetimeIndex (sub)class being reconstructed.
    d : dict
        State dict produced by ``DatetimeIndex.__reduce__`` (or an older
        pickle format containing raw data plus "tz"/"freq" entries).

    Returns
    -------
    DatetimeIndex
    """
    if "data" in d and not isinstance(d["data"], DatetimeIndex):
        # Avoid need to verify integrity by calling simple_new directly
        data = d.pop("data")
        if not isinstance(data, DatetimeArray):
            # For backward compat with older pickles, we may need to construct
            # a DatetimeArray to adapt to the newer _simple_new signature
            tz = d.pop("tz")
            freq = d.pop("freq")
            dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)
        else:
            dta = data
            for key in ["tz", "freq"]:
                # These are already stored in our DatetimeArray; if they are
                # also in the pickle and don't match, we have a problem.
                if key in d:
                    assert d[key] == getattr(dta, key)
                    d.pop(key)
        result = cls._simple_new(dta, **d)
    else:
        # Fallback: re-run the public constructor, silencing any deprecation
        # warnings it may emit for legacy keyword arguments in **d.
        with warnings.catch_warnings():
            # TODO: If we knew what was going in to **d, we might be able to
            # go through _simple_new instead
            warnings.simplefilter("ignore")
            result = cls.__new__(cls, **d)

    return result
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
# Inherit element-wise field accessors and datetimelike methods from
# DatetimeArray; tz_localize/tz_convert/strftime are excluded because the
# Index subclass overrides them below to wrap results appropriately.
@inherit_names(
    DatetimeArray._field_ops
    + [
        method
        for method in DatetimeArray._datetimelike_methods
        if method not in ("tz_localize", "tz_convert", "strftime")
    ],
    DatetimeArray,
    wrap=True,
)
@inherit_names(["is_normalized"], DatetimeArray, cache=True)
@inherit_names(
    [
        "tz",
        "tzinfo",
        "dtype",
        "to_pydatetime",
        "date",
        "time",
        "timetz",
        "std",
    ]
    + DatetimeArray._bool_ops,
    DatetimeArray,
)
class DatetimeIndex(DatetimeTimedeltaMixin):
    """
    Immutable ndarray-like of datetime64 data.

    Represented internally as int64, and which can be boxed to Timestamp objects
    that are subclasses of datetime and carry metadata.

    .. versionchanged:: 2.0.0
        The various numeric date/time attributes (:attr:`~DatetimeIndex.day`,
        :attr:`~DatetimeIndex.month`, :attr:`~DatetimeIndex.year` etc.) now have dtype
        ``int32``. Previously they had dtype ``int64``.

    Parameters
    ----------
    data : array-like (1-dimensional)
        Datetime-like data to construct index with.
    freq : str or pandas offset object, optional
        One of pandas date offset strings or corresponding objects. The string
        'infer' can be passed in order to set the frequency of the index as the
        inferred frequency upon creation.
    tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str
        Set the Timezone of the data.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.

        .. deprecated:: 2.1.0

    closed : {'left', 'right'}, optional
        Set whether to include `start` and `end` that are on the
        boundary. The default includes boundary points on either end.

        .. deprecated:: 2.1.0

    ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
        When clocks moved backward due to DST, ambiguous times may arise.
        For example in Central European Time (UTC+01), when going from 03:00
        DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC
        and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter
        dictates how ambiguous times should be handled.

        - 'infer' will attempt to infer fall dst-transition hours based on
          order
        - bool-ndarray where True signifies a DST time, False signifies a
          non-DST time (note that this flag is only applicable for ambiguous
          times)
        - 'NaT' will return NaT where there are ambiguous times
        - 'raise' will raise an AmbiguousTimeError if there are ambiguous times.
    dayfirst : bool, default False
        If True, parse dates in `data` with the day first order.
    yearfirst : bool, default False
        If True parse dates in `data` with the year first order.
    dtype : numpy.dtype or DatetimeTZDtype or str, default None
        Note that the only NumPy dtype allowed is `datetime64[ns]`.
    copy : bool, default False
        Make a copy of input ndarray.
    name : label, default None
        Name to be stored in the index.

    Attributes
    ----------
    year
    month
    day
    hour
    minute
    second
    microsecond
    nanosecond
    date
    time
    timetz
    dayofyear
    day_of_year
    dayofweek
    day_of_week
    weekday
    quarter
    tz
    freq
    freqstr
    is_month_start
    is_month_end
    is_quarter_start
    is_quarter_end
    is_year_start
    is_year_end
    is_leap_year
    inferred_freq

    Methods
    -------
    normalize
    strftime
    snap
    tz_convert
    tz_localize
    round
    floor
    ceil
    to_period
    to_pydatetime
    to_series
    to_frame
    month_name
    day_name
    mean
    std

    See Also
    --------
    Index : The base pandas Index type.
    TimedeltaIndex : Index of timedelta64 data.
    PeriodIndex : Index of Period data.
    to_datetime : Convert argument to datetime.
    date_range : Create a fixed-frequency DatetimeIndex.

    Notes
    -----
    To learn more about the frequency strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.

    Examples
    --------
    >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
    >>> idx
    DatetimeIndex(['2020-01-01 10:00:00+00:00', '2020-02-01 11:00:00+00:00'],
                  dtype='datetime64[ns, UTC]', freq=None)
    """

    # Internal type tag used by pandas' ABC/type-dispatch machinery.
    _typ = "datetimeindex"

    # Backing array class; string keys like "2020-01" may be used as labels.
    _data_cls = DatetimeArray
    _supports_partial_string_indexing = True

    @property
    def _engine_type(self) -> type[libindex.DatetimeEngine]:
        # Hash-table engine specialized for datetime64 values.
        return libindex.DatetimeEngine

    _data: DatetimeArray
    _values: DatetimeArray
    tz: dt.tzinfo | None
|
| 272 |
+
|
| 273 |
+
# --------------------------------------------------------------------
|
| 274 |
+
# methods that dispatch to DatetimeArray and wrap result
|
| 275 |
+
|
| 276 |
+
@doc(DatetimeArray.strftime)
|
| 277 |
+
def strftime(self, date_format) -> Index:
|
| 278 |
+
arr = self._data.strftime(date_format)
|
| 279 |
+
return Index(arr, name=self.name, dtype=object)
|
| 280 |
+
|
| 281 |
+
@doc(DatetimeArray.tz_convert)
|
| 282 |
+
def tz_convert(self, tz) -> Self:
|
| 283 |
+
arr = self._data.tz_convert(tz)
|
| 284 |
+
return type(self)._simple_new(arr, name=self.name, refs=self._references)
|
| 285 |
+
|
| 286 |
+
@doc(DatetimeArray.tz_localize)
|
| 287 |
+
def tz_localize(
|
| 288 |
+
self,
|
| 289 |
+
tz,
|
| 290 |
+
ambiguous: TimeAmbiguous = "raise",
|
| 291 |
+
nonexistent: TimeNonexistent = "raise",
|
| 292 |
+
) -> Self:
|
| 293 |
+
arr = self._data.tz_localize(tz, ambiguous, nonexistent)
|
| 294 |
+
return type(self)._simple_new(arr, name=self.name)
|
| 295 |
+
|
| 296 |
+
@doc(DatetimeArray.to_period)
|
| 297 |
+
def to_period(self, freq=None) -> PeriodIndex:
|
| 298 |
+
from pandas.core.indexes.api import PeriodIndex
|
| 299 |
+
|
| 300 |
+
arr = self._data.to_period(freq)
|
| 301 |
+
return PeriodIndex._simple_new(arr, name=self.name)
|
| 302 |
+
|
| 303 |
+
@doc(DatetimeArray.to_julian_date)
|
| 304 |
+
def to_julian_date(self) -> Index:
|
| 305 |
+
arr = self._data.to_julian_date()
|
| 306 |
+
return Index._simple_new(arr, name=self.name)
|
| 307 |
+
|
| 308 |
+
@doc(DatetimeArray.isocalendar)
|
| 309 |
+
def isocalendar(self) -> DataFrame:
|
| 310 |
+
df = self._data.isocalendar()
|
| 311 |
+
return df.set_index(self)
|
| 312 |
+
|
| 313 |
+
    @cache_readonly
    def _resolution_obj(self) -> Resolution:
        # Resolution of the underlying data; cached because it is derived from
        # the immutable backing array and cannot change in place.
        return self._data._resolution_obj
|
| 316 |
+
|
| 317 |
+
# --------------------------------------------------------------------
|
| 318 |
+
# Constructors
|
| 319 |
+
|
| 320 |
+
    def __new__(
        cls,
        data=None,
        freq: Frequency | lib.NoDefault = lib.no_default,
        tz=lib.no_default,
        normalize: bool | lib.NoDefault = lib.no_default,
        closed=lib.no_default,
        ambiguous: TimeAmbiguous = "raise",
        dayfirst: bool = False,
        yearfirst: bool = False,
        dtype: Dtype | None = None,
        copy: bool = False,
        name: Hashable | None = None,
    ) -> Self:
        # 'closed' and 'normalize' are deprecated construction keywords; warn
        # whenever the caller passed them explicitly (i.e. not the sentinel).
        if closed is not lib.no_default:
            # GH#52628
            warnings.warn(
                f"The 'closed' keyword in {cls.__name__} construction is "
                "deprecated and will be removed in a future version.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        if normalize is not lib.no_default:
            # GH#52628
            warnings.warn(
                f"The 'normalize' keyword in {cls.__name__} construction is "
                "deprecated and will be removed in a future version.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        # Scalars are never valid index data; raise a uniform TypeError.
        if is_scalar(data):
            cls._raise_scalar_data_error(data)

        # - Cases checked above all return/raise before reaching here - #

        name = maybe_extract_name(name, data, cls)

        if (
            isinstance(data, DatetimeArray)
            and freq is lib.no_default
            and tz is lib.no_default
            and dtype is None
        ):
            # fastpath, similar logic in TimedeltaIndex.__new__;
            # Note in this particular case we retain non-nano.
            if copy:
                data = data.copy()
            return cls._simple_new(data, name=name)

        # Slow path: parse/convert arbitrary input (strings, datetimes,
        # ndarrays, ...) into a DatetimeArray, applying tz/freq/parsing options.
        dtarr = DatetimeArray._from_sequence_not_strict(
            data,
            dtype=dtype,
            copy=copy,
            tz=tz,
            freq=freq,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            ambiguous=ambiguous,
        )
        # Share copy-on-write references with the source when no copy was made.
        refs = None
        if not copy and isinstance(data, (Index, ABCSeries)):
            refs = data._references

        subarr = cls._simple_new(dtarr, name=name, refs=refs)
        return subarr
|
| 386 |
+
|
| 387 |
+
# --------------------------------------------------------------------
|
| 388 |
+
|
| 389 |
+
@cache_readonly
|
| 390 |
+
def _is_dates_only(self) -> bool:
|
| 391 |
+
"""
|
| 392 |
+
Return a boolean if we are only dates (and don't have a timezone)
|
| 393 |
+
|
| 394 |
+
Returns
|
| 395 |
+
-------
|
| 396 |
+
bool
|
| 397 |
+
"""
|
| 398 |
+
if isinstance(self.freq, Tick):
|
| 399 |
+
delta = Timedelta(self.freq)
|
| 400 |
+
|
| 401 |
+
if delta % dt.timedelta(days=1) != dt.timedelta(days=0):
|
| 402 |
+
return False
|
| 403 |
+
|
| 404 |
+
return self._values._is_dates_only
|
| 405 |
+
|
| 406 |
+
def __reduce__(self):
|
| 407 |
+
d = {"data": self._data, "name": self.name}
|
| 408 |
+
return _new_DatetimeIndex, (type(self), d), None
|
| 409 |
+
|
| 410 |
+
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
|
| 411 |
+
"""
|
| 412 |
+
Can we compare values of the given dtype to our own?
|
| 413 |
+
"""
|
| 414 |
+
if self.tz is not None:
|
| 415 |
+
# If we have tz, we can compare to tzaware
|
| 416 |
+
return isinstance(dtype, DatetimeTZDtype)
|
| 417 |
+
# if we dont have tz, we can only compare to tznaive
|
| 418 |
+
return lib.is_np_dtype(dtype, "M")
|
| 419 |
+
|
| 420 |
+
# --------------------------------------------------------------------
|
| 421 |
+
# Rendering Methods
|
| 422 |
+
|
| 423 |
+
@cache_readonly
|
| 424 |
+
def _formatter_func(self):
|
| 425 |
+
# Note this is equivalent to the DatetimeIndexOpsMixin method but
|
| 426 |
+
# uses the maybe-cached self._is_dates_only instead of re-computing it.
|
| 427 |
+
from pandas.io.formats.format import get_format_datetime64
|
| 428 |
+
|
| 429 |
+
formatter = get_format_datetime64(is_dates_only=self._is_dates_only)
|
| 430 |
+
return lambda x: f"'{formatter(x)}'"
|
| 431 |
+
|
| 432 |
+
# --------------------------------------------------------------------
|
| 433 |
+
# Set Operation Methods
|
| 434 |
+
|
| 435 |
+
def _can_range_setop(self, other) -> bool:
|
| 436 |
+
# GH 46702: If self or other have non-UTC tzs, DST transitions prevent
|
| 437 |
+
# range representation due to no singular step
|
| 438 |
+
if (
|
| 439 |
+
self.tz is not None
|
| 440 |
+
and not timezones.is_utc(self.tz)
|
| 441 |
+
and not timezones.is_fixed_offset(self.tz)
|
| 442 |
+
):
|
| 443 |
+
return False
|
| 444 |
+
if (
|
| 445 |
+
other.tz is not None
|
| 446 |
+
and not timezones.is_utc(other.tz)
|
| 447 |
+
and not timezones.is_fixed_offset(other.tz)
|
| 448 |
+
):
|
| 449 |
+
return False
|
| 450 |
+
return super()._can_range_setop(other)
|
| 451 |
+
|
| 452 |
+
# --------------------------------------------------------------------
|
| 453 |
+
|
| 454 |
+
def _get_time_micros(self) -> npt.NDArray[np.int64]:
|
| 455 |
+
"""
|
| 456 |
+
Return the number of microseconds since midnight.
|
| 457 |
+
|
| 458 |
+
Returns
|
| 459 |
+
-------
|
| 460 |
+
ndarray[int64_t]
|
| 461 |
+
"""
|
| 462 |
+
values = self._data._local_timestamps()
|
| 463 |
+
|
| 464 |
+
ppd = periods_per_day(self._data._creso)
|
| 465 |
+
|
| 466 |
+
frac = values % ppd
|
| 467 |
+
if self.unit == "ns":
|
| 468 |
+
micros = frac // 1000
|
| 469 |
+
elif self.unit == "us":
|
| 470 |
+
micros = frac
|
| 471 |
+
elif self.unit == "ms":
|
| 472 |
+
micros = frac * 1000
|
| 473 |
+
elif self.unit == "s":
|
| 474 |
+
micros = frac * 1_000_000
|
| 475 |
+
else: # pragma: no cover
|
| 476 |
+
raise NotImplementedError(self.unit)
|
| 477 |
+
|
| 478 |
+
micros[self._isnan] = -1
|
| 479 |
+
return micros
|
| 480 |
+
|
| 481 |
+
def snap(self, freq: Frequency = "S") -> DatetimeIndex:
|
| 482 |
+
"""
|
| 483 |
+
Snap time stamps to nearest occurring frequency.
|
| 484 |
+
|
| 485 |
+
Returns
|
| 486 |
+
-------
|
| 487 |
+
DatetimeIndex
|
| 488 |
+
|
| 489 |
+
Examples
|
| 490 |
+
--------
|
| 491 |
+
>>> idx = pd.DatetimeIndex(['2023-01-01', '2023-01-02',
|
| 492 |
+
... '2023-02-01', '2023-02-02'])
|
| 493 |
+
>>> idx
|
| 494 |
+
DatetimeIndex(['2023-01-01', '2023-01-02', '2023-02-01', '2023-02-02'],
|
| 495 |
+
dtype='datetime64[ns]', freq=None)
|
| 496 |
+
>>> idx.snap('MS')
|
| 497 |
+
DatetimeIndex(['2023-01-01', '2023-01-01', '2023-02-01', '2023-02-01'],
|
| 498 |
+
dtype='datetime64[ns]', freq=None)
|
| 499 |
+
"""
|
| 500 |
+
# Superdumb, punting on any optimizing
|
| 501 |
+
freq = to_offset(freq)
|
| 502 |
+
|
| 503 |
+
dta = self._data.copy()
|
| 504 |
+
|
| 505 |
+
for i, v in enumerate(self):
|
| 506 |
+
s = v
|
| 507 |
+
if not freq.is_on_offset(s):
|
| 508 |
+
t0 = freq.rollback(s)
|
| 509 |
+
t1 = freq.rollforward(s)
|
| 510 |
+
if abs(s - t0) < abs(t1 - s):
|
| 511 |
+
s = t0
|
| 512 |
+
else:
|
| 513 |
+
s = t1
|
| 514 |
+
dta[i] = s
|
| 515 |
+
|
| 516 |
+
return DatetimeIndex._simple_new(dta, name=self.name)
|
| 517 |
+
|
| 518 |
+
# --------------------------------------------------------------------
|
| 519 |
+
# Indexing Methods
|
| 520 |
+
|
| 521 |
+
    def _parsed_string_to_bounds(self, reso: Resolution, parsed: dt.datetime):
        """
        Calculate datetime bounds for parsed time string and its resolution.

        Parameters
        ----------
        reso : Resolution
            Resolution provided by parsed string.
        parsed : datetime
            Datetime from parsed string.

        Returns
        -------
        lower, upper: pd.Timestamp
        """
        # Map the parse resolution (e.g. "month") to a Period frequency and use
        # that Period's start/end as the slice bounds.
        freq = OFFSET_TO_PERIOD_FREQSTR.get(reso.attr_abbrev, reso.attr_abbrev)
        per = Period(parsed, freq=freq)
        start, end = per.start_time, per.end_time

        # GH 24076
        # If an incoming date string contained a UTC offset, need to localize
        # the parsed date to this offset first before aligning with the index's
        # timezone
        start = start.tz_localize(parsed.tzinfo)
        end = end.tz_localize(parsed.tzinfo)

        if parsed.tzinfo is not None:
            if self.tz is None:
                raise ValueError(
                    "The index must be timezone aware when indexing "
                    "with a date string with a UTC offset"
                )
        # The flipped case with parsed.tz is None and self.tz is not None
        # is ruled out bc parsed and reso are produced by _parse_with_reso,
        # which localizes parsed.
        return start, end
|
| 557 |
+
|
| 558 |
+
def _parse_with_reso(self, label: str):
|
| 559 |
+
parsed, reso = super()._parse_with_reso(label)
|
| 560 |
+
|
| 561 |
+
parsed = Timestamp(parsed)
|
| 562 |
+
|
| 563 |
+
if self.tz is not None and parsed.tzinfo is None:
|
| 564 |
+
# we special-case timezone-naive strings and timezone-aware
|
| 565 |
+
# DatetimeIndex
|
| 566 |
+
# https://github.com/pandas-dev/pandas/pull/36148#issuecomment-687883081
|
| 567 |
+
parsed = parsed.tz_localize(self.tz)
|
| 568 |
+
|
| 569 |
+
return parsed, reso
|
| 570 |
+
|
| 571 |
+
def _disallow_mismatched_indexing(self, key) -> None:
|
| 572 |
+
"""
|
| 573 |
+
Check for mismatched-tzawareness indexing and re-raise as KeyError.
|
| 574 |
+
"""
|
| 575 |
+
# we get here with isinstance(key, self._data._recognized_scalars)
|
| 576 |
+
try:
|
| 577 |
+
# GH#36148
|
| 578 |
+
self._data._assert_tzawareness_compat(key)
|
| 579 |
+
except TypeError as err:
|
| 580 |
+
raise KeyError(key) from err
|
| 581 |
+
|
| 582 |
+
    def get_loc(self, key):
        """
        Get integer location for requested label

        Returns
        -------
        loc : int
        """
        self._check_indexing_error(key)

        # Keep the original key so the eventual KeyError reports what the
        # caller actually passed, not the normalized form.
        orig_key = key
        if is_valid_na_for_dtype(key, self.dtype):
            key = NaT

        if isinstance(key, self._data._recognized_scalars):
            # needed to localize naive datetimes
            self._disallow_mismatched_indexing(key)
            key = Timestamp(key)

        elif isinstance(key, str):
            # Try to parse the string; a parse failure means the label
            # cannot exist in this index.
            try:
                parsed, reso = self._parse_with_reso(key)
            except (ValueError, pytz.NonExistentTimeError) as err:
                raise KeyError(key) from err
            self._disallow_mismatched_indexing(parsed)

            # Partial-string indexing: e.g. "2020-01" selecting a whole month.
            if self._can_partial_date_slice(reso):
                try:
                    return self._partial_date_slice(reso, parsed)
                except KeyError as err:
                    raise KeyError(key) from err

            key = parsed

        elif isinstance(key, dt.timedelta):
            # GH#20464
            raise TypeError(
                f"Cannot index {type(self).__name__} with {type(key).__name__}"
            )

        elif isinstance(key, dt.time):
            # A bare time selects all rows at that time of day.
            return self.indexer_at_time(key)

        else:
            # unrecognized type
            raise KeyError(key)

        try:
            return Index.get_loc(self, key)
        except KeyError as err:
            raise KeyError(orig_key) from err
|
| 633 |
+
|
| 634 |
+
@doc(DatetimeTimedeltaMixin._maybe_cast_slice_bound)
|
| 635 |
+
def _maybe_cast_slice_bound(self, label, side: str):
|
| 636 |
+
# GH#42855 handle date here instead of get_slice_bound
|
| 637 |
+
if isinstance(label, dt.date) and not isinstance(label, dt.datetime):
|
| 638 |
+
# Pandas supports slicing with dates, treated as datetimes at midnight.
|
| 639 |
+
# https://github.com/pandas-dev/pandas/issues/31501
|
| 640 |
+
label = Timestamp(label).to_pydatetime()
|
| 641 |
+
|
| 642 |
+
label = super()._maybe_cast_slice_bound(label, side)
|
| 643 |
+
self._data._assert_tzawareness_compat(label)
|
| 644 |
+
return Timestamp(label)
|
| 645 |
+
|
| 646 |
+
    def slice_indexer(self, start=None, end=None, step=None):
        """
        Return indexer for specified label slice.
        Index.slice_indexer, customized to handle time slicing.

        In addition to functionality provided by Index.slice_indexer, does the
        following:

        - if both `start` and `end` are instances of `datetime.time`, it
          invokes `indexer_between_time`
        - if `start` and `end` are both either string or None perform
          value-based selection in non-monotonic cases.

        """
        # For historical reasons DatetimeIndex supports slices between two
        # instances of datetime.time as if it were applying a slice mask to
        # an array of (self.hour, self.minute, self.seconds, self.microsecond).
        if isinstance(start, dt.time) and isinstance(end, dt.time):
            if step is not None and step != 1:
                raise ValueError("Must have step size of 1 with time slices")
            return self.indexer_between_time(start, end)

        if isinstance(start, dt.time) or isinstance(end, dt.time):
            raise KeyError("Cannot mix time and non-time slice keys")

        def check_str_or_none(point) -> bool:
            # NOTE: despite the name, this returns True when `point` is
            # NEITHER a string NOR None, i.e. a non-string slice bound.
            return point is not None and not isinstance(point, str)

        # GH#33146 if start and end are combinations of str and None and Index is not
        # monotonic, we can not use Index.slice_indexer because it does not honor the
        # actual elements, is only searching for start and end
        if (
            check_str_or_none(start)
            or check_str_or_none(end)
            or self.is_monotonic_increasing
        ):
            return Index.slice_indexer(self, start, end, step)

        # Value-based selection for non-monotonic indexes with string bounds:
        # build a boolean mask of elements inside [start, end].
        mask = np.array(True)
        in_index = True
        if start is not None:
            start_casted = self._maybe_cast_slice_bound(start, "left")
            mask = start_casted <= self
            in_index &= (start_casted == self).any()

        if end is not None:
            end_casted = self._maybe_cast_slice_bound(end, "right")
            mask = (self <= end_casted) & mask
            in_index &= (end_casted == self).any()

        # Both specified bounds must actually exist in the index.
        if not in_index:
            raise KeyError(
                "Value based partial slicing on non-monotonic DatetimeIndexes "
                "with non-existing keys is not allowed.",
            )
        indexer = mask.nonzero()[0][::step]
        if len(indexer) == len(self):
            # Everything selected: return a cheap full slice instead.
            return slice(None)
        else:
            return indexer
|
| 706 |
+
|
| 707 |
+
# --------------------------------------------------------------------
|
| 708 |
+
|
| 709 |
+
    @property
    def inferred_type(self) -> str:
        """Return the string 'datetime64', the inferred type of this index."""
        # b/c datetime is represented as microseconds since the epoch, make
        # sure we can't have ambiguous indexing
        return "datetime64"
|
| 714 |
+
|
| 715 |
+
    def indexer_at_time(self, time, asof: bool = False) -> npt.NDArray[np.intp]:
        """
        Return index locations of values at particular time of day.

        Parameters
        ----------
        time : datetime.time or str
            Time passed in either as object (datetime.time) or as string in
            appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
            "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").
        asof : bool, default False
            Not supported; must be False.

        Returns
        -------
        np.ndarray[np.intp]

        See Also
        --------
        indexer_between_time : Get index locations of values between particular
            times of day.
        DataFrame.at_time : Select values at particular time of day.

        Examples
        --------
        >>> idx = pd.DatetimeIndex(["1/1/2020 10:00", "2/1/2020 11:00",
        ...                         "3/1/2020 10:00"])
        >>> idx.indexer_at_time("10:00")
        array([0, 2])
        """
        if asof:
            raise NotImplementedError("'asof' argument is not supported")

        if isinstance(time, str):
            from dateutil.parser import parse

            time = parse(time).time()

        if time.tzinfo:
            # tz-aware time: compare in that timezone's wall clock.
            if self.tz is None:
                raise ValueError("Index must be timezone aware.")
            time_micros = self.tz_convert(time.tzinfo)._get_time_micros()
        else:
            time_micros = self._get_time_micros()
        # Match on microseconds-since-midnight (NaT entries are -1, never match).
        micros = _time_to_micros(time)
        return (time_micros == micros).nonzero()[0]
|
| 759 |
+
|
| 760 |
+
    def indexer_between_time(
        self, start_time, end_time, include_start: bool = True, include_end: bool = True
    ) -> npt.NDArray[np.intp]:
        """
        Return index locations of values between particular times of day.

        Parameters
        ----------
        start_time, end_time : datetime.time, str
            Time passed either as object (datetime.time) or as string in
            appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
            "%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p").
        include_start : bool, default True
        include_end : bool, default True

        Returns
        -------
        np.ndarray[np.intp]

        See Also
        --------
        indexer_at_time : Get index locations of values at particular time of day.
        DataFrame.between_time : Select values between particular times of day.

        Examples
        --------
        >>> idx = pd.date_range("2023-01-01", periods=4, freq="h")
        >>> idx
        DatetimeIndex(['2023-01-01 00:00:00', '2023-01-01 01:00:00',
                           '2023-01-01 02:00:00', '2023-01-01 03:00:00'],
                          dtype='datetime64[ns]', freq='h')
        >>> idx.indexer_between_time("00:00", "2:00", include_end=False)
        array([0, 1])
        """
        # Normalize both bounds to datetime.time, then compare everything as
        # microseconds since midnight.
        start_time = to_time(start_time)
        end_time = to_time(end_time)
        time_micros = self._get_time_micros()
        start_micros = _time_to_micros(start_time)
        end_micros = _time_to_micros(end_time)

        # Choose <= vs < per bound depending on inclusivity flags.
        if include_start and include_end:
            lop = rop = operator.le
        elif include_start:
            lop = operator.le
            rop = operator.lt
        elif include_end:
            lop = operator.lt
            rop = operator.le
        else:
            lop = rop = operator.lt

        # If the interval wraps past midnight (start > end), a match lies on
        # either side, so combine with OR instead of AND.
        if start_time <= end_time:
            join_op = operator.and_
        else:
            join_op = operator.or_

        mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))

        return mask.nonzero()[0]
|
| 819 |
+
|
| 820 |
+
|
| 821 |
+
def date_range(
    start=None,
    end=None,
    periods=None,
    freq=None,
    tz=None,
    normalize: bool = False,
    name: Hashable | None = None,
    inclusive: IntervalClosedType = "both",
    *,
    unit: str | None = None,
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed frequency DatetimeIndex.

    Produces equally spaced time points satisfying ``start <[=] x <[=] end``,
    where the spacing is given by ``freq``.  Of the four parameters ``start``,
    ``end``, ``periods`` and ``freq``, exactly three must be specified; if
    ``freq`` is omitted the result has ``periods`` linearly spaced elements
    between ``start`` and ``end`` (closed on both sides).

    Parameters
    ----------
    start : str or datetime-like, optional
        Left bound for generating dates.
    end : str or datetime-like, optional
        Right bound for generating dates.
    periods : int, optional
        Number of periods to generate.
    freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D'
        Frequency strings can have multiples, e.g. '5h'. See
        :ref:`here <timeseries.offset_aliases>` for a list of
        frequency aliases.
    tz : str or tzinfo, optional
        Time zone name for returning localized DatetimeIndex, for example
        'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
        timezone-naive unless timezone-aware datetime-likes are passed.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    name : str, default None
        Name of the resulting DatetimeIndex.
    inclusive : {"both", "neither", "left", "right"}, default "both"
        Include boundaries; Whether to set each bound as closed or open.

        .. versionadded:: 1.4.0
    unit : str, default None
        Specify the desired resolution of the result.

        .. versionadded:: 2.0.0
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    DatetimeIndex

    See Also
    --------
    DatetimeIndex : An immutable container for datetimes.
    timedelta_range : Return a fixed frequency TimedeltaIndex.
    period_range : Return a fixed frequency PeriodIndex.
    interval_range : Return a fixed frequency IntervalIndex.

    Notes
    -----
    To learn more about the frequency strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.

    Examples
    --------
    >>> pd.date_range(start='1/1/2018', periods=5, freq='ME')
    DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
                   '2018-05-31'],
                  dtype='datetime64[ns]', freq='ME')

    >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='left')
    DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
                  dtype='datetime64[ns]', freq='D')
    """
    # Only fall back to daily frequency when the remaining three parameters
    # cannot fully determine the range on their own.
    if freq is None and com.any_none(periods, start, end):
        freq = "D"

    # The heavy lifting (parsing bounds, tz handling, boundary inclusion)
    # lives on the array class; this function is a thin Index wrapper.
    arr = DatetimeArray._generate_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        inclusive=inclusive,
        unit=unit,
        **kwargs,
    )
    return DatetimeIndex._simple_new(arr, name=name)
|
| 1020 |
+
|
| 1021 |
+
|
| 1022 |
+
def bdate_range(
    start=None,
    end=None,
    periods: int | None = None,
    freq: Frequency | dt.timedelta = "B",
    tz=None,
    normalize: bool = True,
    name: Hashable | None = None,
    weekmask=None,
    holidays=None,
    inclusive: IntervalClosedType = "both",
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed frequency DatetimeIndex with business day as the default.

    Parameters
    ----------
    start : str or datetime-like, default None
        Left bound for generating dates.
    end : str or datetime-like, default None
        Right bound for generating dates.
    periods : int, default None
        Number of periods to generate.
    freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'B'
        Frequency strings can have multiples, e.g. '5h'. The default is
        business daily ('B').
    tz : str or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    name : str, default None
        Name of the resulting DatetimeIndex.
    weekmask : str or None, default None
        Weekmask of valid business days, passed to ``numpy.busdaycalendar``,
        only used when custom frequency strings are passed. The default
        value None is equivalent to 'Mon Tue Wed Thu Fri'.
    holidays : list-like or None, default None
        Dates to exclude from the set of valid business days, passed to
        ``numpy.busdaycalendar``, only used when custom frequency strings
        are passed.
    inclusive : {"both", "neither", "left", "right"}, default "both"
        Include boundaries; Whether to set each bound as closed or open.

        .. versionadded:: 1.4.0
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    DatetimeIndex

    Notes
    -----
    Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. Specifying ``freq`` is a requirement
    for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not
    desired.

    To learn more about the frequency strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.

    Examples
    --------
    Note how the two weekend days are skipped in the result.

    >>> pd.bdate_range(start='1/1/2018', end='1/08/2018')
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-08'],
                  dtype='datetime64[ns]', freq='B')
    """
    # Unlike date_range, a frequency is mandatory here.
    if freq is None:
        raise TypeError(
            "freq must be specified for bdate_range; use date_range instead"
        )

    # "C"-prefixed strings denote custom business frequencies which accept a
    # weekmask/holidays calendar; all other frequencies must not be combined
    # with those arguments.
    if isinstance(freq, str) and freq.startswith("C"):
        try:
            weekmask = weekmask or "Mon Tue Wed Thu Fri"
            freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
        except (KeyError, TypeError) as err:
            raise ValueError(f"invalid custom frequency string: {freq}") from err
    elif holidays or weekmask:
        raise ValueError(
            "a custom frequency string is required when holidays or "
            f"weekmask are passed, got frequency {freq}"
        )

    return date_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        name=name,
        inclusive=inclusive,
        **kwargs,
    )
|
| 1123 |
+
|
| 1124 |
+
|
| 1125 |
+
def _time_to_micros(time_obj: dt.time) -> int:
|
| 1126 |
+
seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second
|
| 1127 |
+
return 1_000_000 * seconds + time_obj.microsecond
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/extension.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Shared methods for Index subclasses backed by ExtensionArray.
|
| 3 |
+
"""
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
from typing import (
|
| 7 |
+
TYPE_CHECKING,
|
| 8 |
+
Callable,
|
| 9 |
+
TypeVar,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
from pandas.util._decorators import cache_readonly
|
| 13 |
+
|
| 14 |
+
from pandas.core.dtypes.generic import ABCDataFrame
|
| 15 |
+
|
| 16 |
+
from pandas.core.indexes.base import Index
|
| 17 |
+
|
| 18 |
+
if TYPE_CHECKING:
|
| 19 |
+
import numpy as np
|
| 20 |
+
|
| 21 |
+
from pandas._typing import (
|
| 22 |
+
ArrayLike,
|
| 23 |
+
npt,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
from pandas.core.arrays import IntervalArray
|
| 27 |
+
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
|
| 28 |
+
|
| 29 |
+
_ExtensionIndexT = TypeVar("_ExtensionIndexT", bound="ExtensionIndex")
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _inherit_from_data(
    name: str, delegate: type, cache: bool = False, wrap: bool = False
):
    """
    Make an alias for a method of the underlying ExtensionArray.

    Parameters
    ----------
    name : str
        Name of an attribute the class should inherit from its EA parent.
    delegate : class
        The ExtensionArray subclass to look the attribute up on.
    cache : bool, default False
        Whether to convert wrapped properties into cache_readonly
    wrap : bool, default False
        Whether to wrap the inherited result in an Index.

    Returns
    -------
    attribute, method, property, or cache_readonly
    """
    attr = getattr(delegate, name)

    if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor":
        # getset_descriptor i.e. property defined in cython class
        if cache:
            # Re-expose the EA property as a cached, read-only property on
            # the Index; `name` is captured by the closure.
            def cached(self):
                return getattr(self._data, name)

            cached.__name__ = name
            cached.__doc__ = attr.__doc__
            method = cache_readonly(cached)

        else:
            # Plain read/write property delegating to self._data.
            def fget(self):
                result = getattr(self._data, name)
                if wrap:
                    # Re-wrap array-like results so callers get Index
                    # objects back rather than bare arrays.
                    if isinstance(result, type(self._data)):
                        return type(self)._simple_new(result, name=self.name)
                    elif isinstance(result, ABCDataFrame):
                        return result.set_index(self)
                    return Index(result, name=self.name)
                return result

            def fset(self, value) -> None:
                setattr(self._data, name, value)

            fget.__name__ = name
            fget.__doc__ = attr.__doc__

            method = property(fget, fset)

    elif not callable(attr):
        # just a normal attribute, no wrapping
        method = attr

    else:
        # Callable attribute: delegate the call to self._data, optionally
        # wrapping the result (same wrapping rules as the property case).
        # error: Incompatible redefinition (redefinition with type "Callable[[Any,
        # VarArg(Any), KwArg(Any)], Any]", original type "property")
        def method(self, *args, **kwargs):  # type: ignore[misc]
            # Index objects are immutable, so in-place operations are
            # rejected up front.
            if "inplace" in kwargs:
                raise ValueError(f"cannot use inplace with {type(self).__name__}")
            result = attr(self._data, *args, **kwargs)
            if wrap:
                if isinstance(result, type(self._data)):
                    return type(self)._simple_new(result, name=self.name)
                elif isinstance(result, ABCDataFrame):
                    return result.set_index(self)
                return Index(result, name=self.name)
            return result

        # error: "property" has no attribute "__name__"
        method.__name__ = name  # type: ignore[attr-defined]
        method.__doc__ = attr.__doc__
    return method
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def inherit_names(
    names: list[str], delegate: type, cache: bool = False, wrap: bool = False
) -> Callable[[type[_ExtensionIndexT]], type[_ExtensionIndexT]]:
    """
    Class decorator to pin attributes from an ExtensionArray to a Index subclass.

    Parameters
    ----------
    names : List[str]
        Attribute names to alias onto the decorated class.
    delegate : class
        The ExtensionArray subclass the attributes are looked up on.
    cache : bool, default False
        Whether wrapped properties become cache_readonly.
    wrap : bool, default False
        Whether to wrap the inherited result in an Index.
    """

    def wrapper(cls: type[_ExtensionIndexT]) -> type[_ExtensionIndexT]:
        # Pin one delegating alias per requested name onto the class.
        for attr_name in names:
            setattr(
                cls,
                attr_name,
                _inherit_from_data(attr_name, delegate, cache=cache, wrap=wrap),
            )
        return cls

    return wrapper
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class ExtensionIndex(Index):
    """
    Index subclass for indexes backed by ExtensionArray.
    """

    # The base class already passes through to _data:
    #  size, __len__, dtype

    _data: IntervalArray | NDArrayBackedExtensionArray

    # ---------------------------------------------------------------------

    def _validate_fill_value(self, value):
        """
        Convert value to be insertable to underlying array.

        Delegates validation/coercion to the backing ExtensionArray's
        setitem machinery.
        """
        return self._data._validate_setitem_value(value)

    @cache_readonly
    def _isnan(self) -> npt.NDArray[np.bool_]:
        # Boolean missing-value mask, cached since the Index is immutable.
        # error: Incompatible return value type (got "ExtensionArray", expected
        # "ndarray")
        return self._data.isna()  # type: ignore[return-value]
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
class NDArrayBackedExtensionIndex(ExtensionIndex):
    """
    Index subclass for indexes backed by NDArrayBackedExtensionArray.
    """

    _data: NDArrayBackedExtensionArray

    def _get_engine_target(self) -> np.ndarray:
        # The indexing engine operates directly on the raw ndarray that
        # backs the extension array.
        return self._data._ndarray

    def _from_join_target(self, result: np.ndarray) -> ArrayLike:
        # Join results come back in the backing ndarray's dtype; re-wrap
        # them in the extension array type.
        assert result.dtype == self._data._ndarray.dtype
        return self._data._from_backing_data(result)
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/frozen.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
frozen (immutable) data structures to support MultiIndexing
|
| 3 |
+
|
| 4 |
+
These are used for:
|
| 5 |
+
|
| 6 |
+
- .names (FrozenList)
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
from typing import (
|
| 12 |
+
TYPE_CHECKING,
|
| 13 |
+
NoReturn,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
from pandas.core.base import PandasObject
|
| 17 |
+
|
| 18 |
+
from pandas.io.formats.printing import pprint_thing
|
| 19 |
+
|
| 20 |
+
if TYPE_CHECKING:
|
| 21 |
+
from pandas._typing import Self
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class FrozenList(PandasObject, list):
    """
    Container that doesn't allow setting item *but*
    because it's technically hashable, will be used
    for lookups, appropriately, etc.
    """

    # Side note: This has to be of type list. Otherwise,
    #            it messes up PyTables type checks.

    def union(self, other) -> FrozenList:
        """
        Returns a FrozenList with other concatenated to the end of self.

        Parameters
        ----------
        other : array-like
            The array-like whose elements we are concatenating.

        Returns
        -------
        FrozenList
            The concatenation of self and other.
        """
        if isinstance(other, tuple):
            # list.__add__ rejects tuples; coerce first.
            other = list(other)
        return type(self)(super().__add__(other))

    def difference(self, other) -> FrozenList:
        """
        Returns a FrozenList with elements from other removed from self.

        Parameters
        ----------
        other : array-like
            The array-like whose elements we are removing self.

        Returns
        -------
        FrozenList
            The collection difference between self and other.
        """
        # Build a set once for O(1) membership tests while preserving
        # self's order (unlike set difference).
        other = set(other)
        temp = [x for x in self if x not in other]
        return type(self)(temp)

    # TODO: Consider deprecating these in favor of `union` (xref gh-15506)
    # error: Incompatible types in assignment (expression has type
    # "Callable[[FrozenList, Any], FrozenList]", base class "list" defined the
    # type as overloaded function)
    __add__ = __iadd__ = union  # type: ignore[assignment]

    def __getitem__(self, n):
        # Slices must stay frozen; scalar lookups return the element itself.
        if isinstance(n, slice):
            return type(self)(super().__getitem__(n))
        return super().__getitem__(n)

    def __radd__(self, other) -> Self:
        if isinstance(other, tuple):
            other = list(other)
        return type(self)(other + list(self))

    def __eq__(self, other: object) -> bool:
        # Compare equal to tuples with the same contents, not just lists.
        if isinstance(other, (tuple, FrozenList)):
            other = list(other)
        return super().__eq__(other)

    __req__ = __eq__

    def __mul__(self, other) -> Self:
        return type(self)(super().__mul__(other))

    __imul__ = __mul__

    def __reduce__(self):
        # Pickle as (class, (plain list,)) so unpickling re-freezes.
        return type(self), (list(self),)

    # error: Signature of "__hash__" incompatible with supertype "list"
    def __hash__(self) -> int:  # type: ignore[override]
        # Hash like the equivalent tuple; safe only because mutation is
        # disabled below.
        return hash(tuple(self))

    def _disabled(self, *args, **kwargs) -> NoReturn:
        """
        This method will not function because object is immutable.
        """
        raise TypeError(f"'{type(self).__name__}' does not support mutable operations.")

    def __str__(self) -> str:
        return pprint_thing(self, quote_strings=True, escape_chars=("\t", "\r", "\n"))

    def __repr__(self) -> str:
        return f"{type(self).__name__}({str(self)})"

    # Disable every mutating list method so instances stay hash-safe.
    __setitem__ = __setslice__ = _disabled  # type: ignore[assignment]
    __delitem__ = __delslice__ = _disabled
    pop = append = extend = _disabled
    remove = sort = insert = _disabled  # type: ignore[assignment]
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/interval.py
ADDED
|
@@ -0,0 +1,1136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" define the IntervalIndex """
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
from operator import (
|
| 5 |
+
le,
|
| 6 |
+
lt,
|
| 7 |
+
)
|
| 8 |
+
import textwrap
|
| 9 |
+
from typing import (
|
| 10 |
+
TYPE_CHECKING,
|
| 11 |
+
Any,
|
| 12 |
+
Literal,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
|
| 17 |
+
from pandas._libs import lib
|
| 18 |
+
from pandas._libs.interval import (
|
| 19 |
+
Interval,
|
| 20 |
+
IntervalMixin,
|
| 21 |
+
IntervalTree,
|
| 22 |
+
)
|
| 23 |
+
from pandas._libs.tslibs import (
|
| 24 |
+
BaseOffset,
|
| 25 |
+
Period,
|
| 26 |
+
Timedelta,
|
| 27 |
+
Timestamp,
|
| 28 |
+
to_offset,
|
| 29 |
+
)
|
| 30 |
+
from pandas.errors import InvalidIndexError
|
| 31 |
+
from pandas.util._decorators import (
|
| 32 |
+
Appender,
|
| 33 |
+
cache_readonly,
|
| 34 |
+
)
|
| 35 |
+
from pandas.util._exceptions import rewrite_exception
|
| 36 |
+
|
| 37 |
+
from pandas.core.dtypes.cast import (
|
| 38 |
+
find_common_type,
|
| 39 |
+
infer_dtype_from_scalar,
|
| 40 |
+
maybe_box_datetimelike,
|
| 41 |
+
maybe_downcast_numeric,
|
| 42 |
+
maybe_upcast_numeric_to_64bit,
|
| 43 |
+
)
|
| 44 |
+
from pandas.core.dtypes.common import (
|
| 45 |
+
ensure_platform_int,
|
| 46 |
+
is_float_dtype,
|
| 47 |
+
is_integer,
|
| 48 |
+
is_integer_dtype,
|
| 49 |
+
is_list_like,
|
| 50 |
+
is_number,
|
| 51 |
+
is_object_dtype,
|
| 52 |
+
is_scalar,
|
| 53 |
+
pandas_dtype,
|
| 54 |
+
)
|
| 55 |
+
from pandas.core.dtypes.dtypes import (
|
| 56 |
+
DatetimeTZDtype,
|
| 57 |
+
IntervalDtype,
|
| 58 |
+
)
|
| 59 |
+
from pandas.core.dtypes.missing import is_valid_na_for_dtype
|
| 60 |
+
|
| 61 |
+
from pandas.core.algorithms import unique
|
| 62 |
+
from pandas.core.arrays.datetimelike import validate_periods
|
| 63 |
+
from pandas.core.arrays.interval import (
|
| 64 |
+
IntervalArray,
|
| 65 |
+
_interval_shared_docs,
|
| 66 |
+
)
|
| 67 |
+
import pandas.core.common as com
|
| 68 |
+
from pandas.core.indexers import is_valid_positional_slice
|
| 69 |
+
import pandas.core.indexes.base as ibase
|
| 70 |
+
from pandas.core.indexes.base import (
|
| 71 |
+
Index,
|
| 72 |
+
_index_shared_docs,
|
| 73 |
+
ensure_index,
|
| 74 |
+
maybe_extract_name,
|
| 75 |
+
)
|
| 76 |
+
from pandas.core.indexes.datetimes import (
|
| 77 |
+
DatetimeIndex,
|
| 78 |
+
date_range,
|
| 79 |
+
)
|
| 80 |
+
from pandas.core.indexes.extension import (
|
| 81 |
+
ExtensionIndex,
|
| 82 |
+
inherit_names,
|
| 83 |
+
)
|
| 84 |
+
from pandas.core.indexes.multi import MultiIndex
|
| 85 |
+
from pandas.core.indexes.timedeltas import (
|
| 86 |
+
TimedeltaIndex,
|
| 87 |
+
timedelta_range,
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
if TYPE_CHECKING:
|
| 91 |
+
from collections.abc import Hashable
|
| 92 |
+
|
| 93 |
+
from pandas._typing import (
|
| 94 |
+
Dtype,
|
| 95 |
+
DtypeObj,
|
| 96 |
+
IntervalClosedType,
|
| 97 |
+
Self,
|
| 98 |
+
npt,
|
| 99 |
+
)
|
| 100 |
+
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
|
| 101 |
+
|
| 102 |
+
_index_doc_kwargs.update(
|
| 103 |
+
{
|
| 104 |
+
"klass": "IntervalIndex",
|
| 105 |
+
"qualname": "IntervalIndex",
|
| 106 |
+
"target_klass": "IntervalIndex or list of Intervals",
|
| 107 |
+
"name": textwrap.dedent(
|
| 108 |
+
"""\
|
| 109 |
+
name : object, optional
|
| 110 |
+
Name to be stored in the index.
|
| 111 |
+
"""
|
| 112 |
+
),
|
| 113 |
+
}
|
| 114 |
+
)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def _get_next_label(label):
|
| 118 |
+
# see test_slice_locs_with_ints_and_floats_succeeds
|
| 119 |
+
dtype = getattr(label, "dtype", type(label))
|
| 120 |
+
if isinstance(label, (Timestamp, Timedelta)):
|
| 121 |
+
dtype = "datetime64[ns]"
|
| 122 |
+
dtype = pandas_dtype(dtype)
|
| 123 |
+
|
| 124 |
+
if lib.is_np_dtype(dtype, "mM") or isinstance(dtype, DatetimeTZDtype):
|
| 125 |
+
return label + np.timedelta64(1, "ns")
|
| 126 |
+
elif is_integer_dtype(dtype):
|
| 127 |
+
return label + 1
|
| 128 |
+
elif is_float_dtype(dtype):
|
| 129 |
+
return np.nextafter(label, np.inf)
|
| 130 |
+
else:
|
| 131 |
+
raise TypeError(f"cannot determine next label for type {repr(type(label))}")
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def _get_prev_label(label):
|
| 135 |
+
# see test_slice_locs_with_ints_and_floats_succeeds
|
| 136 |
+
dtype = getattr(label, "dtype", type(label))
|
| 137 |
+
if isinstance(label, (Timestamp, Timedelta)):
|
| 138 |
+
dtype = "datetime64[ns]"
|
| 139 |
+
dtype = pandas_dtype(dtype)
|
| 140 |
+
|
| 141 |
+
if lib.is_np_dtype(dtype, "mM") or isinstance(dtype, DatetimeTZDtype):
|
| 142 |
+
return label - np.timedelta64(1, "ns")
|
| 143 |
+
elif is_integer_dtype(dtype):
|
| 144 |
+
return label - 1
|
| 145 |
+
elif is_float_dtype(dtype):
|
| 146 |
+
return np.nextafter(label, -np.inf)
|
| 147 |
+
else:
|
| 148 |
+
raise TypeError(f"cannot determine next label for type {repr(type(label))}")
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def _new_IntervalIndex(cls, d):
|
| 152 |
+
"""
|
| 153 |
+
This is called upon unpickling, rather than the default which doesn't have
|
| 154 |
+
arguments and breaks __new__.
|
| 155 |
+
"""
|
| 156 |
+
return cls.from_arrays(**d)
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
@Appender(
|
| 160 |
+
_interval_shared_docs["class"]
|
| 161 |
+
% {
|
| 162 |
+
"klass": "IntervalIndex",
|
| 163 |
+
"summary": "Immutable index of intervals that are closed on the same side.",
|
| 164 |
+
"name": _index_doc_kwargs["name"],
|
| 165 |
+
"extra_attributes": "is_overlapping\nvalues\n",
|
| 166 |
+
"extra_methods": "",
|
| 167 |
+
"examples": textwrap.dedent(
|
| 168 |
+
"""\
|
| 169 |
+
Examples
|
| 170 |
+
--------
|
| 171 |
+
A new ``IntervalIndex`` is typically constructed using
|
| 172 |
+
:func:`interval_range`:
|
| 173 |
+
|
| 174 |
+
>>> pd.interval_range(start=0, end=5)
|
| 175 |
+
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
|
| 176 |
+
dtype='interval[int64, right]')
|
| 177 |
+
|
| 178 |
+
It may also be constructed using one of the constructor
|
| 179 |
+
methods: :meth:`IntervalIndex.from_arrays`,
|
| 180 |
+
:meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
|
| 181 |
+
|
| 182 |
+
See further examples in the doc strings of ``interval_range`` and the
|
| 183 |
+
mentioned constructor methods.
|
| 184 |
+
"""
|
| 185 |
+
),
|
| 186 |
+
}
|
| 187 |
+
)
|
| 188 |
+
@inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True)
|
| 189 |
+
@inherit_names(
|
| 190 |
+
[
|
| 191 |
+
"__array__",
|
| 192 |
+
"overlaps",
|
| 193 |
+
"contains",
|
| 194 |
+
"closed_left",
|
| 195 |
+
"closed_right",
|
| 196 |
+
"open_left",
|
| 197 |
+
"open_right",
|
| 198 |
+
"is_empty",
|
| 199 |
+
],
|
| 200 |
+
IntervalArray,
|
| 201 |
+
)
|
| 202 |
+
@inherit_names(["is_non_overlapping_monotonic", "closed"], IntervalArray, cache=True)
|
| 203 |
+
class IntervalIndex(ExtensionIndex):
|
| 204 |
+
_typ = "intervalindex"
|
| 205 |
+
|
| 206 |
+
# annotate properties pinned via inherit_names
|
| 207 |
+
closed: IntervalClosedType
|
| 208 |
+
is_non_overlapping_monotonic: bool
|
| 209 |
+
closed_left: bool
|
| 210 |
+
closed_right: bool
|
| 211 |
+
open_left: bool
|
| 212 |
+
open_right: bool
|
| 213 |
+
|
| 214 |
+
_data: IntervalArray
|
| 215 |
+
_values: IntervalArray
|
| 216 |
+
_can_hold_strings = False
|
| 217 |
+
_data_cls = IntervalArray
|
| 218 |
+
|
| 219 |
+
# --------------------------------------------------------------------
|
| 220 |
+
# Constructors
|
| 221 |
+
|
| 222 |
+
def __new__(
|
| 223 |
+
cls,
|
| 224 |
+
data,
|
| 225 |
+
closed: IntervalClosedType | None = None,
|
| 226 |
+
dtype: Dtype | None = None,
|
| 227 |
+
copy: bool = False,
|
| 228 |
+
name: Hashable | None = None,
|
| 229 |
+
verify_integrity: bool = True,
|
| 230 |
+
) -> Self:
|
| 231 |
+
name = maybe_extract_name(name, data, cls)
|
| 232 |
+
|
| 233 |
+
with rewrite_exception("IntervalArray", cls.__name__):
|
| 234 |
+
array = IntervalArray(
|
| 235 |
+
data,
|
| 236 |
+
closed=closed,
|
| 237 |
+
copy=copy,
|
| 238 |
+
dtype=dtype,
|
| 239 |
+
verify_integrity=verify_integrity,
|
| 240 |
+
)
|
| 241 |
+
|
| 242 |
+
return cls._simple_new(array, name)
|
| 243 |
+
|
| 244 |
+
@classmethod
|
| 245 |
+
@Appender(
|
| 246 |
+
_interval_shared_docs["from_breaks"]
|
| 247 |
+
% {
|
| 248 |
+
"klass": "IntervalIndex",
|
| 249 |
+
"name": textwrap.dedent(
|
| 250 |
+
"""
|
| 251 |
+
name : str, optional
|
| 252 |
+
Name of the resulting IntervalIndex."""
|
| 253 |
+
),
|
| 254 |
+
"examples": textwrap.dedent(
|
| 255 |
+
"""\
|
| 256 |
+
Examples
|
| 257 |
+
--------
|
| 258 |
+
>>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])
|
| 259 |
+
IntervalIndex([(0, 1], (1, 2], (2, 3]],
|
| 260 |
+
dtype='interval[int64, right]')
|
| 261 |
+
"""
|
| 262 |
+
),
|
| 263 |
+
}
|
| 264 |
+
)
|
| 265 |
+
def from_breaks(
|
| 266 |
+
cls,
|
| 267 |
+
breaks,
|
| 268 |
+
closed: IntervalClosedType | None = "right",
|
| 269 |
+
name: Hashable | None = None,
|
| 270 |
+
copy: bool = False,
|
| 271 |
+
dtype: Dtype | None = None,
|
| 272 |
+
) -> IntervalIndex:
|
| 273 |
+
with rewrite_exception("IntervalArray", cls.__name__):
|
| 274 |
+
array = IntervalArray.from_breaks(
|
| 275 |
+
breaks, closed=closed, copy=copy, dtype=dtype
|
| 276 |
+
)
|
| 277 |
+
return cls._simple_new(array, name=name)
|
| 278 |
+
|
| 279 |
+
@classmethod
|
| 280 |
+
@Appender(
|
| 281 |
+
_interval_shared_docs["from_arrays"]
|
| 282 |
+
% {
|
| 283 |
+
"klass": "IntervalIndex",
|
| 284 |
+
"name": textwrap.dedent(
|
| 285 |
+
"""
|
| 286 |
+
name : str, optional
|
| 287 |
+
Name of the resulting IntervalIndex."""
|
| 288 |
+
),
|
| 289 |
+
"examples": textwrap.dedent(
|
| 290 |
+
"""\
|
| 291 |
+
Examples
|
| 292 |
+
--------
|
| 293 |
+
>>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
|
| 294 |
+
IntervalIndex([(0, 1], (1, 2], (2, 3]],
|
| 295 |
+
dtype='interval[int64, right]')
|
| 296 |
+
"""
|
| 297 |
+
),
|
| 298 |
+
}
|
| 299 |
+
)
|
| 300 |
+
def from_arrays(
|
| 301 |
+
cls,
|
| 302 |
+
left,
|
| 303 |
+
right,
|
| 304 |
+
closed: IntervalClosedType = "right",
|
| 305 |
+
name: Hashable | None = None,
|
| 306 |
+
copy: bool = False,
|
| 307 |
+
dtype: Dtype | None = None,
|
| 308 |
+
) -> IntervalIndex:
|
| 309 |
+
with rewrite_exception("IntervalArray", cls.__name__):
|
| 310 |
+
array = IntervalArray.from_arrays(
|
| 311 |
+
left, right, closed, copy=copy, dtype=dtype
|
| 312 |
+
)
|
| 313 |
+
return cls._simple_new(array, name=name)
|
| 314 |
+
|
| 315 |
+
@classmethod
|
| 316 |
+
@Appender(
|
| 317 |
+
_interval_shared_docs["from_tuples"]
|
| 318 |
+
% {
|
| 319 |
+
"klass": "IntervalIndex",
|
| 320 |
+
"name": textwrap.dedent(
|
| 321 |
+
"""
|
| 322 |
+
name : str, optional
|
| 323 |
+
Name of the resulting IntervalIndex."""
|
| 324 |
+
),
|
| 325 |
+
"examples": textwrap.dedent(
|
| 326 |
+
"""\
|
| 327 |
+
Examples
|
| 328 |
+
--------
|
| 329 |
+
>>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
|
| 330 |
+
IntervalIndex([(0, 1], (1, 2]],
|
| 331 |
+
dtype='interval[int64, right]')
|
| 332 |
+
"""
|
| 333 |
+
),
|
| 334 |
+
}
|
| 335 |
+
)
|
| 336 |
+
def from_tuples(
|
| 337 |
+
cls,
|
| 338 |
+
data,
|
| 339 |
+
closed: IntervalClosedType = "right",
|
| 340 |
+
name: Hashable | None = None,
|
| 341 |
+
copy: bool = False,
|
| 342 |
+
dtype: Dtype | None = None,
|
| 343 |
+
) -> IntervalIndex:
|
| 344 |
+
with rewrite_exception("IntervalArray", cls.__name__):
|
| 345 |
+
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
|
| 346 |
+
return cls._simple_new(arr, name=name)
|
| 347 |
+
|
| 348 |
+
# --------------------------------------------------------------------
|
| 349 |
+
# error: Return type "IntervalTree" of "_engine" incompatible with return type
|
| 350 |
+
# "Union[IndexEngine, ExtensionEngine]" in supertype "Index"
|
| 351 |
+
@cache_readonly
|
| 352 |
+
def _engine(self) -> IntervalTree: # type: ignore[override]
|
| 353 |
+
# IntervalTree does not supports numpy array unless they are 64 bit
|
| 354 |
+
left = self._maybe_convert_i8(self.left)
|
| 355 |
+
left = maybe_upcast_numeric_to_64bit(left)
|
| 356 |
+
right = self._maybe_convert_i8(self.right)
|
| 357 |
+
right = maybe_upcast_numeric_to_64bit(right)
|
| 358 |
+
return IntervalTree(left, right, closed=self.closed)
|
| 359 |
+
|
| 360 |
+
def __contains__(self, key: Any) -> bool:
|
| 361 |
+
"""
|
| 362 |
+
return a boolean if this key is IN the index
|
| 363 |
+
We *only* accept an Interval
|
| 364 |
+
|
| 365 |
+
Parameters
|
| 366 |
+
----------
|
| 367 |
+
key : Interval
|
| 368 |
+
|
| 369 |
+
Returns
|
| 370 |
+
-------
|
| 371 |
+
bool
|
| 372 |
+
"""
|
| 373 |
+
hash(key)
|
| 374 |
+
if not isinstance(key, Interval):
|
| 375 |
+
if is_valid_na_for_dtype(key, self.dtype):
|
| 376 |
+
return self.hasnans
|
| 377 |
+
return False
|
| 378 |
+
|
| 379 |
+
try:
|
| 380 |
+
self.get_loc(key)
|
| 381 |
+
return True
|
| 382 |
+
except KeyError:
|
| 383 |
+
return False
|
| 384 |
+
|
| 385 |
+
def _getitem_slice(self, slobj: slice) -> IntervalIndex:
|
| 386 |
+
"""
|
| 387 |
+
Fastpath for __getitem__ when we know we have a slice.
|
| 388 |
+
"""
|
| 389 |
+
res = self._data[slobj]
|
| 390 |
+
return type(self)._simple_new(res, name=self._name)
|
| 391 |
+
|
| 392 |
+
@cache_readonly
|
| 393 |
+
def _multiindex(self) -> MultiIndex:
|
| 394 |
+
return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"])
|
| 395 |
+
|
| 396 |
+
def __reduce__(self):
|
| 397 |
+
d = {
|
| 398 |
+
"left": self.left,
|
| 399 |
+
"right": self.right,
|
| 400 |
+
"closed": self.closed,
|
| 401 |
+
"name": self.name,
|
| 402 |
+
}
|
| 403 |
+
return _new_IntervalIndex, (type(self), d), None
|
| 404 |
+
|
| 405 |
+
@property
|
| 406 |
+
def inferred_type(self) -> str:
|
| 407 |
+
"""Return a string of the type inferred from the values"""
|
| 408 |
+
return "interval"
|
| 409 |
+
|
| 410 |
+
# Cannot determine type of "memory_usage"
|
| 411 |
+
@Appender(Index.memory_usage.__doc__) # type: ignore[has-type]
|
| 412 |
+
def memory_usage(self, deep: bool = False) -> int:
|
| 413 |
+
# we don't use an explicit engine
|
| 414 |
+
# so return the bytes here
|
| 415 |
+
return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)
|
| 416 |
+
|
| 417 |
+
# IntervalTree doesn't have a is_monotonic_decreasing, so have to override
|
| 418 |
+
# the Index implementation
|
| 419 |
+
@cache_readonly
|
| 420 |
+
def is_monotonic_decreasing(self) -> bool:
|
| 421 |
+
"""
|
| 422 |
+
Return True if the IntervalIndex is monotonic decreasing (only equal or
|
| 423 |
+
decreasing values), else False
|
| 424 |
+
"""
|
| 425 |
+
return self[::-1].is_monotonic_increasing
|
| 426 |
+
|
| 427 |
+
@cache_readonly
|
| 428 |
+
def is_unique(self) -> bool:
|
| 429 |
+
"""
|
| 430 |
+
Return True if the IntervalIndex contains unique elements, else False.
|
| 431 |
+
"""
|
| 432 |
+
left = self.left
|
| 433 |
+
right = self.right
|
| 434 |
+
|
| 435 |
+
if self.isna().sum() > 1:
|
| 436 |
+
return False
|
| 437 |
+
|
| 438 |
+
if left.is_unique or right.is_unique:
|
| 439 |
+
return True
|
| 440 |
+
|
| 441 |
+
seen_pairs = set()
|
| 442 |
+
check_idx = np.where(left.duplicated(keep=False))[0]
|
| 443 |
+
for idx in check_idx:
|
| 444 |
+
pair = (left[idx], right[idx])
|
| 445 |
+
if pair in seen_pairs:
|
| 446 |
+
return False
|
| 447 |
+
seen_pairs.add(pair)
|
| 448 |
+
|
| 449 |
+
return True
|
| 450 |
+
|
| 451 |
+
@property
|
| 452 |
+
def is_overlapping(self) -> bool:
|
| 453 |
+
"""
|
| 454 |
+
Return True if the IntervalIndex has overlapping intervals, else False.
|
| 455 |
+
|
| 456 |
+
Two intervals overlap if they share a common point, including closed
|
| 457 |
+
endpoints. Intervals that only have an open endpoint in common do not
|
| 458 |
+
overlap.
|
| 459 |
+
|
| 460 |
+
Returns
|
| 461 |
+
-------
|
| 462 |
+
bool
|
| 463 |
+
Boolean indicating if the IntervalIndex has overlapping intervals.
|
| 464 |
+
|
| 465 |
+
See Also
|
| 466 |
+
--------
|
| 467 |
+
Interval.overlaps : Check whether two Interval objects overlap.
|
| 468 |
+
IntervalIndex.overlaps : Check an IntervalIndex elementwise for
|
| 469 |
+
overlaps.
|
| 470 |
+
|
| 471 |
+
Examples
|
| 472 |
+
--------
|
| 473 |
+
>>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])
|
| 474 |
+
>>> index
|
| 475 |
+
IntervalIndex([(0, 2], (1, 3], (4, 5]],
|
| 476 |
+
dtype='interval[int64, right]')
|
| 477 |
+
>>> index.is_overlapping
|
| 478 |
+
True
|
| 479 |
+
|
| 480 |
+
Intervals that share closed endpoints overlap:
|
| 481 |
+
|
| 482 |
+
>>> index = pd.interval_range(0, 3, closed='both')
|
| 483 |
+
>>> index
|
| 484 |
+
IntervalIndex([[0, 1], [1, 2], [2, 3]],
|
| 485 |
+
dtype='interval[int64, both]')
|
| 486 |
+
>>> index.is_overlapping
|
| 487 |
+
True
|
| 488 |
+
|
| 489 |
+
Intervals that only have an open endpoint in common do not overlap:
|
| 490 |
+
|
| 491 |
+
>>> index = pd.interval_range(0, 3, closed='left')
|
| 492 |
+
>>> index
|
| 493 |
+
IntervalIndex([[0, 1), [1, 2), [2, 3)],
|
| 494 |
+
dtype='interval[int64, left]')
|
| 495 |
+
>>> index.is_overlapping
|
| 496 |
+
False
|
| 497 |
+
"""
|
| 498 |
+
# GH 23309
|
| 499 |
+
return self._engine.is_overlapping
|
| 500 |
+
|
| 501 |
+
def _needs_i8_conversion(self, key) -> bool:
|
| 502 |
+
"""
|
| 503 |
+
Check if a given key needs i8 conversion. Conversion is necessary for
|
| 504 |
+
Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An
|
| 505 |
+
Interval-like requires conversion if its endpoints are one of the
|
| 506 |
+
aforementioned types.
|
| 507 |
+
|
| 508 |
+
Assumes that any list-like data has already been cast to an Index.
|
| 509 |
+
|
| 510 |
+
Parameters
|
| 511 |
+
----------
|
| 512 |
+
key : scalar or Index-like
|
| 513 |
+
The key that should be checked for i8 conversion
|
| 514 |
+
|
| 515 |
+
Returns
|
| 516 |
+
-------
|
| 517 |
+
bool
|
| 518 |
+
"""
|
| 519 |
+
key_dtype = getattr(key, "dtype", None)
|
| 520 |
+
if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):
|
| 521 |
+
return self._needs_i8_conversion(key.left)
|
| 522 |
+
|
| 523 |
+
i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)
|
| 524 |
+
return isinstance(key, i8_types)
|
| 525 |
+
|
| 526 |
+
def _maybe_convert_i8(self, key):
|
| 527 |
+
"""
|
| 528 |
+
Maybe convert a given key to its equivalent i8 value(s). Used as a
|
| 529 |
+
preprocessing step prior to IntervalTree queries (self._engine), which
|
| 530 |
+
expects numeric data.
|
| 531 |
+
|
| 532 |
+
Parameters
|
| 533 |
+
----------
|
| 534 |
+
key : scalar or list-like
|
| 535 |
+
The key that should maybe be converted to i8.
|
| 536 |
+
|
| 537 |
+
Returns
|
| 538 |
+
-------
|
| 539 |
+
scalar or list-like
|
| 540 |
+
The original key if no conversion occurred, int if converted scalar,
|
| 541 |
+
Index with an int64 dtype if converted list-like.
|
| 542 |
+
"""
|
| 543 |
+
if is_list_like(key):
|
| 544 |
+
key = ensure_index(key)
|
| 545 |
+
key = maybe_upcast_numeric_to_64bit(key)
|
| 546 |
+
|
| 547 |
+
if not self._needs_i8_conversion(key):
|
| 548 |
+
return key
|
| 549 |
+
|
| 550 |
+
scalar = is_scalar(key)
|
| 551 |
+
key_dtype = getattr(key, "dtype", None)
|
| 552 |
+
if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):
|
| 553 |
+
# convert left/right and reconstruct
|
| 554 |
+
left = self._maybe_convert_i8(key.left)
|
| 555 |
+
right = self._maybe_convert_i8(key.right)
|
| 556 |
+
constructor = Interval if scalar else IntervalIndex.from_arrays
|
| 557 |
+
# error: "object" not callable
|
| 558 |
+
return constructor(
|
| 559 |
+
left, right, closed=self.closed
|
| 560 |
+
) # type: ignore[operator]
|
| 561 |
+
|
| 562 |
+
if scalar:
|
| 563 |
+
# Timestamp/Timedelta
|
| 564 |
+
key_dtype, key_i8 = infer_dtype_from_scalar(key)
|
| 565 |
+
if isinstance(key, Period):
|
| 566 |
+
key_i8 = key.ordinal
|
| 567 |
+
elif isinstance(key_i8, Timestamp):
|
| 568 |
+
key_i8 = key_i8._value
|
| 569 |
+
elif isinstance(key_i8, (np.datetime64, np.timedelta64)):
|
| 570 |
+
key_i8 = key_i8.view("i8")
|
| 571 |
+
else:
|
| 572 |
+
# DatetimeIndex/TimedeltaIndex
|
| 573 |
+
key_dtype, key_i8 = key.dtype, Index(key.asi8)
|
| 574 |
+
if key.hasnans:
|
| 575 |
+
# convert NaT from its i8 value to np.nan so it's not viewed
|
| 576 |
+
# as a valid value, maybe causing errors (e.g. is_overlapping)
|
| 577 |
+
key_i8 = key_i8.where(~key._isnan)
|
| 578 |
+
|
| 579 |
+
# ensure consistency with IntervalIndex subtype
|
| 580 |
+
# error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],
|
| 581 |
+
# ExtensionDtype]" has no attribute "subtype"
|
| 582 |
+
subtype = self.dtype.subtype # type: ignore[union-attr]
|
| 583 |
+
|
| 584 |
+
if subtype != key_dtype:
|
| 585 |
+
raise ValueError(
|
| 586 |
+
f"Cannot index an IntervalIndex of subtype {subtype} with "
|
| 587 |
+
f"values of dtype {key_dtype}"
|
| 588 |
+
)
|
| 589 |
+
|
| 590 |
+
return key_i8
|
| 591 |
+
|
| 592 |
+
def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):
|
| 593 |
+
if not self.is_non_overlapping_monotonic:
|
| 594 |
+
raise KeyError(
|
| 595 |
+
"can only get slices from an IntervalIndex if bounds are "
|
| 596 |
+
"non-overlapping and all monotonic increasing or decreasing"
|
| 597 |
+
)
|
| 598 |
+
|
| 599 |
+
if isinstance(label, (IntervalMixin, IntervalIndex)):
|
| 600 |
+
raise NotImplementedError("Interval objects are not currently supported")
|
| 601 |
+
|
| 602 |
+
# GH 20921: "not is_monotonic_increasing" for the second condition
|
| 603 |
+
# instead of "is_monotonic_decreasing" to account for single element
|
| 604 |
+
# indexes being both increasing and decreasing
|
| 605 |
+
if (side == "left" and self.left.is_monotonic_increasing) or (
|
| 606 |
+
side == "right" and not self.left.is_monotonic_increasing
|
| 607 |
+
):
|
| 608 |
+
sub_idx = self.right
|
| 609 |
+
if self.open_right:
|
| 610 |
+
label = _get_next_label(label)
|
| 611 |
+
else:
|
| 612 |
+
sub_idx = self.left
|
| 613 |
+
if self.open_left:
|
| 614 |
+
label = _get_prev_label(label)
|
| 615 |
+
|
| 616 |
+
return sub_idx._searchsorted_monotonic(label, side)
|
| 617 |
+
|
| 618 |
+
# --------------------------------------------------------------------
|
| 619 |
+
# Indexing Methods
|
| 620 |
+
|
| 621 |
+
def get_loc(self, key) -> int | slice | np.ndarray:
|
| 622 |
+
"""
|
| 623 |
+
Get integer location, slice or boolean mask for requested label.
|
| 624 |
+
|
| 625 |
+
Parameters
|
| 626 |
+
----------
|
| 627 |
+
key : label
|
| 628 |
+
|
| 629 |
+
Returns
|
| 630 |
+
-------
|
| 631 |
+
int if unique index, slice if monotonic index, else mask
|
| 632 |
+
|
| 633 |
+
Examples
|
| 634 |
+
--------
|
| 635 |
+
>>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
|
| 636 |
+
>>> index = pd.IntervalIndex([i1, i2])
|
| 637 |
+
>>> index.get_loc(1)
|
| 638 |
+
0
|
| 639 |
+
|
| 640 |
+
You can also supply a point inside an interval.
|
| 641 |
+
|
| 642 |
+
>>> index.get_loc(1.5)
|
| 643 |
+
1
|
| 644 |
+
|
| 645 |
+
If a label is in several intervals, you get the locations of all the
|
| 646 |
+
relevant intervals.
|
| 647 |
+
|
| 648 |
+
>>> i3 = pd.Interval(0, 2)
|
| 649 |
+
>>> overlapping_index = pd.IntervalIndex([i1, i2, i3])
|
| 650 |
+
>>> overlapping_index.get_loc(0.5)
|
| 651 |
+
array([ True, False, True])
|
| 652 |
+
|
| 653 |
+
Only exact matches will be returned if an interval is provided.
|
| 654 |
+
|
| 655 |
+
>>> index.get_loc(pd.Interval(0, 1))
|
| 656 |
+
0
|
| 657 |
+
"""
|
| 658 |
+
self._check_indexing_error(key)
|
| 659 |
+
|
| 660 |
+
if isinstance(key, Interval):
|
| 661 |
+
if self.closed != key.closed:
|
| 662 |
+
raise KeyError(key)
|
| 663 |
+
mask = (self.left == key.left) & (self.right == key.right)
|
| 664 |
+
elif is_valid_na_for_dtype(key, self.dtype):
|
| 665 |
+
mask = self.isna()
|
| 666 |
+
else:
|
| 667 |
+
# assume scalar
|
| 668 |
+
op_left = le if self.closed_left else lt
|
| 669 |
+
op_right = le if self.closed_right else lt
|
| 670 |
+
try:
|
| 671 |
+
mask = op_left(self.left, key) & op_right(key, self.right)
|
| 672 |
+
except TypeError as err:
|
| 673 |
+
# scalar is not comparable to II subtype --> invalid label
|
| 674 |
+
raise KeyError(key) from err
|
| 675 |
+
|
| 676 |
+
matches = mask.sum()
|
| 677 |
+
if matches == 0:
|
| 678 |
+
raise KeyError(key)
|
| 679 |
+
if matches == 1:
|
| 680 |
+
return mask.argmax()
|
| 681 |
+
|
| 682 |
+
res = lib.maybe_booleans_to_slice(mask.view("u1"))
|
| 683 |
+
if isinstance(res, slice) and res.stop is None:
|
| 684 |
+
# TODO: DO this in maybe_booleans_to_slice?
|
| 685 |
+
res = slice(res.start, len(self), res.step)
|
| 686 |
+
return res
|
| 687 |
+
|
| 688 |
+
def _get_indexer(
|
| 689 |
+
self,
|
| 690 |
+
target: Index,
|
| 691 |
+
method: str | None = None,
|
| 692 |
+
limit: int | None = None,
|
| 693 |
+
tolerance: Any | None = None,
|
| 694 |
+
) -> npt.NDArray[np.intp]:
|
| 695 |
+
if isinstance(target, IntervalIndex):
|
| 696 |
+
# We only get here with not self.is_overlapping
|
| 697 |
+
# -> at most one match per interval in target
|
| 698 |
+
# want exact matches -> need both left/right to match, so defer to
|
| 699 |
+
# left/right get_indexer, compare elementwise, equality -> match
|
| 700 |
+
indexer = self._get_indexer_unique_sides(target)
|
| 701 |
+
|
| 702 |
+
elif not is_object_dtype(target.dtype):
|
| 703 |
+
# homogeneous scalar index: use IntervalTree
|
| 704 |
+
# we should always have self._should_partial_index(target) here
|
| 705 |
+
target = self._maybe_convert_i8(target)
|
| 706 |
+
indexer = self._engine.get_indexer(target.values)
|
| 707 |
+
else:
|
| 708 |
+
# heterogeneous scalar index: defer elementwise to get_loc
|
| 709 |
+
# we should always have self._should_partial_index(target) here
|
| 710 |
+
return self._get_indexer_pointwise(target)[0]
|
| 711 |
+
|
| 712 |
+
return ensure_platform_int(indexer)
|
| 713 |
+
|
| 714 |
+
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
|
| 715 |
+
def get_indexer_non_unique(
|
| 716 |
+
self, target: Index
|
| 717 |
+
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
|
| 718 |
+
target = ensure_index(target)
|
| 719 |
+
|
| 720 |
+
if not self._should_compare(target) and not self._should_partial_index(target):
|
| 721 |
+
# e.g. IntervalIndex with different closed or incompatible subtype
|
| 722 |
+
# -> no matches
|
| 723 |
+
return self._get_indexer_non_comparable(target, None, unique=False)
|
| 724 |
+
|
| 725 |
+
elif isinstance(target, IntervalIndex):
|
| 726 |
+
if self.left.is_unique and self.right.is_unique:
|
| 727 |
+
# fastpath available even if we don't have self._index_as_unique
|
| 728 |
+
indexer = self._get_indexer_unique_sides(target)
|
| 729 |
+
missing = (indexer == -1).nonzero()[0]
|
| 730 |
+
else:
|
| 731 |
+
return self._get_indexer_pointwise(target)
|
| 732 |
+
|
| 733 |
+
elif is_object_dtype(target.dtype) or not self._should_partial_index(target):
|
| 734 |
+
# target might contain intervals: defer elementwise to get_loc
|
| 735 |
+
return self._get_indexer_pointwise(target)
|
| 736 |
+
|
| 737 |
+
else:
|
| 738 |
+
# Note: this case behaves differently from other Index subclasses
|
| 739 |
+
# because IntervalIndex does partial-int indexing
|
| 740 |
+
target = self._maybe_convert_i8(target)
|
| 741 |
+
indexer, missing = self._engine.get_indexer_non_unique(target.values)
|
| 742 |
+
|
| 743 |
+
return ensure_platform_int(indexer), ensure_platform_int(missing)
|
| 744 |
+
|
| 745 |
+
def _get_indexer_unique_sides(self, target: IntervalIndex) -> npt.NDArray[np.intp]:
|
| 746 |
+
"""
|
| 747 |
+
_get_indexer specialized to the case where both of our sides are unique.
|
| 748 |
+
"""
|
| 749 |
+
# Caller is responsible for checking
|
| 750 |
+
# `self.left.is_unique and self.right.is_unique`
|
| 751 |
+
|
| 752 |
+
left_indexer = self.left.get_indexer(target.left)
|
| 753 |
+
right_indexer = self.right.get_indexer(target.right)
|
| 754 |
+
indexer = np.where(left_indexer == right_indexer, left_indexer, -1)
|
| 755 |
+
return indexer
|
| 756 |
+
|
| 757 |
+
def _get_indexer_pointwise(
|
| 758 |
+
self, target: Index
|
| 759 |
+
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
|
| 760 |
+
"""
|
| 761 |
+
pointwise implementation for get_indexer and get_indexer_non_unique.
|
| 762 |
+
"""
|
| 763 |
+
indexer, missing = [], []
|
| 764 |
+
for i, key in enumerate(target):
|
| 765 |
+
try:
|
| 766 |
+
locs = self.get_loc(key)
|
| 767 |
+
if isinstance(locs, slice):
|
| 768 |
+
# Only needed for get_indexer_non_unique
|
| 769 |
+
locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp")
|
| 770 |
+
elif lib.is_integer(locs):
|
| 771 |
+
locs = np.array(locs, ndmin=1)
|
| 772 |
+
else:
|
| 773 |
+
# otherwise we have ndarray[bool]
|
| 774 |
+
locs = np.where(locs)[0]
|
| 775 |
+
except KeyError:
|
| 776 |
+
missing.append(i)
|
| 777 |
+
locs = np.array([-1])
|
| 778 |
+
except InvalidIndexError:
|
| 779 |
+
# i.e. non-scalar key e.g. a tuple.
|
| 780 |
+
# see test_append_different_columns_types_raises
|
| 781 |
+
missing.append(i)
|
| 782 |
+
locs = np.array([-1])
|
| 783 |
+
|
| 784 |
+
indexer.append(locs)
|
| 785 |
+
|
| 786 |
+
indexer = np.concatenate(indexer)
|
| 787 |
+
return ensure_platform_int(indexer), ensure_platform_int(missing)
|
| 788 |
+
|
| 789 |
+
@cache_readonly
|
| 790 |
+
def _index_as_unique(self) -> bool:
|
| 791 |
+
return not self.is_overlapping and self._engine._na_count < 2
|
| 792 |
+
|
| 793 |
+
_requires_unique_msg = (
|
| 794 |
+
"cannot handle overlapping indices; use IntervalIndex.get_indexer_non_unique"
|
| 795 |
+
)
|
| 796 |
+
|
| 797 |
+
def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]):
|
| 798 |
+
if not (key.step is None or key.step == 1):
|
| 799 |
+
# GH#31658 if label-based, we require step == 1,
|
| 800 |
+
# if positional, we disallow float start/stop
|
| 801 |
+
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
|
| 802 |
+
if kind == "loc":
|
| 803 |
+
raise ValueError(msg)
|
| 804 |
+
if kind == "getitem":
|
| 805 |
+
if not is_valid_positional_slice(key):
|
| 806 |
+
# i.e. this cannot be interpreted as a positional slice
|
| 807 |
+
raise ValueError(msg)
|
| 808 |
+
|
| 809 |
+
return super()._convert_slice_indexer(key, kind)
|
| 810 |
+
|
| 811 |
+
@cache_readonly
|
| 812 |
+
def _should_fallback_to_positional(self) -> bool:
|
| 813 |
+
# integer lookups in Series.__getitem__ are unambiguously
|
| 814 |
+
# positional in this case
|
| 815 |
+
# error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],
|
| 816 |
+
# ExtensionDtype]" has no attribute "subtype"
|
| 817 |
+
return self.dtype.subtype.kind in "mM" # type: ignore[union-attr]
|
| 818 |
+
|
| 819 |
+
def _maybe_cast_slice_bound(self, label, side: str):
|
| 820 |
+
return getattr(self, side)._maybe_cast_slice_bound(label, side)
|
| 821 |
+
|
| 822 |
+
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
|
| 823 |
+
if not isinstance(dtype, IntervalDtype):
|
| 824 |
+
return False
|
| 825 |
+
common_subtype = find_common_type([self.dtype, dtype])
|
| 826 |
+
return not is_object_dtype(common_subtype)
|
| 827 |
+
|
| 828 |
+
# --------------------------------------------------------------------
|
| 829 |
+
|
| 830 |
+
@cache_readonly
|
| 831 |
+
def left(self) -> Index:
|
| 832 |
+
return Index(self._data.left, copy=False)
|
| 833 |
+
|
| 834 |
+
@cache_readonly
|
| 835 |
+
def right(self) -> Index:
|
| 836 |
+
return Index(self._data.right, copy=False)
|
| 837 |
+
|
| 838 |
+
@cache_readonly
|
| 839 |
+
def mid(self) -> Index:
|
| 840 |
+
return Index(self._data.mid, copy=False)
|
| 841 |
+
|
| 842 |
+
@property
|
| 843 |
+
def length(self) -> Index:
|
| 844 |
+
return Index(self._data.length, copy=False)
|
| 845 |
+
|
| 846 |
+
# --------------------------------------------------------------------
|
| 847 |
+
# Set Operations
|
| 848 |
+
|
| 849 |
+
def _intersection(self, other, sort):
|
| 850 |
+
"""
|
| 851 |
+
intersection specialized to the case with matching dtypes.
|
| 852 |
+
"""
|
| 853 |
+
# For IntervalIndex we also know other.closed == self.closed
|
| 854 |
+
if self.left.is_unique and self.right.is_unique:
|
| 855 |
+
taken = self._intersection_unique(other)
|
| 856 |
+
elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:
|
| 857 |
+
# Swap other/self if other is unique and self does not have
|
| 858 |
+
# multiple NaNs
|
| 859 |
+
taken = other._intersection_unique(self)
|
| 860 |
+
else:
|
| 861 |
+
# duplicates
|
| 862 |
+
taken = self._intersection_non_unique(other)
|
| 863 |
+
|
| 864 |
+
if sort is None:
|
| 865 |
+
taken = taken.sort_values()
|
| 866 |
+
|
| 867 |
+
return taken
|
| 868 |
+
|
| 869 |
+
def _intersection_unique(self, other: IntervalIndex) -> IntervalIndex:
|
| 870 |
+
"""
|
| 871 |
+
Used when the IntervalIndex does not have any common endpoint,
|
| 872 |
+
no matter left or right.
|
| 873 |
+
Return the intersection with another IntervalIndex.
|
| 874 |
+
Parameters
|
| 875 |
+
----------
|
| 876 |
+
other : IntervalIndex
|
| 877 |
+
Returns
|
| 878 |
+
-------
|
| 879 |
+
IntervalIndex
|
| 880 |
+
"""
|
| 881 |
+
# Note: this is much more performant than super()._intersection(other)
|
| 882 |
+
lindexer = self.left.get_indexer(other.left)
|
| 883 |
+
rindexer = self.right.get_indexer(other.right)
|
| 884 |
+
|
| 885 |
+
match = (lindexer == rindexer) & (lindexer != -1)
|
| 886 |
+
indexer = lindexer.take(match.nonzero()[0])
|
| 887 |
+
indexer = unique(indexer)
|
| 888 |
+
|
| 889 |
+
return self.take(indexer)
|
| 890 |
+
|
| 891 |
+
def _intersection_non_unique(self, other: IntervalIndex) -> IntervalIndex:
|
| 892 |
+
"""
|
| 893 |
+
Used when the IntervalIndex does have some common endpoints,
|
| 894 |
+
on either sides.
|
| 895 |
+
Return the intersection with another IntervalIndex.
|
| 896 |
+
|
| 897 |
+
Parameters
|
| 898 |
+
----------
|
| 899 |
+
other : IntervalIndex
|
| 900 |
+
|
| 901 |
+
Returns
|
| 902 |
+
-------
|
| 903 |
+
IntervalIndex
|
| 904 |
+
"""
|
| 905 |
+
# Note: this is about 3.25x faster than super()._intersection(other)
|
| 906 |
+
# in IntervalIndexMethod.time_intersection_both_duplicate(1000)
|
| 907 |
+
mask = np.zeros(len(self), dtype=bool)
|
| 908 |
+
|
| 909 |
+
if self.hasnans and other.hasnans:
|
| 910 |
+
first_nan_loc = np.arange(len(self))[self.isna()][0]
|
| 911 |
+
mask[first_nan_loc] = True
|
| 912 |
+
|
| 913 |
+
other_tups = set(zip(other.left, other.right))
|
| 914 |
+
for i, tup in enumerate(zip(self.left, self.right)):
|
| 915 |
+
if tup in other_tups:
|
| 916 |
+
mask[i] = True
|
| 917 |
+
|
| 918 |
+
return self[mask]
|
| 919 |
+
|
| 920 |
+
# --------------------------------------------------------------------
|
| 921 |
+
|
| 922 |
+
def _get_engine_target(self) -> np.ndarray:
|
| 923 |
+
# Note: we _could_ use libjoin functions by either casting to object
|
| 924 |
+
# dtype or constructing tuples (faster than constructing Intervals)
|
| 925 |
+
# but the libjoin fastpaths are no longer fast in these cases.
|
| 926 |
+
raise NotImplementedError(
|
| 927 |
+
"IntervalIndex does not use libjoin fastpaths or pass values to "
|
| 928 |
+
"IndexEngine objects"
|
| 929 |
+
)
|
| 930 |
+
|
| 931 |
+
def _from_join_target(self, result):
|
| 932 |
+
raise NotImplementedError("IntervalIndex does not use libjoin fastpaths")
|
| 933 |
+
|
| 934 |
+
# TODO: arithmetic operations
|
| 935 |
+
|
| 936 |
+
|
| 937 |
+
def _is_valid_endpoint(endpoint) -> bool:
|
| 938 |
+
"""
|
| 939 |
+
Helper for interval_range to check if start/end are valid types.
|
| 940 |
+
"""
|
| 941 |
+
return any(
|
| 942 |
+
[
|
| 943 |
+
is_number(endpoint),
|
| 944 |
+
isinstance(endpoint, Timestamp),
|
| 945 |
+
isinstance(endpoint, Timedelta),
|
| 946 |
+
endpoint is None,
|
| 947 |
+
]
|
| 948 |
+
)
|
| 949 |
+
|
| 950 |
+
|
| 951 |
+
def _is_type_compatible(a, b) -> bool:
|
| 952 |
+
"""
|
| 953 |
+
Helper for interval_range to check type compat of start/end/freq.
|
| 954 |
+
"""
|
| 955 |
+
is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset))
|
| 956 |
+
is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset))
|
| 957 |
+
return (
|
| 958 |
+
(is_number(a) and is_number(b))
|
| 959 |
+
or (is_ts_compat(a) and is_ts_compat(b))
|
| 960 |
+
or (is_td_compat(a) and is_td_compat(b))
|
| 961 |
+
or com.any_none(a, b)
|
| 962 |
+
)
|
| 963 |
+
|
| 964 |
+
|
| 965 |
+
def interval_range(
|
| 966 |
+
start=None,
|
| 967 |
+
end=None,
|
| 968 |
+
periods=None,
|
| 969 |
+
freq=None,
|
| 970 |
+
name: Hashable | None = None,
|
| 971 |
+
closed: IntervalClosedType = "right",
|
| 972 |
+
) -> IntervalIndex:
|
| 973 |
+
"""
|
| 974 |
+
Return a fixed frequency IntervalIndex.
|
| 975 |
+
|
| 976 |
+
Parameters
|
| 977 |
+
----------
|
| 978 |
+
start : numeric or datetime-like, default None
|
| 979 |
+
Left bound for generating intervals.
|
| 980 |
+
end : numeric or datetime-like, default None
|
| 981 |
+
Right bound for generating intervals.
|
| 982 |
+
periods : int, default None
|
| 983 |
+
Number of periods to generate.
|
| 984 |
+
freq : numeric, str, Timedelta, datetime.timedelta, or DateOffset, default None
|
| 985 |
+
The length of each interval. Must be consistent with the type of start
|
| 986 |
+
and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
|
| 987 |
+
for numeric and 'D' for datetime-like.
|
| 988 |
+
name : str, default None
|
| 989 |
+
Name of the resulting IntervalIndex.
|
| 990 |
+
closed : {'left', 'right', 'both', 'neither'}, default 'right'
|
| 991 |
+
Whether the intervals are closed on the left-side, right-side, both
|
| 992 |
+
or neither.
|
| 993 |
+
|
| 994 |
+
Returns
|
| 995 |
+
-------
|
| 996 |
+
IntervalIndex
|
| 997 |
+
|
| 998 |
+
See Also
|
| 999 |
+
--------
|
| 1000 |
+
IntervalIndex : An Index of intervals that are all closed on the same side.
|
| 1001 |
+
|
| 1002 |
+
Notes
|
| 1003 |
+
-----
|
| 1004 |
+
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
|
| 1005 |
+
exactly three must be specified. If ``freq`` is omitted, the resulting
|
| 1006 |
+
``IntervalIndex`` will have ``periods`` linearly spaced elements between
|
| 1007 |
+
``start`` and ``end``, inclusively.
|
| 1008 |
+
|
| 1009 |
+
To learn more about datetime-like frequency strings, please see `this link
|
| 1010 |
+
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
|
| 1011 |
+
|
| 1012 |
+
Examples
|
| 1013 |
+
--------
|
| 1014 |
+
Numeric ``start`` and ``end`` is supported.
|
| 1015 |
+
|
| 1016 |
+
>>> pd.interval_range(start=0, end=5)
|
| 1017 |
+
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
|
| 1018 |
+
dtype='interval[int64, right]')
|
| 1019 |
+
|
| 1020 |
+
Additionally, datetime-like input is also supported.
|
| 1021 |
+
|
| 1022 |
+
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
|
| 1023 |
+
... end=pd.Timestamp('2017-01-04'))
|
| 1024 |
+
IntervalIndex([(2017-01-01 00:00:00, 2017-01-02 00:00:00],
|
| 1025 |
+
(2017-01-02 00:00:00, 2017-01-03 00:00:00],
|
| 1026 |
+
(2017-01-03 00:00:00, 2017-01-04 00:00:00]],
|
| 1027 |
+
dtype='interval[datetime64[ns], right]')
|
| 1028 |
+
|
| 1029 |
+
The ``freq`` parameter specifies the frequency between the left and right.
|
| 1030 |
+
endpoints of the individual intervals within the ``IntervalIndex``. For
|
| 1031 |
+
numeric ``start`` and ``end``, the frequency must also be numeric.
|
| 1032 |
+
|
| 1033 |
+
>>> pd.interval_range(start=0, periods=4, freq=1.5)
|
| 1034 |
+
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
|
| 1035 |
+
dtype='interval[float64, right]')
|
| 1036 |
+
|
| 1037 |
+
Similarly, for datetime-like ``start`` and ``end``, the frequency must be
|
| 1038 |
+
convertible to a DateOffset.
|
| 1039 |
+
|
| 1040 |
+
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
|
| 1041 |
+
... periods=3, freq='MS')
|
| 1042 |
+
IntervalIndex([(2017-01-01 00:00:00, 2017-02-01 00:00:00],
|
| 1043 |
+
(2017-02-01 00:00:00, 2017-03-01 00:00:00],
|
| 1044 |
+
(2017-03-01 00:00:00, 2017-04-01 00:00:00]],
|
| 1045 |
+
dtype='interval[datetime64[ns], right]')
|
| 1046 |
+
|
| 1047 |
+
Specify ``start``, ``end``, and ``periods``; the frequency is generated
|
| 1048 |
+
automatically (linearly spaced).
|
| 1049 |
+
|
| 1050 |
+
>>> pd.interval_range(start=0, end=6, periods=4)
|
| 1051 |
+
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
|
| 1052 |
+
dtype='interval[float64, right]')
|
| 1053 |
+
|
| 1054 |
+
The ``closed`` parameter specifies which endpoints of the individual
|
| 1055 |
+
intervals within the ``IntervalIndex`` are closed.
|
| 1056 |
+
|
| 1057 |
+
>>> pd.interval_range(end=5, periods=4, closed='both')
|
| 1058 |
+
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
|
| 1059 |
+
dtype='interval[int64, both]')
|
| 1060 |
+
"""
|
| 1061 |
+
start = maybe_box_datetimelike(start)
|
| 1062 |
+
end = maybe_box_datetimelike(end)
|
| 1063 |
+
endpoint = start if start is not None else end
|
| 1064 |
+
|
| 1065 |
+
if freq is None and com.any_none(periods, start, end):
|
| 1066 |
+
freq = 1 if is_number(endpoint) else "D"
|
| 1067 |
+
|
| 1068 |
+
if com.count_not_none(start, end, periods, freq) != 3:
|
| 1069 |
+
raise ValueError(
|
| 1070 |
+
"Of the four parameters: start, end, periods, and "
|
| 1071 |
+
"freq, exactly three must be specified"
|
| 1072 |
+
)
|
| 1073 |
+
|
| 1074 |
+
if not _is_valid_endpoint(start):
|
| 1075 |
+
raise ValueError(f"start must be numeric or datetime-like, got {start}")
|
| 1076 |
+
if not _is_valid_endpoint(end):
|
| 1077 |
+
raise ValueError(f"end must be numeric or datetime-like, got {end}")
|
| 1078 |
+
|
| 1079 |
+
periods = validate_periods(periods)
|
| 1080 |
+
|
| 1081 |
+
if freq is not None and not is_number(freq):
|
| 1082 |
+
try:
|
| 1083 |
+
freq = to_offset(freq)
|
| 1084 |
+
except ValueError as err:
|
| 1085 |
+
raise ValueError(
|
| 1086 |
+
f"freq must be numeric or convertible to DateOffset, got {freq}"
|
| 1087 |
+
) from err
|
| 1088 |
+
|
| 1089 |
+
# verify type compatibility
|
| 1090 |
+
if not all(
|
| 1091 |
+
[
|
| 1092 |
+
_is_type_compatible(start, end),
|
| 1093 |
+
_is_type_compatible(start, freq),
|
| 1094 |
+
_is_type_compatible(end, freq),
|
| 1095 |
+
]
|
| 1096 |
+
):
|
| 1097 |
+
raise TypeError("start, end, freq need to be type compatible")
|
| 1098 |
+
|
| 1099 |
+
# +1 to convert interval count to breaks count (n breaks = n-1 intervals)
|
| 1100 |
+
if periods is not None:
|
| 1101 |
+
periods += 1
|
| 1102 |
+
|
| 1103 |
+
breaks: np.ndarray | TimedeltaIndex | DatetimeIndex
|
| 1104 |
+
|
| 1105 |
+
if is_number(endpoint):
|
| 1106 |
+
if com.all_not_none(start, end, freq):
|
| 1107 |
+
# 0.1 ensures we capture end
|
| 1108 |
+
breaks = np.arange(start, end + (freq * 0.1), freq)
|
| 1109 |
+
else:
|
| 1110 |
+
# compute the period/start/end if unspecified (at most one)
|
| 1111 |
+
if periods is None:
|
| 1112 |
+
periods = int((end - start) // freq) + 1
|
| 1113 |
+
elif start is None:
|
| 1114 |
+
start = end - (periods - 1) * freq
|
| 1115 |
+
elif end is None:
|
| 1116 |
+
end = start + (periods - 1) * freq
|
| 1117 |
+
|
| 1118 |
+
breaks = np.linspace(start, end, periods)
|
| 1119 |
+
if all(is_integer(x) for x in com.not_none(start, end, freq)):
|
| 1120 |
+
# np.linspace always produces float output
|
| 1121 |
+
|
| 1122 |
+
# error: Argument 1 to "maybe_downcast_numeric" has incompatible type
|
| 1123 |
+
# "Union[ndarray[Any, Any], TimedeltaIndex, DatetimeIndex]";
|
| 1124 |
+
# expected "ndarray[Any, Any]" [
|
| 1125 |
+
breaks = maybe_downcast_numeric(
|
| 1126 |
+
breaks, # type: ignore[arg-type]
|
| 1127 |
+
np.dtype("int64"),
|
| 1128 |
+
)
|
| 1129 |
+
else:
|
| 1130 |
+
# delegate to the appropriate range function
|
| 1131 |
+
if isinstance(endpoint, Timestamp):
|
| 1132 |
+
breaks = date_range(start=start, end=end, periods=periods, freq=freq)
|
| 1133 |
+
else:
|
| 1134 |
+
breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq)
|
| 1135 |
+
|
| 1136 |
+
return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/multi.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/period.py
ADDED
|
@@ -0,0 +1,614 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from datetime import (
|
| 4 |
+
datetime,
|
| 5 |
+
timedelta,
|
| 6 |
+
)
|
| 7 |
+
from typing import TYPE_CHECKING
|
| 8 |
+
import warnings
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
from pandas._libs import index as libindex
|
| 13 |
+
from pandas._libs.tslibs import (
|
| 14 |
+
BaseOffset,
|
| 15 |
+
NaT,
|
| 16 |
+
Period,
|
| 17 |
+
Resolution,
|
| 18 |
+
Tick,
|
| 19 |
+
)
|
| 20 |
+
from pandas._libs.tslibs.dtypes import OFFSET_TO_PERIOD_FREQSTR
|
| 21 |
+
from pandas.util._decorators import (
|
| 22 |
+
cache_readonly,
|
| 23 |
+
doc,
|
| 24 |
+
)
|
| 25 |
+
from pandas.util._exceptions import find_stack_level
|
| 26 |
+
|
| 27 |
+
from pandas.core.dtypes.common import is_integer
|
| 28 |
+
from pandas.core.dtypes.dtypes import PeriodDtype
|
| 29 |
+
from pandas.core.dtypes.generic import ABCSeries
|
| 30 |
+
from pandas.core.dtypes.missing import is_valid_na_for_dtype
|
| 31 |
+
|
| 32 |
+
from pandas.core.arrays.period import (
|
| 33 |
+
PeriodArray,
|
| 34 |
+
period_array,
|
| 35 |
+
raise_on_incompatible,
|
| 36 |
+
validate_dtype_freq,
|
| 37 |
+
)
|
| 38 |
+
import pandas.core.common as com
|
| 39 |
+
import pandas.core.indexes.base as ibase
|
| 40 |
+
from pandas.core.indexes.base import maybe_extract_name
|
| 41 |
+
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
|
| 42 |
+
from pandas.core.indexes.datetimes import (
|
| 43 |
+
DatetimeIndex,
|
| 44 |
+
Index,
|
| 45 |
+
)
|
| 46 |
+
from pandas.core.indexes.extension import inherit_names
|
| 47 |
+
|
| 48 |
+
if TYPE_CHECKING:
|
| 49 |
+
from collections.abc import Hashable
|
| 50 |
+
|
| 51 |
+
from pandas._typing import (
|
| 52 |
+
Dtype,
|
| 53 |
+
DtypeObj,
|
| 54 |
+
Self,
|
| 55 |
+
npt,
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
|
| 60 |
+
_index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"})
|
| 61 |
+
_shared_doc_kwargs = {
|
| 62 |
+
"klass": "PeriodArray",
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
# --- Period index sketch
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _new_PeriodIndex(cls, **d):
|
| 69 |
+
# GH13277 for unpickling
|
| 70 |
+
values = d.pop("data")
|
| 71 |
+
if values.dtype == "int64":
|
| 72 |
+
freq = d.pop("freq", None)
|
| 73 |
+
dtype = PeriodDtype(freq)
|
| 74 |
+
values = PeriodArray(values, dtype=dtype)
|
| 75 |
+
return cls._simple_new(values, **d)
|
| 76 |
+
else:
|
| 77 |
+
return cls(values, **d)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@inherit_names(
|
| 81 |
+
["strftime", "start_time", "end_time"] + PeriodArray._field_ops,
|
| 82 |
+
PeriodArray,
|
| 83 |
+
wrap=True,
|
| 84 |
+
)
|
| 85 |
+
@inherit_names(["is_leap_year"], PeriodArray)
|
| 86 |
+
class PeriodIndex(DatetimeIndexOpsMixin):
|
| 87 |
+
"""
|
| 88 |
+
Immutable ndarray holding ordinal values indicating regular periods in time.
|
| 89 |
+
|
| 90 |
+
Index keys are boxed to Period objects which carries the metadata (eg,
|
| 91 |
+
frequency information).
|
| 92 |
+
|
| 93 |
+
Parameters
|
| 94 |
+
----------
|
| 95 |
+
data : array-like (1d int np.ndarray or PeriodArray), optional
|
| 96 |
+
Optional period-like data to construct index with.
|
| 97 |
+
copy : bool
|
| 98 |
+
Make a copy of input ndarray.
|
| 99 |
+
freq : str or period object, optional
|
| 100 |
+
One of pandas period strings or corresponding objects.
|
| 101 |
+
year : int, array, or Series, default None
|
| 102 |
+
|
| 103 |
+
.. deprecated:: 2.2.0
|
| 104 |
+
Use PeriodIndex.from_fields instead.
|
| 105 |
+
month : int, array, or Series, default None
|
| 106 |
+
|
| 107 |
+
.. deprecated:: 2.2.0
|
| 108 |
+
Use PeriodIndex.from_fields instead.
|
| 109 |
+
quarter : int, array, or Series, default None
|
| 110 |
+
|
| 111 |
+
.. deprecated:: 2.2.0
|
| 112 |
+
Use PeriodIndex.from_fields instead.
|
| 113 |
+
day : int, array, or Series, default None
|
| 114 |
+
|
| 115 |
+
.. deprecated:: 2.2.0
|
| 116 |
+
Use PeriodIndex.from_fields instead.
|
| 117 |
+
hour : int, array, or Series, default None
|
| 118 |
+
|
| 119 |
+
.. deprecated:: 2.2.0
|
| 120 |
+
Use PeriodIndex.from_fields instead.
|
| 121 |
+
minute : int, array, or Series, default None
|
| 122 |
+
|
| 123 |
+
.. deprecated:: 2.2.0
|
| 124 |
+
Use PeriodIndex.from_fields instead.
|
| 125 |
+
second : int, array, or Series, default None
|
| 126 |
+
|
| 127 |
+
.. deprecated:: 2.2.0
|
| 128 |
+
Use PeriodIndex.from_fields instead.
|
| 129 |
+
dtype : str or PeriodDtype, default None
|
| 130 |
+
|
| 131 |
+
Attributes
|
| 132 |
+
----------
|
| 133 |
+
day
|
| 134 |
+
dayofweek
|
| 135 |
+
day_of_week
|
| 136 |
+
dayofyear
|
| 137 |
+
day_of_year
|
| 138 |
+
days_in_month
|
| 139 |
+
daysinmonth
|
| 140 |
+
end_time
|
| 141 |
+
freq
|
| 142 |
+
freqstr
|
| 143 |
+
hour
|
| 144 |
+
is_leap_year
|
| 145 |
+
minute
|
| 146 |
+
month
|
| 147 |
+
quarter
|
| 148 |
+
qyear
|
| 149 |
+
second
|
| 150 |
+
start_time
|
| 151 |
+
week
|
| 152 |
+
weekday
|
| 153 |
+
weekofyear
|
| 154 |
+
year
|
| 155 |
+
|
| 156 |
+
Methods
|
| 157 |
+
-------
|
| 158 |
+
asfreq
|
| 159 |
+
strftime
|
| 160 |
+
to_timestamp
|
| 161 |
+
from_fields
|
| 162 |
+
from_ordinals
|
| 163 |
+
|
| 164 |
+
See Also
|
| 165 |
+
--------
|
| 166 |
+
Index : The base pandas Index type.
|
| 167 |
+
Period : Represents a period of time.
|
| 168 |
+
DatetimeIndex : Index with datetime64 data.
|
| 169 |
+
TimedeltaIndex : Index of timedelta64 data.
|
| 170 |
+
period_range : Create a fixed-frequency PeriodIndex.
|
| 171 |
+
|
| 172 |
+
Examples
|
| 173 |
+
--------
|
| 174 |
+
>>> idx = pd.PeriodIndex.from_fields(year=[2000, 2002], quarter=[1, 3])
|
| 175 |
+
>>> idx
|
| 176 |
+
PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
|
| 177 |
+
"""
|
| 178 |
+
|
| 179 |
+
_typ = "periodindex"
|
| 180 |
+
|
| 181 |
+
_data: PeriodArray
|
| 182 |
+
freq: BaseOffset
|
| 183 |
+
dtype: PeriodDtype
|
| 184 |
+
|
| 185 |
+
_data_cls = PeriodArray
|
| 186 |
+
_supports_partial_string_indexing = True
|
| 187 |
+
|
| 188 |
+
@property
|
| 189 |
+
def _engine_type(self) -> type[libindex.PeriodEngine]:
|
| 190 |
+
return libindex.PeriodEngine
|
| 191 |
+
|
| 192 |
+
@cache_readonly
|
| 193 |
+
def _resolution_obj(self) -> Resolution:
|
| 194 |
+
# for compat with DatetimeIndex
|
| 195 |
+
return self.dtype._resolution_obj
|
| 196 |
+
|
| 197 |
+
# --------------------------------------------------------------------
|
| 198 |
+
# methods that dispatch to array and wrap result in Index
|
| 199 |
+
# These are defined here instead of via inherit_names for mypy
|
| 200 |
+
|
| 201 |
+
@doc(
|
| 202 |
+
PeriodArray.asfreq,
|
| 203 |
+
other="pandas.arrays.PeriodArray",
|
| 204 |
+
other_name="PeriodArray",
|
| 205 |
+
**_shared_doc_kwargs,
|
| 206 |
+
)
|
| 207 |
+
def asfreq(self, freq=None, how: str = "E") -> Self:
|
| 208 |
+
arr = self._data.asfreq(freq, how)
|
| 209 |
+
return type(self)._simple_new(arr, name=self.name)
|
| 210 |
+
|
| 211 |
+
@doc(PeriodArray.to_timestamp)
|
| 212 |
+
def to_timestamp(self, freq=None, how: str = "start") -> DatetimeIndex:
|
| 213 |
+
arr = self._data.to_timestamp(freq, how)
|
| 214 |
+
return DatetimeIndex._simple_new(arr, name=self.name)
|
| 215 |
+
|
| 216 |
+
@property
|
| 217 |
+
@doc(PeriodArray.hour.fget)
|
| 218 |
+
def hour(self) -> Index:
|
| 219 |
+
return Index(self._data.hour, name=self.name)
|
| 220 |
+
|
| 221 |
+
@property
|
| 222 |
+
@doc(PeriodArray.minute.fget)
|
| 223 |
+
def minute(self) -> Index:
|
| 224 |
+
return Index(self._data.minute, name=self.name)
|
| 225 |
+
|
| 226 |
+
@property
|
| 227 |
+
@doc(PeriodArray.second.fget)
|
| 228 |
+
def second(self) -> Index:
|
| 229 |
+
return Index(self._data.second, name=self.name)
|
| 230 |
+
|
| 231 |
+
# ------------------------------------------------------------------------
|
| 232 |
+
# Index Constructors
|
| 233 |
+
|
| 234 |
+
def __new__(
|
| 235 |
+
cls,
|
| 236 |
+
data=None,
|
| 237 |
+
ordinal=None,
|
| 238 |
+
freq=None,
|
| 239 |
+
dtype: Dtype | None = None,
|
| 240 |
+
copy: bool = False,
|
| 241 |
+
name: Hashable | None = None,
|
| 242 |
+
**fields,
|
| 243 |
+
) -> Self:
|
| 244 |
+
valid_field_set = {
|
| 245 |
+
"year",
|
| 246 |
+
"month",
|
| 247 |
+
"day",
|
| 248 |
+
"quarter",
|
| 249 |
+
"hour",
|
| 250 |
+
"minute",
|
| 251 |
+
"second",
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
refs = None
|
| 255 |
+
if not copy and isinstance(data, (Index, ABCSeries)):
|
| 256 |
+
refs = data._references
|
| 257 |
+
|
| 258 |
+
if not set(fields).issubset(valid_field_set):
|
| 259 |
+
argument = next(iter(set(fields) - valid_field_set))
|
| 260 |
+
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
|
| 261 |
+
elif len(fields):
|
| 262 |
+
# GH#55960
|
| 263 |
+
warnings.warn(
|
| 264 |
+
"Constructing PeriodIndex from fields is deprecated. Use "
|
| 265 |
+
"PeriodIndex.from_fields instead.",
|
| 266 |
+
FutureWarning,
|
| 267 |
+
stacklevel=find_stack_level(),
|
| 268 |
+
)
|
| 269 |
+
|
| 270 |
+
if ordinal is not None:
|
| 271 |
+
# GH#55960
|
| 272 |
+
warnings.warn(
|
| 273 |
+
"The 'ordinal' keyword in PeriodIndex is deprecated and will "
|
| 274 |
+
"be removed in a future version. Use PeriodIndex.from_ordinals "
|
| 275 |
+
"instead.",
|
| 276 |
+
FutureWarning,
|
| 277 |
+
stacklevel=find_stack_level(),
|
| 278 |
+
)
|
| 279 |
+
|
| 280 |
+
name = maybe_extract_name(name, data, cls)
|
| 281 |
+
|
| 282 |
+
if data is None and ordinal is None:
|
| 283 |
+
# range-based.
|
| 284 |
+
if not fields:
|
| 285 |
+
# test_pickle_compat_construction
|
| 286 |
+
cls._raise_scalar_data_error(None)
|
| 287 |
+
data = cls.from_fields(**fields, freq=freq)._data
|
| 288 |
+
copy = False
|
| 289 |
+
|
| 290 |
+
elif fields:
|
| 291 |
+
if data is not None:
|
| 292 |
+
raise ValueError("Cannot pass both data and fields")
|
| 293 |
+
raise ValueError("Cannot pass both ordinal and fields")
|
| 294 |
+
|
| 295 |
+
else:
|
| 296 |
+
freq = validate_dtype_freq(dtype, freq)
|
| 297 |
+
|
| 298 |
+
# PeriodIndex allow PeriodIndex(period_index, freq=different)
|
| 299 |
+
# Let's not encourage that kind of behavior in PeriodArray.
|
| 300 |
+
|
| 301 |
+
if freq and isinstance(data, cls) and data.freq != freq:
|
| 302 |
+
# TODO: We can do some of these with no-copy / coercion?
|
| 303 |
+
# e.g. D -> 2D seems to be OK
|
| 304 |
+
data = data.asfreq(freq)
|
| 305 |
+
|
| 306 |
+
if data is None and ordinal is not None:
|
| 307 |
+
ordinal = np.asarray(ordinal, dtype=np.int64)
|
| 308 |
+
dtype = PeriodDtype(freq)
|
| 309 |
+
data = PeriodArray(ordinal, dtype=dtype)
|
| 310 |
+
elif data is not None and ordinal is not None:
|
| 311 |
+
raise ValueError("Cannot pass both data and ordinal")
|
| 312 |
+
else:
|
| 313 |
+
# don't pass copy here, since we copy later.
|
| 314 |
+
data = period_array(data=data, freq=freq)
|
| 315 |
+
|
| 316 |
+
if copy:
|
| 317 |
+
data = data.copy()
|
| 318 |
+
|
| 319 |
+
return cls._simple_new(data, name=name, refs=refs)
|
| 320 |
+
|
| 321 |
+
@classmethod
|
| 322 |
+
def from_fields(
|
| 323 |
+
cls,
|
| 324 |
+
*,
|
| 325 |
+
year=None,
|
| 326 |
+
quarter=None,
|
| 327 |
+
month=None,
|
| 328 |
+
day=None,
|
| 329 |
+
hour=None,
|
| 330 |
+
minute=None,
|
| 331 |
+
second=None,
|
| 332 |
+
freq=None,
|
| 333 |
+
) -> Self:
|
| 334 |
+
fields = {
|
| 335 |
+
"year": year,
|
| 336 |
+
"quarter": quarter,
|
| 337 |
+
"month": month,
|
| 338 |
+
"day": day,
|
| 339 |
+
"hour": hour,
|
| 340 |
+
"minute": minute,
|
| 341 |
+
"second": second,
|
| 342 |
+
}
|
| 343 |
+
fields = {key: value for key, value in fields.items() if value is not None}
|
| 344 |
+
arr = PeriodArray._from_fields(fields=fields, freq=freq)
|
| 345 |
+
return cls._simple_new(arr)
|
| 346 |
+
|
| 347 |
+
@classmethod
|
| 348 |
+
def from_ordinals(cls, ordinals, *, freq, name=None) -> Self:
|
| 349 |
+
ordinals = np.asarray(ordinals, dtype=np.int64)
|
| 350 |
+
dtype = PeriodDtype(freq)
|
| 351 |
+
data = PeriodArray._simple_new(ordinals, dtype=dtype)
|
| 352 |
+
return cls._simple_new(data, name=name)
|
| 353 |
+
|
| 354 |
+
# ------------------------------------------------------------------------
|
| 355 |
+
# Data
|
| 356 |
+
|
| 357 |
+
@property
|
| 358 |
+
def values(self) -> npt.NDArray[np.object_]:
|
| 359 |
+
return np.asarray(self, dtype=object)
|
| 360 |
+
|
| 361 |
+
def _maybe_convert_timedelta(self, other) -> int | npt.NDArray[np.int64]:
|
| 362 |
+
"""
|
| 363 |
+
Convert timedelta-like input to an integer multiple of self.freq
|
| 364 |
+
|
| 365 |
+
Parameters
|
| 366 |
+
----------
|
| 367 |
+
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
|
| 368 |
+
|
| 369 |
+
Returns
|
| 370 |
+
-------
|
| 371 |
+
converted : int, np.ndarray[int64]
|
| 372 |
+
|
| 373 |
+
Raises
|
| 374 |
+
------
|
| 375 |
+
IncompatibleFrequency : if the input cannot be written as a multiple
|
| 376 |
+
of self.freq. Note IncompatibleFrequency subclasses ValueError.
|
| 377 |
+
"""
|
| 378 |
+
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
|
| 379 |
+
if isinstance(self.freq, Tick):
|
| 380 |
+
# _check_timedeltalike_freq_compat will raise if incompatible
|
| 381 |
+
delta = self._data._check_timedeltalike_freq_compat(other)
|
| 382 |
+
return delta
|
| 383 |
+
elif isinstance(other, BaseOffset):
|
| 384 |
+
if other.base == self.freq.base:
|
| 385 |
+
return other.n
|
| 386 |
+
|
| 387 |
+
raise raise_on_incompatible(self, other)
|
| 388 |
+
elif is_integer(other):
|
| 389 |
+
assert isinstance(other, int)
|
| 390 |
+
return other
|
| 391 |
+
|
| 392 |
+
# raise when input doesn't have freq
|
| 393 |
+
raise raise_on_incompatible(self, None)
|
| 394 |
+
|
| 395 |
+
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
|
| 396 |
+
"""
|
| 397 |
+
Can we compare values of the given dtype to our own?
|
| 398 |
+
"""
|
| 399 |
+
return self.dtype == dtype
|
| 400 |
+
|
| 401 |
+
# ------------------------------------------------------------------------
|
| 402 |
+
# Index Methods
|
| 403 |
+
|
| 404 |
+
def asof_locs(self, where: Index, mask: npt.NDArray[np.bool_]) -> np.ndarray:
|
| 405 |
+
"""
|
| 406 |
+
where : array of timestamps
|
| 407 |
+
mask : np.ndarray[bool]
|
| 408 |
+
Array of booleans where data is not NA.
|
| 409 |
+
"""
|
| 410 |
+
if isinstance(where, DatetimeIndex):
|
| 411 |
+
where = PeriodIndex(where._values, freq=self.freq)
|
| 412 |
+
elif not isinstance(where, PeriodIndex):
|
| 413 |
+
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
|
| 414 |
+
|
| 415 |
+
return super().asof_locs(where, mask)
|
| 416 |
+
|
| 417 |
+
@property
|
| 418 |
+
def is_full(self) -> bool:
|
| 419 |
+
"""
|
| 420 |
+
Returns True if this PeriodIndex is range-like in that all Periods
|
| 421 |
+
between start and end are present, in order.
|
| 422 |
+
"""
|
| 423 |
+
if len(self) == 0:
|
| 424 |
+
return True
|
| 425 |
+
if not self.is_monotonic_increasing:
|
| 426 |
+
raise ValueError("Index is not monotonic")
|
| 427 |
+
values = self.asi8
|
| 428 |
+
return bool(((values[1:] - values[:-1]) < 2).all())
|
| 429 |
+
|
| 430 |
+
@property
|
| 431 |
+
def inferred_type(self) -> str:
|
| 432 |
+
# b/c data is represented as ints make sure we can't have ambiguous
|
| 433 |
+
# indexing
|
| 434 |
+
return "period"
|
| 435 |
+
|
| 436 |
+
# ------------------------------------------------------------------------
|
| 437 |
+
# Indexing Methods
|
| 438 |
+
|
| 439 |
+
def _convert_tolerance(self, tolerance, target):
|
| 440 |
+
# Returned tolerance must be in dtype/units so that
|
| 441 |
+
# `|self._get_engine_target() - target._engine_target()| <= tolerance`
|
| 442 |
+
# is meaningful. Since PeriodIndex returns int64 for engine_target,
|
| 443 |
+
# we may need to convert timedelta64 tolerance to int64.
|
| 444 |
+
tolerance = super()._convert_tolerance(tolerance, target)
|
| 445 |
+
|
| 446 |
+
if self.dtype == target.dtype:
|
| 447 |
+
# convert tolerance to i8
|
| 448 |
+
tolerance = self._maybe_convert_timedelta(tolerance)
|
| 449 |
+
|
| 450 |
+
return tolerance
|
| 451 |
+
|
| 452 |
+
def get_loc(self, key):
    """
    Get integer location for requested label.

    Parameters
    ----------
    key : Period, NaT, str, or datetime
        String or datetime key must be parsable as Period.

    Returns
    -------
    loc : int or ndarray[int64]

    Raises
    ------
    KeyError
        Key is not present in the index.
    TypeError
        If key is listlike or otherwise not hashable.
    """
    # Keep the caller's original key for error messages; `key` is
    # rewritten below as it is normalized to a Period/NaT.
    orig_key = key

    self._check_indexing_error(key)

    if is_valid_na_for_dtype(key, self.dtype):
        key = NaT

    elif isinstance(key, str):
        try:
            parsed, reso = self._parse_with_reso(key)
        except ValueError as err:
            # A string with invalid format
            raise KeyError(f"Cannot interpret '{key}' as period") from err

        if self._can_partial_date_slice(reso):
            # A coarser-resolution string matches a *range* of labels.
            try:
                return self._partial_date_slice(reso, parsed)
            except KeyError as err:
                raise KeyError(key) from err

        if reso == self._resolution_obj:
            # the reso < self._resolution_obj case goes
            # through _get_string_slice
            key = self._cast_partial_indexing_scalar(parsed)
        else:
            raise KeyError(key)

    elif isinstance(key, Period):
        self._disallow_mismatched_indexing(key)

    elif isinstance(key, datetime):
        key = self._cast_partial_indexing_scalar(key)

    else:
        # in particular integer, which Period constructor would cast to string
        raise KeyError(key)

    # Delegate the actual lookup to the base Index implementation, but
    # report errors in terms of the key the caller passed in.
    try:
        return Index.get_loc(self, key)
    except KeyError as err:
        raise KeyError(orig_key) from err
|
| 513 |
+
|
| 514 |
+
def _disallow_mismatched_indexing(self, key: Period) -> None:
    # A Period whose freq/dtype differs from ours can never be present.
    if key._dtype == self.dtype:
        return
    raise KeyError(key)
|
| 517 |
+
|
| 518 |
+
def _cast_partial_indexing_scalar(self, label: datetime) -> Period:
    # A label that cannot be represented as a Period at our freq is simply
    # not present in the index -> KeyError, not ValueError.
    try:
        return Period(label, freq=self.freq)
    except ValueError as err:
        raise KeyError(label) from err
|
| 525 |
+
|
| 526 |
+
@doc(DatetimeIndexOpsMixin._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side: str):
    # Interpret a datetime bound as a Period at our own freq before
    # handing off to the shared datetime-like implementation.
    if isinstance(label, datetime):
        label = self._cast_partial_indexing_scalar(label)
    return super()._maybe_cast_slice_bound(label, side)
|
| 532 |
+
|
| 533 |
+
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
    # Map the parse resolution to a period freq alias (falling back to the
    # abbreviation itself), then expand that coarse period into the first
    # and last Periods it covers at our own freq.
    abbrev = reso.attr_abbrev
    freq = OFFSET_TO_PERIOD_FREQSTR.get(abbrev, abbrev)
    coarse = Period(parsed, freq=freq)
    lower = coarse.asfreq(self.freq, how="start")
    upper = coarse.asfreq(self.freq, how="end")
    return (lower, upper)
|
| 537 |
+
|
| 538 |
+
@doc(DatetimeIndexOpsMixin.shift)
def shift(self, periods: int = 1, freq=None) -> Self:
    # A PeriodIndex has exactly one freq, so shifting by some other freq
    # is not meaningful (unlike DatetimeIndex.shift).
    if freq is not None:
        raise TypeError(
            f"`freq` argument is not supported for {type(self).__name__}.shift"
        )
    # Period + integer advances each element by `periods` steps of self.freq.
    return self + periods
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
def period_range(
|
| 548 |
+
start=None,
|
| 549 |
+
end=None,
|
| 550 |
+
periods: int | None = None,
|
| 551 |
+
freq=None,
|
| 552 |
+
name: Hashable | None = None,
|
| 553 |
+
) -> PeriodIndex:
|
| 554 |
+
"""
|
| 555 |
+
Return a fixed frequency PeriodIndex.
|
| 556 |
+
|
| 557 |
+
The day (calendar) is the default frequency.
|
| 558 |
+
|
| 559 |
+
Parameters
|
| 560 |
+
----------
|
| 561 |
+
start : str, datetime, date, pandas.Timestamp, or period-like, default None
|
| 562 |
+
Left bound for generating periods.
|
| 563 |
+
end : str, datetime, date, pandas.Timestamp, or period-like, default None
|
| 564 |
+
Right bound for generating periods.
|
| 565 |
+
periods : int, default None
|
| 566 |
+
Number of periods to generate.
|
| 567 |
+
freq : str or DateOffset, optional
|
| 568 |
+
Frequency alias. By default the freq is taken from `start` or `end`
|
| 569 |
+
if those are Period objects. Otherwise, the default is ``"D"`` for
|
| 570 |
+
daily frequency.
|
| 571 |
+
name : str, default None
|
| 572 |
+
Name of the resulting PeriodIndex.
|
| 573 |
+
|
| 574 |
+
Returns
|
| 575 |
+
-------
|
| 576 |
+
PeriodIndex
|
| 577 |
+
|
| 578 |
+
Notes
|
| 579 |
+
-----
|
| 580 |
+
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
|
| 581 |
+
must be specified.
|
| 582 |
+
|
| 583 |
+
To learn more about the frequency strings, please see `this link
|
| 584 |
+
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
|
| 585 |
+
|
| 586 |
+
Examples
|
| 587 |
+
--------
|
| 588 |
+
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
|
| 589 |
+
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
|
| 590 |
+
'2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
|
| 591 |
+
'2018-01'],
|
| 592 |
+
dtype='period[M]')
|
| 593 |
+
|
| 594 |
+
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
|
| 595 |
+
endpoints for a ``PeriodIndex`` with frequency matching that of the
|
| 596 |
+
``period_range`` constructor.
|
| 597 |
+
|
| 598 |
+
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
|
| 599 |
+
... end=pd.Period('2017Q2', freq='Q'), freq='M')
|
| 600 |
+
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
|
| 601 |
+
dtype='period[M]')
|
| 602 |
+
"""
|
| 603 |
+
if com.count_not_none(start, end, periods) != 2:
|
| 604 |
+
raise ValueError(
|
| 605 |
+
"Of the three parameters: start, end, and periods, "
|
| 606 |
+
"exactly two must be specified"
|
| 607 |
+
)
|
| 608 |
+
if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):
|
| 609 |
+
freq = "D"
|
| 610 |
+
|
| 611 |
+
data, freq = PeriodArray._generate_range(start, end, periods, freq)
|
| 612 |
+
dtype = PeriodDtype(freq)
|
| 613 |
+
data = PeriodArray(data, dtype=dtype)
|
| 614 |
+
return PeriodIndex(data, name=name)
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/range.py
ADDED
|
@@ -0,0 +1,1187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from collections.abc import (
|
| 4 |
+
Hashable,
|
| 5 |
+
Iterator,
|
| 6 |
+
)
|
| 7 |
+
from datetime import timedelta
|
| 8 |
+
import operator
|
| 9 |
+
from sys import getsizeof
|
| 10 |
+
from typing import (
|
| 11 |
+
TYPE_CHECKING,
|
| 12 |
+
Any,
|
| 13 |
+
Callable,
|
| 14 |
+
Literal,
|
| 15 |
+
cast,
|
| 16 |
+
overload,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
|
| 21 |
+
from pandas._libs import (
|
| 22 |
+
index as libindex,
|
| 23 |
+
lib,
|
| 24 |
+
)
|
| 25 |
+
from pandas._libs.algos import unique_deltas
|
| 26 |
+
from pandas._libs.lib import no_default
|
| 27 |
+
from pandas.compat.numpy import function as nv
|
| 28 |
+
from pandas.util._decorators import (
|
| 29 |
+
cache_readonly,
|
| 30 |
+
deprecate_nonkeyword_arguments,
|
| 31 |
+
doc,
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
from pandas.core.dtypes.common import (
|
| 35 |
+
ensure_platform_int,
|
| 36 |
+
ensure_python_int,
|
| 37 |
+
is_float,
|
| 38 |
+
is_integer,
|
| 39 |
+
is_scalar,
|
| 40 |
+
is_signed_integer_dtype,
|
| 41 |
+
)
|
| 42 |
+
from pandas.core.dtypes.generic import ABCTimedeltaIndex
|
| 43 |
+
|
| 44 |
+
from pandas.core import ops
|
| 45 |
+
import pandas.core.common as com
|
| 46 |
+
from pandas.core.construction import extract_array
|
| 47 |
+
import pandas.core.indexes.base as ibase
|
| 48 |
+
from pandas.core.indexes.base import (
|
| 49 |
+
Index,
|
| 50 |
+
maybe_extract_name,
|
| 51 |
+
)
|
| 52 |
+
from pandas.core.ops.common import unpack_zerodim_and_defer
|
| 53 |
+
|
| 54 |
+
if TYPE_CHECKING:
|
| 55 |
+
from pandas._typing import (
|
| 56 |
+
Axis,
|
| 57 |
+
Dtype,
|
| 58 |
+
NaPosition,
|
| 59 |
+
Self,
|
| 60 |
+
npt,
|
| 61 |
+
)
|
| 62 |
+
# Module-level singletons: the canonical empty range, and the int64 dtype
# that every RangeIndex reports (see RangeIndex.dtype).
_empty_range = range(0)
_dtype_int64 = np.dtype(np.int64)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class RangeIndex(Index):
|
| 67 |
+
"""
|
| 68 |
+
Immutable Index implementing a monotonic integer range.
|
| 69 |
+
|
| 70 |
+
RangeIndex is a memory-saving special case of an Index limited to representing
|
| 71 |
+
monotonic ranges with a 64-bit dtype. Using RangeIndex may in some instances
|
| 72 |
+
improve computing speed.
|
| 73 |
+
|
| 74 |
+
This is the default index type used
|
| 75 |
+
by DataFrame and Series when no explicit index is provided by the user.
|
| 76 |
+
|
| 77 |
+
Parameters
|
| 78 |
+
----------
|
| 79 |
+
start : int (default: 0), range, or other RangeIndex instance
|
| 80 |
+
If int and "stop" is not given, interpreted as "stop" instead.
|
| 81 |
+
stop : int (default: 0)
|
| 82 |
+
step : int (default: 1)
|
| 83 |
+
dtype : np.int64
|
| 84 |
+
Unused, accepted for homogeneity with other index types.
|
| 85 |
+
copy : bool, default False
|
| 86 |
+
Unused, accepted for homogeneity with other index types.
|
| 87 |
+
name : object, optional
|
| 88 |
+
Name to be stored in the index.
|
| 89 |
+
|
| 90 |
+
Attributes
|
| 91 |
+
----------
|
| 92 |
+
start
|
| 93 |
+
stop
|
| 94 |
+
step
|
| 95 |
+
|
| 96 |
+
Methods
|
| 97 |
+
-------
|
| 98 |
+
from_range
|
| 99 |
+
|
| 100 |
+
See Also
|
| 101 |
+
--------
|
| 102 |
+
Index : The base pandas Index type.
|
| 103 |
+
|
| 104 |
+
Examples
|
| 105 |
+
--------
|
| 106 |
+
>>> list(pd.RangeIndex(5))
|
| 107 |
+
[0, 1, 2, 3, 4]
|
| 108 |
+
|
| 109 |
+
>>> list(pd.RangeIndex(-2, 4))
|
| 110 |
+
[-2, -1, 0, 1, 2, 3]
|
| 111 |
+
|
| 112 |
+
>>> list(pd.RangeIndex(0, 10, 2))
|
| 113 |
+
[0, 2, 4, 6, 8]
|
| 114 |
+
|
| 115 |
+
>>> list(pd.RangeIndex(2, -10, -3))
|
| 116 |
+
[2, -1, -4, -7]
|
| 117 |
+
|
| 118 |
+
>>> list(pd.RangeIndex(0))
|
| 119 |
+
[]
|
| 120 |
+
|
| 121 |
+
>>> list(pd.RangeIndex(1, 0))
|
| 122 |
+
[]
|
| 123 |
+
"""
|
| 124 |
+
|
| 125 |
+
# Identifier used by pandas' internal index-type checks.
_typ = "rangeindex"
# (validator predicate, human-readable description) consumed by _validate_dtype.
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
# Backing store is a builtin range; _values is the lazily-built ndarray.
_range: range
_values: np.ndarray
|
| 129 |
+
|
| 130 |
+
@property
def _engine_type(self) -> type[libindex.Int64Engine]:
    # Lookups fall back to the materialized int64 array (see ``_data``).
    return libindex.Int64Engine
|
| 133 |
+
|
| 134 |
+
# --------------------------------------------------------------------
|
| 135 |
+
# Constructors
|
| 136 |
+
|
| 137 |
+
def __new__(
    cls,
    start=None,
    stop=None,
    step=None,
    dtype: Dtype | None = None,
    copy: bool = False,
    name: Hashable | None = None,
) -> Self:
    """Construct a RangeIndex from ints, a range, or another RangeIndex."""
    cls._validate_dtype(dtype)
    name = maybe_extract_name(name, start, cls)

    # Fast paths: copying another RangeIndex, or wrapping a builtin range.
    if isinstance(start, cls):
        return start.copy(name=name)
    if isinstance(start, range):
        return cls._simple_new(start, name=name)

    if com.all_none(start, stop, step):
        raise TypeError("RangeIndex(...) must be called with integers")

    start = 0 if start is None else ensure_python_int(start)

    # Mirror builtin range(): a single positional argument is the stop.
    if stop is None:
        start, stop = 0, start
    else:
        stop = ensure_python_int(stop)

    step = 1 if step is None else ensure_python_int(step)
    if step == 0:
        raise ValueError("Step must not be zero")

    return cls._simple_new(range(start, stop, step), name=name)
|
| 172 |
+
|
| 173 |
+
@classmethod
def from_range(cls, data: range, name=None, dtype: Dtype | None = None) -> Self:
    """
    Create :class:`pandas.RangeIndex` from a ``range`` object.

    Returns
    -------
    RangeIndex

    Examples
    --------
    >>> pd.RangeIndex.from_range(range(5))
    RangeIndex(start=0, stop=5, step=1)

    >>> pd.RangeIndex.from_range(range(2, -10, -3))
    RangeIndex(start=2, stop=-10, step=-3)
    """
    if not isinstance(data, range):
        msg = (
            f"{cls.__name__}(...) must be called with object coercible to a "
            f"range, {repr(data)} was passed"
        )
        raise TypeError(msg)
    cls._validate_dtype(dtype)
    return cls._simple_new(data, name=name)
|
| 197 |
+
|
| 198 |
+
# error: Argument 1 of "_simple_new" is incompatible with supertype "Index";
|
| 199 |
+
# supertype defines the argument type as
|
| 200 |
+
# "Union[ExtensionArray, ndarray[Any, Any]]" [override]
|
| 201 |
+
@classmethod
def _simple_new(  # type: ignore[override]
    cls, values: range, name: Hashable | None = None
) -> Self:
    # Trusted fast-path constructor: caller guarantees `values` is a
    # builtin range, so skip all validation.
    assert isinstance(values, range)

    result = object.__new__(cls)
    result._range = values
    result._name = name
    result._cache = {}
    result._reset_identity()
    result._references = None
    return result
|
| 215 |
+
|
| 216 |
+
@classmethod
def _validate_dtype(cls, dtype: Dtype | None) -> None:
    """Raise ValueError unless `dtype` is None or a signed-integer dtype."""
    if dtype is None:
        # None means "use the default", which is always acceptable.
        return

    is_valid, expected = cls._dtype_validation_metadata
    if not is_valid(dtype):
        raise ValueError(
            f"Incorrect `dtype` passed: expected {expected}, received {dtype}"
        )
|
| 226 |
+
|
| 227 |
+
# --------------------------------------------------------------------
|
| 228 |
+
|
| 229 |
+
# error: Return type "Type[Index]" of "_constructor" incompatible with return
|
| 230 |
+
# type "Type[RangeIndex]" in supertype "Index"
|
| 231 |
+
@cache_readonly
def _constructor(self) -> type[Index]:  # type: ignore[override]
    """return the class to use for construction"""
    # Operations that materialize arbitrary values cannot stay a
    # RangeIndex, so deliberately fall back to the plain Index class.
    return Index
|
| 235 |
+
|
| 236 |
+
# error: Signature of "_data" incompatible with supertype "Index"
|
| 237 |
+
@cache_readonly
def _data(self) -> np.ndarray:  # type: ignore[override]
    """
    An int array that for performance reasons is created only when needed.

    The constructed array is saved in ``_cache``.
    """
    rng = self._range
    return np.arange(rng.start, rng.stop, rng.step, dtype=np.int64)
|
| 245 |
+
|
| 246 |
+
def _get_data_as_items(self) -> list[tuple[str, int]]:
    """return a list of tuples of start, stop, step"""
    rng = self._range
    names = ("start", "stop", "step")
    return list(zip(names, (rng.start, rng.stop, rng.step)))
|
| 250 |
+
|
| 251 |
+
def __reduce__(self):
    # Pickle through the generic _new_Index constructor with the
    # {name, start, stop, step} kwargs: round-trips the index without
    # ever materializing its values.
    d = {"name": self._name}
    d.update(dict(self._get_data_as_items()))
    return ibase._new_Index, (type(self), d), None
|
| 255 |
+
|
| 256 |
+
# --------------------------------------------------------------------
|
| 257 |
+
# Rendering Methods
|
| 258 |
+
|
| 259 |
+
def _format_attrs(self):
    """Return ``(attr, formatted_value)`` pairs used by the repr."""
    pairs = self._get_data_as_items()
    attrs = cast("list[tuple[str, str | int]]", pairs)
    name = self._name
    if name is not None:
        attrs.append(("name", ibase.default_pprint(name)))
    return attrs
|
| 267 |
+
|
| 268 |
+
def _format_with_header(self, *, header: list[str], na_rep: str) -> list[str]:
    # Equivalent to the generic Index implementation but faster: a range
    # has no NA values, and the widest rendering is one of the endpoints.
    rng = self._range
    if not len(rng):
        return header
    width = max(len(str(rng[0])), len(str(rng[-1])))
    return header + [f"{value:<{width}}" for value in rng]
|
| 277 |
+
|
| 278 |
+
# --------------------------------------------------------------------
|
| 279 |
+
|
| 280 |
+
@property
def start(self) -> int:
    """
    The value of the `start` parameter (``0`` if this was not supplied).

    Examples
    --------
    >>> idx = pd.RangeIndex(5)
    >>> idx.start
    0

    >>> idx = pd.RangeIndex(2, -10, -3)
    >>> idx.start
    2
    """
    # GH 25710: delegate to the underlying builtin range object.
    return self._range.start
|
| 297 |
+
|
| 298 |
+
@property
def stop(self) -> int:
    """
    The value of the `stop` parameter.

    Examples
    --------
    >>> idx = pd.RangeIndex(5)
    >>> idx.stop
    5

    >>> idx = pd.RangeIndex(2, -10, -3)
    >>> idx.stop
    -10
    """
    # Delegate to the underlying builtin range object.
    return self._range.stop
|
| 314 |
+
|
| 315 |
+
@property
def step(self) -> int:
    """
    The value of the `step` parameter (``1`` if this was not supplied).

    Examples
    --------
    >>> idx = pd.RangeIndex(5)
    >>> idx.step
    1

    >>> idx = pd.RangeIndex(2, -10, -3)
    >>> idx.step
    -3

    Even if :class:`pandas.RangeIndex` is empty, ``step`` is still ``1`` if
    not supplied.

    >>> idx = pd.RangeIndex(1, 0)
    >>> idx.step
    1
    """
    # GH 25710: delegate to the underlying builtin range object.
    return self._range.step
|
| 339 |
+
|
| 340 |
+
@cache_readonly
def nbytes(self) -> int:
    """
    Return the number of bytes in the underlying data.
    """
    # Size of the range object itself plus its three int attributes;
    # no array is ever counted because none needs to exist.
    rng = self._range
    attr_sizes = (getsizeof(getattr(rng, attr)) for attr in ("start", "stop", "step"))
    return getsizeof(rng) + sum(attr_sizes)
|
| 350 |
+
|
| 351 |
+
def memory_usage(self, deep: bool = False) -> int:
    """
    Memory usage of my values

    Parameters
    ----------
    deep : bool
        Introspect the data deeply, interrogate
        `object` dtypes for system-level memory consumption

    Returns
    -------
    bytes used

    Notes
    -----
    Memory usage does not include memory consumed by elements that
    are not components of the array if deep=False

    See Also
    --------
    numpy.ndarray.nbytes
    """
    # `deep` is irrelevant for a RangeIndex: there are no object-dtype
    # elements to introspect, so report the same number either way.
    return self.nbytes
|
| 375 |
+
|
| 376 |
+
@property
def dtype(self) -> np.dtype:
    # Always the shared int64 dtype singleton, regardless of platform.
    return _dtype_int64
|
| 379 |
+
|
| 380 |
+
@property
def is_unique(self) -> bool:
    """return if the index has unique values"""
    # A range never repeats a value (step is guaranteed non-zero).
    return True
|
| 384 |
+
|
| 385 |
+
@cache_readonly
def is_monotonic_increasing(self) -> bool:
    # Trivially true for 0/1 elements; otherwise decided by the step sign.
    return len(self) <= 1 or self._range.step > 0
|
| 388 |
+
|
| 389 |
+
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
    # Trivially true for 0/1 elements; otherwise decided by the step sign.
    return len(self) <= 1 or self._range.step < 0
|
| 392 |
+
|
| 393 |
+
def __contains__(self, key: Any) -> bool:
    # Reject unhashable keys up front (matching dict/set semantics).
    hash(key)
    try:
        as_int = ensure_python_int(key)
    except TypeError:
        # Anything that is not integer-like can never be in the range.
        return False
    return as_int in self._range
|
| 400 |
+
|
| 401 |
+
@property
def inferred_type(self) -> str:
    # A RangeIndex always holds int64 values.
    return "integer"
|
| 404 |
+
|
| 405 |
+
# --------------------------------------------------------------------
|
| 406 |
+
# Indexing Methods
|
| 407 |
+
|
| 408 |
+
@doc(Index.get_loc)
def get_loc(self, key) -> int:
    # Integer-like labels are located arithmetically via range.index.
    if is_integer(key) or (is_float(key) and key.is_integer()):
        try:
            return self._range.index(int(key))
        except ValueError as err:
            raise KeyError(key) from err
    if isinstance(key, Hashable):
        # Hashable but not integer-like: simply not present.
        raise KeyError(key)
    # Unhashable: let the shared helper raise the conventional TypeError.
    self._check_indexing_error(key)
    raise KeyError(key)
|
| 420 |
+
|
| 421 |
+
def _get_indexer(
    self,
    target: Index,
    method: str | None = None,
    limit: int | None = None,
    tolerance=None,
) -> npt.NDArray[np.intp]:
    if com.any_not_none(method, tolerance, limit):
        # Inexact matching cannot use the arithmetic fast path below.
        return super()._get_indexer(
            target, method=method, tolerance=tolerance, limit=limit
        )

    if self.step > 0:
        start, stop, step = self.start, self.stop, self.step
    else:
        # GH 28678: work on reversed range for simplicity
        reverse = self._range[::-1]
        start, stop, step = reverse.start, reverse.stop, reverse.step

    target_array = np.asarray(target)
    # Candidate position of each target value relative to `start`; a value
    # is actually present iff it lies on the step grid and inside bounds.
    locs = target_array - start
    valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
    # -1 marks "not found", per pandas indexer convention.
    locs[~valid] = -1
    locs[valid] = locs[valid] / step

    if step != self.step:
        # We reversed this range: transform to original locs
        locs[valid] = len(self) - 1 - locs[valid]
    return ensure_platform_int(locs)
|
| 450 |
+
|
| 451 |
+
@cache_readonly
def _should_fallback_to_positional(self) -> bool:
    """
    Should an integer key be treated as positional?
    """
    # Never: the labels themselves are integers, so integer keys are
    # always label-based lookups here.
    return False
|
| 457 |
+
|
| 458 |
+
# --------------------------------------------------------------------
|
| 459 |
+
|
| 460 |
+
def tolist(self) -> list[int]:
    # Materialize plain Python ints straight from the underlying range.
    return [*self._range]
|
| 462 |
+
|
| 463 |
+
@doc(Index.__iter__)
def __iter__(self) -> Iterator[int]:
    # Stream values lazily from the builtin range; no array is built.
    yield from self._range
|
| 466 |
+
|
| 467 |
+
@doc(Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = no_default):
    name = self._name if name is no_default else name

    if values.dtype.kind == "f":
        # Float values cannot be represented by a range at all.
        return Index(values, name=name, dtype=np.float64)
    # GH 46675 & 43885: If values is equally spaced, return a
    # more memory-compact RangeIndex instead of Index with 64-bit dtype
    unique_diffs = unique_deltas(values)
    if len(unique_diffs) == 1 and unique_diffs[0] != 0:
        # Single non-zero delta -> the values form an arithmetic sequence.
        diff = unique_diffs[0]
        new_range = range(values[0], values[-1] + diff, diff)
        return type(self)._simple_new(new_range, name=name)
    else:
        return self._constructor._simple_new(values, name=name)
|
| 482 |
+
|
| 483 |
+
def _view(self) -> Self:
    # Share both the range and the cache with the new instance.
    new_index = type(self)._simple_new(self._range, name=self._name)
    new_index._cache = self._cache
    return new_index
|
| 487 |
+
|
| 488 |
+
@doc(Index.copy)
def copy(self, name: Hashable | None = None, deep: bool = False) -> Self:
    # `deep` has no effect: a builtin range is immutable, so a rename of
    # a shallow copy is a complete copy.
    resolved_name = self._validate_names(name=name, deep=deep)[0]
    return self._rename(name=resolved_name)
|
| 493 |
+
|
| 494 |
+
def _minmax(self, meth: str):
    """Shared implementation of min() and max(); `meth` is "min" or "max"."""
    # Number of steps from start to the final element; -1 means empty.
    no_steps = len(self) - 1
    if no_steps == -1:
        return np.nan
    ascending = self.step > 0
    if (meth == "min") == ascending:
        # min of an ascending range / max of a descending range is `start`.
        return self.start
    # Otherwise the extremum is the final element.
    return self.start + self.step * no_steps
|
| 502 |
+
|
| 503 |
+
def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
    """The minimum value of the RangeIndex"""
    # numpy-compat validation: reject unsupported axis/args/kwargs.
    nv.validate_minmax_axis(axis)
    nv.validate_min(args, kwargs)
    # skipna is irrelevant: a RangeIndex can never contain NA.
    return self._minmax("min")
|
| 508 |
+
|
| 509 |
+
def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
    """The maximum value of the RangeIndex"""
    # numpy-compat validation: reject unsupported axis/args/kwargs.
    nv.validate_minmax_axis(axis)
    nv.validate_max(args, kwargs)
    # skipna is irrelevant: a RangeIndex can never contain NA.
    return self._minmax("max")
|
| 514 |
+
|
| 515 |
+
def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
    """
    Returns the indices that would sort the index and its
    underlying data.

    Returns
    -------
    np.ndarray[np.intp]

    See Also
    --------
    numpy.ndarray.argsort
    """
    ascending = kwargs.pop("ascending", True)  # EA compat
    kwargs.pop("kind", None)  # e.g. "mergesort" is irrelevant
    nv.validate_argsort(args, kwargs)

    # A range is already sorted one way or the other, so the answer is
    # either the identity permutation or its reversal.
    n = len(self)
    if self._range.step > 0:
        order = np.arange(n, dtype=np.intp)
    else:
        order = np.arange(n - 1, -1, -1, dtype=np.intp)

    return order if ascending else order[::-1]
|
| 540 |
+
|
| 541 |
+
def factorize(
|
| 542 |
+
self,
|
| 543 |
+
sort: bool = False,
|
| 544 |
+
use_na_sentinel: bool = True,
|
| 545 |
+
) -> tuple[npt.NDArray[np.intp], RangeIndex]:
|
| 546 |
+
codes = np.arange(len(self), dtype=np.intp)
|
| 547 |
+
uniques = self
|
| 548 |
+
if sort and self.step < 0:
|
| 549 |
+
codes = codes[::-1]
|
| 550 |
+
uniques = uniques[::-1]
|
| 551 |
+
return codes, uniques
|
| 552 |
+
|
| 553 |
+
def equals(self, other: object) -> bool:
|
| 554 |
+
"""
|
| 555 |
+
Determines if two Index objects contain the same elements.
|
| 556 |
+
"""
|
| 557 |
+
if isinstance(other, RangeIndex):
|
| 558 |
+
return self._range == other._range
|
| 559 |
+
return super().equals(other)
|
| 560 |
+
|
| 561 |
+
# error: Signature of "sort_values" incompatible with supertype "Index"
|
| 562 |
+
@overload # type: ignore[override]
|
| 563 |
+
def sort_values(
|
| 564 |
+
self,
|
| 565 |
+
*,
|
| 566 |
+
return_indexer: Literal[False] = ...,
|
| 567 |
+
ascending: bool = ...,
|
| 568 |
+
na_position: NaPosition = ...,
|
| 569 |
+
key: Callable | None = ...,
|
| 570 |
+
) -> Self:
|
| 571 |
+
...
|
| 572 |
+
|
| 573 |
+
@overload
|
| 574 |
+
def sort_values(
|
| 575 |
+
self,
|
| 576 |
+
*,
|
| 577 |
+
return_indexer: Literal[True],
|
| 578 |
+
ascending: bool = ...,
|
| 579 |
+
na_position: NaPosition = ...,
|
| 580 |
+
key: Callable | None = ...,
|
| 581 |
+
) -> tuple[Self, np.ndarray | RangeIndex]:
|
| 582 |
+
...
|
| 583 |
+
|
| 584 |
+
@overload
|
| 585 |
+
def sort_values(
|
| 586 |
+
self,
|
| 587 |
+
*,
|
| 588 |
+
return_indexer: bool = ...,
|
| 589 |
+
ascending: bool = ...,
|
| 590 |
+
na_position: NaPosition = ...,
|
| 591 |
+
key: Callable | None = ...,
|
| 592 |
+
) -> Self | tuple[Self, np.ndarray | RangeIndex]:
|
| 593 |
+
...
|
| 594 |
+
|
| 595 |
+
@deprecate_nonkeyword_arguments(
|
| 596 |
+
version="3.0", allowed_args=["self"], name="sort_values"
|
| 597 |
+
)
|
| 598 |
+
def sort_values(
|
| 599 |
+
self,
|
| 600 |
+
return_indexer: bool = False,
|
| 601 |
+
ascending: bool = True,
|
| 602 |
+
na_position: NaPosition = "last",
|
| 603 |
+
key: Callable | None = None,
|
| 604 |
+
) -> Self | tuple[Self, np.ndarray | RangeIndex]:
|
| 605 |
+
if key is not None:
|
| 606 |
+
return super().sort_values(
|
| 607 |
+
return_indexer=return_indexer,
|
| 608 |
+
ascending=ascending,
|
| 609 |
+
na_position=na_position,
|
| 610 |
+
key=key,
|
| 611 |
+
)
|
| 612 |
+
else:
|
| 613 |
+
sorted_index = self
|
| 614 |
+
inverse_indexer = False
|
| 615 |
+
if ascending:
|
| 616 |
+
if self.step < 0:
|
| 617 |
+
sorted_index = self[::-1]
|
| 618 |
+
inverse_indexer = True
|
| 619 |
+
else:
|
| 620 |
+
if self.step > 0:
|
| 621 |
+
sorted_index = self[::-1]
|
| 622 |
+
inverse_indexer = True
|
| 623 |
+
|
| 624 |
+
if return_indexer:
|
| 625 |
+
if inverse_indexer:
|
| 626 |
+
rng = range(len(self) - 1, -1, -1)
|
| 627 |
+
else:
|
| 628 |
+
rng = range(len(self))
|
| 629 |
+
return sorted_index, RangeIndex(rng)
|
| 630 |
+
else:
|
| 631 |
+
return sorted_index
|
| 632 |
+
|
| 633 |
+
# --------------------------------------------------------------------
|
| 634 |
+
# Set Operations
|
| 635 |
+
|
| 636 |
+
def _intersection(self, other: Index, sort: bool = False):
|
| 637 |
+
# caller is responsible for checking self and other are both non-empty
|
| 638 |
+
|
| 639 |
+
if not isinstance(other, RangeIndex):
|
| 640 |
+
return super()._intersection(other, sort=sort)
|
| 641 |
+
|
| 642 |
+
first = self._range[::-1] if self.step < 0 else self._range
|
| 643 |
+
second = other._range[::-1] if other.step < 0 else other._range
|
| 644 |
+
|
| 645 |
+
# check whether intervals intersect
|
| 646 |
+
# deals with in- and decreasing ranges
|
| 647 |
+
int_low = max(first.start, second.start)
|
| 648 |
+
int_high = min(first.stop, second.stop)
|
| 649 |
+
if int_high <= int_low:
|
| 650 |
+
return self._simple_new(_empty_range)
|
| 651 |
+
|
| 652 |
+
# Method hint: linear Diophantine equation
|
| 653 |
+
# solve intersection problem
|
| 654 |
+
# performance hint: for identical step sizes, could use
|
| 655 |
+
# cheaper alternative
|
| 656 |
+
gcd, s, _ = self._extended_gcd(first.step, second.step)
|
| 657 |
+
|
| 658 |
+
# check whether element sets intersect
|
| 659 |
+
if (first.start - second.start) % gcd:
|
| 660 |
+
return self._simple_new(_empty_range)
|
| 661 |
+
|
| 662 |
+
# calculate parameters for the RangeIndex describing the
|
| 663 |
+
# intersection disregarding the lower bounds
|
| 664 |
+
tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
|
| 665 |
+
new_step = first.step * second.step // gcd
|
| 666 |
+
new_range = range(tmp_start, int_high, new_step)
|
| 667 |
+
new_index = self._simple_new(new_range)
|
| 668 |
+
|
| 669 |
+
# adjust index to limiting interval
|
| 670 |
+
new_start = new_index._min_fitting_element(int_low)
|
| 671 |
+
new_range = range(new_start, new_index.stop, new_index.step)
|
| 672 |
+
new_index = self._simple_new(new_range)
|
| 673 |
+
|
| 674 |
+
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
|
| 675 |
+
new_index = new_index[::-1]
|
| 676 |
+
|
| 677 |
+
if sort is None:
|
| 678 |
+
new_index = new_index.sort_values()
|
| 679 |
+
|
| 680 |
+
return new_index
|
| 681 |
+
|
| 682 |
+
def _min_fitting_element(self, lower_limit: int) -> int:
|
| 683 |
+
"""Returns the smallest element greater than or equal to the limit"""
|
| 684 |
+
no_steps = -(-(lower_limit - self.start) // abs(self.step))
|
| 685 |
+
return self.start + abs(self.step) * no_steps
|
| 686 |
+
|
| 687 |
+
def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
|
| 688 |
+
"""
|
| 689 |
+
Extended Euclidean algorithms to solve Bezout's identity:
|
| 690 |
+
a*x + b*y = gcd(x, y)
|
| 691 |
+
Finds one particular solution for x, y: s, t
|
| 692 |
+
Returns: gcd, s, t
|
| 693 |
+
"""
|
| 694 |
+
s, old_s = 0, 1
|
| 695 |
+
t, old_t = 1, 0
|
| 696 |
+
r, old_r = b, a
|
| 697 |
+
while r:
|
| 698 |
+
quotient = old_r // r
|
| 699 |
+
old_r, r = r, old_r - quotient * r
|
| 700 |
+
old_s, s = s, old_s - quotient * s
|
| 701 |
+
old_t, t = t, old_t - quotient * t
|
| 702 |
+
return old_r, old_s, old_t
|
| 703 |
+
|
| 704 |
+
def _range_in_self(self, other: range) -> bool:
|
| 705 |
+
"""Check if other range is contained in self"""
|
| 706 |
+
# https://stackoverflow.com/a/32481015
|
| 707 |
+
if not other:
|
| 708 |
+
return True
|
| 709 |
+
if not self._range:
|
| 710 |
+
return False
|
| 711 |
+
if len(other) > 1 and other.step % self._range.step:
|
| 712 |
+
return False
|
| 713 |
+
return other.start in self._range and other[-1] in self._range
|
| 714 |
+
|
| 715 |
+
def _union(self, other: Index, sort: bool | None):
|
| 716 |
+
"""
|
| 717 |
+
Form the union of two Index objects and sorts if possible
|
| 718 |
+
|
| 719 |
+
Parameters
|
| 720 |
+
----------
|
| 721 |
+
other : Index or array-like
|
| 722 |
+
|
| 723 |
+
sort : bool or None, default None
|
| 724 |
+
Whether to sort (monotonically increasing) the resulting index.
|
| 725 |
+
``sort=None|True`` returns a ``RangeIndex`` if possible or a sorted
|
| 726 |
+
``Index`` with a int64 dtype if not.
|
| 727 |
+
``sort=False`` can return a ``RangeIndex`` if self is monotonically
|
| 728 |
+
increasing and other is fully contained in self. Otherwise, returns
|
| 729 |
+
an unsorted ``Index`` with an int64 dtype.
|
| 730 |
+
|
| 731 |
+
Returns
|
| 732 |
+
-------
|
| 733 |
+
union : Index
|
| 734 |
+
"""
|
| 735 |
+
if isinstance(other, RangeIndex):
|
| 736 |
+
if sort in (None, True) or (
|
| 737 |
+
sort is False and self.step > 0 and self._range_in_self(other._range)
|
| 738 |
+
):
|
| 739 |
+
# GH 47557: Can still return a RangeIndex
|
| 740 |
+
# if other range in self and sort=False
|
| 741 |
+
start_s, step_s = self.start, self.step
|
| 742 |
+
end_s = self.start + self.step * (len(self) - 1)
|
| 743 |
+
start_o, step_o = other.start, other.step
|
| 744 |
+
end_o = other.start + other.step * (len(other) - 1)
|
| 745 |
+
if self.step < 0:
|
| 746 |
+
start_s, step_s, end_s = end_s, -step_s, start_s
|
| 747 |
+
if other.step < 0:
|
| 748 |
+
start_o, step_o, end_o = end_o, -step_o, start_o
|
| 749 |
+
if len(self) == 1 and len(other) == 1:
|
| 750 |
+
step_s = step_o = abs(self.start - other.start)
|
| 751 |
+
elif len(self) == 1:
|
| 752 |
+
step_s = step_o
|
| 753 |
+
elif len(other) == 1:
|
| 754 |
+
step_o = step_s
|
| 755 |
+
start_r = min(start_s, start_o)
|
| 756 |
+
end_r = max(end_s, end_o)
|
| 757 |
+
if step_o == step_s:
|
| 758 |
+
if (
|
| 759 |
+
(start_s - start_o) % step_s == 0
|
| 760 |
+
and (start_s - end_o) <= step_s
|
| 761 |
+
and (start_o - end_s) <= step_s
|
| 762 |
+
):
|
| 763 |
+
return type(self)(start_r, end_r + step_s, step_s)
|
| 764 |
+
if (
|
| 765 |
+
(step_s % 2 == 0)
|
| 766 |
+
and (abs(start_s - start_o) == step_s / 2)
|
| 767 |
+
and (abs(end_s - end_o) == step_s / 2)
|
| 768 |
+
):
|
| 769 |
+
# e.g. range(0, 10, 2) and range(1, 11, 2)
|
| 770 |
+
# but not range(0, 20, 4) and range(1, 21, 4) GH#44019
|
| 771 |
+
return type(self)(start_r, end_r + step_s / 2, step_s / 2)
|
| 772 |
+
|
| 773 |
+
elif step_o % step_s == 0:
|
| 774 |
+
if (
|
| 775 |
+
(start_o - start_s) % step_s == 0
|
| 776 |
+
and (start_o + step_s >= start_s)
|
| 777 |
+
and (end_o - step_s <= end_s)
|
| 778 |
+
):
|
| 779 |
+
return type(self)(start_r, end_r + step_s, step_s)
|
| 780 |
+
elif step_s % step_o == 0:
|
| 781 |
+
if (
|
| 782 |
+
(start_s - start_o) % step_o == 0
|
| 783 |
+
and (start_s + step_o >= start_o)
|
| 784 |
+
and (end_s - step_o <= end_o)
|
| 785 |
+
):
|
| 786 |
+
return type(self)(start_r, end_r + step_o, step_o)
|
| 787 |
+
|
| 788 |
+
return super()._union(other, sort=sort)
|
| 789 |
+
|
| 790 |
+
def _difference(self, other, sort=None):
|
| 791 |
+
# optimized set operation if we have another RangeIndex
|
| 792 |
+
self._validate_sort_keyword(sort)
|
| 793 |
+
self._assert_can_do_setop(other)
|
| 794 |
+
other, result_name = self._convert_can_do_setop(other)
|
| 795 |
+
|
| 796 |
+
if not isinstance(other, RangeIndex):
|
| 797 |
+
return super()._difference(other, sort=sort)
|
| 798 |
+
|
| 799 |
+
if sort is not False and self.step < 0:
|
| 800 |
+
return self[::-1]._difference(other)
|
| 801 |
+
|
| 802 |
+
res_name = ops.get_op_result_name(self, other)
|
| 803 |
+
|
| 804 |
+
first = self._range[::-1] if self.step < 0 else self._range
|
| 805 |
+
overlap = self.intersection(other)
|
| 806 |
+
if overlap.step < 0:
|
| 807 |
+
overlap = overlap[::-1]
|
| 808 |
+
|
| 809 |
+
if len(overlap) == 0:
|
| 810 |
+
return self.rename(name=res_name)
|
| 811 |
+
if len(overlap) == len(self):
|
| 812 |
+
return self[:0].rename(res_name)
|
| 813 |
+
|
| 814 |
+
# overlap.step will always be a multiple of self.step (see _intersection)
|
| 815 |
+
|
| 816 |
+
if len(overlap) == 1:
|
| 817 |
+
if overlap[0] == self[0]:
|
| 818 |
+
return self[1:]
|
| 819 |
+
|
| 820 |
+
elif overlap[0] == self[-1]:
|
| 821 |
+
return self[:-1]
|
| 822 |
+
|
| 823 |
+
elif len(self) == 3 and overlap[0] == self[1]:
|
| 824 |
+
return self[::2]
|
| 825 |
+
|
| 826 |
+
else:
|
| 827 |
+
return super()._difference(other, sort=sort)
|
| 828 |
+
|
| 829 |
+
elif len(overlap) == 2 and overlap[0] == first[0] and overlap[-1] == first[-1]:
|
| 830 |
+
# e.g. range(-8, 20, 7) and range(13, -9, -3)
|
| 831 |
+
return self[1:-1]
|
| 832 |
+
|
| 833 |
+
if overlap.step == first.step:
|
| 834 |
+
if overlap[0] == first.start:
|
| 835 |
+
# The difference is everything after the intersection
|
| 836 |
+
new_rng = range(overlap[-1] + first.step, first.stop, first.step)
|
| 837 |
+
elif overlap[-1] == first[-1]:
|
| 838 |
+
# The difference is everything before the intersection
|
| 839 |
+
new_rng = range(first.start, overlap[0], first.step)
|
| 840 |
+
elif overlap._range == first[1:-1]:
|
| 841 |
+
# e.g. range(4) and range(1, 3)
|
| 842 |
+
step = len(first) - 1
|
| 843 |
+
new_rng = first[::step]
|
| 844 |
+
else:
|
| 845 |
+
# The difference is not range-like
|
| 846 |
+
# e.g. range(1, 10, 1) and range(3, 7, 1)
|
| 847 |
+
return super()._difference(other, sort=sort)
|
| 848 |
+
|
| 849 |
+
else:
|
| 850 |
+
# We must have len(self) > 1, bc we ruled out above
|
| 851 |
+
# len(overlap) == 0 and len(overlap) == len(self)
|
| 852 |
+
assert len(self) > 1
|
| 853 |
+
|
| 854 |
+
if overlap.step == first.step * 2:
|
| 855 |
+
if overlap[0] == first[0] and overlap[-1] in (first[-1], first[-2]):
|
| 856 |
+
# e.g. range(1, 10, 1) and range(1, 10, 2)
|
| 857 |
+
new_rng = first[1::2]
|
| 858 |
+
|
| 859 |
+
elif overlap[0] == first[1] and overlap[-1] in (first[-1], first[-2]):
|
| 860 |
+
# e.g. range(1, 10, 1) and range(2, 10, 2)
|
| 861 |
+
new_rng = first[::2]
|
| 862 |
+
|
| 863 |
+
else:
|
| 864 |
+
# We can get here with e.g. range(20) and range(0, 10, 2)
|
| 865 |
+
return super()._difference(other, sort=sort)
|
| 866 |
+
|
| 867 |
+
else:
|
| 868 |
+
# e.g. range(10) and range(0, 10, 3)
|
| 869 |
+
return super()._difference(other, sort=sort)
|
| 870 |
+
|
| 871 |
+
new_index = type(self)._simple_new(new_rng, name=res_name)
|
| 872 |
+
if first is not self._range:
|
| 873 |
+
new_index = new_index[::-1]
|
| 874 |
+
|
| 875 |
+
return new_index
|
| 876 |
+
|
| 877 |
+
def symmetric_difference(
|
| 878 |
+
self, other, result_name: Hashable | None = None, sort=None
|
| 879 |
+
):
|
| 880 |
+
if not isinstance(other, RangeIndex) or sort is not None:
|
| 881 |
+
return super().symmetric_difference(other, result_name, sort)
|
| 882 |
+
|
| 883 |
+
left = self.difference(other)
|
| 884 |
+
right = other.difference(self)
|
| 885 |
+
result = left.union(right)
|
| 886 |
+
|
| 887 |
+
if result_name is not None:
|
| 888 |
+
result = result.rename(result_name)
|
| 889 |
+
return result
|
| 890 |
+
|
| 891 |
+
# --------------------------------------------------------------------
|
| 892 |
+
|
| 893 |
+
# error: Return type "Index" of "delete" incompatible with return type
|
| 894 |
+
# "RangeIndex" in supertype "Index"
|
| 895 |
+
def delete(self, loc) -> Index: # type: ignore[override]
|
| 896 |
+
# In some cases we can retain RangeIndex, see also
|
| 897 |
+
# DatetimeTimedeltaMixin._get_delete_Freq
|
| 898 |
+
if is_integer(loc):
|
| 899 |
+
if loc in (0, -len(self)):
|
| 900 |
+
return self[1:]
|
| 901 |
+
if loc in (-1, len(self) - 1):
|
| 902 |
+
return self[:-1]
|
| 903 |
+
if len(self) == 3 and loc in (1, -2):
|
| 904 |
+
return self[::2]
|
| 905 |
+
|
| 906 |
+
elif lib.is_list_like(loc):
|
| 907 |
+
slc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self))
|
| 908 |
+
|
| 909 |
+
if isinstance(slc, slice):
|
| 910 |
+
# defer to RangeIndex._difference, which is optimized to return
|
| 911 |
+
# a RangeIndex whenever possible
|
| 912 |
+
other = self[slc]
|
| 913 |
+
return self.difference(other, sort=False)
|
| 914 |
+
|
| 915 |
+
return super().delete(loc)
|
| 916 |
+
|
| 917 |
+
def insert(self, loc: int, item) -> Index:
|
| 918 |
+
if len(self) and (is_integer(item) or is_float(item)):
|
| 919 |
+
# We can retain RangeIndex is inserting at the beginning or end,
|
| 920 |
+
# or right in the middle.
|
| 921 |
+
rng = self._range
|
| 922 |
+
if loc == 0 and item == self[0] - self.step:
|
| 923 |
+
new_rng = range(rng.start - rng.step, rng.stop, rng.step)
|
| 924 |
+
return type(self)._simple_new(new_rng, name=self._name)
|
| 925 |
+
|
| 926 |
+
elif loc == len(self) and item == self[-1] + self.step:
|
| 927 |
+
new_rng = range(rng.start, rng.stop + rng.step, rng.step)
|
| 928 |
+
return type(self)._simple_new(new_rng, name=self._name)
|
| 929 |
+
|
| 930 |
+
elif len(self) == 2 and item == self[0] + self.step / 2:
|
| 931 |
+
# e.g. inserting 1 into [0, 2]
|
| 932 |
+
step = int(self.step / 2)
|
| 933 |
+
new_rng = range(self.start, self.stop, step)
|
| 934 |
+
return type(self)._simple_new(new_rng, name=self._name)
|
| 935 |
+
|
| 936 |
+
return super().insert(loc, item)
|
| 937 |
+
|
| 938 |
+
def _concat(self, indexes: list[Index], name: Hashable) -> Index:
|
| 939 |
+
"""
|
| 940 |
+
Overriding parent method for the case of all RangeIndex instances.
|
| 941 |
+
|
| 942 |
+
When all members of "indexes" are of type RangeIndex: result will be
|
| 943 |
+
RangeIndex if possible, Index with a int64 dtype otherwise. E.g.:
|
| 944 |
+
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
|
| 945 |
+
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Index([0,1,2,4,5], dtype='int64')
|
| 946 |
+
"""
|
| 947 |
+
if not all(isinstance(x, RangeIndex) for x in indexes):
|
| 948 |
+
return super()._concat(indexes, name)
|
| 949 |
+
|
| 950 |
+
elif len(indexes) == 1:
|
| 951 |
+
return indexes[0]
|
| 952 |
+
|
| 953 |
+
rng_indexes = cast(list[RangeIndex], indexes)
|
| 954 |
+
|
| 955 |
+
start = step = next_ = None
|
| 956 |
+
|
| 957 |
+
# Filter the empty indexes
|
| 958 |
+
non_empty_indexes = [obj for obj in rng_indexes if len(obj)]
|
| 959 |
+
|
| 960 |
+
for obj in non_empty_indexes:
|
| 961 |
+
rng = obj._range
|
| 962 |
+
|
| 963 |
+
if start is None:
|
| 964 |
+
# This is set by the first non-empty index
|
| 965 |
+
start = rng.start
|
| 966 |
+
if step is None and len(rng) > 1:
|
| 967 |
+
step = rng.step
|
| 968 |
+
elif step is None:
|
| 969 |
+
# First non-empty index had only one element
|
| 970 |
+
if rng.start == start:
|
| 971 |
+
values = np.concatenate([x._values for x in rng_indexes])
|
| 972 |
+
result = self._constructor(values)
|
| 973 |
+
return result.rename(name)
|
| 974 |
+
|
| 975 |
+
step = rng.start - start
|
| 976 |
+
|
| 977 |
+
non_consecutive = (step != rng.step and len(rng) > 1) or (
|
| 978 |
+
next_ is not None and rng.start != next_
|
| 979 |
+
)
|
| 980 |
+
if non_consecutive:
|
| 981 |
+
result = self._constructor(
|
| 982 |
+
np.concatenate([x._values for x in rng_indexes])
|
| 983 |
+
)
|
| 984 |
+
return result.rename(name)
|
| 985 |
+
|
| 986 |
+
if step is not None:
|
| 987 |
+
next_ = rng[-1] + step
|
| 988 |
+
|
| 989 |
+
if non_empty_indexes:
|
| 990 |
+
# Get the stop value from "next" or alternatively
|
| 991 |
+
# from the last non-empty index
|
| 992 |
+
stop = non_empty_indexes[-1].stop if next_ is None else next_
|
| 993 |
+
return RangeIndex(start, stop, step).rename(name)
|
| 994 |
+
|
| 995 |
+
# Here all "indexes" had 0 length, i.e. were empty.
|
| 996 |
+
# In this case return an empty range index.
|
| 997 |
+
return RangeIndex(0, 0).rename(name)
|
| 998 |
+
|
| 999 |
+
def __len__(self) -> int:
|
| 1000 |
+
"""
|
| 1001 |
+
return the length of the RangeIndex
|
| 1002 |
+
"""
|
| 1003 |
+
return len(self._range)
|
| 1004 |
+
|
| 1005 |
+
@property
|
| 1006 |
+
def size(self) -> int:
|
| 1007 |
+
return len(self)
|
| 1008 |
+
|
| 1009 |
+
def __getitem__(self, key):
|
| 1010 |
+
"""
|
| 1011 |
+
Conserve RangeIndex type for scalar and slice keys.
|
| 1012 |
+
"""
|
| 1013 |
+
if isinstance(key, slice):
|
| 1014 |
+
return self._getitem_slice(key)
|
| 1015 |
+
elif is_integer(key):
|
| 1016 |
+
new_key = int(key)
|
| 1017 |
+
try:
|
| 1018 |
+
return self._range[new_key]
|
| 1019 |
+
except IndexError as err:
|
| 1020 |
+
raise IndexError(
|
| 1021 |
+
f"index {key} is out of bounds for axis 0 with size {len(self)}"
|
| 1022 |
+
) from err
|
| 1023 |
+
elif is_scalar(key):
|
| 1024 |
+
raise IndexError(
|
| 1025 |
+
"only integers, slices (`:`), "
|
| 1026 |
+
"ellipsis (`...`), numpy.newaxis (`None`) "
|
| 1027 |
+
"and integer or boolean "
|
| 1028 |
+
"arrays are valid indices"
|
| 1029 |
+
)
|
| 1030 |
+
return super().__getitem__(key)
|
| 1031 |
+
|
| 1032 |
+
def _getitem_slice(self, slobj: slice) -> Self:
|
| 1033 |
+
"""
|
| 1034 |
+
Fastpath for __getitem__ when we know we have a slice.
|
| 1035 |
+
"""
|
| 1036 |
+
res = self._range[slobj]
|
| 1037 |
+
return type(self)._simple_new(res, name=self._name)
|
| 1038 |
+
|
| 1039 |
+
@unpack_zerodim_and_defer("__floordiv__")
|
| 1040 |
+
def __floordiv__(self, other):
|
| 1041 |
+
if is_integer(other) and other != 0:
|
| 1042 |
+
if len(self) == 0 or self.start % other == 0 and self.step % other == 0:
|
| 1043 |
+
start = self.start // other
|
| 1044 |
+
step = self.step // other
|
| 1045 |
+
stop = start + len(self) * step
|
| 1046 |
+
new_range = range(start, stop, step or 1)
|
| 1047 |
+
return self._simple_new(new_range, name=self._name)
|
| 1048 |
+
if len(self) == 1:
|
| 1049 |
+
start = self.start // other
|
| 1050 |
+
new_range = range(start, start + 1, 1)
|
| 1051 |
+
return self._simple_new(new_range, name=self._name)
|
| 1052 |
+
|
| 1053 |
+
return super().__floordiv__(other)
|
| 1054 |
+
|
| 1055 |
+
# --------------------------------------------------------------------
|
| 1056 |
+
# Reductions
|
| 1057 |
+
|
| 1058 |
+
def all(self, *args, **kwargs) -> bool:
|
| 1059 |
+
return 0 not in self._range
|
| 1060 |
+
|
| 1061 |
+
def any(self, *args, **kwargs) -> bool:
|
| 1062 |
+
return any(self._range)
|
| 1063 |
+
|
| 1064 |
+
# --------------------------------------------------------------------
|
| 1065 |
+
|
| 1066 |
+
def _cmp_method(self, other, op):
|
| 1067 |
+
if isinstance(other, RangeIndex) and self._range == other._range:
|
| 1068 |
+
# Both are immutable so if ._range attr. are equal, shortcut is possible
|
| 1069 |
+
return super()._cmp_method(self, op)
|
| 1070 |
+
return super()._cmp_method(other, op)
|
| 1071 |
+
|
| 1072 |
+
def _arith_method(self, other, op):
|
| 1073 |
+
"""
|
| 1074 |
+
Parameters
|
| 1075 |
+
----------
|
| 1076 |
+
other : Any
|
| 1077 |
+
op : callable that accepts 2 params
|
| 1078 |
+
perform the binary op
|
| 1079 |
+
"""
|
| 1080 |
+
|
| 1081 |
+
if isinstance(other, ABCTimedeltaIndex):
|
| 1082 |
+
# Defer to TimedeltaIndex implementation
|
| 1083 |
+
return NotImplemented
|
| 1084 |
+
elif isinstance(other, (timedelta, np.timedelta64)):
|
| 1085 |
+
# GH#19333 is_integer evaluated True on timedelta64,
|
| 1086 |
+
# so we need to catch these explicitly
|
| 1087 |
+
return super()._arith_method(other, op)
|
| 1088 |
+
elif lib.is_np_dtype(getattr(other, "dtype", None), "m"):
|
| 1089 |
+
# Must be an np.ndarray; GH#22390
|
| 1090 |
+
return super()._arith_method(other, op)
|
| 1091 |
+
|
| 1092 |
+
if op in [
|
| 1093 |
+
operator.pow,
|
| 1094 |
+
ops.rpow,
|
| 1095 |
+
operator.mod,
|
| 1096 |
+
ops.rmod,
|
| 1097 |
+
operator.floordiv,
|
| 1098 |
+
ops.rfloordiv,
|
| 1099 |
+
divmod,
|
| 1100 |
+
ops.rdivmod,
|
| 1101 |
+
]:
|
| 1102 |
+
return super()._arith_method(other, op)
|
| 1103 |
+
|
| 1104 |
+
step: Callable | None = None
|
| 1105 |
+
if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:
|
| 1106 |
+
step = op
|
| 1107 |
+
|
| 1108 |
+
# TODO: if other is a RangeIndex we may have more efficient options
|
| 1109 |
+
right = extract_array(other, extract_numpy=True, extract_range=True)
|
| 1110 |
+
left = self
|
| 1111 |
+
|
| 1112 |
+
try:
|
| 1113 |
+
# apply if we have an override
|
| 1114 |
+
if step:
|
| 1115 |
+
with np.errstate(all="ignore"):
|
| 1116 |
+
rstep = step(left.step, right)
|
| 1117 |
+
|
| 1118 |
+
# we don't have a representable op
|
| 1119 |
+
# so return a base index
|
| 1120 |
+
if not is_integer(rstep) or not rstep:
|
| 1121 |
+
raise ValueError
|
| 1122 |
+
|
| 1123 |
+
# GH#53255
|
| 1124 |
+
else:
|
| 1125 |
+
rstep = -left.step if op == ops.rsub else left.step
|
| 1126 |
+
|
| 1127 |
+
with np.errstate(all="ignore"):
|
| 1128 |
+
rstart = op(left.start, right)
|
| 1129 |
+
rstop = op(left.stop, right)
|
| 1130 |
+
|
| 1131 |
+
res_name = ops.get_op_result_name(self, other)
|
| 1132 |
+
result = type(self)(rstart, rstop, rstep, name=res_name)
|
| 1133 |
+
|
| 1134 |
+
# for compat with numpy / Index with int64 dtype
|
| 1135 |
+
# even if we can represent as a RangeIndex, return
|
| 1136 |
+
# as a float64 Index if we have float-like descriptors
|
| 1137 |
+
if not all(is_integer(x) for x in [rstart, rstop, rstep]):
|
| 1138 |
+
result = result.astype("float64")
|
| 1139 |
+
|
| 1140 |
+
return result
|
| 1141 |
+
|
| 1142 |
+
except (ValueError, TypeError, ZeroDivisionError):
|
| 1143 |
+
# test_arithmetic_explicit_conversions
|
| 1144 |
+
return super()._arith_method(other, op)
|
| 1145 |
+
|
| 1146 |
+
# error: Return type "Index" of "take" incompatible with return type
|
| 1147 |
+
# "RangeIndex" in supertype "Index"
|
| 1148 |
+
def take( # type: ignore[override]
|
| 1149 |
+
self,
|
| 1150 |
+
indices,
|
| 1151 |
+
axis: Axis = 0,
|
| 1152 |
+
allow_fill: bool = True,
|
| 1153 |
+
fill_value=None,
|
| 1154 |
+
**kwargs,
|
| 1155 |
+
) -> Index:
|
| 1156 |
+
if kwargs:
|
| 1157 |
+
nv.validate_take((), kwargs)
|
| 1158 |
+
if is_scalar(indices):
|
| 1159 |
+
raise TypeError("Expected indices to be array-like")
|
| 1160 |
+
indices = ensure_platform_int(indices)
|
| 1161 |
+
|
| 1162 |
+
# raise an exception if allow_fill is True and fill_value is not None
|
| 1163 |
+
self._maybe_disallow_fill(allow_fill, fill_value, indices)
|
| 1164 |
+
|
| 1165 |
+
if len(indices) == 0:
|
| 1166 |
+
taken = np.array([], dtype=self.dtype)
|
| 1167 |
+
else:
|
| 1168 |
+
ind_max = indices.max()
|
| 1169 |
+
if ind_max >= len(self):
|
| 1170 |
+
raise IndexError(
|
| 1171 |
+
f"index {ind_max} is out of bounds for axis 0 with size {len(self)}"
|
| 1172 |
+
)
|
| 1173 |
+
ind_min = indices.min()
|
| 1174 |
+
if ind_min < -len(self):
|
| 1175 |
+
raise IndexError(
|
| 1176 |
+
f"index {ind_min} is out of bounds for axis 0 with size {len(self)}"
|
| 1177 |
+
)
|
| 1178 |
+
taken = indices.astype(self.dtype, casting="safe")
|
| 1179 |
+
if ind_min < 0:
|
| 1180 |
+
taken %= len(self)
|
| 1181 |
+
if self.step != 1:
|
| 1182 |
+
taken *= self.step
|
| 1183 |
+
if self.start != 0:
|
| 1184 |
+
taken += self.start
|
| 1185 |
+
|
| 1186 |
+
# _constructor so RangeIndex-> Index with an int64 dtype
|
| 1187 |
+
return self._constructor._simple_new(taken, name=self.name)
|
videollama2/lib/python3.10/site-packages/pandas/core/indexes/timedeltas.py
ADDED
|
@@ -0,0 +1,356 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" implement the TimedeltaIndex """
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
from typing import TYPE_CHECKING
|
| 5 |
+
import warnings
|
| 6 |
+
|
| 7 |
+
from pandas._libs import (
|
| 8 |
+
index as libindex,
|
| 9 |
+
lib,
|
| 10 |
+
)
|
| 11 |
+
from pandas._libs.tslibs import (
|
| 12 |
+
Resolution,
|
| 13 |
+
Timedelta,
|
| 14 |
+
to_offset,
|
| 15 |
+
)
|
| 16 |
+
from pandas._libs.tslibs.timedeltas import disallow_ambiguous_unit
|
| 17 |
+
from pandas.util._exceptions import find_stack_level
|
| 18 |
+
|
| 19 |
+
from pandas.core.dtypes.common import (
|
| 20 |
+
is_scalar,
|
| 21 |
+
pandas_dtype,
|
| 22 |
+
)
|
| 23 |
+
from pandas.core.dtypes.generic import ABCSeries
|
| 24 |
+
|
| 25 |
+
from pandas.core.arrays.timedeltas import TimedeltaArray
|
| 26 |
+
import pandas.core.common as com
|
| 27 |
+
from pandas.core.indexes.base import (
|
| 28 |
+
Index,
|
| 29 |
+
maybe_extract_name,
|
| 30 |
+
)
|
| 31 |
+
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
|
| 32 |
+
from pandas.core.indexes.extension import inherit_names
|
| 33 |
+
|
| 34 |
+
if TYPE_CHECKING:
|
| 35 |
+
from pandas._typing import DtypeObj
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@inherit_names(
|
| 39 |
+
["__neg__", "__pos__", "__abs__", "total_seconds", "round", "floor", "ceil"]
|
| 40 |
+
+ TimedeltaArray._field_ops,
|
| 41 |
+
TimedeltaArray,
|
| 42 |
+
wrap=True,
|
| 43 |
+
)
|
| 44 |
+
@inherit_names(
|
| 45 |
+
[
|
| 46 |
+
"components",
|
| 47 |
+
"to_pytimedelta",
|
| 48 |
+
"sum",
|
| 49 |
+
"std",
|
| 50 |
+
"median",
|
| 51 |
+
],
|
| 52 |
+
TimedeltaArray,
|
| 53 |
+
)
|
| 54 |
+
class TimedeltaIndex(DatetimeTimedeltaMixin):
|
| 55 |
+
"""
|
| 56 |
+
Immutable Index of timedelta64 data.
|
| 57 |
+
|
| 58 |
+
Represented internally as int64, and scalars returned Timedelta objects.
|
| 59 |
+
|
| 60 |
+
Parameters
|
| 61 |
+
----------
|
| 62 |
+
data : array-like (1-dimensional), optional
|
| 63 |
+
Optional timedelta-like data to construct index with.
|
| 64 |
+
unit : {'D', 'h', 'm', 's', 'ms', 'us', 'ns'}, optional
|
| 65 |
+
The unit of ``data``.
|
| 66 |
+
|
| 67 |
+
.. deprecated:: 2.2.0
|
| 68 |
+
Use ``pd.to_timedelta`` instead.
|
| 69 |
+
|
| 70 |
+
freq : str or pandas offset object, optional
|
| 71 |
+
One of pandas date offset strings or corresponding objects. The string
|
| 72 |
+
``'infer'`` can be passed in order to set the frequency of the index as
|
| 73 |
+
the inferred frequency upon creation.
|
| 74 |
+
dtype : numpy.dtype or str, default None
|
| 75 |
+
Valid ``numpy`` dtypes are ``timedelta64[ns]``, ``timedelta64[us]``,
|
| 76 |
+
``timedelta64[ms]``, and ``timedelta64[s]``.
|
| 77 |
+
copy : bool
|
| 78 |
+
Make a copy of input array.
|
| 79 |
+
name : object
|
| 80 |
+
Name to be stored in the index.
|
| 81 |
+
|
| 82 |
+
Attributes
|
| 83 |
+
----------
|
| 84 |
+
days
|
| 85 |
+
seconds
|
| 86 |
+
microseconds
|
| 87 |
+
nanoseconds
|
| 88 |
+
components
|
| 89 |
+
inferred_freq
|
| 90 |
+
|
| 91 |
+
Methods
|
| 92 |
+
-------
|
| 93 |
+
to_pytimedelta
|
| 94 |
+
to_series
|
| 95 |
+
round
|
| 96 |
+
floor
|
| 97 |
+
ceil
|
| 98 |
+
to_frame
|
| 99 |
+
mean
|
| 100 |
+
|
| 101 |
+
See Also
|
| 102 |
+
--------
|
| 103 |
+
Index : The base pandas Index type.
|
| 104 |
+
Timedelta : Represents a duration between two dates or times.
|
| 105 |
+
DatetimeIndex : Index of datetime64 data.
|
| 106 |
+
PeriodIndex : Index of Period data.
|
| 107 |
+
timedelta_range : Create a fixed-frequency TimedeltaIndex.
|
| 108 |
+
|
| 109 |
+
Notes
|
| 110 |
+
-----
|
| 111 |
+
To learn more about the frequency strings, please see `this link
|
| 112 |
+
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
|
| 113 |
+
|
| 114 |
+
Examples
|
| 115 |
+
--------
|
| 116 |
+
>>> pd.TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'])
|
| 117 |
+
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
|
| 118 |
+
dtype='timedelta64[ns]', freq=None)
|
| 119 |
+
|
| 120 |
+
We can also let pandas infer the frequency when possible.
|
| 121 |
+
|
| 122 |
+
>>> pd.TimedeltaIndex(np.arange(5) * 24 * 3600 * 1e9, freq='infer')
|
| 123 |
+
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
|
| 124 |
+
dtype='timedelta64[ns]', freq='D')
|
| 125 |
+
"""
|
| 126 |
+
|
| 127 |
+
_typ = "timedeltaindex"
|
| 128 |
+
|
| 129 |
+
_data_cls = TimedeltaArray
|
| 130 |
+
|
| 131 |
+
@property
|
| 132 |
+
def _engine_type(self) -> type[libindex.TimedeltaEngine]:
|
| 133 |
+
return libindex.TimedeltaEngine
|
| 134 |
+
|
| 135 |
+
_data: TimedeltaArray
|
| 136 |
+
|
| 137 |
+
# Use base class method instead of DatetimeTimedeltaMixin._get_string_slice
|
| 138 |
+
_get_string_slice = Index._get_string_slice
|
| 139 |
+
|
| 140 |
+
# error: Signature of "_resolution_obj" incompatible with supertype
|
| 141 |
+
# "DatetimeIndexOpsMixin"
|
| 142 |
+
@property
|
| 143 |
+
def _resolution_obj(self) -> Resolution | None: # type: ignore[override]
|
| 144 |
+
return self._data._resolution_obj
|
| 145 |
+
|
| 146 |
+
# -------------------------------------------------------------------
|
| 147 |
+
# Constructors
|
| 148 |
+
|
| 149 |
+
def __new__(
|
| 150 |
+
cls,
|
| 151 |
+
data=None,
|
| 152 |
+
unit=lib.no_default,
|
| 153 |
+
freq=lib.no_default,
|
| 154 |
+
closed=lib.no_default,
|
| 155 |
+
dtype=None,
|
| 156 |
+
copy: bool = False,
|
| 157 |
+
name=None,
|
| 158 |
+
):
|
| 159 |
+
if closed is not lib.no_default:
|
| 160 |
+
# GH#52628
|
| 161 |
+
warnings.warn(
|
| 162 |
+
f"The 'closed' keyword in {cls.__name__} construction is "
|
| 163 |
+
"deprecated and will be removed in a future version.",
|
| 164 |
+
FutureWarning,
|
| 165 |
+
stacklevel=find_stack_level(),
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
if unit is not lib.no_default:
|
| 169 |
+
# GH#55499
|
| 170 |
+
warnings.warn(
|
| 171 |
+
f"The 'unit' keyword in {cls.__name__} construction is "
|
| 172 |
+
"deprecated and will be removed in a future version. "
|
| 173 |
+
"Use pd.to_timedelta instead.",
|
| 174 |
+
FutureWarning,
|
| 175 |
+
stacklevel=find_stack_level(),
|
| 176 |
+
)
|
| 177 |
+
else:
|
| 178 |
+
unit = None
|
| 179 |
+
|
| 180 |
+
name = maybe_extract_name(name, data, cls)
|
| 181 |
+
|
| 182 |
+
if is_scalar(data):
|
| 183 |
+
cls._raise_scalar_data_error(data)
|
| 184 |
+
|
| 185 |
+
disallow_ambiguous_unit(unit)
|
| 186 |
+
if dtype is not None:
|
| 187 |
+
dtype = pandas_dtype(dtype)
|
| 188 |
+
|
| 189 |
+
if (
|
| 190 |
+
isinstance(data, TimedeltaArray)
|
| 191 |
+
and freq is lib.no_default
|
| 192 |
+
and (dtype is None or dtype == data.dtype)
|
| 193 |
+
):
|
| 194 |
+
if copy:
|
| 195 |
+
data = data.copy()
|
| 196 |
+
return cls._simple_new(data, name=name)
|
| 197 |
+
|
| 198 |
+
if (
|
| 199 |
+
isinstance(data, TimedeltaIndex)
|
| 200 |
+
and freq is lib.no_default
|
| 201 |
+
and name is None
|
| 202 |
+
and (dtype is None or dtype == data.dtype)
|
| 203 |
+
):
|
| 204 |
+
if copy:
|
| 205 |
+
return data.copy()
|
| 206 |
+
else:
|
| 207 |
+
return data._view()
|
| 208 |
+
|
| 209 |
+
# - Cases checked above all return/raise before reaching here - #
|
| 210 |
+
|
| 211 |
+
tdarr = TimedeltaArray._from_sequence_not_strict(
|
| 212 |
+
data, freq=freq, unit=unit, dtype=dtype, copy=copy
|
| 213 |
+
)
|
| 214 |
+
refs = None
|
| 215 |
+
if not copy and isinstance(data, (ABCSeries, Index)):
|
| 216 |
+
refs = data._references
|
| 217 |
+
|
| 218 |
+
return cls._simple_new(tdarr, name=name, refs=refs)
|
| 219 |
+
|
| 220 |
+
# -------------------------------------------------------------------
|
| 221 |
+
|
| 222 |
+
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
|
| 223 |
+
"""
|
| 224 |
+
Can we compare values of the given dtype to our own?
|
| 225 |
+
"""
|
| 226 |
+
return lib.is_np_dtype(dtype, "m") # aka self._data._is_recognized_dtype
|
| 227 |
+
|
| 228 |
+
# -------------------------------------------------------------------
|
| 229 |
+
# Indexing Methods
|
| 230 |
+
|
| 231 |
+
def get_loc(self, key):
|
| 232 |
+
"""
|
| 233 |
+
Get integer location for requested label
|
| 234 |
+
|
| 235 |
+
Returns
|
| 236 |
+
-------
|
| 237 |
+
loc : int, slice, or ndarray[int]
|
| 238 |
+
"""
|
| 239 |
+
self._check_indexing_error(key)
|
| 240 |
+
|
| 241 |
+
try:
|
| 242 |
+
key = self._data._validate_scalar(key, unbox=False)
|
| 243 |
+
except TypeError as err:
|
| 244 |
+
raise KeyError(key) from err
|
| 245 |
+
|
| 246 |
+
return Index.get_loc(self, key)
|
| 247 |
+
|
| 248 |
+
def _parse_with_reso(self, label: str):
|
| 249 |
+
# the "with_reso" is a no-op for TimedeltaIndex
|
| 250 |
+
parsed = Timedelta(label)
|
| 251 |
+
return parsed, None
|
| 252 |
+
|
| 253 |
+
def _parsed_string_to_bounds(self, reso, parsed: Timedelta):
|
| 254 |
+
# reso is unused, included to match signature of DTI/PI
|
| 255 |
+
lbound = parsed.round(parsed.resolution_string)
|
| 256 |
+
rbound = lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns")
|
| 257 |
+
return lbound, rbound
|
| 258 |
+
|
| 259 |
+
# -------------------------------------------------------------------
|
| 260 |
+
|
| 261 |
+
@property
|
| 262 |
+
def inferred_type(self) -> str:
|
| 263 |
+
return "timedelta64"
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def timedelta_range(
|
| 267 |
+
start=None,
|
| 268 |
+
end=None,
|
| 269 |
+
periods: int | None = None,
|
| 270 |
+
freq=None,
|
| 271 |
+
name=None,
|
| 272 |
+
closed=None,
|
| 273 |
+
*,
|
| 274 |
+
unit: str | None = None,
|
| 275 |
+
) -> TimedeltaIndex:
|
| 276 |
+
"""
|
| 277 |
+
Return a fixed frequency TimedeltaIndex with day as the default.
|
| 278 |
+
|
| 279 |
+
Parameters
|
| 280 |
+
----------
|
| 281 |
+
start : str or timedelta-like, default None
|
| 282 |
+
Left bound for generating timedeltas.
|
| 283 |
+
end : str or timedelta-like, default None
|
| 284 |
+
Right bound for generating timedeltas.
|
| 285 |
+
periods : int, default None
|
| 286 |
+
Number of periods to generate.
|
| 287 |
+
freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D'
|
| 288 |
+
Frequency strings can have multiples, e.g. '5h'.
|
| 289 |
+
name : str, default None
|
| 290 |
+
Name of the resulting TimedeltaIndex.
|
| 291 |
+
closed : str, default None
|
| 292 |
+
Make the interval closed with respect to the given frequency to
|
| 293 |
+
the 'left', 'right', or both sides (None).
|
| 294 |
+
unit : str, default None
|
| 295 |
+
Specify the desired resolution of the result.
|
| 296 |
+
|
| 297 |
+
.. versionadded:: 2.0.0
|
| 298 |
+
|
| 299 |
+
Returns
|
| 300 |
+
-------
|
| 301 |
+
TimedeltaIndex
|
| 302 |
+
|
| 303 |
+
Notes
|
| 304 |
+
-----
|
| 305 |
+
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
|
| 306 |
+
exactly three must be specified. If ``freq`` is omitted, the resulting
|
| 307 |
+
``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
|
| 308 |
+
``start`` and ``end`` (closed on both sides).
|
| 309 |
+
|
| 310 |
+
To learn more about the frequency strings, please see `this link
|
| 311 |
+
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
|
| 312 |
+
|
| 313 |
+
Examples
|
| 314 |
+
--------
|
| 315 |
+
>>> pd.timedelta_range(start='1 day', periods=4)
|
| 316 |
+
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
|
| 317 |
+
dtype='timedelta64[ns]', freq='D')
|
| 318 |
+
|
| 319 |
+
The ``closed`` parameter specifies which endpoint is included. The default
|
| 320 |
+
behavior is to include both endpoints.
|
| 321 |
+
|
| 322 |
+
>>> pd.timedelta_range(start='1 day', periods=4, closed='right')
|
| 323 |
+
TimedeltaIndex(['2 days', '3 days', '4 days'],
|
| 324 |
+
dtype='timedelta64[ns]', freq='D')
|
| 325 |
+
|
| 326 |
+
The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
|
| 327 |
+
Only fixed frequencies can be passed, non-fixed frequencies such as
|
| 328 |
+
'M' (month end) will raise.
|
| 329 |
+
|
| 330 |
+
>>> pd.timedelta_range(start='1 day', end='2 days', freq='6h')
|
| 331 |
+
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
|
| 332 |
+
'1 days 18:00:00', '2 days 00:00:00'],
|
| 333 |
+
dtype='timedelta64[ns]', freq='6h')
|
| 334 |
+
|
| 335 |
+
Specify ``start``, ``end``, and ``periods``; the frequency is generated
|
| 336 |
+
automatically (linearly spaced).
|
| 337 |
+
|
| 338 |
+
>>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
|
| 339 |
+
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
|
| 340 |
+
'5 days 00:00:00'],
|
| 341 |
+
dtype='timedelta64[ns]', freq=None)
|
| 342 |
+
|
| 343 |
+
**Specify a unit**
|
| 344 |
+
|
| 345 |
+
>>> pd.timedelta_range("1 Day", periods=3, freq="100000D", unit="s")
|
| 346 |
+
TimedeltaIndex(['1 days', '100001 days', '200001 days'],
|
| 347 |
+
dtype='timedelta64[s]', freq='100000D')
|
| 348 |
+
"""
|
| 349 |
+
if freq is None and com.any_none(periods, start, end):
|
| 350 |
+
freq = "D"
|
| 351 |
+
|
| 352 |
+
freq = to_offset(freq)
|
| 353 |
+
tdarr = TimedeltaArray._generate_range(
|
| 354 |
+
start, end, periods, freq, closed=closed, unit=unit
|
| 355 |
+
)
|
| 356 |
+
return TimedeltaIndex._simple_new(tdarr, name=name)
|
videollama2/lib/python3.10/site-packages/pandas/core/methods/__init__.py
ADDED
|
File without changes
|