Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- vllm/lib/python3.10/site-packages/pandas/tests/computation/__init__.py +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/computation/test_eval.py +2001 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_api.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_arithmetic.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_constructors.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_cumulative.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_formats.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_iteration.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_logical_ops.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_missing.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_npfuncs.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_reductions.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_subclass.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_ufunc.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_unary.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_validate.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__init__.py +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_cat_accessor.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_dt_accessor.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_list_accessor.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_sparse_accessor.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_str_accessor.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_struct_accessor.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/test_cat_accessor.py +258 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/test_dt_accessor.py +843 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/test_list_accessor.py +129 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/test_sparse_accessor.py +9 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/test_str_accessor.py +25 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/test_struct_accessor.py +196 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__init__.py +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_datetime.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_delitem.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_get.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_getitem.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_indexing.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_mask.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_set_value.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_setitem.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_take.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_where.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_xs.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_datetime.py +499 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_delitem.py +70 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_get.py +238 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_getitem.py +735 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_indexing.py +518 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_mask.py +69 -0
- vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_set_value.py +45 -0
vllm/lib/python3.10/site-packages/pandas/tests/computation/__init__.py
ADDED
|
File without changes
|
vllm/lib/python3.10/site-packages/pandas/tests/computation/test_eval.py
ADDED
|
@@ -0,0 +1,2001 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from functools import reduce
|
| 4 |
+
from itertools import product
|
| 5 |
+
import operator
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import pytest
|
| 9 |
+
|
| 10 |
+
from pandas.compat import PY312
|
| 11 |
+
from pandas.errors import (
|
| 12 |
+
NumExprClobberingError,
|
| 13 |
+
PerformanceWarning,
|
| 14 |
+
UndefinedVariableError,
|
| 15 |
+
)
|
| 16 |
+
import pandas.util._test_decorators as td
|
| 17 |
+
|
| 18 |
+
from pandas.core.dtypes.common import (
|
| 19 |
+
is_bool,
|
| 20 |
+
is_float,
|
| 21 |
+
is_list_like,
|
| 22 |
+
is_scalar,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
import pandas as pd
|
| 26 |
+
from pandas import (
|
| 27 |
+
DataFrame,
|
| 28 |
+
Index,
|
| 29 |
+
Series,
|
| 30 |
+
date_range,
|
| 31 |
+
period_range,
|
| 32 |
+
timedelta_range,
|
| 33 |
+
)
|
| 34 |
+
import pandas._testing as tm
|
| 35 |
+
from pandas.core.computation import (
|
| 36 |
+
expr,
|
| 37 |
+
pytables,
|
| 38 |
+
)
|
| 39 |
+
from pandas.core.computation.engines import ENGINES
|
| 40 |
+
from pandas.core.computation.expr import (
|
| 41 |
+
BaseExprVisitor,
|
| 42 |
+
PandasExprVisitor,
|
| 43 |
+
PythonExprVisitor,
|
| 44 |
+
)
|
| 45 |
+
from pandas.core.computation.expressions import (
|
| 46 |
+
NUMEXPR_INSTALLED,
|
| 47 |
+
USE_NUMEXPR,
|
| 48 |
+
)
|
| 49 |
+
from pandas.core.computation.ops import (
|
| 50 |
+
ARITH_OPS_SYMS,
|
| 51 |
+
SPECIAL_CASE_ARITH_OPS_SYMS,
|
| 52 |
+
_binary_math_ops,
|
| 53 |
+
_binary_ops_dict,
|
| 54 |
+
_unary_math_ops,
|
| 55 |
+
)
|
| 56 |
+
from pandas.core.computation.scope import DEFAULT_GLOBALS
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
@pytest.fixture(
|
| 60 |
+
params=(
|
| 61 |
+
pytest.param(
|
| 62 |
+
engine,
|
| 63 |
+
marks=[
|
| 64 |
+
pytest.mark.skipif(
|
| 65 |
+
engine == "numexpr" and not USE_NUMEXPR,
|
| 66 |
+
reason=f"numexpr enabled->{USE_NUMEXPR}, "
|
| 67 |
+
f"installed->{NUMEXPR_INSTALLED}",
|
| 68 |
+
),
|
| 69 |
+
td.skip_if_no("numexpr"),
|
| 70 |
+
],
|
| 71 |
+
)
|
| 72 |
+
for engine in ENGINES
|
| 73 |
+
)
|
| 74 |
+
)
|
| 75 |
+
def engine(request):
|
| 76 |
+
return request.param
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
@pytest.fixture(params=expr.PARSERS)
|
| 80 |
+
def parser(request):
|
| 81 |
+
return request.param
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _eval_single_bin(lhs, cmp1, rhs, engine):
|
| 85 |
+
c = _binary_ops_dict[cmp1]
|
| 86 |
+
if ENGINES[engine].has_neg_frac:
|
| 87 |
+
try:
|
| 88 |
+
return c(lhs, rhs)
|
| 89 |
+
except ValueError as e:
|
| 90 |
+
if str(e).startswith(
|
| 91 |
+
"negative number cannot be raised to a fractional power"
|
| 92 |
+
):
|
| 93 |
+
return np.nan
|
| 94 |
+
raise
|
| 95 |
+
return c(lhs, rhs)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# TODO: using range(5) here is a kludge
|
| 99 |
+
@pytest.fixture(
|
| 100 |
+
params=list(range(5)),
|
| 101 |
+
ids=["DataFrame", "Series", "SeriesNaN", "DataFrameNaN", "float"],
|
| 102 |
+
)
|
| 103 |
+
def lhs(request):
|
| 104 |
+
nan_df1 = DataFrame(np.random.default_rng(2).standard_normal((10, 5)))
|
| 105 |
+
nan_df1[nan_df1 > 0.5] = np.nan
|
| 106 |
+
|
| 107 |
+
opts = (
|
| 108 |
+
DataFrame(np.random.default_rng(2).standard_normal((10, 5))),
|
| 109 |
+
Series(np.random.default_rng(2).standard_normal(5)),
|
| 110 |
+
Series([1, 2, np.nan, np.nan, 5]),
|
| 111 |
+
nan_df1,
|
| 112 |
+
np.random.default_rng(2).standard_normal(),
|
| 113 |
+
)
|
| 114 |
+
return opts[request.param]
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
rhs = lhs
|
| 118 |
+
midhs = lhs
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
@pytest.fixture
|
| 122 |
+
def idx_func_dict():
|
| 123 |
+
return {
|
| 124 |
+
"i": lambda n: Index(np.arange(n), dtype=np.int64),
|
| 125 |
+
"f": lambda n: Index(np.arange(n), dtype=np.float64),
|
| 126 |
+
"s": lambda n: Index([f"{i}_{chr(i)}" for i in range(97, 97 + n)]),
|
| 127 |
+
"dt": lambda n: date_range("2020-01-01", periods=n),
|
| 128 |
+
"td": lambda n: timedelta_range("1 day", periods=n),
|
| 129 |
+
"p": lambda n: period_range("2020-01-01", periods=n, freq="D"),
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class TestEval:
|
| 134 |
+
@pytest.mark.parametrize(
|
| 135 |
+
"cmp1",
|
| 136 |
+
["!=", "==", "<=", ">=", "<", ">"],
|
| 137 |
+
ids=["ne", "eq", "le", "ge", "lt", "gt"],
|
| 138 |
+
)
|
| 139 |
+
@pytest.mark.parametrize("cmp2", [">", "<"], ids=["gt", "lt"])
|
| 140 |
+
@pytest.mark.parametrize("binop", expr.BOOL_OPS_SYMS)
|
| 141 |
+
def test_complex_cmp_ops(self, cmp1, cmp2, binop, lhs, rhs, engine, parser):
|
| 142 |
+
if parser == "python" and binop in ["and", "or"]:
|
| 143 |
+
msg = "'BoolOp' nodes are not implemented"
|
| 144 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 145 |
+
ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)"
|
| 146 |
+
pd.eval(ex, engine=engine, parser=parser)
|
| 147 |
+
return
|
| 148 |
+
|
| 149 |
+
lhs_new = _eval_single_bin(lhs, cmp1, rhs, engine)
|
| 150 |
+
rhs_new = _eval_single_bin(lhs, cmp2, rhs, engine)
|
| 151 |
+
expected = _eval_single_bin(lhs_new, binop, rhs_new, engine)
|
| 152 |
+
|
| 153 |
+
ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)"
|
| 154 |
+
result = pd.eval(ex, engine=engine, parser=parser)
|
| 155 |
+
tm.assert_equal(result, expected)
|
| 156 |
+
|
| 157 |
+
@pytest.mark.parametrize("cmp_op", expr.CMP_OPS_SYMS)
|
| 158 |
+
def test_simple_cmp_ops(self, cmp_op, lhs, rhs, engine, parser):
|
| 159 |
+
lhs = lhs < 0
|
| 160 |
+
rhs = rhs < 0
|
| 161 |
+
|
| 162 |
+
if parser == "python" and cmp_op in ["in", "not in"]:
|
| 163 |
+
msg = "'(In|NotIn)' nodes are not implemented"
|
| 164 |
+
|
| 165 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 166 |
+
ex = f"lhs {cmp_op} rhs"
|
| 167 |
+
pd.eval(ex, engine=engine, parser=parser)
|
| 168 |
+
return
|
| 169 |
+
|
| 170 |
+
ex = f"lhs {cmp_op} rhs"
|
| 171 |
+
msg = "|".join(
|
| 172 |
+
[
|
| 173 |
+
r"only list-like( or dict-like)? objects are allowed to be "
|
| 174 |
+
r"passed to (DataFrame\.)?isin\(\), you passed a "
|
| 175 |
+
r"(`|')bool(`|')",
|
| 176 |
+
"argument of type 'bool' is not iterable",
|
| 177 |
+
]
|
| 178 |
+
)
|
| 179 |
+
if cmp_op in ("in", "not in") and not is_list_like(rhs):
|
| 180 |
+
with pytest.raises(TypeError, match=msg):
|
| 181 |
+
pd.eval(
|
| 182 |
+
ex,
|
| 183 |
+
engine=engine,
|
| 184 |
+
parser=parser,
|
| 185 |
+
local_dict={"lhs": lhs, "rhs": rhs},
|
| 186 |
+
)
|
| 187 |
+
else:
|
| 188 |
+
expected = _eval_single_bin(lhs, cmp_op, rhs, engine)
|
| 189 |
+
result = pd.eval(ex, engine=engine, parser=parser)
|
| 190 |
+
tm.assert_equal(result, expected)
|
| 191 |
+
|
| 192 |
+
@pytest.mark.parametrize("op", expr.CMP_OPS_SYMS)
|
| 193 |
+
def test_compound_invert_op(self, op, lhs, rhs, request, engine, parser):
|
| 194 |
+
if parser == "python" and op in ["in", "not in"]:
|
| 195 |
+
msg = "'(In|NotIn)' nodes are not implemented"
|
| 196 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 197 |
+
ex = f"~(lhs {op} rhs)"
|
| 198 |
+
pd.eval(ex, engine=engine, parser=parser)
|
| 199 |
+
return
|
| 200 |
+
|
| 201 |
+
if (
|
| 202 |
+
is_float(lhs)
|
| 203 |
+
and not is_float(rhs)
|
| 204 |
+
and op in ["in", "not in"]
|
| 205 |
+
and engine == "python"
|
| 206 |
+
and parser == "pandas"
|
| 207 |
+
):
|
| 208 |
+
mark = pytest.mark.xfail(
|
| 209 |
+
reason="Looks like expected is negative, unclear whether "
|
| 210 |
+
"expected is incorrect or result is incorrect"
|
| 211 |
+
)
|
| 212 |
+
request.applymarker(mark)
|
| 213 |
+
skip_these = ["in", "not in"]
|
| 214 |
+
ex = f"~(lhs {op} rhs)"
|
| 215 |
+
|
| 216 |
+
msg = "|".join(
|
| 217 |
+
[
|
| 218 |
+
r"only list-like( or dict-like)? objects are allowed to be "
|
| 219 |
+
r"passed to (DataFrame\.)?isin\(\), you passed a "
|
| 220 |
+
r"(`|')float(`|')",
|
| 221 |
+
"argument of type 'float' is not iterable",
|
| 222 |
+
]
|
| 223 |
+
)
|
| 224 |
+
if is_scalar(rhs) and op in skip_these:
|
| 225 |
+
with pytest.raises(TypeError, match=msg):
|
| 226 |
+
pd.eval(
|
| 227 |
+
ex,
|
| 228 |
+
engine=engine,
|
| 229 |
+
parser=parser,
|
| 230 |
+
local_dict={"lhs": lhs, "rhs": rhs},
|
| 231 |
+
)
|
| 232 |
+
else:
|
| 233 |
+
# compound
|
| 234 |
+
if is_scalar(lhs) and is_scalar(rhs):
|
| 235 |
+
lhs, rhs = (np.array([x]) for x in (lhs, rhs))
|
| 236 |
+
expected = _eval_single_bin(lhs, op, rhs, engine)
|
| 237 |
+
if is_scalar(expected):
|
| 238 |
+
expected = not expected
|
| 239 |
+
else:
|
| 240 |
+
expected = ~expected
|
| 241 |
+
result = pd.eval(ex, engine=engine, parser=parser)
|
| 242 |
+
tm.assert_almost_equal(expected, result)
|
| 243 |
+
|
| 244 |
+
@pytest.mark.parametrize("cmp1", ["<", ">"])
|
| 245 |
+
@pytest.mark.parametrize("cmp2", ["<", ">"])
|
| 246 |
+
def test_chained_cmp_op(self, cmp1, cmp2, lhs, midhs, rhs, engine, parser):
|
| 247 |
+
mid = midhs
|
| 248 |
+
if parser == "python":
|
| 249 |
+
ex1 = f"lhs {cmp1} mid {cmp2} rhs"
|
| 250 |
+
msg = "'BoolOp' nodes are not implemented"
|
| 251 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 252 |
+
pd.eval(ex1, engine=engine, parser=parser)
|
| 253 |
+
return
|
| 254 |
+
|
| 255 |
+
lhs_new = _eval_single_bin(lhs, cmp1, mid, engine)
|
| 256 |
+
rhs_new = _eval_single_bin(mid, cmp2, rhs, engine)
|
| 257 |
+
|
| 258 |
+
if lhs_new is not None and rhs_new is not None:
|
| 259 |
+
ex1 = f"lhs {cmp1} mid {cmp2} rhs"
|
| 260 |
+
ex2 = f"lhs {cmp1} mid and mid {cmp2} rhs"
|
| 261 |
+
ex3 = f"(lhs {cmp1} mid) & (mid {cmp2} rhs)"
|
| 262 |
+
expected = _eval_single_bin(lhs_new, "&", rhs_new, engine)
|
| 263 |
+
|
| 264 |
+
for ex in (ex1, ex2, ex3):
|
| 265 |
+
result = pd.eval(ex, engine=engine, parser=parser)
|
| 266 |
+
|
| 267 |
+
tm.assert_almost_equal(result, expected)
|
| 268 |
+
|
| 269 |
+
@pytest.mark.parametrize(
|
| 270 |
+
"arith1", sorted(set(ARITH_OPS_SYMS).difference(SPECIAL_CASE_ARITH_OPS_SYMS))
|
| 271 |
+
)
|
| 272 |
+
def test_binary_arith_ops(self, arith1, lhs, rhs, engine, parser):
|
| 273 |
+
ex = f"lhs {arith1} rhs"
|
| 274 |
+
result = pd.eval(ex, engine=engine, parser=parser)
|
| 275 |
+
expected = _eval_single_bin(lhs, arith1, rhs, engine)
|
| 276 |
+
|
| 277 |
+
tm.assert_almost_equal(result, expected)
|
| 278 |
+
ex = f"lhs {arith1} rhs {arith1} rhs"
|
| 279 |
+
result = pd.eval(ex, engine=engine, parser=parser)
|
| 280 |
+
nlhs = _eval_single_bin(lhs, arith1, rhs, engine)
|
| 281 |
+
try:
|
| 282 |
+
nlhs, ghs = nlhs.align(rhs)
|
| 283 |
+
except (ValueError, TypeError, AttributeError):
|
| 284 |
+
# ValueError: series frame or frame series align
|
| 285 |
+
# TypeError, AttributeError: series or frame with scalar align
|
| 286 |
+
return
|
| 287 |
+
else:
|
| 288 |
+
if engine == "numexpr":
|
| 289 |
+
import numexpr as ne
|
| 290 |
+
|
| 291 |
+
# direct numpy comparison
|
| 292 |
+
expected = ne.evaluate(f"nlhs {arith1} ghs")
|
| 293 |
+
# Update assert statement due to unreliable numerical
|
| 294 |
+
# precision component (GH37328)
|
| 295 |
+
# TODO: update testing code so that assert_almost_equal statement
|
| 296 |
+
# can be replaced again by the assert_numpy_array_equal statement
|
| 297 |
+
tm.assert_almost_equal(result.values, expected)
|
| 298 |
+
else:
|
| 299 |
+
expected = eval(f"nlhs {arith1} ghs")
|
| 300 |
+
tm.assert_almost_equal(result, expected)
|
| 301 |
+
|
| 302 |
+
# modulus, pow, and floor division require special casing
|
| 303 |
+
|
| 304 |
+
def test_modulus(self, lhs, rhs, engine, parser):
|
| 305 |
+
ex = r"lhs % rhs"
|
| 306 |
+
result = pd.eval(ex, engine=engine, parser=parser)
|
| 307 |
+
expected = lhs % rhs
|
| 308 |
+
tm.assert_almost_equal(result, expected)
|
| 309 |
+
|
| 310 |
+
if engine == "numexpr":
|
| 311 |
+
import numexpr as ne
|
| 312 |
+
|
| 313 |
+
expected = ne.evaluate(r"expected % rhs")
|
| 314 |
+
if isinstance(result, (DataFrame, Series)):
|
| 315 |
+
tm.assert_almost_equal(result.values, expected)
|
| 316 |
+
else:
|
| 317 |
+
tm.assert_almost_equal(result, expected.item())
|
| 318 |
+
else:
|
| 319 |
+
expected = _eval_single_bin(expected, "%", rhs, engine)
|
| 320 |
+
tm.assert_almost_equal(result, expected)
|
| 321 |
+
|
| 322 |
+
def test_floor_division(self, lhs, rhs, engine, parser):
|
| 323 |
+
ex = "lhs // rhs"
|
| 324 |
+
|
| 325 |
+
if engine == "python":
|
| 326 |
+
res = pd.eval(ex, engine=engine, parser=parser)
|
| 327 |
+
expected = lhs // rhs
|
| 328 |
+
tm.assert_equal(res, expected)
|
| 329 |
+
else:
|
| 330 |
+
msg = (
|
| 331 |
+
r"unsupported operand type\(s\) for //: 'VariableNode' and "
|
| 332 |
+
"'VariableNode'"
|
| 333 |
+
)
|
| 334 |
+
with pytest.raises(TypeError, match=msg):
|
| 335 |
+
pd.eval(
|
| 336 |
+
ex,
|
| 337 |
+
local_dict={"lhs": lhs, "rhs": rhs},
|
| 338 |
+
engine=engine,
|
| 339 |
+
parser=parser,
|
| 340 |
+
)
|
| 341 |
+
|
| 342 |
+
@td.skip_if_windows
|
| 343 |
+
def test_pow(self, lhs, rhs, engine, parser):
|
| 344 |
+
# odd failure on win32 platform, so skip
|
| 345 |
+
ex = "lhs ** rhs"
|
| 346 |
+
expected = _eval_single_bin(lhs, "**", rhs, engine)
|
| 347 |
+
result = pd.eval(ex, engine=engine, parser=parser)
|
| 348 |
+
|
| 349 |
+
if (
|
| 350 |
+
is_scalar(lhs)
|
| 351 |
+
and is_scalar(rhs)
|
| 352 |
+
and isinstance(expected, (complex, np.complexfloating))
|
| 353 |
+
and np.isnan(result)
|
| 354 |
+
):
|
| 355 |
+
msg = "(DataFrame.columns|numpy array) are different"
|
| 356 |
+
with pytest.raises(AssertionError, match=msg):
|
| 357 |
+
tm.assert_numpy_array_equal(result, expected)
|
| 358 |
+
else:
|
| 359 |
+
tm.assert_almost_equal(result, expected)
|
| 360 |
+
|
| 361 |
+
ex = "(lhs ** rhs) ** rhs"
|
| 362 |
+
result = pd.eval(ex, engine=engine, parser=parser)
|
| 363 |
+
|
| 364 |
+
middle = _eval_single_bin(lhs, "**", rhs, engine)
|
| 365 |
+
expected = _eval_single_bin(middle, "**", rhs, engine)
|
| 366 |
+
tm.assert_almost_equal(result, expected)
|
| 367 |
+
|
| 368 |
+
def test_check_single_invert_op(self, lhs, engine, parser):
|
| 369 |
+
# simple
|
| 370 |
+
try:
|
| 371 |
+
elb = lhs.astype(bool)
|
| 372 |
+
except AttributeError:
|
| 373 |
+
elb = np.array([bool(lhs)])
|
| 374 |
+
expected = ~elb
|
| 375 |
+
result = pd.eval("~elb", engine=engine, parser=parser)
|
| 376 |
+
tm.assert_almost_equal(expected, result)
|
| 377 |
+
|
| 378 |
+
def test_frame_invert(self, engine, parser):
    """``~frame``: float/object always raise, int raises only on numexpr,
    bool works on every engine."""
    expr = "~lhs"

    # ~ ##
    # frame
    # float always raises
    lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)))
    if engine == "numexpr":
        msg = "couldn't find matching opcode for 'invert_dd'"
        with pytest.raises(NotImplementedError, match=msg):
            pd.eval(expr, engine=engine, parser=parser)
    else:
        msg = "ufunc 'invert' not supported for the input types"
        with pytest.raises(TypeError, match=msg):
            pd.eval(expr, engine=engine, parser=parser)

    # int raises on numexpr
    lhs = DataFrame(np.random.default_rng(2).integers(5, size=(5, 2)))
    if engine == "numexpr":
        msg = "couldn't find matching opcode for 'invert"
        with pytest.raises(NotImplementedError, match=msg):
            pd.eval(expr, engine=engine, parser=parser)
    else:
        expect = ~lhs
        result = pd.eval(expr, engine=engine, parser=parser)
        tm.assert_frame_equal(expect, result)

    # bool always works
    lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5)
    expect = ~lhs
    result = pd.eval(expr, engine=engine, parser=parser)
    tm.assert_frame_equal(expect, result)

    # object raises
    lhs = DataFrame(
        {"b": ["a", 1, 2.0], "c": np.random.default_rng(2).standard_normal(3) > 0.5}
    )
    if engine == "numexpr":
        with pytest.raises(ValueError, match="unknown type object"):
            pd.eval(expr, engine=engine, parser=parser)
    else:
        msg = "bad operand type for unary ~: 'str'"
        with pytest.raises(TypeError, match=msg):
            pd.eval(expr, engine=engine, parser=parser)
def test_series_invert(self, engine, parser):
    """``~series``: same raise/succeed matrix as the frame case, per dtype."""
    # ~ ####
    expr = "~lhs"

    # series
    # float raises
    lhs = Series(np.random.default_rng(2).standard_normal(5))
    if engine == "numexpr":
        msg = "couldn't find matching opcode for 'invert_dd'"
        with pytest.raises(NotImplementedError, match=msg):
            result = pd.eval(expr, engine=engine, parser=parser)
    else:
        msg = "ufunc 'invert' not supported for the input types"
        with pytest.raises(TypeError, match=msg):
            pd.eval(expr, engine=engine, parser=parser)

    # int raises on numexpr
    lhs = Series(np.random.default_rng(2).integers(5, size=5))
    if engine == "numexpr":
        msg = "couldn't find matching opcode for 'invert"
        with pytest.raises(NotImplementedError, match=msg):
            pd.eval(expr, engine=engine, parser=parser)
    else:
        expect = ~lhs
        result = pd.eval(expr, engine=engine, parser=parser)
        tm.assert_series_equal(expect, result)

    # bool
    lhs = Series(np.random.default_rng(2).standard_normal(5) > 0.5)
    expect = ~lhs
    result = pd.eval(expr, engine=engine, parser=parser)
    tm.assert_series_equal(expect, result)

    # float
    # int
    # bool

    # object
    lhs = Series(["a", 1, 2.0])
    if engine == "numexpr":
        with pytest.raises(ValueError, match="unknown type object"):
            pd.eval(expr, engine=engine, parser=parser)
    else:
        msg = "bad operand type for unary ~: 'str'"
        with pytest.raises(TypeError, match=msg):
            pd.eval(expr, engine=engine, parser=parser)
def test_frame_negate(self, engine, parser):
|
| 471 |
+
expr = "-lhs"
|
| 472 |
+
|
| 473 |
+
# float
|
| 474 |
+
lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)))
|
| 475 |
+
expect = -lhs
|
| 476 |
+
result = pd.eval(expr, engine=engine, parser=parser)
|
| 477 |
+
tm.assert_frame_equal(expect, result)
|
| 478 |
+
|
| 479 |
+
# int
|
| 480 |
+
lhs = DataFrame(np.random.default_rng(2).integers(5, size=(5, 2)))
|
| 481 |
+
expect = -lhs
|
| 482 |
+
result = pd.eval(expr, engine=engine, parser=parser)
|
| 483 |
+
tm.assert_frame_equal(expect, result)
|
| 484 |
+
|
| 485 |
+
# bool doesn't work with numexpr but works elsewhere
|
| 486 |
+
lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5)
|
| 487 |
+
if engine == "numexpr":
|
| 488 |
+
msg = "couldn't find matching opcode for 'neg_bb'"
|
| 489 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 490 |
+
pd.eval(expr, engine=engine, parser=parser)
|
| 491 |
+
else:
|
| 492 |
+
expect = -lhs
|
| 493 |
+
result = pd.eval(expr, engine=engine, parser=parser)
|
| 494 |
+
tm.assert_frame_equal(expect, result)
|
| 495 |
+
|
| 496 |
+
def test_series_negate(self, engine, parser):
|
| 497 |
+
expr = "-lhs"
|
| 498 |
+
|
| 499 |
+
# float
|
| 500 |
+
lhs = Series(np.random.default_rng(2).standard_normal(5))
|
| 501 |
+
expect = -lhs
|
| 502 |
+
result = pd.eval(expr, engine=engine, parser=parser)
|
| 503 |
+
tm.assert_series_equal(expect, result)
|
| 504 |
+
|
| 505 |
+
# int
|
| 506 |
+
lhs = Series(np.random.default_rng(2).integers(5, size=5))
|
| 507 |
+
expect = -lhs
|
| 508 |
+
result = pd.eval(expr, engine=engine, parser=parser)
|
| 509 |
+
tm.assert_series_equal(expect, result)
|
| 510 |
+
|
| 511 |
+
# bool doesn't work with numexpr but works elsewhere
|
| 512 |
+
lhs = Series(np.random.default_rng(2).standard_normal(5) > 0.5)
|
| 513 |
+
if engine == "numexpr":
|
| 514 |
+
msg = "couldn't find matching opcode for 'neg_bb'"
|
| 515 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 516 |
+
pd.eval(expr, engine=engine, parser=parser)
|
| 517 |
+
else:
|
| 518 |
+
expect = -lhs
|
| 519 |
+
result = pd.eval(expr, engine=engine, parser=parser)
|
| 520 |
+
tm.assert_series_equal(expect, result)
|
| 521 |
+
|
| 522 |
+
@pytest.mark.parametrize(
    "lhs",
    [
        # Float
        DataFrame(np.random.default_rng(2).standard_normal((5, 2))),
        # Int
        DataFrame(np.random.default_rng(2).integers(5, size=(5, 2))),
        # bool doesn't work with numexpr but works elsewhere
        DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5),
    ],
)
def test_frame_pos(self, lhs, engine, parser):
    """Unary ``+`` is the identity for float, int and bool frames."""
    result = pd.eval("+lhs", engine=engine, parser=parser)
    tm.assert_frame_equal(lhs, result)
@pytest.mark.parametrize(
    "lhs",
    [
        # Float
        Series(np.random.default_rng(2).standard_normal(5)),
        # Int
        Series(np.random.default_rng(2).integers(5, size=5)),
        # bool doesn't work with numexpr but works elsewhere
        Series(np.random.default_rng(2).standard_normal(5) > 0.5),
    ],
)
def test_series_pos(self, lhs, engine, parser):
    """Unary ``+`` is the identity for float, int and bool Series."""
    result = pd.eval("+lhs", engine=engine, parser=parser)
    tm.assert_series_equal(lhs, result)
def test_scalar_unary(self, engine, parser):
    """Unary ``~``/``-``/``+`` on scalar literals; ``~bool`` warns on 3.12+."""
    msg = "bad operand type for unary ~: 'float'"
    warn = None
    # Python 3.12 deprecates bitwise inversion on bools; the numexpr+pandas
    # combination does not surface that warning here (presumably the op is
    # evaluated elsewhere — confirm against pd.eval internals)
    if PY312 and not (engine == "numexpr" and parser == "pandas"):
        warn = DeprecationWarning
    with pytest.raises(TypeError, match=msg):
        pd.eval("~1.0", engine=engine, parser=parser)

    assert pd.eval("-1.0", parser=parser, engine=engine) == -1.0
    assert pd.eval("+1.0", parser=parser, engine=engine) == +1.0
    assert pd.eval("~1", parser=parser, engine=engine) == ~1
    assert pd.eval("-1", parser=parser, engine=engine) == -1
    assert pd.eval("+1", parser=parser, engine=engine) == +1
    with tm.assert_produces_warning(
        warn, match="Bitwise inversion", check_stacklevel=False
    ):
        assert pd.eval("~True", parser=parser, engine=engine) == ~True
    with tm.assert_produces_warning(
        warn, match="Bitwise inversion", check_stacklevel=False
    ):
        assert pd.eval("~False", parser=parser, engine=engine) == ~False
    assert pd.eval("-True", parser=parser, engine=engine) == -True
    assert pd.eval("-False", parser=parser, engine=engine) == -False
    assert pd.eval("+True", parser=parser, engine=engine) == +True
    assert pd.eval("+False", parser=parser, engine=engine) == +False
def test_unary_in_array(self):
|
| 585 |
+
# GH 11235
|
| 586 |
+
# TODO: 2022-01-29: result return list with numexpr 2.7.3 in CI
|
| 587 |
+
# but cannot reproduce locally
|
| 588 |
+
result = np.array(
|
| 589 |
+
pd.eval("[-True, True, +True, -False, False, +False, -37, 37, ~37, +37]"),
|
| 590 |
+
dtype=np.object_,
|
| 591 |
+
)
|
| 592 |
+
expected = np.array(
|
| 593 |
+
[
|
| 594 |
+
-True,
|
| 595 |
+
True,
|
| 596 |
+
+True,
|
| 597 |
+
-False,
|
| 598 |
+
False,
|
| 599 |
+
+False,
|
| 600 |
+
-37,
|
| 601 |
+
37,
|
| 602 |
+
~37,
|
| 603 |
+
+37,
|
| 604 |
+
],
|
| 605 |
+
dtype=np.object_,
|
| 606 |
+
)
|
| 607 |
+
tm.assert_numpy_array_equal(result, expected)
|
| 608 |
+
|
| 609 |
+
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("expr", ["x < -0.1", "-5 > x"])
def test_float_comparison_bin_op(self, dtype, expr):
    """Comparisons against negative literals keep float precision (GH 16363)."""
    frame = DataFrame({"x": np.array([0], dtype=dtype)})
    outcome = frame.eval(expr)
    assert outcome.values == np.array([False])
def test_unary_in_function(self):
    """Negative literals as arguments to method calls inside eval (GH 46471)."""
    df = DataFrame({"x": [0, 1, np.nan]})

    # column name becomes None if using numexpr,
    # so only check names when the engine is not numexpr
    check_names = not USE_NUMEXPR
    result = df.eval("x.fillna(-1)")
    tm.assert_series_equal(result, df.x.fillna(-1), check_names=check_names)

    result = df.eval("x.shift(1, fill_value=-1)")
    tm.assert_series_equal(result, df.x.shift(1, fill_value=-1), check_names=check_names)
@pytest.mark.parametrize(
    "ex",
    (
        "1 or 2",
        "1 and 2",
        "a and b",
        "a or b",
        "1 or 2 and (3 + 2) > 3",
        "2 * x > 2 or 1 and 2",
        "2 * df > 3 and 1 or a",
    ),
)
def test_disallow_scalar_bool_ops(self, ex, engine, parser):
    """Boolean ops over scalar-only operands are rejected by every engine."""
    x = np.random.default_rng(2).standard_normal(3)  # noqa: F841
    a, b = 1, 2  # noqa: F841
    df = DataFrame(np.random.default_rng(2).standard_normal((3, 2)))  # noqa: F841

    err = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not"
    with pytest.raises(NotImplementedError, match=err):
        pd.eval(ex, engine=engine, parser=parser)
def test_identical(self, engine, parser):
|
| 652 |
+
# see gh-10546
|
| 653 |
+
x = 1
|
| 654 |
+
result = pd.eval("x", engine=engine, parser=parser)
|
| 655 |
+
assert result == 1
|
| 656 |
+
assert is_scalar(result)
|
| 657 |
+
|
| 658 |
+
x = 1.5
|
| 659 |
+
result = pd.eval("x", engine=engine, parser=parser)
|
| 660 |
+
assert result == 1.5
|
| 661 |
+
assert is_scalar(result)
|
| 662 |
+
|
| 663 |
+
x = False
|
| 664 |
+
result = pd.eval("x", engine=engine, parser=parser)
|
| 665 |
+
assert not result
|
| 666 |
+
assert is_bool(result)
|
| 667 |
+
assert is_scalar(result)
|
| 668 |
+
|
| 669 |
+
x = np.array([1])
|
| 670 |
+
result = pd.eval("x", engine=engine, parser=parser)
|
| 671 |
+
tm.assert_numpy_array_equal(result, np.array([1]))
|
| 672 |
+
assert result.shape == (1,)
|
| 673 |
+
|
| 674 |
+
x = np.array([1.5])
|
| 675 |
+
result = pd.eval("x", engine=engine, parser=parser)
|
| 676 |
+
tm.assert_numpy_array_equal(result, np.array([1.5]))
|
| 677 |
+
assert result.shape == (1,)
|
| 678 |
+
|
| 679 |
+
x = np.array([False]) # noqa: F841
|
| 680 |
+
result = pd.eval("x", engine=engine, parser=parser)
|
| 681 |
+
tm.assert_numpy_array_equal(result, np.array([False]))
|
| 682 |
+
assert result.shape == (1,)
|
| 683 |
+
|
| 684 |
+
def test_line_continuation(self, engine, parser):
|
| 685 |
+
# GH 11149
|
| 686 |
+
exp = """1 + 2 * \
|
| 687 |
+
5 - 1 + 2 """
|
| 688 |
+
result = pd.eval(exp, engine=engine, parser=parser)
|
| 689 |
+
assert result == 12
|
| 690 |
+
|
| 691 |
+
def test_float_truncation(self, engine, parser):
    """Float literals keep full precision instead of being truncated (GH 14241)."""
    exp = "1000000000.006"
    result = pd.eval(exp, engine=engine, parser=parser)
    expected = np.float64(exp)
    assert result == expected

    # query comparisons must honor the 4th decimal place
    df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})
    cutoff = 1000000000.0006
    result = df.query(f"A < {cutoff:.4f}")
    assert result.empty

    cutoff = 1000000000.0010
    result = df.query(f"A > {cutoff:.4f}")
    expected = df.loc[[1, 2], :]
    tm.assert_frame_equal(expected, result)

    # exact equality at full precision
    exact = 1000000000.0011
    result = df.query(f"A == {exact:.4f}")
    expected = df.loc[[1], :]
    tm.assert_frame_equal(expected, result)
def test_disallow_python_keywords(self):
    """Python keywords as column or index names are rejected with SyntaxError (GH 18221)."""
    msg = "Python keyword not valid identifier in numexpr query"

    df = DataFrame([[0, 0, 0]], columns=["foo", "bar", "class"])
    with pytest.raises(SyntaxError, match=msg):
        df.query("class == 0")

    df = DataFrame()
    df.index.name = "lambda"
    with pytest.raises(SyntaxError, match=msg):
        df.query("lambda == 0")
def test_true_false_logic(self):
    """``not`` on boolean literals follows bitwise-invert semantics (GH 25823)."""
    # This behavior is deprecated in Python 3.12
    with tm.maybe_produces_warning(
        DeprecationWarning, PY312, check_stacklevel=False
    ):
        # not True -> ~True == -2, not False -> ~False == -1
        assert pd.eval("not True") == -2
        assert pd.eval("not False") == -1
        assert pd.eval("True and not True") == 0
def test_and_logic_string_match(self):
|
| 736 |
+
# GH 25823
|
| 737 |
+
event = Series({"a": "hello"})
|
| 738 |
+
assert pd.eval(f"{event.str.match('hello').a}")
|
| 739 |
+
assert pd.eval(f"{event.str.match('hello').a and event.str.match('hello').a}")
|
| 740 |
+
|
| 741 |
+
|
| 742 |
+
# -------------------------------------
|
| 743 |
+
# gh-12388: Typecasting rules consistency with python
|
| 744 |
+
|
| 745 |
+
|
| 746 |
+
class TestTypeCasting:
    """Result dtype of scalar/frame binops matches the frame's dtype (GH#21374)."""

    @pytest.mark.parametrize("op", ["+", "-", "*", "**", "/"])
    # maybe someday... numexpr has too many upcasting rules now
    # chain(*(np.core.sctypes[x] for x in ['uint', 'int', 'float']))
    @pytest.mark.parametrize("left_right", [("df", "3"), ("3", "df")])
    def test_binop_typecasting(
        self, engine, parser, op, complex_or_float_dtype, left_right, request
    ):
        # GH#21374
        dtype = complex_or_float_dtype
        df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)), dtype=dtype)
        left, right = left_right
        s = f"{left} {op} {right}"
        res = pd.eval(s, engine=engine, parser=parser)
        # xfail is applied after the eval call on purpose: the eval succeeds,
        # it is the dtype assertions below that fail for complex64 on numexpr
        if dtype == "complex64" and engine == "numexpr":
            mark = pytest.mark.xfail(
                reason="numexpr issue with complex that are upcast "
                "to complex 128 "
                "https://github.com/pydata/numexpr/issues/492"
            )
            request.applymarker(mark)
        assert df.values.dtype == dtype
        assert res.values.dtype == dtype
        tm.assert_frame_equal(res, eval(s), check_exact=False)
+
|
| 772 |
+
# -------------------------------------
|
| 773 |
+
# Basic and complex alignment
|
| 774 |
+
|
| 775 |
+
|
| 776 |
+
def should_warn(*args):
    """Return True when an eval alignment over *args* is expected to warn.

    A RuntimeWarning is expected only when no index is monotonically
    increasing AND an odd number (i.e. exactly one of two) of the indexes
    is datetime64-typed.
    """
    any_monotonic = any(arg.is_monotonic_increasing for arg in args)
    dt_flags = (issubclass(arg.dtype.type, np.datetime64) for arg in args)
    odd_dt_count = reduce(operator.xor, dt_flags)
    return (not any_monotonic) and odd_dt_count
class TestAlignment:
    """Index/column alignment behavior of pd.eval across engines and parsers."""

    # single-letter codes consumed by the idx_func_dict fixture
    # (presumably "i" -> int index, "s" -> string index, "dt" -> datetime
    # index — confirm against the fixture definition)
    index_types = ["i", "s", "dt"]
    lhs_index_types = index_types + ["s"]  # 'p'

    def test_align_nested_unary_op(self, engine, parser):
        """A unary op nested inside a binary op aligns like the plain binop."""
        s = "df * ~2"
        df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
        res = pd.eval(s, engine=engine, parser=parser)
        tm.assert_frame_equal(res, df * ~2)

    @pytest.mark.filterwarnings("always::RuntimeWarning")
    @pytest.mark.parametrize("lr_idx_type", lhs_index_types)
    @pytest.mark.parametrize("rr_idx_type", index_types)
    @pytest.mark.parametrize("c_idx_type", index_types)
    def test_basic_frame_alignment(
        self, engine, parser, lr_idx_type, rr_idx_type, c_idx_type, idx_func_dict
    ):
        """frame + frame with differing row indexes matches the native result."""
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 10)),
            index=idx_func_dict[lr_idx_type](10),
            columns=idx_func_dict[c_idx_type](10),
        )
        df2 = DataFrame(
            np.random.default_rng(2).standard_normal((20, 10)),
            index=idx_func_dict[rr_idx_type](20),
            columns=idx_func_dict[c_idx_type](10),
        )
        # only warns if not monotonic and not sortable
        if should_warn(df.index, df2.index):
            with tm.assert_produces_warning(RuntimeWarning):
                res = pd.eval("df + df2", engine=engine, parser=parser)
        else:
            res = pd.eval("df + df2", engine=engine, parser=parser)
        tm.assert_frame_equal(res, df + df2)

    @pytest.mark.parametrize("r_idx_type", lhs_index_types)
    @pytest.mark.parametrize("c_idx_type", lhs_index_types)
    def test_frame_comparison(
        self, engine, parser, r_idx_type, c_idx_type, idx_func_dict
    ):
        """Comparisons against a scalar and an aligned frame match native ops."""
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 10)),
            index=idx_func_dict[r_idx_type](10),
            columns=idx_func_dict[c_idx_type](10),
        )
        res = pd.eval("df < 2", engine=engine, parser=parser)
        tm.assert_frame_equal(res, df < 2)

        df3 = DataFrame(
            np.random.default_rng(2).standard_normal(df.shape),
            index=df.index,
            columns=df.columns,
        )
        res = pd.eval("df < df3", engine=engine, parser=parser)
        tm.assert_frame_equal(res, df < df3)

    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
    @pytest.mark.parametrize("r1", lhs_index_types)
    @pytest.mark.parametrize("c1", index_types)
    @pytest.mark.parametrize("r2", index_types)
    @pytest.mark.parametrize("c2", index_types)
    def test_medium_complex_frame_alignment(
        self, engine, parser, r1, c1, r2, c2, idx_func_dict
    ):
        """Three-frame chained addition aligns like the native expression."""
        df = DataFrame(
            np.random.default_rng(2).standard_normal((3, 2)),
            index=idx_func_dict[r1](3),
            columns=idx_func_dict[c1](2),
        )
        df2 = DataFrame(
            np.random.default_rng(2).standard_normal((4, 2)),
            index=idx_func_dict[r2](4),
            columns=idx_func_dict[c2](2),
        )
        df3 = DataFrame(
            np.random.default_rng(2).standard_normal((5, 2)),
            index=idx_func_dict[r2](5),
            columns=idx_func_dict[c2](2),
        )
        if should_warn(df.index, df2.index, df3.index):
            with tm.assert_produces_warning(RuntimeWarning):
                res = pd.eval("df + df2 + df3", engine=engine, parser=parser)
        else:
            res = pd.eval("df + df2 + df3", engine=engine, parser=parser)
        tm.assert_frame_equal(res, df + df2 + df3)

    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
    @pytest.mark.parametrize("index_name", ["index", "columns"])
    @pytest.mark.parametrize("c_idx_type", index_types)
    @pytest.mark.parametrize("r_idx_type", lhs_index_types)
    def test_basic_frame_series_alignment(
        self, engine, parser, index_name, r_idx_type, c_idx_type, idx_func_dict
    ):
        """frame + series aligns; with dt indexes numexpr needs df.add(s)."""
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 10)),
            index=idx_func_dict[r_idx_type](10),
            columns=idx_func_dict[c_idx_type](10),
        )
        index = getattr(df, index_name)
        s = Series(np.random.default_rng(2).standard_normal(5), index[:5])

        if should_warn(df.index, s.index):
            with tm.assert_produces_warning(RuntimeWarning):
                res = pd.eval("df + s", engine=engine, parser=parser)
        else:
            res = pd.eval("df + s", engine=engine, parser=parser)

        if r_idx_type == "dt" or c_idx_type == "dt":
            expected = df.add(s) if engine == "numexpr" else df + s
        else:
            expected = df + s
        tm.assert_frame_equal(res, expected)

    @pytest.mark.parametrize("index_name", ["index", "columns"])
    @pytest.mark.parametrize(
        "r_idx_type, c_idx_type",
        list(product(["i", "s"], ["i", "s"])) + [("dt", "dt")],
    )
    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
    def test_basic_series_frame_alignment(
        self, request, engine, parser, index_name, r_idx_type, c_idx_type, idx_func_dict
    ):
        """series + frame mirrors frame + series (one flaky combo is xfailed)."""
        if (
            engine == "numexpr"
            and parser in ("pandas", "python")
            and index_name == "index"
            and r_idx_type == "i"
            and c_idx_type == "s"
        ):
            reason = (
                f"Flaky column ordering when engine={engine}, "
                f"parser={parser}, index_name={index_name}, "
                f"r_idx_type={r_idx_type}, c_idx_type={c_idx_type}"
            )
            request.applymarker(pytest.mark.xfail(reason=reason, strict=False))
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 7)),
            index=idx_func_dict[r_idx_type](10),
            columns=idx_func_dict[c_idx_type](7),
        )
        index = getattr(df, index_name)
        s = Series(np.random.default_rng(2).standard_normal(5), index[:5])
        if should_warn(s.index, df.index):
            with tm.assert_produces_warning(RuntimeWarning):
                res = pd.eval("s + df", engine=engine, parser=parser)
        else:
            res = pd.eval("s + df", engine=engine, parser=parser)

        if r_idx_type == "dt" or c_idx_type == "dt":
            expected = df.add(s) if engine == "numexpr" else s + df
        else:
            expected = s + df
        tm.assert_frame_equal(res, expected)

    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
    @pytest.mark.parametrize("c_idx_type", index_types)
    @pytest.mark.parametrize("r_idx_type", lhs_index_types)
    @pytest.mark.parametrize("index_name", ["index", "columns"])
    @pytest.mark.parametrize("op", ["+", "*"])
    def test_series_frame_commutativity(
        self, engine, parser, index_name, op, r_idx_type, c_idx_type, idx_func_dict
    ):
        """``s op df`` equals ``df op s`` (checked on numexpr, non-dt only)."""
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 10)),
            index=idx_func_dict[r_idx_type](10),
            columns=idx_func_dict[c_idx_type](10),
        )
        index = getattr(df, index_name)
        s = Series(np.random.default_rng(2).standard_normal(5), index[:5])

        lhs = f"s {op} df"
        rhs = f"df {op} s"
        if should_warn(df.index, s.index):
            with tm.assert_produces_warning(RuntimeWarning):
                a = pd.eval(lhs, engine=engine, parser=parser)
            with tm.assert_produces_warning(RuntimeWarning):
                b = pd.eval(rhs, engine=engine, parser=parser)
        else:
            a = pd.eval(lhs, engine=engine, parser=parser)
            b = pd.eval(rhs, engine=engine, parser=parser)

        if r_idx_type != "dt" and c_idx_type != "dt":
            if engine == "numexpr":
                tm.assert_frame_equal(a, b)

    @pytest.mark.filterwarnings("always::RuntimeWarning")
    @pytest.mark.parametrize("r1", lhs_index_types)
    @pytest.mark.parametrize("c1", index_types)
    @pytest.mark.parametrize("r2", index_types)
    @pytest.mark.parametrize("c2", index_types)
    def test_complex_series_frame_alignment(
        self, engine, parser, r1, c1, r2, c2, idx_func_dict
    ):
        """frame + series + frame chained alignment matches piecewise result."""
        n = 3
        m1 = 5
        m2 = 2 * m1
        df = DataFrame(
            np.random.default_rng(2).standard_normal((m1, n)),
            index=idx_func_dict[r1](m1),
            columns=idx_func_dict[c1](n),
        )
        df2 = DataFrame(
            np.random.default_rng(2).standard_normal((m2, n)),
            index=idx_func_dict[r2](m2),
            columns=idx_func_dict[c2](n),
        )
        index = df2.columns
        ser = Series(np.random.default_rng(2).standard_normal(n), index[:n])

        # build the expectation in the same two steps pd.eval will take
        if r2 == "dt" or c2 == "dt":
            if engine == "numexpr":
                expected2 = df2.add(ser)
            else:
                expected2 = df2 + ser
        else:
            expected2 = df2 + ser

        if r1 == "dt" or c1 == "dt":
            if engine == "numexpr":
                expected = expected2.add(df)
            else:
                expected = expected2 + df
        else:
            expected = expected2 + df

        if should_warn(df2.index, ser.index, df.index):
            with tm.assert_produces_warning(RuntimeWarning):
                res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
        else:
            res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
        assert res.shape == expected.shape
        tm.assert_frame_equal(res, expected)

    def test_performance_warning_for_poor_alignment(self, engine, parser):
        """Badly mismatched alignment warns (PerformanceWarning) on numexpr only."""
        df = DataFrame(np.random.default_rng(2).standard_normal((1000, 10)))
        s = Series(np.random.default_rng(2).standard_normal(10000))
        if engine == "numexpr":
            seen = PerformanceWarning
        else:
            seen = False

        with tm.assert_produces_warning(seen):
            pd.eval("df + s", engine=engine, parser=parser)

        # same-size alignment: never warns
        s = Series(np.random.default_rng(2).standard_normal(1000))
        with tm.assert_produces_warning(False):
            pd.eval("df + s", engine=engine, parser=parser)

        df = DataFrame(np.random.default_rng(2).standard_normal((10, 10000)))
        s = Series(np.random.default_rng(2).standard_normal(10000))
        with tm.assert_produces_warning(False):
            pd.eval("df + s", engine=engine, parser=parser)

        df = DataFrame(np.random.default_rng(2).standard_normal((10, 10)))
        s = Series(np.random.default_rng(2).standard_normal(10000))

        is_python_engine = engine == "python"

        if not is_python_engine:
            wrn = PerformanceWarning
        else:
            wrn = False

        with tm.assert_produces_warning(wrn) as w:
            pd.eval("df + s", engine=engine, parser=parser)

        # verify the exact warning text, including the computed magnitude
        if not is_python_engine:
            assert len(w) == 1
            msg = str(w[0].message)
            logged = np.log10(s.size - df.shape[1])
            expected = (
                f"Alignment difference on axis 1 is larger "
                f"than an order of magnitude on term 'df', "
                f"by more than {logged:.4g}; performance may suffer."
            )
            assert msg == expected
# ------------------------------------
|
| 1063 |
+
# Slightly more complex ops
|
| 1064 |
+
|
| 1065 |
+
|
| 1066 |
+
class TestOperations:
|
| 1067 |
+
def eval(self, *args, **kwargs):
    """Proxy for pd.eval that bumps ``level`` so names resolve in the caller's frame."""
    kwargs["level"] = kwargs.get("level", 0) + 1
    return pd.eval(*args, **kwargs)
def test_simple_arith_ops(self, engine, parser):
    """Every arithmetic/comparison op evaluates correctly on scalar operands."""
    exclude_arith = []
    if parser == "python":
        # 'in'/'not in' are excluded under the python parser
        exclude_arith = ["in", "not in"]

    arith_ops = [
        op
        for op in expr.ARITH_OPS_SYMS + expr.CMP_OPS_SYMS
        if op not in exclude_arith
    ]

    # floordiv is covered elsewhere
    ops = (op for op in arith_ops if op != "//")

    for op in ops:
        ex = f"1 {op} 1"
        ex2 = f"x {op} 1"
        ex3 = f"1 {op} (x + 1)"

        if op in ("in", "not in"):
            # membership against a scalar rhs is a TypeError
            msg = "argument of type 'int' is not iterable"
            with pytest.raises(TypeError, match=msg):
                pd.eval(ex, engine=engine, parser=parser)
        else:
            expec = _eval_single_bin(1, op, 1, engine)
            x = self.eval(ex, engine=engine, parser=parser)
            assert x == expec

            expec = _eval_single_bin(x, op, 1, engine)
            y = self.eval(ex2, local_dict={"x": x}, engine=engine, parser=parser)
            assert y == expec

            expec = _eval_single_bin(1, op, x + 1, engine)
            y = self.eval(ex3, local_dict={"x": x}, engine=engine, parser=parser)
            assert y == expec
@pytest.mark.parametrize("rhs", [True, False])
@pytest.mark.parametrize("lhs", [True, False])
@pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
def test_simple_bool_ops(self, rhs, lhs, op):
    """Boolean ops on literal True/False agree with Python's own eval.

    NOTE(review): ``parser`` below is neither a parameter nor a local, so it
    must resolve at module/global scope — looks like a missing ``parser``
    fixture argument; confirm.
    """
    ex = f"{lhs} {op} {rhs}"

    if parser == "python" and op in ["and", "or"]:
        msg = "'BoolOp' nodes are not implemented"
        with pytest.raises(NotImplementedError, match=msg):
            self.eval(ex)
        return

    res = self.eval(ex)
    exp = eval(ex)
    assert res == exp
@pytest.mark.parametrize("rhs", [True, False])
@pytest.mark.parametrize("lhs", [True, False])
@pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
def test_bool_ops_with_constants(self, rhs, lhs, op):
    """Boolean ops on boolean constants agree with Python's own eval.

    NOTE(review): ``parser`` below is neither a parameter nor a local, so it
    must resolve at module/global scope — looks like a missing ``parser``
    fixture argument; confirm (same pattern as test_simple_bool_ops).
    """
    ex = f"{lhs} {op} {rhs}"

    if parser == "python" and op in ["and", "or"]:
        msg = "'BoolOp' nodes are not implemented"
        with pytest.raises(NotImplementedError, match=msg):
            self.eval(ex)
        return

    res = self.eval(ex)
    exp = eval(ex)
    assert res == exp
def test_4d_ndarray_fails(self):
    """eval rejects ndarrays with more than two dimensions."""
    x = np.random.default_rng(2).standard_normal((3, 4, 5, 6))
    y = Series(np.random.default_rng(2).standard_normal(10))
    msg = "N-dimensional objects, where N > 2, are not supported with eval"
    with pytest.raises(NotImplementedError, match=msg):
        self.eval("x + y", local_dict={"x": x, "y": y})
|
| 1144 |
+
|
| 1145 |
+
def test_constant(self):
    """A bare integer literal evaluates to itself."""
    assert self.eval("1") == 1
|
| 1148 |
+
|
| 1149 |
+
def test_single_variable(self):
    """Evaluating a lone name returns the referenced object unchanged."""
    df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
    tm.assert_frame_equal(df, self.eval("df", local_dict={"df": df}))
|
| 1153 |
+
|
| 1154 |
+
def test_failing_subscript_with_name_error(self):
    """Subscripting with an undefined name raises NameError."""
    df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))  # noqa: F841
    with pytest.raises(NameError, match="name 'x' is not defined"):
        self.eval("df[x > 2] > 2")
|
| 1158 |
+
|
| 1159 |
+
def test_lhs_expression_subscript(self):
    """A parenthesized expression can itself be subscripted in eval."""
    df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
    result = self.eval("(df + 1)[df > 2]", local_dict={"df": df})
    tm.assert_frame_equal(result, (df + 1)[df > 2])
|
| 1164 |
+
|
| 1165 |
+
def test_attr_expression(self):
    """Attribute access (df.a) works in expressions, incl. subscripts."""
    df = DataFrame(
        np.random.default_rng(2).standard_normal((5, 3)), columns=list("abc")
    )
    cases = [
        ("df.a < df.b", df.a < df.b),
        ("df.a + df.b + df.c", df.a + df.b + df.c),
        ("df.a + df.b + df.c[df.b < 0]", df.a + df.b + df.c[df.b < 0]),
    ]
    for ex, expected in cases:
        tm.assert_series_equal(expected, self.eval(ex, local_dict={"df": df}))
|
| 1179 |
+
|
| 1180 |
+
def test_assignment_fails(self):
    """Assignment without a target object raises ValueError."""
    df = DataFrame(
        np.random.default_rng(2).standard_normal((5, 3)), columns=list("abc")
    )
    df2 = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
    msg = "cannot assign without a target object"
    with pytest.raises(ValueError, match=msg):
        self.eval("df = df2", local_dict={"df": df, "df2": df2})
|
| 1189 |
+
|
| 1190 |
+
def test_assignment_column_multiple_raise(self):
    """Two space-separated assignment targets are a plain syntax error."""
    df = DataFrame(
        np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
    )
    # multiple assignees
    with pytest.raises(SyntaxError, match="invalid syntax"):
        df.eval("d c = a + b")
|
| 1197 |
+
|
| 1198 |
+
def test_assignment_column_invalid_assign(self):
    """Tuple-unpacking assignment targets are rejected."""
    df = DataFrame(
        np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
    )
    # invalid assignees
    msg = "left hand side of an assignment must be a single name"
    with pytest.raises(SyntaxError, match=msg):
        df.eval("d,c = a + b")
|
| 1206 |
+
|
| 1207 |
+
def test_assignment_column_invalid_assign_function_call(self):
    """A function call on the left-hand side of '=' is rejected."""
    df = DataFrame(
        np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
    )
    msg = "cannot assign to function call"
    with pytest.raises(SyntaxError, match=msg):
        df.eval('Timestamp("20131001") = a + b')
|
| 1214 |
+
|
| 1215 |
+
def test_assignment_single_assign_existing(self):
|
| 1216 |
+
df = DataFrame(
|
| 1217 |
+
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
|
| 1218 |
+
)
|
| 1219 |
+
# single assignment - existing variable
|
| 1220 |
+
expected = df.copy()
|
| 1221 |
+
expected["a"] = expected["a"] + expected["b"]
|
| 1222 |
+
df.eval("a = a + b", inplace=True)
|
| 1223 |
+
tm.assert_frame_equal(df, expected)
|
| 1224 |
+
|
| 1225 |
+
def test_assignment_single_assign_new(self):
|
| 1226 |
+
df = DataFrame(
|
| 1227 |
+
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
|
| 1228 |
+
)
|
| 1229 |
+
# single assignment - new variable
|
| 1230 |
+
expected = df.copy()
|
| 1231 |
+
expected["c"] = expected["a"] + expected["b"]
|
| 1232 |
+
df.eval("c = a + b", inplace=True)
|
| 1233 |
+
tm.assert_frame_equal(df, expected)
|
| 1234 |
+
|
| 1235 |
+
def test_assignment_single_assign_local_overlap(self):
|
| 1236 |
+
df = DataFrame(
|
| 1237 |
+
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
|
| 1238 |
+
)
|
| 1239 |
+
df = df.copy()
|
| 1240 |
+
a = 1 # noqa: F841
|
| 1241 |
+
df.eval("a = 1 + b", inplace=True)
|
| 1242 |
+
|
| 1243 |
+
expected = df.copy()
|
| 1244 |
+
expected["a"] = 1 + expected["b"]
|
| 1245 |
+
tm.assert_frame_equal(df, expected)
|
| 1246 |
+
|
| 1247 |
+
def test_assignment_single_assign_name(self):
|
| 1248 |
+
df = DataFrame(
|
| 1249 |
+
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
|
| 1250 |
+
)
|
| 1251 |
+
|
| 1252 |
+
a = 1 # noqa: F841
|
| 1253 |
+
old_a = df.a.copy()
|
| 1254 |
+
df.eval("a = a + b", inplace=True)
|
| 1255 |
+
result = old_a + df.b
|
| 1256 |
+
tm.assert_series_equal(result, df.a, check_names=False)
|
| 1257 |
+
assert result.name is None
|
| 1258 |
+
|
| 1259 |
+
def test_assignment_multiple_raises(self):
    """Chained assignment (c = a = b) is rejected."""
    df = DataFrame(
        np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
    )
    # a single assignment is fine ...
    df.eval("c = a + b", inplace=True)
    # ... but chained targets are not
    msg = "can only assign a single expression"
    with pytest.raises(SyntaxError, match=msg):
        df.eval("c = a = b")
|
| 1268 |
+
|
| 1269 |
+
def test_assignment_explicit(self):
    """An explicit `target=` receives the assigned column."""
    df = DataFrame(
        np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
    )
    # explicit targets
    self.eval("c = df.a + df.b", local_dict={"df": df}, target=df, inplace=True)
    expected = df.copy()
    expected["c"] = expected["a"] + expected["b"]
    tm.assert_frame_equal(df, expected)
|
| 1278 |
+
|
| 1279 |
+
def test_column_in(self):
|
| 1280 |
+
# GH 11235
|
| 1281 |
+
df = DataFrame({"a": [11], "b": [-32]})
|
| 1282 |
+
result = df.eval("a in [11, -32]")
|
| 1283 |
+
expected = Series([True])
|
| 1284 |
+
# TODO: 2022-01-29: Name check failed with numexpr 2.7.3 in CI
|
| 1285 |
+
# but cannot reproduce locally
|
| 1286 |
+
tm.assert_series_equal(result, expected, check_names=False)
|
| 1287 |
+
|
| 1288 |
+
@pytest.mark.xfail(reason="Unknown: Omitted test_ in name prior.")
def test_assignment_not_inplace(self):
    # see gh-9297
    df = DataFrame(
        np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
    )

    actual = df.eval("c = a + b", inplace=False)
    assert actual is not None

    expected = df.copy()
    expected["c"] = expected["a"] + expected["b"]
    # NOTE(review): this compares the unmodified `df` (no "c" column) against
    # `expected` — presumably why the test is xfailed; comparing `actual`
    # instead looks intended. Confirm against gh-9297 before changing.
    tm.assert_frame_equal(df, expected)
|
| 1301 |
+
|
| 1302 |
+
def test_multi_line_expression(self, warn_copy_on_write):
    """GH 11149: multi-line eval applies assignments in order, in place."""
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    expected = df.copy()

    expected["c"] = expected["a"] + expected["b"]
    expected["d"] = expected["c"] + expected["b"]
    answer = df.eval(
        """
c = a + b
d = c + b""",
        inplace=True,
    )
    tm.assert_frame_equal(expected, df)
    assert answer is None

    expected["a"] = expected["a"] - 1
    expected["e"] = expected["a"] + 2
    answer = df.eval(
        """
a = a - 1
e = a + 2""",
        inplace=True,
    )
    tm.assert_frame_equal(expected, df)
    assert answer is None

    # multi-line not valid if not all assignments
    msg = "Multi-line expressions are only valid if all expressions contain"
    with pytest.raises(ValueError, match=msg):
        df.eval(
            """
a = b + 2
b - 2""",
            inplace=False,
        )
|
| 1338 |
+
|
| 1339 |
+
def test_multi_line_expression_not_inplace(self):
|
| 1340 |
+
# GH 11149
|
| 1341 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
| 1342 |
+
expected = df.copy()
|
| 1343 |
+
|
| 1344 |
+
expected["c"] = expected["a"] + expected["b"]
|
| 1345 |
+
expected["d"] = expected["c"] + expected["b"]
|
| 1346 |
+
df = df.eval(
|
| 1347 |
+
"""
|
| 1348 |
+
c = a + b
|
| 1349 |
+
d = c + b""",
|
| 1350 |
+
inplace=False,
|
| 1351 |
+
)
|
| 1352 |
+
tm.assert_frame_equal(expected, df)
|
| 1353 |
+
|
| 1354 |
+
expected["a"] = expected["a"] - 1
|
| 1355 |
+
expected["e"] = expected["a"] + 2
|
| 1356 |
+
df = df.eval(
|
| 1357 |
+
"""
|
| 1358 |
+
a = a - 1
|
| 1359 |
+
e = a + 2""",
|
| 1360 |
+
inplace=False,
|
| 1361 |
+
)
|
| 1362 |
+
tm.assert_frame_equal(expected, df)
|
| 1363 |
+
|
| 1364 |
+
def test_multi_line_expression_local_variable(self):
|
| 1365 |
+
# GH 15342
|
| 1366 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
| 1367 |
+
expected = df.copy()
|
| 1368 |
+
|
| 1369 |
+
local_var = 7
|
| 1370 |
+
expected["c"] = expected["a"] * local_var
|
| 1371 |
+
expected["d"] = expected["c"] + local_var
|
| 1372 |
+
answer = df.eval(
|
| 1373 |
+
"""
|
| 1374 |
+
c = a * @local_var
|
| 1375 |
+
d = c + @local_var
|
| 1376 |
+
""",
|
| 1377 |
+
inplace=True,
|
| 1378 |
+
)
|
| 1379 |
+
tm.assert_frame_equal(expected, df)
|
| 1380 |
+
assert answer is None
|
| 1381 |
+
|
| 1382 |
+
def test_multi_line_expression_callable_local_variable(self):
|
| 1383 |
+
# 26426
|
| 1384 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
| 1385 |
+
|
| 1386 |
+
def local_func(a, b):
|
| 1387 |
+
return b
|
| 1388 |
+
|
| 1389 |
+
expected = df.copy()
|
| 1390 |
+
expected["c"] = expected["a"] * local_func(1, 7)
|
| 1391 |
+
expected["d"] = expected["c"] + local_func(1, 7)
|
| 1392 |
+
answer = df.eval(
|
| 1393 |
+
"""
|
| 1394 |
+
c = a * @local_func(1, 7)
|
| 1395 |
+
d = c + @local_func(1, 7)
|
| 1396 |
+
""",
|
| 1397 |
+
inplace=True,
|
| 1398 |
+
)
|
| 1399 |
+
tm.assert_frame_equal(expected, df)
|
| 1400 |
+
assert answer is None
|
| 1401 |
+
|
| 1402 |
+
def test_multi_line_expression_callable_local_variable_with_kwargs(self):
|
| 1403 |
+
# 26426
|
| 1404 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
| 1405 |
+
|
| 1406 |
+
def local_func(a, b):
|
| 1407 |
+
return b
|
| 1408 |
+
|
| 1409 |
+
expected = df.copy()
|
| 1410 |
+
expected["c"] = expected["a"] * local_func(b=7, a=1)
|
| 1411 |
+
expected["d"] = expected["c"] + local_func(b=7, a=1)
|
| 1412 |
+
answer = df.eval(
|
| 1413 |
+
"""
|
| 1414 |
+
c = a * @local_func(b=7, a=1)
|
| 1415 |
+
d = c + @local_func(b=7, a=1)
|
| 1416 |
+
""",
|
| 1417 |
+
inplace=True,
|
| 1418 |
+
)
|
| 1419 |
+
tm.assert_frame_equal(expected, df)
|
| 1420 |
+
assert answer is None
|
| 1421 |
+
|
| 1422 |
+
def test_assignment_in_query(self):
    """GH 8664: assignment inside query() raises and leaves df untouched."""
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    df_orig = df.copy()
    msg = "cannot assign without a target object"
    with pytest.raises(ValueError, match=msg):
        df.query("a = 1")
    tm.assert_frame_equal(df, df_orig)
|
| 1430 |
+
|
| 1431 |
+
def test_query_inplace(self):
    """GH 11149: query(inplace=True) filters in place; dict targets work."""
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    expected = df.copy()
    expected = expected[expected["a"] == 2]
    df.query("a == 2", inplace=True)
    tm.assert_frame_equal(expected, df)

    # a plain dict is also a valid assignment target
    df = {}
    expected = {"a": 3}

    self.eval("a = 1 + 2", target=df, inplace=True)
    tm.assert_dict_equal(df, expected)
|
| 1444 |
+
|
| 1445 |
+
@pytest.mark.parametrize("invalid_target", [1, "cat", [1, 2], np.array([]), (1, 3)])
|
| 1446 |
+
def test_cannot_item_assign(self, invalid_target):
|
| 1447 |
+
msg = "Cannot assign expression output to target"
|
| 1448 |
+
expression = "a = 1 + 2"
|
| 1449 |
+
|
| 1450 |
+
with pytest.raises(ValueError, match=msg):
|
| 1451 |
+
self.eval(expression, target=invalid_target, inplace=True)
|
| 1452 |
+
|
| 1453 |
+
if hasattr(invalid_target, "copy"):
|
| 1454 |
+
with pytest.raises(ValueError, match=msg):
|
| 1455 |
+
self.eval(expression, target=invalid_target, inplace=False)
|
| 1456 |
+
|
| 1457 |
+
@pytest.mark.parametrize("invalid_target", [1, "cat", (1, 3)])
|
| 1458 |
+
def test_cannot_copy_item(self, invalid_target):
|
| 1459 |
+
msg = "Cannot return a copy of the target"
|
| 1460 |
+
expression = "a = 1 + 2"
|
| 1461 |
+
|
| 1462 |
+
with pytest.raises(ValueError, match=msg):
|
| 1463 |
+
self.eval(expression, target=invalid_target, inplace=False)
|
| 1464 |
+
|
| 1465 |
+
@pytest.mark.parametrize("target", [1, "cat", [1, 2], np.array([]), (1, 3), {1: 2}])
|
| 1466 |
+
def test_inplace_no_assignment(self, target):
|
| 1467 |
+
expression = "1 + 2"
|
| 1468 |
+
|
| 1469 |
+
assert self.eval(expression, target=target, inplace=False) == 3
|
| 1470 |
+
|
| 1471 |
+
msg = "Cannot operate inplace if there is no assignment"
|
| 1472 |
+
with pytest.raises(ValueError, match=msg):
|
| 1473 |
+
self.eval(expression, target=target, inplace=True)
|
| 1474 |
+
|
| 1475 |
+
def test_basic_period_index_boolean_expression(self):
    """Comparisons work on frames with a PeriodIndex column axis."""
    df = DataFrame(
        np.random.default_rng(2).standard_normal((2, 2)),
        columns=period_range("2020-01-01", freq="D", periods=2),
    )
    e = df < 2
    r = self.eval("df < 2", local_dict={"df": df})
    x = df < 2

    tm.assert_frame_equal(r, e)
    tm.assert_frame_equal(x, e)
|
| 1486 |
+
|
| 1487 |
+
def test_basic_period_index_subscript_expression(self):
    """Boolean subscripting works with a PeriodIndex column axis."""
    df = DataFrame(
        np.random.default_rng(2).standard_normal((2, 2)),
        columns=period_range("2020-01-01", freq="D", periods=2),
    )
    r = self.eval("df[df < 2 + 3]", local_dict={"df": df})
    e = df[df < 2 + 3]
    tm.assert_frame_equal(r, e)
|
| 1495 |
+
|
| 1496 |
+
def test_nested_period_index_subscript_expression(self):
    """Nested subscripts plus arithmetic with a PeriodIndex column axis."""
    df = DataFrame(
        np.random.default_rng(2).standard_normal((2, 2)),
        columns=period_range("2020-01-01", freq="D", periods=2),
    )
    r = self.eval("df[df[df < 2] < 2] + df * 2", local_dict={"df": df})
    e = df[df[df < 2] < 2] + df * 2
    tm.assert_frame_equal(r, e)
|
| 1504 |
+
|
| 1505 |
+
def test_date_boolean(self, engine, parser):
    """A datetime column compares against a date-like integer literal."""
    df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
    df["dates1"] = date_range("1/1/2012", periods=5)
    res = self.eval(
        "df.dates1 < 20130101",
        local_dict={"df": df},
        engine=engine,
        parser=parser,
    )
    expec = df.dates1 < "20130101"
    tm.assert_series_equal(res, expec, check_names=False)
|
| 1516 |
+
|
| 1517 |
+
def test_simple_in_ops(self, engine, parser):
|
| 1518 |
+
if parser != "python":
|
| 1519 |
+
res = pd.eval("1 in [1, 2]", engine=engine, parser=parser)
|
| 1520 |
+
assert res
|
| 1521 |
+
|
| 1522 |
+
res = pd.eval("2 in (1, 2)", engine=engine, parser=parser)
|
| 1523 |
+
assert res
|
| 1524 |
+
|
| 1525 |
+
res = pd.eval("3 in (1, 2)", engine=engine, parser=parser)
|
| 1526 |
+
assert not res
|
| 1527 |
+
|
| 1528 |
+
res = pd.eval("3 not in (1, 2)", engine=engine, parser=parser)
|
| 1529 |
+
assert res
|
| 1530 |
+
|
| 1531 |
+
res = pd.eval("[3] not in (1, 2)", engine=engine, parser=parser)
|
| 1532 |
+
assert res
|
| 1533 |
+
|
| 1534 |
+
res = pd.eval("[3] in ([3], 2)", engine=engine, parser=parser)
|
| 1535 |
+
assert res
|
| 1536 |
+
|
| 1537 |
+
res = pd.eval("[[3]] in [[[3]], 2]", engine=engine, parser=parser)
|
| 1538 |
+
assert res
|
| 1539 |
+
|
| 1540 |
+
res = pd.eval("(3,) in [(3,), 2]", engine=engine, parser=parser)
|
| 1541 |
+
assert res
|
| 1542 |
+
|
| 1543 |
+
res = pd.eval("(3,) not in [(3,), 2]", engine=engine, parser=parser)
|
| 1544 |
+
assert not res
|
| 1545 |
+
|
| 1546 |
+
res = pd.eval("[(3,)] in [[(3,)], 2]", engine=engine, parser=parser)
|
| 1547 |
+
assert res
|
| 1548 |
+
else:
|
| 1549 |
+
msg = "'In' nodes are not implemented"
|
| 1550 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 1551 |
+
pd.eval("1 in [1, 2]", engine=engine, parser=parser)
|
| 1552 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 1553 |
+
pd.eval("2 in (1, 2)", engine=engine, parser=parser)
|
| 1554 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 1555 |
+
pd.eval("3 in (1, 2)", engine=engine, parser=parser)
|
| 1556 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 1557 |
+
pd.eval("[(3,)] in (1, 2, [(3,)])", engine=engine, parser=parser)
|
| 1558 |
+
msg = "'NotIn' nodes are not implemented"
|
| 1559 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 1560 |
+
pd.eval("3 not in (1, 2)", engine=engine, parser=parser)
|
| 1561 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 1562 |
+
pd.eval("[3] not in (1, 2, [[3]])", engine=engine, parser=parser)
|
| 1563 |
+
|
| 1564 |
+
def test_check_many_exprs(self, engine, parser):
|
| 1565 |
+
a = 1 # noqa: F841
|
| 1566 |
+
expr = " * ".join("a" * 33)
|
| 1567 |
+
expected = 1
|
| 1568 |
+
res = pd.eval(expr, engine=engine, parser=parser)
|
| 1569 |
+
assert res == expected
|
| 1570 |
+
|
| 1571 |
+
@pytest.mark.parametrize(
    "expr",
    [
        "df > 2 and df > 3",
        "df > 2 or df > 3",
        "not df > 2",
    ],
)
def test_fails_and_or_not(self, expr, engine, parser):
    """and/or/not on frames: NotImplementedError for the python parser."""
    df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
    if parser == "python":
        msg = "'BoolOp' nodes are not implemented"
        if "not" in expr:
            msg = "'Not' nodes are not implemented"

        with pytest.raises(NotImplementedError, match=msg):
            pd.eval(
                expr,
                local_dict={"df": df},
                parser=parser,
                engine=engine,
            )
    else:
        # smoke-test, should not raise
        pd.eval(
            expr,
            local_dict={"df": df},
            parser=parser,
            engine=engine,
        )
|
| 1601 |
+
|
| 1602 |
+
@pytest.mark.parametrize("char", ["|", "&"])
|
| 1603 |
+
def test_fails_ampersand_pipe(self, char, engine, parser):
|
| 1604 |
+
df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) # noqa: F841
|
| 1605 |
+
ex = f"(df + 2)[df > 1] > 0 {char} (df > 0)"
|
| 1606 |
+
if parser == "python":
|
| 1607 |
+
msg = "cannot evaluate scalar only bool ops"
|
| 1608 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 1609 |
+
pd.eval(ex, parser=parser, engine=engine)
|
| 1610 |
+
else:
|
| 1611 |
+
# smoke-test, should not raise
|
| 1612 |
+
pd.eval(ex, parser=parser, engine=engine)
|
| 1613 |
+
|
| 1614 |
+
|
| 1615 |
+
class TestMath:
    """Math functions (sin, arctan2, ...) inside eval expressions."""

    def eval(self, *args, **kwargs):
        # Bump the stack level so name lookup happens in the caller's frame.
        kwargs["level"] = kwargs.pop("level", 0) + 1
        return pd.eval(*args, **kwargs)

    @pytest.mark.skipif(
        not NUMEXPR_INSTALLED, reason="Unary ops only implemented for numexpr"
    )
    @pytest.mark.parametrize("fn", _unary_math_ops)
    def test_unary_functions(self, fn):
        df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)})
        a = df.a

        expr = f"{fn}(a)"
        got = self.eval(expr)
        with np.errstate(all="ignore"):
            expect = getattr(np, fn)(a)
        tm.assert_series_equal(got, expect, check_names=False)

    @pytest.mark.parametrize("fn", _binary_math_ops)
    def test_binary_functions(self, fn):
        df = DataFrame(
            {
                "a": np.random.default_rng(2).standard_normal(10),
                "b": np.random.default_rng(2).standard_normal(10),
            }
        )
        a = df.a
        b = df.b

        expr = f"{fn}(a, b)"
        got = self.eval(expr)
        with np.errstate(all="ignore"):
            expect = getattr(np, fn)(a, b)
        tm.assert_almost_equal(got, expect, check_names=False)

    def test_df_use_case(self, engine, parser):
        df = DataFrame(
            {
                "a": np.random.default_rng(2).standard_normal(10),
                "b": np.random.default_rng(2).standard_normal(10),
            }
        )
        df.eval(
            "e = arctan2(sin(a), b)",
            engine=engine,
            parser=parser,
            inplace=True,
        )
        got = df.e
        expect = np.arctan2(np.sin(df.a), df.b)
        tm.assert_series_equal(got, expect, check_names=False)

    def test_df_arithmetic_subexpression(self, engine, parser):
        df = DataFrame(
            {
                "a": np.random.default_rng(2).standard_normal(10),
                "b": np.random.default_rng(2).standard_normal(10),
            }
        )
        df.eval("e = sin(a + b)", engine=engine, parser=parser, inplace=True)
        got = df.e
        expect = np.sin(df.a + df.b)
        tm.assert_series_equal(got, expect, check_names=False)

    @pytest.mark.parametrize(
        "dtype, expect_dtype",
        [
            (np.int32, np.float64),
            (np.int64, np.float64),
            (np.float32, np.float32),
            (np.float64, np.float64),
            pytest.param(np.complex128, np.complex128, marks=td.skip_if_windows),
        ],
    )
    def test_result_types(self, dtype, expect_dtype, engine, parser):
        # xref https://github.com/pandas-dev/pandas/issues/12293
        # this fails on Windows, apparently a floating point precision issue

        # Did not test complex64 because DataFrame is converting it to
        # complex128. Due to https://github.com/pandas-dev/pandas/issues/10952
        df = DataFrame(
            {"a": np.random.default_rng(2).standard_normal(10).astype(dtype)}
        )
        assert df.a.dtype == dtype
        df.eval("b = sin(a)", engine=engine, parser=parser, inplace=True)
        got = df.b
        expect = np.sin(df.a)
        assert expect.dtype == got.dtype
        assert expect_dtype == got.dtype
        tm.assert_series_equal(got, expect, check_names=False)

    def test_undefined_func(self, engine, parser):
        df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)})
        msg = '"mysin" is not a supported function'

        with pytest.raises(ValueError, match=msg):
            df.eval("mysin(a)", engine=engine, parser=parser)

    def test_keyword_arg(self, engine, parser):
        df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)})
        msg = 'Function "sin" does not support keyword arguments'

        with pytest.raises(TypeError, match=msg):
            df.eval("sin(x=a)", engine=engine, parser=parser)
|
| 1720 |
+
|
| 1721 |
+
|
| 1722 |
+
_var_s = np.random.default_rng(2).standard_normal(10)
|
| 1723 |
+
|
| 1724 |
+
|
| 1725 |
+
class TestScope:
|
| 1726 |
+
def test_global_scope(self, engine, parser):
|
| 1727 |
+
e = "_var_s * 2"
|
| 1728 |
+
tm.assert_numpy_array_equal(
|
| 1729 |
+
_var_s * 2, pd.eval(e, engine=engine, parser=parser)
|
| 1730 |
+
)
|
| 1731 |
+
|
| 1732 |
+
def test_no_new_locals(self, engine, parser):
|
| 1733 |
+
x = 1
|
| 1734 |
+
lcls = locals().copy()
|
| 1735 |
+
pd.eval("x + 1", local_dict=lcls, engine=engine, parser=parser)
|
| 1736 |
+
lcls2 = locals().copy()
|
| 1737 |
+
lcls2.pop("lcls")
|
| 1738 |
+
assert lcls == lcls2
|
| 1739 |
+
|
| 1740 |
+
def test_no_new_globals(self, engine, parser):
|
| 1741 |
+
x = 1 # noqa: F841
|
| 1742 |
+
gbls = globals().copy()
|
| 1743 |
+
pd.eval("x + 1", engine=engine, parser=parser)
|
| 1744 |
+
gbls2 = globals().copy()
|
| 1745 |
+
assert gbls == gbls2
|
| 1746 |
+
|
| 1747 |
+
def test_empty_locals(self, engine, parser):
|
| 1748 |
+
# GH 47084
|
| 1749 |
+
x = 1 # noqa: F841
|
| 1750 |
+
msg = "name 'x' is not defined"
|
| 1751 |
+
with pytest.raises(UndefinedVariableError, match=msg):
|
| 1752 |
+
pd.eval("x + 1", engine=engine, parser=parser, local_dict={})
|
| 1753 |
+
|
| 1754 |
+
def test_empty_globals(self, engine, parser):
|
| 1755 |
+
# GH 47084
|
| 1756 |
+
msg = "name '_var_s' is not defined"
|
| 1757 |
+
e = "_var_s * 2"
|
| 1758 |
+
with pytest.raises(UndefinedVariableError, match=msg):
|
| 1759 |
+
pd.eval(e, engine=engine, parser=parser, global_dict={})
|
| 1760 |
+
|
| 1761 |
+
|
| 1762 |
+
@td.skip_if_no("numexpr")
|
| 1763 |
+
def test_invalid_engine():
|
| 1764 |
+
msg = "Invalid engine 'asdf' passed"
|
| 1765 |
+
with pytest.raises(KeyError, match=msg):
|
| 1766 |
+
pd.eval("x + y", local_dict={"x": 1, "y": 2}, engine="asdf")
|
| 1767 |
+
|
| 1768 |
+
|
| 1769 |
+
@td.skip_if_no("numexpr")
|
| 1770 |
+
@pytest.mark.parametrize(
|
| 1771 |
+
("use_numexpr", "expected"),
|
| 1772 |
+
(
|
| 1773 |
+
(True, "numexpr"),
|
| 1774 |
+
(False, "python"),
|
| 1775 |
+
),
|
| 1776 |
+
)
|
| 1777 |
+
def test_numexpr_option_respected(use_numexpr, expected):
|
| 1778 |
+
# GH 32556
|
| 1779 |
+
from pandas.core.computation.eval import _check_engine
|
| 1780 |
+
|
| 1781 |
+
with pd.option_context("compute.use_numexpr", use_numexpr):
|
| 1782 |
+
result = _check_engine(None)
|
| 1783 |
+
assert result == expected
|
| 1784 |
+
|
| 1785 |
+
|
| 1786 |
+
@td.skip_if_no("numexpr")
|
| 1787 |
+
def test_numexpr_option_incompatible_op():
|
| 1788 |
+
# GH 32556
|
| 1789 |
+
with pd.option_context("compute.use_numexpr", False):
|
| 1790 |
+
df = DataFrame(
|
| 1791 |
+
{"A": [True, False, True, False, None, None], "B": [1, 2, 3, 4, 5, 6]}
|
| 1792 |
+
)
|
| 1793 |
+
result = df.query("A.isnull()")
|
| 1794 |
+
expected = DataFrame({"A": [None, None], "B": [5, 6]}, index=[4, 5])
|
| 1795 |
+
tm.assert_frame_equal(result, expected)
|
| 1796 |
+
|
| 1797 |
+
|
| 1798 |
+
@td.skip_if_no("numexpr")
|
| 1799 |
+
def test_invalid_parser():
|
| 1800 |
+
msg = "Invalid parser 'asdf' passed"
|
| 1801 |
+
with pytest.raises(KeyError, match=msg):
|
| 1802 |
+
pd.eval("x + y", local_dict={"x": 1, "y": 2}, parser="asdf")
|
| 1803 |
+
|
| 1804 |
+
|
| 1805 |
+
_parsers: dict[str, type[BaseExprVisitor]] = {
|
| 1806 |
+
"python": PythonExprVisitor,
|
| 1807 |
+
"pytables": pytables.PyTablesExprVisitor,
|
| 1808 |
+
"pandas": PandasExprVisitor,
|
| 1809 |
+
}
|
| 1810 |
+
|
| 1811 |
+
|
| 1812 |
+
@pytest.mark.parametrize("engine", ENGINES)
|
| 1813 |
+
@pytest.mark.parametrize("parser", _parsers)
|
| 1814 |
+
def test_disallowed_nodes(engine, parser):
|
| 1815 |
+
VisitorClass = _parsers[parser]
|
| 1816 |
+
inst = VisitorClass("x + 1", engine, parser)
|
| 1817 |
+
|
| 1818 |
+
for ops in VisitorClass.unsupported_nodes:
|
| 1819 |
+
msg = "nodes are not implemented"
|
| 1820 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 1821 |
+
getattr(inst, ops)()
|
| 1822 |
+
|
| 1823 |
+
|
| 1824 |
+
def test_syntax_error_exprs(engine, parser):
    """A dangling operator raises SyntaxError."""
    with pytest.raises(SyntaxError, match="invalid syntax"):
        pd.eval("s +", engine=engine, parser=parser)
|
| 1828 |
+
|
| 1829 |
+
|
| 1830 |
+
def test_name_error_exprs(engine, parser):
    """An undefined name raises NameError."""
    e = "s + t"
    msg = "name 's' is not defined"
    with pytest.raises(NameError, match=msg):
        pd.eval(e, engine=engine, parser=parser)
|
| 1835 |
+
|
| 1836 |
+
|
| 1837 |
+
@pytest.mark.parametrize("express", ["a + @b", "@a + b", "@a + @b"])
|
| 1838 |
+
def test_invalid_local_variable_reference(engine, parser, express):
|
| 1839 |
+
a, b = 1, 2 # noqa: F841
|
| 1840 |
+
|
| 1841 |
+
if parser != "pandas":
|
| 1842 |
+
with pytest.raises(SyntaxError, match="The '@' prefix is only"):
|
| 1843 |
+
pd.eval(express, engine=engine, parser=parser)
|
| 1844 |
+
else:
|
| 1845 |
+
with pytest.raises(SyntaxError, match="The '@' prefix is not"):
|
| 1846 |
+
pd.eval(express, engine=engine, parser=parser)
|
| 1847 |
+
|
| 1848 |
+
|
| 1849 |
+
def test_numexpr_builtin_raises(engine, parser):
|
| 1850 |
+
sin, dotted_line = 1, 2
|
| 1851 |
+
if engine == "numexpr":
|
| 1852 |
+
msg = "Variables in expression .+"
|
| 1853 |
+
with pytest.raises(NumExprClobberingError, match=msg):
|
| 1854 |
+
pd.eval("sin + dotted_line", engine=engine, parser=parser)
|
| 1855 |
+
else:
|
| 1856 |
+
res = pd.eval("sin + dotted_line", engine=engine, parser=parser)
|
| 1857 |
+
assert res == sin + dotted_line
|
| 1858 |
+
|
| 1859 |
+
|
| 1860 |
+
def test_bad_resolver_raises(engine, parser):
    """Resolvers without a mapping interface raise TypeError."""
    cannot_resolve = 42, 3.0
    with pytest.raises(TypeError, match="Resolver of type .+"):
        pd.eval("1 + 2", resolvers=cannot_resolve, engine=engine, parser=parser)
|
| 1864 |
+
|
| 1865 |
+
|
| 1866 |
+
def test_empty_string_raises(engine, parser):
    """GH 13139: an empty expression string is a ValueError."""
    with pytest.raises(ValueError, match="expr cannot be an empty string"):
        pd.eval("", engine=engine, parser=parser)
|
| 1870 |
+
|
| 1871 |
+
|
| 1872 |
+
def test_more_than_one_expression_raises(engine, parser):
    """Semicolon-separated statements are rejected."""
    with pytest.raises(SyntaxError, match="only a single expression is allowed"):
        pd.eval("1 + 1; 2 + 2", engine=engine, parser=parser)
|
| 1875 |
+
|
| 1876 |
+
|
| 1877 |
+
@pytest.mark.parametrize("cmp", ("and", "or"))
|
| 1878 |
+
@pytest.mark.parametrize("lhs", (int, float))
|
| 1879 |
+
@pytest.mark.parametrize("rhs", (int, float))
|
| 1880 |
+
def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser):
|
| 1881 |
+
gen = {
|
| 1882 |
+
int: lambda: np.random.default_rng(2).integers(10),
|
| 1883 |
+
float: np.random.default_rng(2).standard_normal,
|
| 1884 |
+
}
|
| 1885 |
+
|
| 1886 |
+
mid = gen[lhs]() # noqa: F841
|
| 1887 |
+
lhs = gen[lhs]()
|
| 1888 |
+
rhs = gen[rhs]()
|
| 1889 |
+
|
| 1890 |
+
ex1 = f"lhs {cmp} mid {cmp} rhs"
|
| 1891 |
+
ex2 = f"lhs {cmp} mid and mid {cmp} rhs"
|
| 1892 |
+
ex3 = f"(lhs {cmp} mid) & (mid {cmp} rhs)"
|
| 1893 |
+
for ex in (ex1, ex2, ex3):
|
| 1894 |
+
msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not"
|
| 1895 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 1896 |
+
pd.eval(ex, engine=engine, parser=parser)
|
| 1897 |
+
|
| 1898 |
+
|
| 1899 |
+
@pytest.mark.parametrize(
    "other",
    [
        "'x'",
        "...",
    ],
)
def test_equals_various(other):
    """Equality against non-matching literals yields an all-False Series."""
    df = DataFrame({"A": ["a", "b", "c"]}, dtype=object)
    result = df.eval(f"A == {other}")
    expected = Series([False, False, False], name="A")
    if USE_NUMEXPR:
        # https://github.com/pandas-dev/pandas/issues/10239
        # lose name with numexpr engine. Remove when that's fixed.
        expected.name = None
    tm.assert_series_equal(result, expected)
|
| 1915 |
+
|
| 1916 |
+
|
| 1917 |
+
def test_inf(engine, parser):
|
| 1918 |
+
s = "inf + 1"
|
| 1919 |
+
expected = np.inf
|
| 1920 |
+
result = pd.eval(s, engine=engine, parser=parser)
|
| 1921 |
+
assert result == expected
|
| 1922 |
+
|
| 1923 |
+
|
| 1924 |
+
@pytest.mark.parametrize("column", ["Temp(°C)", "Capacitance(μF)"])
|
| 1925 |
+
def test_query_token(engine, column):
|
| 1926 |
+
# See: https://github.com/pandas-dev/pandas/pull/42826
|
| 1927 |
+
df = DataFrame(
|
| 1928 |
+
np.random.default_rng(2).standard_normal((5, 2)), columns=[column, "b"]
|
| 1929 |
+
)
|
| 1930 |
+
expected = df[df[column] > 5]
|
| 1931 |
+
query_string = f"`{column}` > 5"
|
| 1932 |
+
result = df.query(query_string, engine=engine)
|
| 1933 |
+
tm.assert_frame_equal(result, expected)
|
| 1934 |
+
|
| 1935 |
+
|
| 1936 |
+
def test_negate_lt_eq_le(engine, parser):
|
| 1937 |
+
df = DataFrame([[0, 10], [1, 20]], columns=["cat", "count"])
|
| 1938 |
+
expected = df[~(df.cat > 0)]
|
| 1939 |
+
|
| 1940 |
+
result = df.query("~(cat > 0)", engine=engine, parser=parser)
|
| 1941 |
+
tm.assert_frame_equal(result, expected)
|
| 1942 |
+
|
| 1943 |
+
if parser == "python":
|
| 1944 |
+
msg = "'Not' nodes are not implemented"
|
| 1945 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 1946 |
+
df.query("not (cat > 0)", engine=engine, parser=parser)
|
| 1947 |
+
else:
|
| 1948 |
+
result = df.query("not (cat > 0)", engine=engine, parser=parser)
|
| 1949 |
+
tm.assert_frame_equal(result, expected)
|
| 1950 |
+
|
| 1951 |
+
|
| 1952 |
+
@pytest.mark.parametrize(
|
| 1953 |
+
"column",
|
| 1954 |
+
DEFAULT_GLOBALS.keys(),
|
| 1955 |
+
)
|
| 1956 |
+
def test_eval_no_support_column_name(request, column):
|
| 1957 |
+
# GH 44603
|
| 1958 |
+
if column in ["True", "False", "inf", "Inf"]:
|
| 1959 |
+
request.applymarker(
|
| 1960 |
+
pytest.mark.xfail(
|
| 1961 |
+
raises=KeyError,
|
| 1962 |
+
reason=f"GH 47859 DataFrame eval not supported with {column}",
|
| 1963 |
+
)
|
| 1964 |
+
)
|
| 1965 |
+
|
| 1966 |
+
df = DataFrame(
|
| 1967 |
+
np.random.default_rng(2).integers(0, 100, size=(10, 2)),
|
| 1968 |
+
columns=[column, "col1"],
|
| 1969 |
+
)
|
| 1970 |
+
expected = df[df[column] > 6]
|
| 1971 |
+
result = df.query(f"{column}>6")
|
| 1972 |
+
|
| 1973 |
+
tm.assert_frame_equal(result, expected)
|
| 1974 |
+
|
| 1975 |
+
|
| 1976 |
+
def test_set_inplace(using_copy_on_write, warn_copy_on_write):
|
| 1977 |
+
# https://github.com/pandas-dev/pandas/issues/47449
|
| 1978 |
+
# Ensure we don't only update the DataFrame inplace, but also the actual
|
| 1979 |
+
# column values, such that references to this column also get updated
|
| 1980 |
+
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
|
| 1981 |
+
result_view = df[:]
|
| 1982 |
+
ser = df["A"]
|
| 1983 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
| 1984 |
+
df.eval("A = B + C", inplace=True)
|
| 1985 |
+
expected = DataFrame({"A": [11, 13, 15], "B": [4, 5, 6], "C": [7, 8, 9]})
|
| 1986 |
+
tm.assert_frame_equal(df, expected)
|
| 1987 |
+
if not using_copy_on_write:
|
| 1988 |
+
tm.assert_series_equal(ser, expected["A"])
|
| 1989 |
+
tm.assert_series_equal(result_view["A"], expected["A"])
|
| 1990 |
+
else:
|
| 1991 |
+
expected = Series([1, 2, 3], name="A")
|
| 1992 |
+
tm.assert_series_equal(ser, expected)
|
| 1993 |
+
tm.assert_series_equal(result_view["A"], expected)
|
| 1994 |
+
|
| 1995 |
+
|
| 1996 |
+
class TestValidate:
|
| 1997 |
+
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
|
| 1998 |
+
def test_validate_bool_args(self, value):
|
| 1999 |
+
msg = 'For argument "inplace" expected type bool, received type'
|
| 2000 |
+
with pytest.raises(ValueError, match=msg):
|
| 2001 |
+
pd.eval("2+2", inplace=value)
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_api.cpython-310.pyc
ADDED
|
Binary file (10.6 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_arithmetic.cpython-310.pyc
ADDED
|
Binary file (30.3 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_constructors.cpython-310.pyc
ADDED
|
Binary file (78.4 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_cumulative.cpython-310.pyc
ADDED
|
Binary file (4.43 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_formats.cpython-310.pyc
ADDED
|
Binary file (18.7 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_iteration.cpython-310.pyc
ADDED
|
Binary file (1.62 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_logical_ops.cpython-310.pyc
ADDED
|
Binary file (14.7 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_missing.cpython-310.pyc
ADDED
|
Binary file (3.69 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_npfuncs.cpython-310.pyc
ADDED
|
Binary file (1.73 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_reductions.cpython-310.pyc
ADDED
|
Binary file (6.79 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_subclass.cpython-310.pyc
ADDED
|
Binary file (3.56 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_ufunc.cpython-310.pyc
ADDED
|
Binary file (11.9 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_unary.cpython-310.pyc
ADDED
|
Binary file (1.93 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_validate.cpython-310.pyc
ADDED
|
Binary file (842 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__init__.py
ADDED
|
File without changes
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (179 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_cat_accessor.cpython-310.pyc
ADDED
|
Binary file (8.61 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_dt_accessor.cpython-310.pyc
ADDED
|
Binary file (24.1 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_list_accessor.cpython-310.pyc
ADDED
|
Binary file (3.8 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_sparse_accessor.cpython-310.pyc
ADDED
|
Binary file (772 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_str_accessor.cpython-310.pyc
ADDED
|
Binary file (1.49 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_struct_accessor.cpython-310.pyc
ADDED
|
Binary file (4.03 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/test_cat_accessor.py
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas import (
|
| 5 |
+
Categorical,
|
| 6 |
+
DataFrame,
|
| 7 |
+
Index,
|
| 8 |
+
Series,
|
| 9 |
+
Timestamp,
|
| 10 |
+
date_range,
|
| 11 |
+
period_range,
|
| 12 |
+
timedelta_range,
|
| 13 |
+
)
|
| 14 |
+
import pandas._testing as tm
|
| 15 |
+
from pandas.core.arrays.categorical import CategoricalAccessor
|
| 16 |
+
from pandas.core.indexes.accessors import Properties
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class TestCatAccessor:
|
| 20 |
+
@pytest.mark.parametrize(
|
| 21 |
+
"method",
|
| 22 |
+
[
|
| 23 |
+
lambda x: x.cat.set_categories([1, 2, 3]),
|
| 24 |
+
lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
|
| 25 |
+
lambda x: x.cat.rename_categories([1, 2, 3]),
|
| 26 |
+
lambda x: x.cat.remove_unused_categories(),
|
| 27 |
+
lambda x: x.cat.remove_categories([2]),
|
| 28 |
+
lambda x: x.cat.add_categories([4]),
|
| 29 |
+
lambda x: x.cat.as_ordered(),
|
| 30 |
+
lambda x: x.cat.as_unordered(),
|
| 31 |
+
],
|
| 32 |
+
)
|
| 33 |
+
def test_getname_categorical_accessor(self, method):
|
| 34 |
+
# GH#17509
|
| 35 |
+
ser = Series([1, 2, 3], name="A").astype("category")
|
| 36 |
+
expected = "A"
|
| 37 |
+
result = method(ser).name
|
| 38 |
+
assert result == expected
|
| 39 |
+
|
| 40 |
+
def test_cat_accessor(self):
|
| 41 |
+
ser = Series(Categorical(["a", "b", np.nan, "a"]))
|
| 42 |
+
tm.assert_index_equal(ser.cat.categories, Index(["a", "b"]))
|
| 43 |
+
assert not ser.cat.ordered, False
|
| 44 |
+
|
| 45 |
+
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
|
| 46 |
+
|
| 47 |
+
res = ser.cat.set_categories(["b", "a"])
|
| 48 |
+
tm.assert_categorical_equal(res.values, exp)
|
| 49 |
+
|
| 50 |
+
ser[:] = "a"
|
| 51 |
+
ser = ser.cat.remove_unused_categories()
|
| 52 |
+
tm.assert_index_equal(ser.cat.categories, Index(["a"]))
|
| 53 |
+
|
| 54 |
+
def test_cat_accessor_api(self):
|
| 55 |
+
# GH#9322
|
| 56 |
+
|
| 57 |
+
assert Series.cat is CategoricalAccessor
|
| 58 |
+
ser = Series(list("aabbcde")).astype("category")
|
| 59 |
+
assert isinstance(ser.cat, CategoricalAccessor)
|
| 60 |
+
|
| 61 |
+
invalid = Series([1])
|
| 62 |
+
with pytest.raises(AttributeError, match="only use .cat accessor"):
|
| 63 |
+
invalid.cat
|
| 64 |
+
assert not hasattr(invalid, "cat")
|
| 65 |
+
|
| 66 |
+
def test_cat_accessor_no_new_attributes(self):
|
| 67 |
+
# https://github.com/pandas-dev/pandas/issues/10673
|
| 68 |
+
cat = Series(list("aabbcde")).astype("category")
|
| 69 |
+
with pytest.raises(AttributeError, match="You cannot add any new attribute"):
|
| 70 |
+
cat.cat.xlabel = "a"
|
| 71 |
+
|
| 72 |
+
def test_categorical_delegations(self):
|
| 73 |
+
# invalid accessor
|
| 74 |
+
msg = r"Can only use \.cat accessor with a 'category' dtype"
|
| 75 |
+
with pytest.raises(AttributeError, match=msg):
|
| 76 |
+
Series([1, 2, 3]).cat
|
| 77 |
+
with pytest.raises(AttributeError, match=msg):
|
| 78 |
+
Series([1, 2, 3]).cat()
|
| 79 |
+
with pytest.raises(AttributeError, match=msg):
|
| 80 |
+
Series(["a", "b", "c"]).cat
|
| 81 |
+
with pytest.raises(AttributeError, match=msg):
|
| 82 |
+
Series(np.arange(5.0)).cat
|
| 83 |
+
with pytest.raises(AttributeError, match=msg):
|
| 84 |
+
Series([Timestamp("20130101")]).cat
|
| 85 |
+
|
| 86 |
+
# Series should delegate calls to '.categories', '.codes', '.ordered'
|
| 87 |
+
# and the methods '.set_categories()' 'drop_unused_categories()' to the
|
| 88 |
+
# categorical
|
| 89 |
+
ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
|
| 90 |
+
exp_categories = Index(["a", "b", "c"])
|
| 91 |
+
tm.assert_index_equal(ser.cat.categories, exp_categories)
|
| 92 |
+
ser = ser.cat.rename_categories([1, 2, 3])
|
| 93 |
+
exp_categories = Index([1, 2, 3])
|
| 94 |
+
tm.assert_index_equal(ser.cat.categories, exp_categories)
|
| 95 |
+
|
| 96 |
+
exp_codes = Series([0, 1, 2, 0], dtype="int8")
|
| 97 |
+
tm.assert_series_equal(ser.cat.codes, exp_codes)
|
| 98 |
+
|
| 99 |
+
assert ser.cat.ordered
|
| 100 |
+
ser = ser.cat.as_unordered()
|
| 101 |
+
assert not ser.cat.ordered
|
| 102 |
+
|
| 103 |
+
ser = ser.cat.as_ordered()
|
| 104 |
+
assert ser.cat.ordered
|
| 105 |
+
|
| 106 |
+
# reorder
|
| 107 |
+
ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
|
| 108 |
+
exp_categories = Index(["c", "b", "a"])
|
| 109 |
+
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
|
| 110 |
+
ser = ser.cat.set_categories(["c", "b", "a"])
|
| 111 |
+
tm.assert_index_equal(ser.cat.categories, exp_categories)
|
| 112 |
+
tm.assert_numpy_array_equal(ser.values.__array__(), exp_values)
|
| 113 |
+
tm.assert_numpy_array_equal(ser.__array__(), exp_values)
|
| 114 |
+
|
| 115 |
+
# remove unused categories
|
| 116 |
+
ser = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"]))
|
| 117 |
+
exp_categories = Index(["a", "b"])
|
| 118 |
+
exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
|
| 119 |
+
ser = ser.cat.remove_unused_categories()
|
| 120 |
+
tm.assert_index_equal(ser.cat.categories, exp_categories)
|
| 121 |
+
tm.assert_numpy_array_equal(ser.values.__array__(), exp_values)
|
| 122 |
+
tm.assert_numpy_array_equal(ser.__array__(), exp_values)
|
| 123 |
+
|
| 124 |
+
# This method is likely to be confused, so test that it raises an error
|
| 125 |
+
# on wrong inputs:
|
| 126 |
+
msg = "'Series' object has no attribute 'set_categories'"
|
| 127 |
+
with pytest.raises(AttributeError, match=msg):
|
| 128 |
+
ser.set_categories([4, 3, 2, 1])
|
| 129 |
+
|
| 130 |
+
# right: ser.cat.set_categories([4,3,2,1])
|
| 131 |
+
|
| 132 |
+
# GH#18862 (let Series.cat.rename_categories take callables)
|
| 133 |
+
ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
|
| 134 |
+
result = ser.cat.rename_categories(lambda x: x.upper())
|
| 135 |
+
expected = Series(
|
| 136 |
+
Categorical(["A", "B", "C", "A"], categories=["A", "B", "C"], ordered=True)
|
| 137 |
+
)
|
| 138 |
+
tm.assert_series_equal(result, expected)
|
| 139 |
+
|
| 140 |
+
@pytest.mark.parametrize(
|
| 141 |
+
"idx",
|
| 142 |
+
[
|
| 143 |
+
date_range("1/1/2015", periods=5),
|
| 144 |
+
date_range("1/1/2015", periods=5, tz="MET"),
|
| 145 |
+
period_range("1/1/2015", freq="D", periods=5),
|
| 146 |
+
timedelta_range("1 days", "10 days"),
|
| 147 |
+
],
|
| 148 |
+
)
|
| 149 |
+
def test_dt_accessor_api_for_categorical(self, idx):
|
| 150 |
+
# https://github.com/pandas-dev/pandas/issues/10661
|
| 151 |
+
|
| 152 |
+
ser = Series(idx)
|
| 153 |
+
cat = ser.astype("category")
|
| 154 |
+
|
| 155 |
+
# only testing field (like .day)
|
| 156 |
+
# and bool (is_month_start)
|
| 157 |
+
attr_names = type(ser._values)._datetimelike_ops
|
| 158 |
+
|
| 159 |
+
assert isinstance(cat.dt, Properties)
|
| 160 |
+
|
| 161 |
+
special_func_defs = [
|
| 162 |
+
("strftime", ("%Y-%m-%d",), {}),
|
| 163 |
+
("round", ("D",), {}),
|
| 164 |
+
("floor", ("D",), {}),
|
| 165 |
+
("ceil", ("D",), {}),
|
| 166 |
+
("asfreq", ("D",), {}),
|
| 167 |
+
("as_unit", ("s"), {}),
|
| 168 |
+
]
|
| 169 |
+
if idx.dtype == "M8[ns]":
|
| 170 |
+
# exclude dt64tz since that is already localized and would raise
|
| 171 |
+
tup = ("tz_localize", ("UTC",), {})
|
| 172 |
+
special_func_defs.append(tup)
|
| 173 |
+
elif idx.dtype.kind == "M":
|
| 174 |
+
# exclude dt64 since that is not localized so would raise
|
| 175 |
+
tup = ("tz_convert", ("EST",), {})
|
| 176 |
+
special_func_defs.append(tup)
|
| 177 |
+
|
| 178 |
+
_special_func_names = [f[0] for f in special_func_defs]
|
| 179 |
+
|
| 180 |
+
_ignore_names = ["components", "tz_localize", "tz_convert"]
|
| 181 |
+
|
| 182 |
+
func_names = [
|
| 183 |
+
fname
|
| 184 |
+
for fname in dir(ser.dt)
|
| 185 |
+
if not (
|
| 186 |
+
fname.startswith("_")
|
| 187 |
+
or fname in attr_names
|
| 188 |
+
or fname in _special_func_names
|
| 189 |
+
or fname in _ignore_names
|
| 190 |
+
)
|
| 191 |
+
]
|
| 192 |
+
|
| 193 |
+
func_defs = [(fname, (), {}) for fname in func_names]
|
| 194 |
+
func_defs.extend(
|
| 195 |
+
f_def for f_def in special_func_defs if f_def[0] in dir(ser.dt)
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
for func, args, kwargs in func_defs:
|
| 199 |
+
warn_cls = []
|
| 200 |
+
if func == "to_period" and getattr(idx, "tz", None) is not None:
|
| 201 |
+
# dropping TZ
|
| 202 |
+
warn_cls.append(UserWarning)
|
| 203 |
+
if func == "to_pydatetime":
|
| 204 |
+
# deprecated to return Index[object]
|
| 205 |
+
warn_cls.append(FutureWarning)
|
| 206 |
+
if warn_cls:
|
| 207 |
+
warn_cls = tuple(warn_cls)
|
| 208 |
+
else:
|
| 209 |
+
warn_cls = None
|
| 210 |
+
with tm.assert_produces_warning(warn_cls):
|
| 211 |
+
res = getattr(cat.dt, func)(*args, **kwargs)
|
| 212 |
+
exp = getattr(ser.dt, func)(*args, **kwargs)
|
| 213 |
+
|
| 214 |
+
tm.assert_equal(res, exp)
|
| 215 |
+
|
| 216 |
+
for attr in attr_names:
|
| 217 |
+
res = getattr(cat.dt, attr)
|
| 218 |
+
exp = getattr(ser.dt, attr)
|
| 219 |
+
|
| 220 |
+
tm.assert_equal(res, exp)
|
| 221 |
+
|
| 222 |
+
def test_dt_accessor_api_for_categorical_invalid(self):
|
| 223 |
+
invalid = Series([1, 2, 3]).astype("category")
|
| 224 |
+
msg = "Can only use .dt accessor with datetimelike"
|
| 225 |
+
|
| 226 |
+
with pytest.raises(AttributeError, match=msg):
|
| 227 |
+
invalid.dt
|
| 228 |
+
assert not hasattr(invalid, "str")
|
| 229 |
+
|
| 230 |
+
def test_set_categories_setitem(self):
|
| 231 |
+
# GH#43334
|
| 232 |
+
|
| 233 |
+
df = DataFrame({"Survived": [1, 0, 1], "Sex": [0, 1, 1]}, dtype="category")
|
| 234 |
+
|
| 235 |
+
df["Survived"] = df["Survived"].cat.rename_categories(["No", "Yes"])
|
| 236 |
+
df["Sex"] = df["Sex"].cat.rename_categories(["female", "male"])
|
| 237 |
+
|
| 238 |
+
# values should not be coerced to NaN
|
| 239 |
+
assert list(df["Sex"]) == ["female", "male", "male"]
|
| 240 |
+
assert list(df["Survived"]) == ["Yes", "No", "Yes"]
|
| 241 |
+
|
| 242 |
+
df["Sex"] = Categorical(df["Sex"], categories=["female", "male"], ordered=False)
|
| 243 |
+
df["Survived"] = Categorical(
|
| 244 |
+
df["Survived"], categories=["No", "Yes"], ordered=False
|
| 245 |
+
)
|
| 246 |
+
|
| 247 |
+
# values should not be coerced to NaN
|
| 248 |
+
assert list(df["Sex"]) == ["female", "male", "male"]
|
| 249 |
+
assert list(df["Survived"]) == ["Yes", "No", "Yes"]
|
| 250 |
+
|
| 251 |
+
def test_categorical_of_booleans_is_boolean(self):
|
| 252 |
+
# https://github.com/pandas-dev/pandas/issues/46313
|
| 253 |
+
df = DataFrame(
|
| 254 |
+
{"int_cat": [1, 2, 3], "bool_cat": [True, False, False]}, dtype="category"
|
| 255 |
+
)
|
| 256 |
+
value = df["bool_cat"].cat.categories.dtype
|
| 257 |
+
expected = np.dtype(np.bool_)
|
| 258 |
+
assert value is expected
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/test_dt_accessor.py
ADDED
|
@@ -0,0 +1,843 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import calendar
|
| 2 |
+
from datetime import (
|
| 3 |
+
date,
|
| 4 |
+
datetime,
|
| 5 |
+
time,
|
| 6 |
+
)
|
| 7 |
+
import locale
|
| 8 |
+
import unicodedata
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import pytest
|
| 12 |
+
import pytz
|
| 13 |
+
|
| 14 |
+
from pandas._libs.tslibs.timezones import maybe_get_tz
|
| 15 |
+
from pandas.errors import SettingWithCopyError
|
| 16 |
+
|
| 17 |
+
from pandas.core.dtypes.common import (
|
| 18 |
+
is_integer_dtype,
|
| 19 |
+
is_list_like,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
import pandas as pd
|
| 23 |
+
from pandas import (
|
| 24 |
+
DataFrame,
|
| 25 |
+
DatetimeIndex,
|
| 26 |
+
Index,
|
| 27 |
+
Period,
|
| 28 |
+
PeriodIndex,
|
| 29 |
+
Series,
|
| 30 |
+
TimedeltaIndex,
|
| 31 |
+
date_range,
|
| 32 |
+
period_range,
|
| 33 |
+
timedelta_range,
|
| 34 |
+
)
|
| 35 |
+
import pandas._testing as tm
|
| 36 |
+
from pandas.core.arrays import (
|
| 37 |
+
DatetimeArray,
|
| 38 |
+
PeriodArray,
|
| 39 |
+
TimedeltaArray,
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
ok_for_period = PeriodArray._datetimelike_ops
|
| 43 |
+
ok_for_period_methods = ["strftime", "to_timestamp", "asfreq"]
|
| 44 |
+
ok_for_dt = DatetimeArray._datetimelike_ops
|
| 45 |
+
ok_for_dt_methods = [
|
| 46 |
+
"to_period",
|
| 47 |
+
"to_pydatetime",
|
| 48 |
+
"tz_localize",
|
| 49 |
+
"tz_convert",
|
| 50 |
+
"normalize",
|
| 51 |
+
"strftime",
|
| 52 |
+
"round",
|
| 53 |
+
"floor",
|
| 54 |
+
"ceil",
|
| 55 |
+
"day_name",
|
| 56 |
+
"month_name",
|
| 57 |
+
"isocalendar",
|
| 58 |
+
"as_unit",
|
| 59 |
+
]
|
| 60 |
+
ok_for_td = TimedeltaArray._datetimelike_ops
|
| 61 |
+
ok_for_td_methods = [
|
| 62 |
+
"components",
|
| 63 |
+
"to_pytimedelta",
|
| 64 |
+
"total_seconds",
|
| 65 |
+
"round",
|
| 66 |
+
"floor",
|
| 67 |
+
"ceil",
|
| 68 |
+
"as_unit",
|
| 69 |
+
]
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def get_dir(ser):
|
| 73 |
+
# check limited display api
|
| 74 |
+
results = [r for r in ser.dt.__dir__() if not r.startswith("_")]
|
| 75 |
+
return sorted(set(results))
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class TestSeriesDatetimeValues:
|
| 79 |
+
def _compare(self, ser, name):
|
| 80 |
+
# GH 7207, 11128
|
| 81 |
+
# test .dt namespace accessor
|
| 82 |
+
|
| 83 |
+
def get_expected(ser, prop):
|
| 84 |
+
result = getattr(Index(ser._values), prop)
|
| 85 |
+
if isinstance(result, np.ndarray):
|
| 86 |
+
if is_integer_dtype(result):
|
| 87 |
+
result = result.astype("int64")
|
| 88 |
+
elif not is_list_like(result) or isinstance(result, DataFrame):
|
| 89 |
+
return result
|
| 90 |
+
return Series(result, index=ser.index, name=ser.name)
|
| 91 |
+
|
| 92 |
+
left = getattr(ser.dt, name)
|
| 93 |
+
right = get_expected(ser, name)
|
| 94 |
+
if not (is_list_like(left) and is_list_like(right)):
|
| 95 |
+
assert left == right
|
| 96 |
+
elif isinstance(left, DataFrame):
|
| 97 |
+
tm.assert_frame_equal(left, right)
|
| 98 |
+
else:
|
| 99 |
+
tm.assert_series_equal(left, right)
|
| 100 |
+
|
| 101 |
+
@pytest.mark.parametrize("freq", ["D", "s", "ms"])
|
| 102 |
+
def test_dt_namespace_accessor_datetime64(self, freq):
|
| 103 |
+
# GH#7207, GH#11128
|
| 104 |
+
# test .dt namespace accessor
|
| 105 |
+
|
| 106 |
+
# datetimeindex
|
| 107 |
+
dti = date_range("20130101", periods=5, freq=freq)
|
| 108 |
+
ser = Series(dti, name="xxx")
|
| 109 |
+
|
| 110 |
+
for prop in ok_for_dt:
|
| 111 |
+
# we test freq below
|
| 112 |
+
if prop != "freq":
|
| 113 |
+
self._compare(ser, prop)
|
| 114 |
+
|
| 115 |
+
for prop in ok_for_dt_methods:
|
| 116 |
+
getattr(ser.dt, prop)
|
| 117 |
+
|
| 118 |
+
msg = "The behavior of DatetimeProperties.to_pydatetime is deprecated"
|
| 119 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 120 |
+
result = ser.dt.to_pydatetime()
|
| 121 |
+
assert isinstance(result, np.ndarray)
|
| 122 |
+
assert result.dtype == object
|
| 123 |
+
|
| 124 |
+
result = ser.dt.tz_localize("US/Eastern")
|
| 125 |
+
exp_values = DatetimeIndex(ser.values).tz_localize("US/Eastern")
|
| 126 |
+
expected = Series(exp_values, index=ser.index, name="xxx")
|
| 127 |
+
tm.assert_series_equal(result, expected)
|
| 128 |
+
|
| 129 |
+
tz_result = result.dt.tz
|
| 130 |
+
assert str(tz_result) == "US/Eastern"
|
| 131 |
+
freq_result = ser.dt.freq
|
| 132 |
+
assert freq_result == DatetimeIndex(ser.values, freq="infer").freq
|
| 133 |
+
|
| 134 |
+
# let's localize, then convert
|
| 135 |
+
result = ser.dt.tz_localize("UTC").dt.tz_convert("US/Eastern")
|
| 136 |
+
exp_values = (
|
| 137 |
+
DatetimeIndex(ser.values).tz_localize("UTC").tz_convert("US/Eastern")
|
| 138 |
+
)
|
| 139 |
+
expected = Series(exp_values, index=ser.index, name="xxx")
|
| 140 |
+
tm.assert_series_equal(result, expected)
|
| 141 |
+
|
| 142 |
+
def test_dt_namespace_accessor_datetime64tz(self):
|
| 143 |
+
# GH#7207, GH#11128
|
| 144 |
+
# test .dt namespace accessor
|
| 145 |
+
|
| 146 |
+
# datetimeindex with tz
|
| 147 |
+
dti = date_range("20130101", periods=5, tz="US/Eastern")
|
| 148 |
+
ser = Series(dti, name="xxx")
|
| 149 |
+
for prop in ok_for_dt:
|
| 150 |
+
# we test freq below
|
| 151 |
+
if prop != "freq":
|
| 152 |
+
self._compare(ser, prop)
|
| 153 |
+
|
| 154 |
+
for prop in ok_for_dt_methods:
|
| 155 |
+
getattr(ser.dt, prop)
|
| 156 |
+
|
| 157 |
+
msg = "The behavior of DatetimeProperties.to_pydatetime is deprecated"
|
| 158 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 159 |
+
result = ser.dt.to_pydatetime()
|
| 160 |
+
assert isinstance(result, np.ndarray)
|
| 161 |
+
assert result.dtype == object
|
| 162 |
+
|
| 163 |
+
result = ser.dt.tz_convert("CET")
|
| 164 |
+
expected = Series(ser._values.tz_convert("CET"), index=ser.index, name="xxx")
|
| 165 |
+
tm.assert_series_equal(result, expected)
|
| 166 |
+
|
| 167 |
+
tz_result = result.dt.tz
|
| 168 |
+
assert str(tz_result) == "CET"
|
| 169 |
+
freq_result = ser.dt.freq
|
| 170 |
+
assert freq_result == DatetimeIndex(ser.values, freq="infer").freq
|
| 171 |
+
|
| 172 |
+
def test_dt_namespace_accessor_timedelta(self):
|
| 173 |
+
# GH#7207, GH#11128
|
| 174 |
+
# test .dt namespace accessor
|
| 175 |
+
|
| 176 |
+
# timedelta index
|
| 177 |
+
cases = [
|
| 178 |
+
Series(
|
| 179 |
+
timedelta_range("1 day", periods=5), index=list("abcde"), name="xxx"
|
| 180 |
+
),
|
| 181 |
+
Series(timedelta_range("1 day 01:23:45", periods=5, freq="s"), name="xxx"),
|
| 182 |
+
Series(
|
| 183 |
+
timedelta_range("2 days 01:23:45.012345", periods=5, freq="ms"),
|
| 184 |
+
name="xxx",
|
| 185 |
+
),
|
| 186 |
+
]
|
| 187 |
+
for ser in cases:
|
| 188 |
+
for prop in ok_for_td:
|
| 189 |
+
# we test freq below
|
| 190 |
+
if prop != "freq":
|
| 191 |
+
self._compare(ser, prop)
|
| 192 |
+
|
| 193 |
+
for prop in ok_for_td_methods:
|
| 194 |
+
getattr(ser.dt, prop)
|
| 195 |
+
|
| 196 |
+
result = ser.dt.components
|
| 197 |
+
assert isinstance(result, DataFrame)
|
| 198 |
+
tm.assert_index_equal(result.index, ser.index)
|
| 199 |
+
|
| 200 |
+
result = ser.dt.to_pytimedelta()
|
| 201 |
+
assert isinstance(result, np.ndarray)
|
| 202 |
+
assert result.dtype == object
|
| 203 |
+
|
| 204 |
+
result = ser.dt.total_seconds()
|
| 205 |
+
assert isinstance(result, Series)
|
| 206 |
+
assert result.dtype == "float64"
|
| 207 |
+
|
| 208 |
+
freq_result = ser.dt.freq
|
| 209 |
+
assert freq_result == TimedeltaIndex(ser.values, freq="infer").freq
|
| 210 |
+
|
| 211 |
+
def test_dt_namespace_accessor_period(self):
|
| 212 |
+
# GH#7207, GH#11128
|
| 213 |
+
# test .dt namespace accessor
|
| 214 |
+
|
| 215 |
+
# periodindex
|
| 216 |
+
pi = period_range("20130101", periods=5, freq="D")
|
| 217 |
+
ser = Series(pi, name="xxx")
|
| 218 |
+
|
| 219 |
+
for prop in ok_for_period:
|
| 220 |
+
# we test freq below
|
| 221 |
+
if prop != "freq":
|
| 222 |
+
self._compare(ser, prop)
|
| 223 |
+
|
| 224 |
+
for prop in ok_for_period_methods:
|
| 225 |
+
getattr(ser.dt, prop)
|
| 226 |
+
|
| 227 |
+
freq_result = ser.dt.freq
|
| 228 |
+
assert freq_result == PeriodIndex(ser.values).freq
|
| 229 |
+
|
| 230 |
+
def test_dt_namespace_accessor_index_and_values(self):
|
| 231 |
+
# both
|
| 232 |
+
index = date_range("20130101", periods=3, freq="D")
|
| 233 |
+
dti = date_range("20140204", periods=3, freq="s")
|
| 234 |
+
ser = Series(dti, index=index, name="xxx")
|
| 235 |
+
exp = Series(
|
| 236 |
+
np.array([2014, 2014, 2014], dtype="int32"), index=index, name="xxx"
|
| 237 |
+
)
|
| 238 |
+
tm.assert_series_equal(ser.dt.year, exp)
|
| 239 |
+
|
| 240 |
+
exp = Series(np.array([2, 2, 2], dtype="int32"), index=index, name="xxx")
|
| 241 |
+
tm.assert_series_equal(ser.dt.month, exp)
|
| 242 |
+
|
| 243 |
+
exp = Series(np.array([0, 1, 2], dtype="int32"), index=index, name="xxx")
|
| 244 |
+
tm.assert_series_equal(ser.dt.second, exp)
|
| 245 |
+
|
| 246 |
+
exp = Series([ser.iloc[0]] * 3, index=index, name="xxx")
|
| 247 |
+
tm.assert_series_equal(ser.dt.normalize(), exp)
|
| 248 |
+
|
| 249 |
+
def test_dt_accessor_limited_display_api(self):
|
| 250 |
+
# tznaive
|
| 251 |
+
ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx")
|
| 252 |
+
results = get_dir(ser)
|
| 253 |
+
tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods)))
|
| 254 |
+
|
| 255 |
+
# tzaware
|
| 256 |
+
ser = Series(date_range("2015-01-01", "2016-01-01", freq="min"), name="xxx")
|
| 257 |
+
ser = ser.dt.tz_localize("UTC").dt.tz_convert("America/Chicago")
|
| 258 |
+
results = get_dir(ser)
|
| 259 |
+
tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods)))
|
| 260 |
+
|
| 261 |
+
# Period
|
| 262 |
+
idx = period_range("20130101", periods=5, freq="D", name="xxx").astype(object)
|
| 263 |
+
with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
|
| 264 |
+
ser = Series(idx)
|
| 265 |
+
results = get_dir(ser)
|
| 266 |
+
tm.assert_almost_equal(
|
| 267 |
+
results, sorted(set(ok_for_period + ok_for_period_methods))
|
| 268 |
+
)
|
| 269 |
+
|
| 270 |
+
def test_dt_accessor_ambiguous_freq_conversions(self):
|
| 271 |
+
# GH#11295
|
| 272 |
+
# ambiguous time error on the conversions
|
| 273 |
+
ser = Series(date_range("2015-01-01", "2016-01-01", freq="min"), name="xxx")
|
| 274 |
+
ser = ser.dt.tz_localize("UTC").dt.tz_convert("America/Chicago")
|
| 275 |
+
|
| 276 |
+
exp_values = date_range(
|
| 277 |
+
"2015-01-01", "2016-01-01", freq="min", tz="UTC"
|
| 278 |
+
).tz_convert("America/Chicago")
|
| 279 |
+
# freq not preserved by tz_localize above
|
| 280 |
+
exp_values = exp_values._with_freq(None)
|
| 281 |
+
expected = Series(exp_values, name="xxx")
|
| 282 |
+
tm.assert_series_equal(ser, expected)
|
| 283 |
+
|
| 284 |
+
def test_dt_accessor_not_writeable(self, using_copy_on_write, warn_copy_on_write):
|
| 285 |
+
# no setting allowed
|
| 286 |
+
ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx")
|
| 287 |
+
with pytest.raises(ValueError, match="modifications"):
|
| 288 |
+
ser.dt.hour = 5
|
| 289 |
+
|
| 290 |
+
# trying to set a copy
|
| 291 |
+
msg = "modifications to a property of a datetimelike.+not supported"
|
| 292 |
+
with pd.option_context("chained_assignment", "raise"):
|
| 293 |
+
if using_copy_on_write:
|
| 294 |
+
with tm.raises_chained_assignment_error():
|
| 295 |
+
ser.dt.hour[0] = 5
|
| 296 |
+
elif warn_copy_on_write:
|
| 297 |
+
with tm.assert_produces_warning(
|
| 298 |
+
FutureWarning, match="ChainedAssignmentError"
|
| 299 |
+
):
|
| 300 |
+
ser.dt.hour[0] = 5
|
| 301 |
+
else:
|
| 302 |
+
with pytest.raises(SettingWithCopyError, match=msg):
|
| 303 |
+
ser.dt.hour[0] = 5
|
| 304 |
+
|
| 305 |
+
@pytest.mark.parametrize(
|
| 306 |
+
"method, dates",
|
| 307 |
+
[
|
| 308 |
+
["round", ["2012-01-02", "2012-01-02", "2012-01-01"]],
|
| 309 |
+
["floor", ["2012-01-01", "2012-01-01", "2012-01-01"]],
|
| 310 |
+
["ceil", ["2012-01-02", "2012-01-02", "2012-01-02"]],
|
| 311 |
+
],
|
| 312 |
+
)
|
| 313 |
+
def test_dt_round(self, method, dates):
|
| 314 |
+
# round
|
| 315 |
+
ser = Series(
|
| 316 |
+
pd.to_datetime(
|
| 317 |
+
["2012-01-01 13:00:00", "2012-01-01 12:01:00", "2012-01-01 08:00:00"]
|
| 318 |
+
),
|
| 319 |
+
name="xxx",
|
| 320 |
+
)
|
| 321 |
+
result = getattr(ser.dt, method)("D")
|
| 322 |
+
expected = Series(pd.to_datetime(dates), name="xxx")
|
| 323 |
+
tm.assert_series_equal(result, expected)
|
| 324 |
+
|
| 325 |
+
def test_dt_round_tz(self):
|
| 326 |
+
ser = Series(
|
| 327 |
+
pd.to_datetime(
|
| 328 |
+
["2012-01-01 13:00:00", "2012-01-01 12:01:00", "2012-01-01 08:00:00"]
|
| 329 |
+
),
|
| 330 |
+
name="xxx",
|
| 331 |
+
)
|
| 332 |
+
result = ser.dt.tz_localize("UTC").dt.tz_convert("US/Eastern").dt.round("D")
|
| 333 |
+
|
| 334 |
+
exp_values = pd.to_datetime(
|
| 335 |
+
["2012-01-01", "2012-01-01", "2012-01-01"]
|
| 336 |
+
).tz_localize("US/Eastern")
|
| 337 |
+
expected = Series(exp_values, name="xxx")
|
| 338 |
+
tm.assert_series_equal(result, expected)
|
| 339 |
+
|
| 340 |
+
@pytest.mark.parametrize("method", ["ceil", "round", "floor"])
|
| 341 |
+
def test_dt_round_tz_ambiguous(self, method):
|
| 342 |
+
# GH 18946 round near "fall back" DST
|
| 343 |
+
df1 = DataFrame(
|
| 344 |
+
[
|
| 345 |
+
pd.to_datetime("2017-10-29 02:00:00+02:00", utc=True),
|
| 346 |
+
pd.to_datetime("2017-10-29 02:00:00+01:00", utc=True),
|
| 347 |
+
pd.to_datetime("2017-10-29 03:00:00+01:00", utc=True),
|
| 348 |
+
],
|
| 349 |
+
columns=["date"],
|
| 350 |
+
)
|
| 351 |
+
df1["date"] = df1["date"].dt.tz_convert("Europe/Madrid")
|
| 352 |
+
# infer
|
| 353 |
+
result = getattr(df1.date.dt, method)("h", ambiguous="infer")
|
| 354 |
+
expected = df1["date"]
|
| 355 |
+
tm.assert_series_equal(result, expected)
|
| 356 |
+
|
| 357 |
+
# bool-array
|
| 358 |
+
result = getattr(df1.date.dt, method)("h", ambiguous=[True, False, False])
|
| 359 |
+
tm.assert_series_equal(result, expected)
|
| 360 |
+
|
| 361 |
+
# NaT
|
| 362 |
+
result = getattr(df1.date.dt, method)("h", ambiguous="NaT")
|
| 363 |
+
expected = df1["date"].copy()
|
| 364 |
+
expected.iloc[0:2] = pd.NaT
|
| 365 |
+
tm.assert_series_equal(result, expected)
|
| 366 |
+
|
| 367 |
+
# raise
|
| 368 |
+
with tm.external_error_raised(pytz.AmbiguousTimeError):
|
| 369 |
+
getattr(df1.date.dt, method)("h", ambiguous="raise")
|
| 370 |
+
|
| 371 |
+
@pytest.mark.parametrize(
|
| 372 |
+
"method, ts_str, freq",
|
| 373 |
+
[
|
| 374 |
+
["ceil", "2018-03-11 01:59:00-0600", "5min"],
|
| 375 |
+
["round", "2018-03-11 01:59:00-0600", "5min"],
|
| 376 |
+
["floor", "2018-03-11 03:01:00-0500", "2h"],
|
| 377 |
+
],
|
| 378 |
+
)
|
| 379 |
+
def test_dt_round_tz_nonexistent(self, method, ts_str, freq):
|
| 380 |
+
# GH 23324 round near "spring forward" DST
|
| 381 |
+
ser = Series([pd.Timestamp(ts_str, tz="America/Chicago")])
|
| 382 |
+
result = getattr(ser.dt, method)(freq, nonexistent="shift_forward")
|
| 383 |
+
expected = Series([pd.Timestamp("2018-03-11 03:00:00", tz="America/Chicago")])
|
| 384 |
+
tm.assert_series_equal(result, expected)
|
| 385 |
+
|
| 386 |
+
result = getattr(ser.dt, method)(freq, nonexistent="NaT")
|
| 387 |
+
expected = Series([pd.NaT]).dt.tz_localize(result.dt.tz)
|
| 388 |
+
tm.assert_series_equal(result, expected)
|
| 389 |
+
|
| 390 |
+
with pytest.raises(pytz.NonExistentTimeError, match="2018-03-11 02:00:00"):
|
| 391 |
+
getattr(ser.dt, method)(freq, nonexistent="raise")
|
| 392 |
+
|
| 393 |
+
@pytest.mark.parametrize("freq", ["ns", "us", "1000us"])
|
| 394 |
+
def test_dt_round_nonnano_higher_resolution_no_op(self, freq):
|
| 395 |
+
# GH 52761
|
| 396 |
+
ser = Series(
|
| 397 |
+
["2020-05-31 08:00:00", "2000-12-31 04:00:05", "1800-03-14 07:30:20"],
|
| 398 |
+
dtype="datetime64[ms]",
|
| 399 |
+
)
|
| 400 |
+
expected = ser.copy()
|
| 401 |
+
result = ser.dt.round(freq)
|
| 402 |
+
tm.assert_series_equal(result, expected)
|
| 403 |
+
|
| 404 |
+
assert not np.shares_memory(ser.array._ndarray, result.array._ndarray)
|
| 405 |
+
|
| 406 |
+
def test_dt_namespace_accessor_categorical(self):
|
| 407 |
+
# GH 19468
|
| 408 |
+
dti = DatetimeIndex(["20171111", "20181212"]).repeat(2)
|
| 409 |
+
ser = Series(pd.Categorical(dti), name="foo")
|
| 410 |
+
result = ser.dt.year
|
| 411 |
+
expected = Series([2017, 2017, 2018, 2018], dtype="int32", name="foo")
|
| 412 |
+
tm.assert_series_equal(result, expected)
|
| 413 |
+
|
| 414 |
+
def test_dt_tz_localize_categorical(self, tz_aware_fixture):
|
| 415 |
+
# GH 27952
|
| 416 |
+
tz = tz_aware_fixture
|
| 417 |
+
datetimes = Series(
|
| 418 |
+
["2019-01-01", "2019-01-01", "2019-01-02"], dtype="datetime64[ns]"
|
| 419 |
+
)
|
| 420 |
+
categorical = datetimes.astype("category")
|
| 421 |
+
result = categorical.dt.tz_localize(tz)
|
| 422 |
+
expected = datetimes.dt.tz_localize(tz)
|
| 423 |
+
tm.assert_series_equal(result, expected)
|
| 424 |
+
|
| 425 |
+
def test_dt_tz_convert_categorical(self, tz_aware_fixture):
|
| 426 |
+
# GH 27952
|
| 427 |
+
tz = tz_aware_fixture
|
| 428 |
+
datetimes = Series(
|
| 429 |
+
["2019-01-01", "2019-01-01", "2019-01-02"], dtype="datetime64[ns, MET]"
|
| 430 |
+
)
|
| 431 |
+
categorical = datetimes.astype("category")
|
| 432 |
+
result = categorical.dt.tz_convert(tz)
|
| 433 |
+
expected = datetimes.dt.tz_convert(tz)
|
| 434 |
+
tm.assert_series_equal(result, expected)
|
| 435 |
+
|
| 436 |
+
@pytest.mark.parametrize("accessor", ["year", "month", "day"])
|
| 437 |
+
def test_dt_other_accessors_categorical(self, accessor):
|
| 438 |
+
# GH 27952
|
| 439 |
+
datetimes = Series(
|
| 440 |
+
["2018-01-01", "2018-01-01", "2019-01-02"], dtype="datetime64[ns]"
|
| 441 |
+
)
|
| 442 |
+
categorical = datetimes.astype("category")
|
| 443 |
+
result = getattr(categorical.dt, accessor)
|
| 444 |
+
expected = getattr(datetimes.dt, accessor)
|
| 445 |
+
tm.assert_series_equal(result, expected)
|
| 446 |
+
|
| 447 |
+
def test_dt_accessor_no_new_attributes(self):
|
| 448 |
+
# https://github.com/pandas-dev/pandas/issues/10673
|
| 449 |
+
ser = Series(date_range("20130101", periods=5, freq="D"))
|
| 450 |
+
with pytest.raises(AttributeError, match="You cannot add any new attribute"):
|
| 451 |
+
ser.dt.xlabel = "a"
|
| 452 |
+
|
| 453 |
+
# error: Unsupported operand types for + ("List[None]" and "List[str]")
|
| 454 |
+
@pytest.mark.parametrize(
|
| 455 |
+
"time_locale", [None] + tm.get_locales() # type: ignore[operator]
|
| 456 |
+
)
|
| 457 |
+
def test_dt_accessor_datetime_name_accessors(self, time_locale):
|
| 458 |
+
# Test Monday -> Sunday and January -> December, in that sequence
|
| 459 |
+
if time_locale is None:
|
| 460 |
+
# If the time_locale is None, day-name and month_name should
|
| 461 |
+
# return the english attributes
|
| 462 |
+
expected_days = [
|
| 463 |
+
"Monday",
|
| 464 |
+
"Tuesday",
|
| 465 |
+
"Wednesday",
|
| 466 |
+
"Thursday",
|
| 467 |
+
"Friday",
|
| 468 |
+
"Saturday",
|
| 469 |
+
"Sunday",
|
| 470 |
+
]
|
| 471 |
+
expected_months = [
|
| 472 |
+
"January",
|
| 473 |
+
"February",
|
| 474 |
+
"March",
|
| 475 |
+
"April",
|
| 476 |
+
"May",
|
| 477 |
+
"June",
|
| 478 |
+
"July",
|
| 479 |
+
"August",
|
| 480 |
+
"September",
|
| 481 |
+
"October",
|
| 482 |
+
"November",
|
| 483 |
+
"December",
|
| 484 |
+
]
|
| 485 |
+
else:
|
| 486 |
+
with tm.set_locale(time_locale, locale.LC_TIME):
|
| 487 |
+
expected_days = calendar.day_name[:]
|
| 488 |
+
expected_months = calendar.month_name[1:]
|
| 489 |
+
|
| 490 |
+
ser = Series(date_range(freq="D", start=datetime(1998, 1, 1), periods=365))
|
| 491 |
+
english_days = [
|
| 492 |
+
"Monday",
|
| 493 |
+
"Tuesday",
|
| 494 |
+
"Wednesday",
|
| 495 |
+
"Thursday",
|
| 496 |
+
"Friday",
|
| 497 |
+
"Saturday",
|
| 498 |
+
"Sunday",
|
| 499 |
+
]
|
| 500 |
+
for day, name, eng_name in zip(range(4, 11), expected_days, english_days):
|
| 501 |
+
name = name.capitalize()
|
| 502 |
+
assert ser.dt.day_name(locale=time_locale)[day] == name
|
| 503 |
+
assert ser.dt.day_name(locale=None)[day] == eng_name
|
| 504 |
+
ser = pd.concat([ser, Series([pd.NaT])])
|
| 505 |
+
assert np.isnan(ser.dt.day_name(locale=time_locale).iloc[-1])
|
| 506 |
+
|
| 507 |
+
ser = Series(date_range(freq="ME", start="2012", end="2013"))
|
| 508 |
+
result = ser.dt.month_name(locale=time_locale)
|
| 509 |
+
expected = Series([month.capitalize() for month in expected_months])
|
| 510 |
+
|
| 511 |
+
# work around https://github.com/pandas-dev/pandas/issues/22342
|
| 512 |
+
result = result.str.normalize("NFD")
|
| 513 |
+
expected = expected.str.normalize("NFD")
|
| 514 |
+
|
| 515 |
+
tm.assert_series_equal(result, expected)
|
| 516 |
+
|
| 517 |
+
for s_date, expected in zip(ser, expected_months):
|
| 518 |
+
result = s_date.month_name(locale=time_locale)
|
| 519 |
+
expected = expected.capitalize()
|
| 520 |
+
|
| 521 |
+
result = unicodedata.normalize("NFD", result)
|
| 522 |
+
expected = unicodedata.normalize("NFD", expected)
|
| 523 |
+
|
| 524 |
+
assert result == expected
|
| 525 |
+
|
| 526 |
+
ser = pd.concat([ser, Series([pd.NaT])])
|
| 527 |
+
assert np.isnan(ser.dt.month_name(locale=time_locale).iloc[-1])
|
| 528 |
+
|
| 529 |
+
def test_strftime(self):
|
| 530 |
+
# GH 10086
|
| 531 |
+
ser = Series(date_range("20130101", periods=5))
|
| 532 |
+
result = ser.dt.strftime("%Y/%m/%d")
|
| 533 |
+
expected = Series(
|
| 534 |
+
["2013/01/01", "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"]
|
| 535 |
+
)
|
| 536 |
+
tm.assert_series_equal(result, expected)
|
| 537 |
+
|
| 538 |
+
ser = Series(date_range("2015-02-03 11:22:33.4567", periods=5))
|
| 539 |
+
result = ser.dt.strftime("%Y/%m/%d %H-%M-%S")
|
| 540 |
+
expected = Series(
|
| 541 |
+
[
|
| 542 |
+
"2015/02/03 11-22-33",
|
| 543 |
+
"2015/02/04 11-22-33",
|
| 544 |
+
"2015/02/05 11-22-33",
|
| 545 |
+
"2015/02/06 11-22-33",
|
| 546 |
+
"2015/02/07 11-22-33",
|
| 547 |
+
]
|
| 548 |
+
)
|
| 549 |
+
tm.assert_series_equal(result, expected)
|
| 550 |
+
|
| 551 |
+
ser = Series(period_range("20130101", periods=5))
|
| 552 |
+
result = ser.dt.strftime("%Y/%m/%d")
|
| 553 |
+
expected = Series(
|
| 554 |
+
["2013/01/01", "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"]
|
| 555 |
+
)
|
| 556 |
+
tm.assert_series_equal(result, expected)
|
| 557 |
+
|
| 558 |
+
ser = Series(period_range("2015-02-03 11:22:33.4567", periods=5, freq="s"))
|
| 559 |
+
result = ser.dt.strftime("%Y/%m/%d %H-%M-%S")
|
| 560 |
+
expected = Series(
|
| 561 |
+
[
|
| 562 |
+
"2015/02/03 11-22-33",
|
| 563 |
+
"2015/02/03 11-22-34",
|
| 564 |
+
"2015/02/03 11-22-35",
|
| 565 |
+
"2015/02/03 11-22-36",
|
| 566 |
+
"2015/02/03 11-22-37",
|
| 567 |
+
]
|
| 568 |
+
)
|
| 569 |
+
tm.assert_series_equal(result, expected)
|
| 570 |
+
|
| 571 |
+
def test_strftime_dt64_days(self):
|
| 572 |
+
ser = Series(date_range("20130101", periods=5))
|
| 573 |
+
ser.iloc[0] = pd.NaT
|
| 574 |
+
result = ser.dt.strftime("%Y/%m/%d")
|
| 575 |
+
expected = Series(
|
| 576 |
+
[np.nan, "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"]
|
| 577 |
+
)
|
| 578 |
+
tm.assert_series_equal(result, expected)
|
| 579 |
+
|
| 580 |
+
datetime_index = date_range("20150301", periods=5)
|
| 581 |
+
result = datetime_index.strftime("%Y/%m/%d")
|
| 582 |
+
|
| 583 |
+
expected = Index(
|
| 584 |
+
["2015/03/01", "2015/03/02", "2015/03/03", "2015/03/04", "2015/03/05"],
|
| 585 |
+
dtype=np.object_,
|
| 586 |
+
)
|
| 587 |
+
# dtype may be S10 or U10 depending on python version
|
| 588 |
+
tm.assert_index_equal(result, expected)
|
| 589 |
+
|
| 590 |
+
def test_strftime_period_days(self, using_infer_string):
|
| 591 |
+
period_index = period_range("20150301", periods=5)
|
| 592 |
+
result = period_index.strftime("%Y/%m/%d")
|
| 593 |
+
expected = Index(
|
| 594 |
+
["2015/03/01", "2015/03/02", "2015/03/03", "2015/03/04", "2015/03/05"],
|
| 595 |
+
dtype="=U10",
|
| 596 |
+
)
|
| 597 |
+
if using_infer_string:
|
| 598 |
+
expected = expected.astype("string[pyarrow_numpy]")
|
| 599 |
+
tm.assert_index_equal(result, expected)
|
| 600 |
+
|
| 601 |
+
def test_strftime_dt64_microsecond_resolution(self):
|
| 602 |
+
ser = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)])
|
| 603 |
+
result = ser.dt.strftime("%Y-%m-%d %H:%M:%S")
|
| 604 |
+
expected = Series(["2013-01-01 02:32:59", "2013-01-02 14:32:01"])
|
| 605 |
+
tm.assert_series_equal(result, expected)
|
| 606 |
+
|
| 607 |
+
def test_strftime_period_hours(self):
|
| 608 |
+
ser = Series(period_range("20130101", periods=4, freq="h"))
|
| 609 |
+
result = ser.dt.strftime("%Y/%m/%d %H:%M:%S")
|
| 610 |
+
expected = Series(
|
| 611 |
+
[
|
| 612 |
+
"2013/01/01 00:00:00",
|
| 613 |
+
"2013/01/01 01:00:00",
|
| 614 |
+
"2013/01/01 02:00:00",
|
| 615 |
+
"2013/01/01 03:00:00",
|
| 616 |
+
]
|
| 617 |
+
)
|
| 618 |
+
tm.assert_series_equal(result, expected)
|
| 619 |
+
|
| 620 |
+
def test_strftime_period_minutes(self):
|
| 621 |
+
ser = Series(period_range("20130101", periods=4, freq="ms"))
|
| 622 |
+
result = ser.dt.strftime("%Y/%m/%d %H:%M:%S.%l")
|
| 623 |
+
expected = Series(
|
| 624 |
+
[
|
| 625 |
+
"2013/01/01 00:00:00.000",
|
| 626 |
+
"2013/01/01 00:00:00.001",
|
| 627 |
+
"2013/01/01 00:00:00.002",
|
| 628 |
+
"2013/01/01 00:00:00.003",
|
| 629 |
+
]
|
| 630 |
+
)
|
| 631 |
+
tm.assert_series_equal(result, expected)
|
| 632 |
+
|
| 633 |
+
@pytest.mark.parametrize(
|
| 634 |
+
"data",
|
| 635 |
+
[
|
| 636 |
+
DatetimeIndex(["2019-01-01", pd.NaT]),
|
| 637 |
+
PeriodIndex(["2019-01-01", pd.NaT], dtype="period[D]"),
|
| 638 |
+
],
|
| 639 |
+
)
|
| 640 |
+
def test_strftime_nat(self, data):
|
| 641 |
+
# GH 29578
|
| 642 |
+
ser = Series(data)
|
| 643 |
+
result = ser.dt.strftime("%Y-%m-%d")
|
| 644 |
+
expected = Series(["2019-01-01", np.nan])
|
| 645 |
+
tm.assert_series_equal(result, expected)
|
| 646 |
+
|
| 647 |
+
@pytest.mark.parametrize(
|
| 648 |
+
"data", [DatetimeIndex([pd.NaT]), PeriodIndex([pd.NaT], dtype="period[D]")]
|
| 649 |
+
)
|
| 650 |
+
def test_strftime_all_nat(self, data):
|
| 651 |
+
# https://github.com/pandas-dev/pandas/issues/45858
|
| 652 |
+
ser = Series(data)
|
| 653 |
+
with tm.assert_produces_warning(None):
|
| 654 |
+
result = ser.dt.strftime("%Y-%m-%d")
|
| 655 |
+
expected = Series([np.nan], dtype=object)
|
| 656 |
+
tm.assert_series_equal(result, expected)
|
| 657 |
+
|
| 658 |
+
def test_valid_dt_with_missing_values(self):
|
| 659 |
+
# GH 8689
|
| 660 |
+
ser = Series(date_range("20130101", periods=5, freq="D"))
|
| 661 |
+
ser.iloc[2] = pd.NaT
|
| 662 |
+
|
| 663 |
+
for attr in ["microsecond", "nanosecond", "second", "minute", "hour", "day"]:
|
| 664 |
+
expected = getattr(ser.dt, attr).copy()
|
| 665 |
+
expected.iloc[2] = np.nan
|
| 666 |
+
result = getattr(ser.dt, attr)
|
| 667 |
+
tm.assert_series_equal(result, expected)
|
| 668 |
+
|
| 669 |
+
result = ser.dt.date
|
| 670 |
+
expected = Series(
|
| 671 |
+
[
|
| 672 |
+
date(2013, 1, 1),
|
| 673 |
+
date(2013, 1, 2),
|
| 674 |
+
pd.NaT,
|
| 675 |
+
date(2013, 1, 4),
|
| 676 |
+
date(2013, 1, 5),
|
| 677 |
+
],
|
| 678 |
+
dtype="object",
|
| 679 |
+
)
|
| 680 |
+
tm.assert_series_equal(result, expected)
|
| 681 |
+
|
| 682 |
+
result = ser.dt.time
|
| 683 |
+
expected = Series([time(0), time(0), pd.NaT, time(0), time(0)], dtype="object")
|
| 684 |
+
tm.assert_series_equal(result, expected)
|
| 685 |
+
|
| 686 |
+
def test_dt_accessor_api(self):
|
| 687 |
+
# GH 9322
|
| 688 |
+
from pandas.core.indexes.accessors import (
|
| 689 |
+
CombinedDatetimelikeProperties,
|
| 690 |
+
DatetimeProperties,
|
| 691 |
+
)
|
| 692 |
+
|
| 693 |
+
assert Series.dt is CombinedDatetimelikeProperties
|
| 694 |
+
|
| 695 |
+
ser = Series(date_range("2000-01-01", periods=3))
|
| 696 |
+
assert isinstance(ser.dt, DatetimeProperties)
|
| 697 |
+
|
| 698 |
+
@pytest.mark.parametrize(
|
| 699 |
+
"ser",
|
| 700 |
+
[
|
| 701 |
+
Series(np.arange(5)),
|
| 702 |
+
Series(list("abcde")),
|
| 703 |
+
Series(np.random.default_rng(2).standard_normal(5)),
|
| 704 |
+
],
|
| 705 |
+
)
|
| 706 |
+
def test_dt_accessor_invalid(self, ser):
|
| 707 |
+
# GH#9322 check that series with incorrect dtypes don't have attr
|
| 708 |
+
with pytest.raises(AttributeError, match="only use .dt accessor"):
|
| 709 |
+
ser.dt
|
| 710 |
+
assert not hasattr(ser, "dt")
|
| 711 |
+
|
| 712 |
+
def test_dt_accessor_updates_on_inplace(self):
|
| 713 |
+
ser = Series(date_range("2018-01-01", periods=10))
|
| 714 |
+
ser[2] = None
|
| 715 |
+
return_value = ser.fillna(pd.Timestamp("2018-01-01"), inplace=True)
|
| 716 |
+
assert return_value is None
|
| 717 |
+
result = ser.dt.date
|
| 718 |
+
assert result[0] == result[2]
|
| 719 |
+
|
| 720 |
+
def test_date_tz(self):
|
| 721 |
+
# GH11757
|
| 722 |
+
rng = DatetimeIndex(
|
| 723 |
+
["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"],
|
| 724 |
+
tz="US/Eastern",
|
| 725 |
+
)
|
| 726 |
+
ser = Series(rng)
|
| 727 |
+
expected = Series([date(2014, 4, 4), date(2014, 7, 18), date(2015, 11, 22)])
|
| 728 |
+
tm.assert_series_equal(ser.dt.date, expected)
|
| 729 |
+
tm.assert_series_equal(ser.apply(lambda x: x.date()), expected)
|
| 730 |
+
|
| 731 |
+
def test_dt_timetz_accessor(self, tz_naive_fixture):
|
| 732 |
+
# GH21358
|
| 733 |
+
tz = maybe_get_tz(tz_naive_fixture)
|
| 734 |
+
|
| 735 |
+
dtindex = DatetimeIndex(
|
| 736 |
+
["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"], tz=tz
|
| 737 |
+
)
|
| 738 |
+
ser = Series(dtindex)
|
| 739 |
+
expected = Series(
|
| 740 |
+
[time(23, 56, tzinfo=tz), time(21, 24, tzinfo=tz), time(22, 14, tzinfo=tz)]
|
| 741 |
+
)
|
| 742 |
+
result = ser.dt.timetz
|
| 743 |
+
tm.assert_series_equal(result, expected)
|
| 744 |
+
|
| 745 |
+
@pytest.mark.parametrize(
|
| 746 |
+
"input_series, expected_output",
|
| 747 |
+
[
|
| 748 |
+
[["2020-01-01"], [[2020, 1, 3]]],
|
| 749 |
+
[[pd.NaT], [[np.nan, np.nan, np.nan]]],
|
| 750 |
+
[["2019-12-31", "2019-12-29"], [[2020, 1, 2], [2019, 52, 7]]],
|
| 751 |
+
[["2010-01-01", pd.NaT], [[2009, 53, 5], [np.nan, np.nan, np.nan]]],
|
| 752 |
+
# see GH#36032
|
| 753 |
+
[["2016-01-08", "2016-01-04"], [[2016, 1, 5], [2016, 1, 1]]],
|
| 754 |
+
[["2016-01-07", "2016-01-01"], [[2016, 1, 4], [2015, 53, 5]]],
|
| 755 |
+
],
|
| 756 |
+
)
|
| 757 |
+
def test_isocalendar(self, input_series, expected_output):
|
| 758 |
+
result = pd.to_datetime(Series(input_series)).dt.isocalendar()
|
| 759 |
+
expected_frame = DataFrame(
|
| 760 |
+
expected_output, columns=["year", "week", "day"], dtype="UInt32"
|
| 761 |
+
)
|
| 762 |
+
tm.assert_frame_equal(result, expected_frame)
|
| 763 |
+
|
| 764 |
+
def test_hour_index(self):
|
| 765 |
+
dt_series = Series(
|
| 766 |
+
date_range(start="2021-01-01", periods=5, freq="h"),
|
| 767 |
+
index=[2, 6, 7, 8, 11],
|
| 768 |
+
dtype="category",
|
| 769 |
+
)
|
| 770 |
+
result = dt_series.dt.hour
|
| 771 |
+
expected = Series(
|
| 772 |
+
[0, 1, 2, 3, 4],
|
| 773 |
+
dtype="int32",
|
| 774 |
+
index=[2, 6, 7, 8, 11],
|
| 775 |
+
)
|
| 776 |
+
tm.assert_series_equal(result, expected)
|
| 777 |
+
|
| 778 |
+
|
| 779 |
+
class TestSeriesPeriodValuesDtAccessor:
|
| 780 |
+
@pytest.mark.parametrize(
|
| 781 |
+
"input_vals",
|
| 782 |
+
[
|
| 783 |
+
[Period("2016-01", freq="M"), Period("2016-02", freq="M")],
|
| 784 |
+
[Period("2016-01-01", freq="D"), Period("2016-01-02", freq="D")],
|
| 785 |
+
[
|
| 786 |
+
Period("2016-01-01 00:00:00", freq="h"),
|
| 787 |
+
Period("2016-01-01 01:00:00", freq="h"),
|
| 788 |
+
],
|
| 789 |
+
[
|
| 790 |
+
Period("2016-01-01 00:00:00", freq="M"),
|
| 791 |
+
Period("2016-01-01 00:01:00", freq="M"),
|
| 792 |
+
],
|
| 793 |
+
[
|
| 794 |
+
Period("2016-01-01 00:00:00", freq="s"),
|
| 795 |
+
Period("2016-01-01 00:00:01", freq="s"),
|
| 796 |
+
],
|
| 797 |
+
],
|
| 798 |
+
)
|
| 799 |
+
def test_end_time_timevalues(self, input_vals):
|
| 800 |
+
# GH#17157
|
| 801 |
+
# Check that the time part of the Period is adjusted by end_time
|
| 802 |
+
# when using the dt accessor on a Series
|
| 803 |
+
input_vals = PeriodArray._from_sequence(np.asarray(input_vals))
|
| 804 |
+
|
| 805 |
+
ser = Series(input_vals)
|
| 806 |
+
result = ser.dt.end_time
|
| 807 |
+
expected = ser.apply(lambda x: x.end_time)
|
| 808 |
+
tm.assert_series_equal(result, expected)
|
| 809 |
+
|
| 810 |
+
@pytest.mark.parametrize("input_vals", [("2001"), ("NaT")])
|
| 811 |
+
def test_to_period(self, input_vals):
|
| 812 |
+
# GH#21205
|
| 813 |
+
expected = Series([input_vals], dtype="Period[D]")
|
| 814 |
+
result = Series([input_vals], dtype="datetime64[ns]").dt.to_period("D")
|
| 815 |
+
tm.assert_series_equal(result, expected)
|
| 816 |
+
|
| 817 |
+
|
| 818 |
+
def test_normalize_pre_epoch_dates():
|
| 819 |
+
# GH: 36294
|
| 820 |
+
ser = pd.to_datetime(Series(["1969-01-01 09:00:00", "2016-01-01 09:00:00"]))
|
| 821 |
+
result = ser.dt.normalize()
|
| 822 |
+
expected = pd.to_datetime(Series(["1969-01-01", "2016-01-01"]))
|
| 823 |
+
tm.assert_series_equal(result, expected)
|
| 824 |
+
|
| 825 |
+
|
| 826 |
+
def test_day_attribute_non_nano_beyond_int32():
|
| 827 |
+
# GH 52386
|
| 828 |
+
data = np.array(
|
| 829 |
+
[
|
| 830 |
+
136457654736252,
|
| 831 |
+
134736784364431,
|
| 832 |
+
245345345545332,
|
| 833 |
+
223432411,
|
| 834 |
+
2343241,
|
| 835 |
+
3634548734,
|
| 836 |
+
23234,
|
| 837 |
+
],
|
| 838 |
+
dtype="timedelta64[s]",
|
| 839 |
+
)
|
| 840 |
+
ser = Series(data)
|
| 841 |
+
result = ser.dt.days
|
| 842 |
+
expected = Series([1579371003, 1559453522, 2839645203, 2586, 27, 42066, 0])
|
| 843 |
+
tm.assert_series_equal(result, expected)
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/test_list_accessor.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
from pandas import (
|
| 6 |
+
ArrowDtype,
|
| 7 |
+
Series,
|
| 8 |
+
)
|
| 9 |
+
import pandas._testing as tm
|
| 10 |
+
|
| 11 |
+
pa = pytest.importorskip("pyarrow")
|
| 12 |
+
|
| 13 |
+
from pandas.compat import pa_version_under11p0
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@pytest.mark.parametrize(
|
| 17 |
+
"list_dtype",
|
| 18 |
+
(
|
| 19 |
+
pa.list_(pa.int64()),
|
| 20 |
+
pa.list_(pa.int64(), list_size=3),
|
| 21 |
+
pa.large_list(pa.int64()),
|
| 22 |
+
),
|
| 23 |
+
)
|
| 24 |
+
def test_list_getitem(list_dtype):
|
| 25 |
+
ser = Series(
|
| 26 |
+
[[1, 2, 3], [4, None, 5], None],
|
| 27 |
+
dtype=ArrowDtype(list_dtype),
|
| 28 |
+
)
|
| 29 |
+
actual = ser.list[1]
|
| 30 |
+
expected = Series([2, None, None], dtype="int64[pyarrow]")
|
| 31 |
+
tm.assert_series_equal(actual, expected)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def test_list_getitem_slice():
|
| 35 |
+
ser = Series(
|
| 36 |
+
[[1, 2, 3], [4, None, 5], None],
|
| 37 |
+
dtype=ArrowDtype(pa.list_(pa.int64())),
|
| 38 |
+
)
|
| 39 |
+
if pa_version_under11p0:
|
| 40 |
+
with pytest.raises(
|
| 41 |
+
NotImplementedError, match="List slice not supported by pyarrow "
|
| 42 |
+
):
|
| 43 |
+
ser.list[1:None:None]
|
| 44 |
+
else:
|
| 45 |
+
actual = ser.list[1:None:None]
|
| 46 |
+
expected = Series(
|
| 47 |
+
[[2, 3], [None, 5], None], dtype=ArrowDtype(pa.list_(pa.int64()))
|
| 48 |
+
)
|
| 49 |
+
tm.assert_series_equal(actual, expected)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def test_list_len():
|
| 53 |
+
ser = Series(
|
| 54 |
+
[[1, 2, 3], [4, None], None],
|
| 55 |
+
dtype=ArrowDtype(pa.list_(pa.int64())),
|
| 56 |
+
)
|
| 57 |
+
actual = ser.list.len()
|
| 58 |
+
expected = Series([3, 2, None], dtype=ArrowDtype(pa.int32()))
|
| 59 |
+
tm.assert_series_equal(actual, expected)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def test_list_flatten():
|
| 63 |
+
ser = Series(
|
| 64 |
+
[[1, 2, 3], [4, None], None],
|
| 65 |
+
dtype=ArrowDtype(pa.list_(pa.int64())),
|
| 66 |
+
)
|
| 67 |
+
actual = ser.list.flatten()
|
| 68 |
+
expected = Series([1, 2, 3, 4, None], dtype=ArrowDtype(pa.int64()))
|
| 69 |
+
tm.assert_series_equal(actual, expected)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def test_list_getitem_slice_invalid():
|
| 73 |
+
ser = Series(
|
| 74 |
+
[[1, 2, 3], [4, None, 5], None],
|
| 75 |
+
dtype=ArrowDtype(pa.list_(pa.int64())),
|
| 76 |
+
)
|
| 77 |
+
if pa_version_under11p0:
|
| 78 |
+
with pytest.raises(
|
| 79 |
+
NotImplementedError, match="List slice not supported by pyarrow "
|
| 80 |
+
):
|
| 81 |
+
ser.list[1:None:0]
|
| 82 |
+
else:
|
| 83 |
+
with pytest.raises(pa.lib.ArrowInvalid, match=re.escape("`step` must be >= 1")):
|
| 84 |
+
ser.list[1:None:0]
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def test_list_accessor_non_list_dtype():
|
| 88 |
+
ser = Series(
|
| 89 |
+
[1, 2, 4],
|
| 90 |
+
dtype=ArrowDtype(pa.int64()),
|
| 91 |
+
)
|
| 92 |
+
with pytest.raises(
|
| 93 |
+
AttributeError,
|
| 94 |
+
match=re.escape(
|
| 95 |
+
"Can only use the '.list' accessor with 'list[pyarrow]' dtype, "
|
| 96 |
+
"not int64[pyarrow]."
|
| 97 |
+
),
|
| 98 |
+
):
|
| 99 |
+
ser.list[1:None:0]
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@pytest.mark.parametrize(
|
| 103 |
+
"list_dtype",
|
| 104 |
+
(
|
| 105 |
+
pa.list_(pa.int64()),
|
| 106 |
+
pa.list_(pa.int64(), list_size=3),
|
| 107 |
+
pa.large_list(pa.int64()),
|
| 108 |
+
),
|
| 109 |
+
)
|
| 110 |
+
def test_list_getitem_invalid_index(list_dtype):
|
| 111 |
+
ser = Series(
|
| 112 |
+
[[1, 2, 3], [4, None, 5], None],
|
| 113 |
+
dtype=ArrowDtype(list_dtype),
|
| 114 |
+
)
|
| 115 |
+
with pytest.raises(pa.lib.ArrowInvalid, match="Index -1 is out of bounds"):
|
| 116 |
+
ser.list[-1]
|
| 117 |
+
with pytest.raises(pa.lib.ArrowInvalid, match="Index 5 is out of bounds"):
|
| 118 |
+
ser.list[5]
|
| 119 |
+
with pytest.raises(ValueError, match="key must be an int or slice, got str"):
|
| 120 |
+
ser.list["abc"]
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def test_list_accessor_not_iterable():
|
| 124 |
+
ser = Series(
|
| 125 |
+
[[1, 2, 3], [4, None], None],
|
| 126 |
+
dtype=ArrowDtype(pa.list_(pa.int64())),
|
| 127 |
+
)
|
| 128 |
+
with pytest.raises(TypeError, match="'ListAccessor' object is not iterable"):
|
| 129 |
+
iter(ser.list)
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/test_sparse_accessor.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pandas import Series
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class TestSparseAccessor:
|
| 5 |
+
def test_sparse_accessor_updates_on_inplace(self):
|
| 6 |
+
ser = Series([1, 1, 2, 3], dtype="Sparse[int]")
|
| 7 |
+
return_value = ser.drop([0, 1], inplace=True)
|
| 8 |
+
assert return_value is None
|
| 9 |
+
assert ser.sparse.density == 1.0
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/test_str_accessor.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
from pandas import Series
|
| 4 |
+
import pandas._testing as tm
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TestStrAccessor:
    def test_str_attribute(self):
        # GH#9068: str methods accessed through the unbound accessor class,
        # i.e. Series.str.<method>(ser.str), behave like the bound accessor.
        methods = ["strip", "rstrip", "lstrip"]
        ser = Series([" jack", "jill ", " jesse ", "frank"])
        for method in methods:
            expected = Series([getattr(str, method)(x) for x in ser.values])
            tm.assert_series_equal(getattr(Series.str, method)(ser.str), expected)

        # str accessor only valid with string values
        ser = Series(range(5))
        with pytest.raises(AttributeError, match="only use .str accessor"):
            ser.str.repeat(2)

    def test_str_accessor_updates_on_inplace(self):
        # the accessor must see the post-drop values rather than a stale cache
        ser = Series(list("abc"))
        return_value = ser.drop([0], inplace=True)
        assert return_value is None
        assert len(ser.str.lower()) == 2
|
vllm/lib/python3.10/site-packages/pandas/tests/series/accessors/test_struct_accessor.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
from pandas.compat.pyarrow import (
|
| 6 |
+
pa_version_under11p0,
|
| 7 |
+
pa_version_under13p0,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
from pandas import (
|
| 11 |
+
ArrowDtype,
|
| 12 |
+
DataFrame,
|
| 13 |
+
Index,
|
| 14 |
+
Series,
|
| 15 |
+
)
|
| 16 |
+
import pandas._testing as tm
|
| 17 |
+
|
| 18 |
+
pa = pytest.importorskip("pyarrow")
|
| 19 |
+
pc = pytest.importorskip("pyarrow.compute")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def test_struct_accessor_dtypes():
    # .struct.dtypes maps each top-level struct field name to its ArrowDtype;
    # a nested struct field is reported as a single struct dtype (not expanded).
    ser = Series(
        [],
        dtype=ArrowDtype(
            pa.struct(
                [
                    ("int_col", pa.int64()),
                    ("string_col", pa.string()),
                    (
                        "struct_col",
                        pa.struct(
                            [
                                ("int_col", pa.int64()),
                                ("float_col", pa.float64()),
                            ]
                        ),
                    ),
                ]
            )
        ),
    )
    actual = ser.struct.dtypes
    expected = Series(
        [
            ArrowDtype(pa.int64()),
            ArrowDtype(pa.string()),
            ArrowDtype(
                pa.struct(
                    [
                        ("int_col", pa.int64()),
                        ("float_col", pa.float64()),
                    ]
                )
            ),
        ],
        index=Index(["int_col", "string_col", "struct_col"]),
    )
    tm.assert_series_equal(actual, expected)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@pytest.mark.skipif(pa_version_under13p0, reason="pyarrow>=13.0.0 required")
def test_struct_accessor_field():
    # .struct.field selects one child field by name or by integer position;
    # the result keeps the parent index and is named after the selected field.
    index = Index([-100, 42, 123])
    ser = Series(
        [
            {"rice": 1.0, "maize": -1, "wheat": "a"},
            {"rice": 2.0, "maize": 0, "wheat": "b"},
            {"rice": 3.0, "maize": 1, "wheat": "c"},
        ],
        dtype=ArrowDtype(
            pa.struct(
                [
                    ("rice", pa.float64()),
                    ("maize", pa.int64()),
                    ("wheat", pa.string()),
                ]
            )
        ),
        index=index,
    )
    # selection by field name
    by_name = ser.struct.field("maize")
    by_name_expected = Series(
        [-1, 0, 1],
        dtype=ArrowDtype(pa.int64()),
        index=index,
        name="maize",
    )
    tm.assert_series_equal(by_name, by_name_expected)

    # selection by field position (2 -> "wheat")
    by_index = ser.struct.field(2)
    by_index_expected = Series(
        ["a", "b", "c"],
        dtype=ArrowDtype(pa.string()),
        index=index,
        name="wheat",
    )
    tm.assert_series_equal(by_index, by_index_expected)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def test_struct_accessor_field_with_invalid_name_or_index():
    """A float selector is rejected by .struct.field with a ValueError."""
    empty = Series([], dtype=ArrowDtype(pa.struct([("field", pa.int64())])))

    with pytest.raises(ValueError, match="name_or_index must be an int, str,"):
        empty.struct.field(1.1)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
@pytest.mark.skipif(pa_version_under11p0, reason="pyarrow>=11.0.0 required")
def test_struct_accessor_explode():
    # .struct.explode turns each top-level struct field into a DataFrame
    # column, preserving the original index and each field's Arrow dtype.
    index = Index([-100, 42, 123])
    ser = Series(
        [
            {"painted": 1, "snapping": {"sea": "green"}},
            {"painted": 2, "snapping": {"sea": "leatherback"}},
            {"painted": 3, "snapping": {"sea": "hawksbill"}},
        ],
        dtype=ArrowDtype(
            pa.struct(
                [
                    ("painted", pa.int64()),
                    ("snapping", pa.struct([("sea", pa.string())])),
                ]
            )
        ),
        index=index,
    )
    actual = ser.struct.explode()
    expected = DataFrame(
        {
            "painted": Series([1, 2, 3], index=index, dtype=ArrowDtype(pa.int64())),
            # the nested struct stays a single (struct-dtyped) column
            "snapping": Series(
                [{"sea": "green"}, {"sea": "leatherback"}, {"sea": "hawksbill"}],
                index=index,
                dtype=ArrowDtype(pa.struct([("sea", pa.string())])),
            ),
        },
    )
    tm.assert_frame_equal(actual, expected)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@pytest.mark.parametrize(
    "invalid",
    [
        pytest.param(Series([1, 2, 3], dtype="int64"), id="int64"),
        pytest.param(
            Series(["a", "b", "c"], dtype="string[pyarrow]"), id="string-pyarrow"
        ),
    ],
)
def test_struct_accessor_api_for_invalid(invalid):
    # Accessing .struct on a non-struct dtype raises, and the message names
    # the offending dtype exactly.
    with pytest.raises(
        AttributeError,
        match=re.escape(
            "Can only use the '.struct' accessor with 'struct[pyarrow]' dtype, "
            f"not {invalid.dtype}."
        ),
    ):
        invalid.struct
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
@pytest.mark.parametrize(
    ["indices", "name"],
    [
        (0, "int_col"),
        ([1, 2], "str_col"),
        (pc.field("int_col"), "int_col"),
        ("int_col", "int_col"),
        (b"string_col", b"string_col"),
        ([b"string_col"], "string_col"),
    ],
)
@pytest.mark.skipif(pa_version_under13p0, reason="pyarrow>=13.0.0 required")
def test_struct_accessor_field_expanded(indices, name):
    # .struct.field mirrors pyarrow.compute.struct_field for every selector
    # form: int position, nested path (list), Expression, str, and bytes.
    arrow_type = pa.struct(
        [
            ("int_col", pa.int64()),
            (
                "struct_col",
                pa.struct(
                    [
                        ("int_col", pa.int64()),
                        ("float_col", pa.float64()),
                        ("str_col", pa.string()),
                    ]
                ),
            ),
            (b"string_col", pa.string()),
        ]
    )

    data = pa.array([], type=arrow_type)
    ser = Series(data, dtype=ArrowDtype(arrow_type))
    # the pyarrow compute kernel defines the expected result
    expected = pc.struct_field(data, indices)
    result = ser.struct.field(indices)
    tm.assert_equal(result.array._pa_array.combine_chunks(), expected)
    assert result.name == name
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__init__.py
ADDED
|
File without changes
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (178 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_datetime.cpython-310.pyc
ADDED
|
Binary file (11.7 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_delitem.cpython-310.pyc
ADDED
|
Binary file (2.12 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_get.cpython-310.pyc
ADDED
|
Binary file (4.36 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_getitem.cpython-310.pyc
ADDED
|
Binary file (26.5 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_indexing.cpython-310.pyc
ADDED
|
Binary file (16.7 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_mask.cpython-310.pyc
ADDED
|
Binary file (1.92 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_set_value.cpython-310.pyc
ADDED
|
Binary file (1.24 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_setitem.cpython-310.pyc
ADDED
|
Binary file (55.6 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_take.cpython-310.pyc
ADDED
|
Binary file (1.89 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_where.cpython-310.pyc
ADDED
|
Binary file (12.2 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_xs.cpython-310.pyc
ADDED
|
Binary file (3.18 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_datetime.py
ADDED
|
@@ -0,0 +1,499 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Also test support for datetime64[ns] in Series / DataFrame
|
| 3 |
+
"""
|
| 4 |
+
from datetime import (
|
| 5 |
+
datetime,
|
| 6 |
+
timedelta,
|
| 7 |
+
)
|
| 8 |
+
import re
|
| 9 |
+
|
| 10 |
+
from dateutil.tz import (
|
| 11 |
+
gettz,
|
| 12 |
+
tzutc,
|
| 13 |
+
)
|
| 14 |
+
import numpy as np
|
| 15 |
+
import pytest
|
| 16 |
+
import pytz
|
| 17 |
+
|
| 18 |
+
from pandas._libs import index as libindex
|
| 19 |
+
|
| 20 |
+
import pandas as pd
|
| 21 |
+
from pandas import (
|
| 22 |
+
DataFrame,
|
| 23 |
+
Series,
|
| 24 |
+
Timestamp,
|
| 25 |
+
date_range,
|
| 26 |
+
period_range,
|
| 27 |
+
)
|
| 28 |
+
import pandas._testing as tm
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def test_fancy_getitem():
    # Label lookup on a week-of-month anchored DatetimeIndex accepts strings,
    # datetimes, and Timestamps; integer keys still work but warn (deprecated
    # positional treatment).
    dti = date_range(
        freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)
    )

    s = Series(np.arange(len(dti)), index=dti)

    msg = "Series.__getitem__ treating keys as positions is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        assert s[48] == 48
    assert s["1/2/2009"] == 48
    assert s["2009-1-2"] == 48
    assert s[datetime(2009, 1, 2)] == 48
    assert s[Timestamp(datetime(2009, 1, 2))] == 48
    # a date string not present in the index raises KeyError verbatim
    with pytest.raises(KeyError, match=r"^'2009-1-3'$"):
        s["2009-1-3"]
    # string slicing and datetime slicing select the same span
    tm.assert_series_equal(
        s["3/6/2009":"2009-06-05"], s[datetime(2009, 3, 6) : datetime(2009, 6, 5)]
    )
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def test_fancy_setitem():
    # Label-based setitem via (deprecated) integer positions, date strings,
    # and string slices on a week-of-month anchored DatetimeIndex.
    dti = date_range(
        freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)
    )

    s = Series(np.arange(len(dti)), index=dti)

    msg = "Series.__setitem__ treating keys as positions is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        s[48] = -1
    assert s.iloc[48] == -1
    s["1/2/2009"] = -2
    assert s.iloc[48] == -2
    # slice assignment covers positions 48..53 inclusive of the end label
    s["1/2/2009":"2009-06-05"] = -3
    assert (s[48:54] == -3).all()
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@pytest.mark.parametrize("tz_source", ["pytz", "dateutil"])
def test_getitem_setitem_datetime_tz(tz_source):
    # GH #2785 / GH #2789: setting tz-aware labels round-trips, whether the
    # key is a UTC-offset string, an aware datetime, or a localized Timestamp,
    # and regardless of which tz library produced the zone.
    if tz_source == "pytz":
        tzget = pytz.timezone
    else:
        # handle special case for utc in dateutil
        tzget = lambda x: tzutc() if x == "UTC" else gettz(x)

    N = 50
    # testing with timezone, GH #2785
    rng = date_range("1/1/1990", periods=N, freq="h", tz=tzget("US/Eastern"))
    ts = Series(np.random.default_rng(2).standard_normal(N), index=rng)

    # also test Timestamp tz handling, GH #2789
    # clobber position 4 via a UTC-offset string key, then restore it
    result = ts.copy()
    result["1990-01-01 09:00:00+00:00"] = 0
    result["1990-01-01 09:00:00+00:00"] = ts.iloc[4]
    tm.assert_series_equal(result, ts)

    # same instant expressed with a -06:00 offset
    result = ts.copy()
    result["1990-01-01 03:00:00-06:00"] = 0
    result["1990-01-01 03:00:00-06:00"] = ts.iloc[4]
    tm.assert_series_equal(result, ts)

    # repeat with datetimes
    result = ts.copy()
    result[datetime(1990, 1, 1, 9, tzinfo=tzget("UTC"))] = 0
    result[datetime(1990, 1, 1, 9, tzinfo=tzget("UTC"))] = ts.iloc[4]
    tm.assert_series_equal(result, ts)

    # and with an aware datetime from a third timezone
    result = ts.copy()
    dt = Timestamp(1990, 1, 1, 3).tz_localize(tzget("US/Central"))
    dt = dt.to_pydatetime()
    result[dt] = 0
    result[dt] = ts.iloc[4]
    tm.assert_series_equal(result, ts)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def test_getitem_setitem_datetimeindex():
    # String/label/mask get-set on a tz-aware DatetimeIndex, plus the
    # tzawareness-compat rules for naive datetime keys (GH#36148).
    N = 50
    # testing with timezone, GH #2785
    rng = date_range("1/1/1990", periods=N, freq="h", tz="US/Eastern")
    ts = Series(np.random.default_rng(2).standard_normal(N), index=rng)

    result = ts["1990-01-01 04:00:00"]
    expected = ts.iloc[4]
    assert result == expected

    result = ts.copy()
    result["1990-01-01 04:00:00"] = 0
    result["1990-01-01 04:00:00"] = ts.iloc[4]
    tm.assert_series_equal(result, ts)

    result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
    expected = ts[4:8]
    tm.assert_series_equal(result, expected)

    result = ts.copy()
    result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
    result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
    tm.assert_series_equal(result, ts)

    lb = "1990-01-01 04:00:00"
    rb = "1990-01-01 07:00:00"
    # GH#18435 strings get a pass from tzawareness compat
    result = ts[(ts.index >= lb) & (ts.index <= rb)]
    expected = ts[4:8]
    tm.assert_series_equal(result, expected)

    # offset-qualified strings compare fine too
    lb = "1990-01-01 04:00:00-0500"
    rb = "1990-01-01 07:00:00-0500"
    result = ts[(ts.index >= lb) & (ts.index <= rb)]
    expected = ts[4:8]
    tm.assert_series_equal(result, expected)

    # But we do not give datetimes a pass on tzawareness compat
    msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
    naive = datetime(1990, 1, 1, 4)
    for key in [naive, Timestamp(naive), np.datetime64(naive, "ns")]:
        with pytest.raises(KeyError, match=re.escape(repr(key))):
            # GH#36148 as of 2.0 we require tzawareness-compat
            ts[key]

    result = ts.copy()
    # GH#36148 as of 2.0 we do not ignore tzawareness mismatch in indexing,
    # so setting it as a new key casts to object rather than matching
    # rng[4]
    result[naive] = ts.iloc[4]
    assert result.index.dtype == object
    tm.assert_index_equal(result.index[:-1], rng.astype(object))
    assert result.index[-1] == naive

    msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
    with pytest.raises(TypeError, match=msg):
        # GH#36148 require tzawareness compat as of 2.0
        ts[naive : datetime(1990, 1, 1, 7)]

    result = ts.copy()
    with pytest.raises(TypeError, match=msg):
        # GH#36148 require tzawareness compat as of 2.0
        result[naive : datetime(1990, 1, 1, 7)] = 0
    with pytest.raises(TypeError, match=msg):
        # GH#36148 require tzawareness compat as of 2.0
        result[naive : datetime(1990, 1, 1, 7)] = 99
    # the __setitems__ here failed, so result should still match ts
    tm.assert_series_equal(result, ts)

    lb = naive
    rb = datetime(1990, 1, 1, 7)
    msg = r"Invalid comparison between dtype=datetime64\[ns, US/Eastern\] and datetime"
    with pytest.raises(TypeError, match=msg):
        # tznaive vs tzaware comparison is invalid
        # see GH#18376, GH#18162
        ts[(ts.index >= lb) & (ts.index <= rb)]

    # comparisons succeed once the bounds are localized to the index tz
    lb = Timestamp(naive).tz_localize(rng.tzinfo)
    rb = Timestamp(datetime(1990, 1, 1, 7)).tz_localize(rng.tzinfo)
    result = ts[(ts.index >= lb) & (ts.index <= rb)]
    expected = ts[4:8]
    tm.assert_series_equal(result, expected)

    result = ts[ts.index[4]]
    expected = ts.iloc[4]
    assert result == expected

    result = ts[ts.index[4:8]]
    expected = ts[4:8]
    tm.assert_series_equal(result, expected)

    result = ts.copy()
    result[ts.index[4:8]] = 0
    result.iloc[4:8] = ts.iloc[4:8]
    tm.assert_series_equal(result, ts)

    # also test partial date slicing
    result = ts["1990-01-02"]
    expected = ts[24:48]
    tm.assert_series_equal(result, expected)

    result = ts.copy()
    result["1990-01-02"] = 0
    result["1990-01-02"] = ts[24:48]
    tm.assert_series_equal(result, ts)
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def test_getitem_setitem_periodindex():
    # Label get/set on a PeriodIndex: scalar string keys, string slices,
    # boolean masks, and Period-label keys (GH 2782).
    N = 50
    rng = period_range("1/1/1990", periods=N, freq="h")
    ts = Series(np.random.default_rng(2).standard_normal(N), index=rng)

    result = ts["1990-01-01 04"]
    expected = ts.iloc[4]
    assert result == expected

    result = ts.copy()
    result["1990-01-01 04"] = 0
    result["1990-01-01 04"] = ts.iloc[4]
    tm.assert_series_equal(result, ts)

    result = ts["1990-01-01 04":"1990-01-01 07"]
    expected = ts[4:8]
    tm.assert_series_equal(result, expected)

    result = ts.copy()
    result["1990-01-01 04":"1990-01-01 07"] = 0
    result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
    tm.assert_series_equal(result, ts)

    # boolean mask built from string comparisons selects the same span
    lb = "1990-01-01 04"
    rb = "1990-01-01 07"
    result = ts[(ts.index >= lb) & (ts.index <= rb)]
    expected = ts[4:8]
    tm.assert_series_equal(result, expected)

    # GH 2782
    result = ts[ts.index[4]]
    expected = ts.iloc[4]
    assert result == expected

    result = ts[ts.index[4:8]]
    expected = ts[4:8]
    tm.assert_series_equal(result, expected)

    result = ts.copy()
    result[ts.index[4:8]] = 0
    result.iloc[4:8] = ts.iloc[4:8]
    tm.assert_series_equal(result, ts)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def test_datetime_indexing():
    """Setting a missing Timestamp key appends it, monotonic index or not."""
    idx = date_range("1/1/2000", "1/7/2000").repeat(3)
    stamp = Timestamp("1/8/2000")
    missing = re.escape(repr(stamp))

    for make_non_monotonic in (False, True):
        ser = Series(len(idx), index=idx)
        if make_non_monotonic:
            # reversed copy exercises the non-monotonic lookup path
            ser = ser[::-1]

        with pytest.raises(KeyError, match=missing):
            ser[stamp]
        ser[stamp] = 0
        assert ser[stamp] == 0
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
# test duplicates in time series
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def test_indexing_with_duplicate_datetimeindex(
    rand_series_with_duplicate_datetimeindex,
):
    """Getting/setting by a duplicated datetime label hits every occurrence.

    A label occurring once returns a scalar; a duplicated label returns the
    sub-Series of all matching rows. Setting by label zeroes all matches.
    """
    ts = rand_series_with_duplicate_datetimeindex

    uniques = ts.index.unique()
    for date in uniques:
        result = ts[date]

        mask = ts.index == date
        # reuse the boolean mask instead of recomputing ts.index == date
        total = mask.sum()
        expected = ts[mask]
        if total > 1:
            tm.assert_series_equal(result, expected)
        else:
            tm.assert_almost_equal(result, expected.iloc[0])

        cp = ts.copy()
        cp[date] = 0
        expected = Series(np.where(mask, 0, ts), index=ts.index)
        tm.assert_series_equal(cp, expected)

    key = datetime(2000, 1, 6)
    with pytest.raises(KeyError, match=re.escape(repr(key))):
        ts[key]

    # new index: setting a missing datetime key appends it
    ts[datetime(2000, 1, 6)] = 0
    assert ts[datetime(2000, 1, 6)] == 0
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def test_loc_getitem_over_size_cutoff(monkeypatch):
    # #1821: .loc lookup on a DatetimeIndex larger than the index engine's
    # size cutoff (lowered here so the over-size path is exercised cheaply)
    monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 1000)

    # create large list of non periodic datetime
    dates = []
    sec = timedelta(seconds=1)
    half_sec = timedelta(microseconds=500000)
    d = datetime(2011, 12, 5, 20, 30)
    n = 1100
    for i in range(n):
        dates.append(d)
        dates.append(d + sec)
        dates.append(d + sec + half_sec)
        dates.append(d + sec + sec + half_sec)
        d += 3 * sec

    # duplicate some values in the list
    duplicate_positions = np.random.default_rng(2).integers(0, len(dates) - 1, 20)
    for p in duplicate_positions:
        dates[p + 1] = dates[p]

    df = DataFrame(
        np.random.default_rng(2).standard_normal((len(dates), 4)),
        index=dates,
        columns=list("ABCD"),
    )

    pos = n * 3
    timestamp = df.index[pos]
    assert timestamp in df.index

    # it works!
    df.loc[timestamp]
    assert len(df.loc[[timestamp]]) > 0
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
def test_indexing_over_size_cutoff_period_index(monkeypatch):
    # GH 27136: same over-size-cutoff engine path, but for a PeriodIndex
    monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 1000)

    n = 1100
    idx = period_range("1/1/2000", freq="min", periods=n)
    # confirm the lowered cutoff actually puts the engine in over-size mode
    assert idx._engine.over_size_threshold

    s = Series(np.random.default_rng(2).standard_normal(len(idx)), index=idx)

    pos = n - 1
    timestamp = idx[pos]
    assert timestamp in s.index

    # it works!
    s[timestamp]
    assert len(s.loc[[timestamp]]) > 0
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
def test_indexing_unordered():
    # GH 2437: scalar label lookup must work on a non-monotonic datetime
    # index; value-based partial slicing on it must raise (GH 3448).
    rng = date_range(start="2011-01-01", end="2011-01-15")
    ts = Series(np.random.default_rng(2).random(len(rng)), index=rng)
    # deliberately scrambled ordering of the same labels
    ts2 = pd.concat([ts[0:4], ts[-4:], ts[4:-4]])

    for t in ts.index:
        expected = ts[t]
        result = ts2[t]
        assert expected == result

    # GH 3448 (ranges)
    def compare(slobj):
        # slicing the scrambled series then sorting should match the ordered
        # series sliced directly (freq dropped: the sorted result has none)
        result = ts2[slobj].copy()
        result = result.sort_index()
        expected = ts[slobj]
        expected.index = expected.index._with_freq(None)
        tm.assert_series_equal(result, expected)

    for key in [
        slice("2011-01-01", "2011-01-15"),
        slice("2010-12-30", "2011-01-15"),
        slice("2011-01-01", "2011-01-16"),
        # partial ranges
        slice("2011-01-01", "2011-01-6"),
        slice("2011-01-06", "2011-01-8"),
        slice("2011-01-06", "2011-01-12"),
    ]:
        with pytest.raises(
            KeyError, match="Value based partial slicing on non-monotonic"
        ):
            compare(key)

    # single values
    result = ts2["2011"].sort_index()
    expected = ts["2011"]
    expected.index = expected.index._with_freq(None)
    tm.assert_series_equal(result, expected)
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
def test_indexing_unordered2():
|
| 413 |
+
# diff freq
|
| 414 |
+
rng = date_range(datetime(2005, 1, 1), periods=20, freq="ME")
|
| 415 |
+
ts = Series(np.arange(len(rng)), index=rng)
|
| 416 |
+
ts = ts.take(np.random.default_rng(2).permutation(20))
|
| 417 |
+
|
| 418 |
+
result = ts["2005"]
|
| 419 |
+
for t in result.index:
|
| 420 |
+
assert t.year == 2005
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
def test_indexing():
    # Partial year-string get/set on Series vs DataFrame columns
    # (GH#3070, GH#36179).
    idx = date_range("2001-1-1", periods=20, freq="ME")
    ts = Series(np.random.default_rng(2).random(len(idx)), index=idx)

    # getting

    # GH 3070, make sure semantics work on Series/Frame
    result = ts["2001"]
    tm.assert_series_equal(result, ts.iloc[:12])

    df = DataFrame({"A": ts.copy()})

    # GH#36179 pre-2.0 df["2001"] operated as slicing on rows. in 2.0 it behaves
    # like any other key, so raises
    with pytest.raises(KeyError, match="2001"):
        df["2001"]

    # setting
    ts = Series(np.random.default_rng(2).random(len(idx)), index=idx)
    expected = ts.copy()
    expected.iloc[:12] = 1
    ts["2001"] = 1
    tm.assert_series_equal(ts, expected)

    # row-slicing semantics survive via .loc on the frame
    expected = df.copy()
    expected.iloc[:12, 0] = 1
    df.loc["2001", "A"] = 1
    tm.assert_frame_equal(df, expected)
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def test_getitem_str_month_with_datetimeindex():
    """GH#3546: month-string slicing includes times on the month's last day."""
    cases = [
        ("2013-05-31 23:00", "h"),
        ("2013-05-31 23:59", "s"),
    ]
    for end, freq in cases:
        idx = date_range(start="2013-05-31 00:00", end=end, freq=freq)
        ts = Series(range(len(idx)), index=idx)
        sliced = ts["2013-05"]
        # the whole series lies inside May, so nothing is dropped
        tm.assert_series_equal(sliced, ts)
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
def test_getitem_str_year_with_datetimeindex():
    """Year-string lookup selects the whole series when all stamps fall in it."""
    stamps = [
        Timestamp("2013-05-31 00:00"),
        Timestamp(datetime(2013, 5, 31, 23, 59, 59, 999999)),
    ]
    ts = Series(range(len(stamps)), index=stamps)
    tm.assert_series_equal(ts["2013"], ts)
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
def test_getitem_str_second_with_datetimeindex():
    # GH14826, indexing with a seconds resolution string / datetime object
    df = DataFrame(
        np.random.default_rng(2).random((5, 5)),
        columns=["open", "high", "low", "close", "volume"],
        index=date_range("2012-01-02 18:01:00", periods=5, tz="US/Central", freq="s"),
    )

    # this is a single date, so will raise
    with pytest.raises(KeyError, match=r"^'2012-01-02 18:01:02'$"):
        df["2012-01-02 18:01:02"]

    # a Timestamp key is likewise not a column label; the message names it
    msg = r"Timestamp\('2012-01-02 18:01:02-0600', tz='US/Central'\)"
    with pytest.raises(KeyError, match=msg):
        df[df.index[2]]
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
def test_compare_datetime_with_all_none():
|
| 494 |
+
# GH#54870
|
| 495 |
+
ser = Series(["2020-01-01", "2020-01-02"], dtype="datetime64[ns]")
|
| 496 |
+
ser2 = Series([None, None])
|
| 497 |
+
result = ser > ser2
|
| 498 |
+
expected = Series([False, False])
|
| 499 |
+
tm.assert_series_equal(result, expected)
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_delitem.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
from pandas import (
|
| 4 |
+
Index,
|
| 5 |
+
Series,
|
| 6 |
+
date_range,
|
| 7 |
+
)
|
| 8 |
+
import pandas._testing as tm
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TestSeriesDelItem:
|
| 12 |
+
def test_delitem(self):
|
| 13 |
+
# GH#5542
|
| 14 |
+
# should delete the item inplace
|
| 15 |
+
s = Series(range(5))
|
| 16 |
+
del s[0]
|
| 17 |
+
|
| 18 |
+
expected = Series(range(1, 5), index=range(1, 5))
|
| 19 |
+
tm.assert_series_equal(s, expected)
|
| 20 |
+
|
| 21 |
+
del s[1]
|
| 22 |
+
expected = Series(range(2, 5), index=range(2, 5))
|
| 23 |
+
tm.assert_series_equal(s, expected)
|
| 24 |
+
|
| 25 |
+
# only 1 left, del, add, del
|
| 26 |
+
s = Series(1)
|
| 27 |
+
del s[0]
|
| 28 |
+
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
|
| 29 |
+
s[0] = 1
|
| 30 |
+
tm.assert_series_equal(s, Series(1))
|
| 31 |
+
del s[0]
|
| 32 |
+
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
|
| 33 |
+
|
| 34 |
+
def test_delitem_object_index(self, using_infer_string):
|
| 35 |
+
# Index(dtype=object)
|
| 36 |
+
dtype = "string[pyarrow_numpy]" if using_infer_string else object
|
| 37 |
+
s = Series(1, index=Index(["a"], dtype=dtype))
|
| 38 |
+
del s["a"]
|
| 39 |
+
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype=dtype)))
|
| 40 |
+
s["a"] = 1
|
| 41 |
+
tm.assert_series_equal(s, Series(1, index=Index(["a"], dtype=dtype)))
|
| 42 |
+
del s["a"]
|
| 43 |
+
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype=dtype)))
|
| 44 |
+
|
| 45 |
+
def test_delitem_missing_key(self):
|
| 46 |
+
# empty
|
| 47 |
+
s = Series(dtype=object)
|
| 48 |
+
|
| 49 |
+
with pytest.raises(KeyError, match=r"^0$"):
|
| 50 |
+
del s[0]
|
| 51 |
+
|
| 52 |
+
def test_delitem_extension_dtype(self):
|
| 53 |
+
# GH#40386
|
| 54 |
+
# DatetimeTZDtype
|
| 55 |
+
dti = date_range("2016-01-01", periods=3, tz="US/Pacific")
|
| 56 |
+
ser = Series(dti)
|
| 57 |
+
|
| 58 |
+
expected = ser[[0, 2]]
|
| 59 |
+
del ser[1]
|
| 60 |
+
assert ser.dtype == dti.dtype
|
| 61 |
+
tm.assert_series_equal(ser, expected)
|
| 62 |
+
|
| 63 |
+
# PeriodDtype
|
| 64 |
+
pi = dti.tz_localize(None).to_period("D")
|
| 65 |
+
ser = Series(pi)
|
| 66 |
+
|
| 67 |
+
expected = ser[:2]
|
| 68 |
+
del ser[2]
|
| 69 |
+
assert ser.dtype == pi.dtype
|
| 70 |
+
tm.assert_series_equal(ser, expected)
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_get.py
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from pandas import (
|
| 6 |
+
DatetimeIndex,
|
| 7 |
+
Index,
|
| 8 |
+
Series,
|
| 9 |
+
date_range,
|
| 10 |
+
)
|
| 11 |
+
import pandas._testing as tm
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def test_get():
|
| 15 |
+
# GH 6383
|
| 16 |
+
s = Series(
|
| 17 |
+
np.array(
|
| 18 |
+
[
|
| 19 |
+
43,
|
| 20 |
+
48,
|
| 21 |
+
60,
|
| 22 |
+
48,
|
| 23 |
+
50,
|
| 24 |
+
51,
|
| 25 |
+
50,
|
| 26 |
+
45,
|
| 27 |
+
57,
|
| 28 |
+
48,
|
| 29 |
+
56,
|
| 30 |
+
45,
|
| 31 |
+
51,
|
| 32 |
+
39,
|
| 33 |
+
55,
|
| 34 |
+
43,
|
| 35 |
+
54,
|
| 36 |
+
52,
|
| 37 |
+
51,
|
| 38 |
+
54,
|
| 39 |
+
]
|
| 40 |
+
)
|
| 41 |
+
)
|
| 42 |
+
|
| 43 |
+
result = s.get(25, 0)
|
| 44 |
+
expected = 0
|
| 45 |
+
assert result == expected
|
| 46 |
+
|
| 47 |
+
s = Series(
|
| 48 |
+
np.array(
|
| 49 |
+
[
|
| 50 |
+
43,
|
| 51 |
+
48,
|
| 52 |
+
60,
|
| 53 |
+
48,
|
| 54 |
+
50,
|
| 55 |
+
51,
|
| 56 |
+
50,
|
| 57 |
+
45,
|
| 58 |
+
57,
|
| 59 |
+
48,
|
| 60 |
+
56,
|
| 61 |
+
45,
|
| 62 |
+
51,
|
| 63 |
+
39,
|
| 64 |
+
55,
|
| 65 |
+
43,
|
| 66 |
+
54,
|
| 67 |
+
52,
|
| 68 |
+
51,
|
| 69 |
+
54,
|
| 70 |
+
]
|
| 71 |
+
),
|
| 72 |
+
index=Index(
|
| 73 |
+
[
|
| 74 |
+
25.0,
|
| 75 |
+
36.0,
|
| 76 |
+
49.0,
|
| 77 |
+
64.0,
|
| 78 |
+
81.0,
|
| 79 |
+
100.0,
|
| 80 |
+
121.0,
|
| 81 |
+
144.0,
|
| 82 |
+
169.0,
|
| 83 |
+
196.0,
|
| 84 |
+
1225.0,
|
| 85 |
+
1296.0,
|
| 86 |
+
1369.0,
|
| 87 |
+
1444.0,
|
| 88 |
+
1521.0,
|
| 89 |
+
1600.0,
|
| 90 |
+
1681.0,
|
| 91 |
+
1764.0,
|
| 92 |
+
1849.0,
|
| 93 |
+
1936.0,
|
| 94 |
+
],
|
| 95 |
+
dtype=np.float64,
|
| 96 |
+
),
|
| 97 |
+
)
|
| 98 |
+
|
| 99 |
+
result = s.get(25, 0)
|
| 100 |
+
expected = 43
|
| 101 |
+
assert result == expected
|
| 102 |
+
|
| 103 |
+
# GH 7407
|
| 104 |
+
# with a boolean accessor
|
| 105 |
+
df = pd.DataFrame({"i": [0] * 3, "b": [False] * 3})
|
| 106 |
+
vc = df.i.value_counts()
|
| 107 |
+
result = vc.get(99, default="Missing")
|
| 108 |
+
assert result == "Missing"
|
| 109 |
+
|
| 110 |
+
vc = df.b.value_counts()
|
| 111 |
+
result = vc.get(False, default="Missing")
|
| 112 |
+
assert result == 3
|
| 113 |
+
|
| 114 |
+
result = vc.get(True, default="Missing")
|
| 115 |
+
assert result == "Missing"
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def test_get_nan(float_numpy_dtype):
|
| 119 |
+
# GH 8569
|
| 120 |
+
s = Index(range(10), dtype=float_numpy_dtype).to_series()
|
| 121 |
+
assert s.get(np.nan) is None
|
| 122 |
+
assert s.get(np.nan, default="Missing") == "Missing"
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def test_get_nan_multiple(float_numpy_dtype):
|
| 126 |
+
# GH 8569
|
| 127 |
+
# ensure that fixing "test_get_nan" above hasn't broken get
|
| 128 |
+
# with multiple elements
|
| 129 |
+
s = Index(range(10), dtype=float_numpy_dtype).to_series()
|
| 130 |
+
|
| 131 |
+
idx = [2, 30]
|
| 132 |
+
assert s.get(idx) is None
|
| 133 |
+
|
| 134 |
+
idx = [2, np.nan]
|
| 135 |
+
assert s.get(idx) is None
|
| 136 |
+
|
| 137 |
+
# GH 17295 - all missing keys
|
| 138 |
+
idx = [20, 30]
|
| 139 |
+
assert s.get(idx) is None
|
| 140 |
+
|
| 141 |
+
idx = [np.nan, np.nan]
|
| 142 |
+
assert s.get(idx) is None
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def test_get_with_default():
|
| 146 |
+
# GH#7725
|
| 147 |
+
d0 = ["a", "b", "c", "d"]
|
| 148 |
+
d1 = np.arange(4, dtype="int64")
|
| 149 |
+
|
| 150 |
+
for data, index in ((d0, d1), (d1, d0)):
|
| 151 |
+
s = Series(data, index=index)
|
| 152 |
+
for i, d in zip(index, data):
|
| 153 |
+
assert s.get(i) == d
|
| 154 |
+
assert s.get(i, d) == d
|
| 155 |
+
assert s.get(i, "z") == d
|
| 156 |
+
|
| 157 |
+
assert s.get("e", "z") == "z"
|
| 158 |
+
assert s.get("e", "e") == "e"
|
| 159 |
+
|
| 160 |
+
msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 161 |
+
warn = None
|
| 162 |
+
if index is d0:
|
| 163 |
+
warn = FutureWarning
|
| 164 |
+
with tm.assert_produces_warning(warn, match=msg):
|
| 165 |
+
assert s.get(10, "z") == "z"
|
| 166 |
+
assert s.get(10, 10) == 10
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
@pytest.mark.parametrize(
|
| 170 |
+
"arr",
|
| 171 |
+
[
|
| 172 |
+
np.random.default_rng(2).standard_normal(10),
|
| 173 |
+
DatetimeIndex(date_range("2020-01-01", periods=10), name="a").tz_localize(
|
| 174 |
+
tz="US/Eastern"
|
| 175 |
+
),
|
| 176 |
+
],
|
| 177 |
+
)
|
| 178 |
+
def test_get_with_ea(arr):
|
| 179 |
+
# GH#21260
|
| 180 |
+
ser = Series(arr, index=[2 * i for i in range(len(arr))])
|
| 181 |
+
assert ser.get(4) == ser.iloc[2]
|
| 182 |
+
|
| 183 |
+
result = ser.get([4, 6])
|
| 184 |
+
expected = ser.iloc[[2, 3]]
|
| 185 |
+
tm.assert_series_equal(result, expected)
|
| 186 |
+
|
| 187 |
+
result = ser.get(slice(2))
|
| 188 |
+
expected = ser.iloc[[0, 1]]
|
| 189 |
+
tm.assert_series_equal(result, expected)
|
| 190 |
+
|
| 191 |
+
assert ser.get(-1) is None
|
| 192 |
+
assert ser.get(ser.index.max() + 1) is None
|
| 193 |
+
|
| 194 |
+
ser = Series(arr[:6], index=list("abcdef"))
|
| 195 |
+
assert ser.get("c") == ser.iloc[2]
|
| 196 |
+
|
| 197 |
+
result = ser.get(slice("b", "d"))
|
| 198 |
+
expected = ser.iloc[[1, 2, 3]]
|
| 199 |
+
tm.assert_series_equal(result, expected)
|
| 200 |
+
|
| 201 |
+
result = ser.get("Z")
|
| 202 |
+
assert result is None
|
| 203 |
+
|
| 204 |
+
msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 205 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 206 |
+
assert ser.get(4) == ser.iloc[4]
|
| 207 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 208 |
+
assert ser.get(-1) == ser.iloc[-1]
|
| 209 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 210 |
+
assert ser.get(len(ser)) is None
|
| 211 |
+
|
| 212 |
+
# GH#21257
|
| 213 |
+
ser = Series(arr)
|
| 214 |
+
ser2 = ser[::2]
|
| 215 |
+
assert ser2.get(1) is None
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def test_getitem_get(string_series, object_series):
|
| 219 |
+
msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 220 |
+
|
| 221 |
+
for obj in [string_series, object_series]:
|
| 222 |
+
idx = obj.index[5]
|
| 223 |
+
|
| 224 |
+
assert obj[idx] == obj.get(idx)
|
| 225 |
+
assert obj[idx] == obj.iloc[5]
|
| 226 |
+
|
| 227 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 228 |
+
assert string_series.get(-1) == string_series.get(string_series.index[-1])
|
| 229 |
+
assert string_series.iloc[5] == string_series.get(string_series.index[5])
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def test_get_none():
|
| 233 |
+
# GH#5652
|
| 234 |
+
s1 = Series(dtype=object)
|
| 235 |
+
s2 = Series(dtype=object, index=list("abc"))
|
| 236 |
+
for s in [s1, s2]:
|
| 237 |
+
result = s.get(None)
|
| 238 |
+
assert result is None
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_getitem.py
ADDED
|
@@ -0,0 +1,735 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Series.__getitem__ test classes are organized by the type of key passed.
|
| 3 |
+
"""
|
| 4 |
+
from datetime import (
|
| 5 |
+
date,
|
| 6 |
+
datetime,
|
| 7 |
+
time,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import pytest
|
| 12 |
+
|
| 13 |
+
from pandas._libs.tslibs import (
|
| 14 |
+
conversion,
|
| 15 |
+
timezones,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
from pandas.core.dtypes.common import is_scalar
|
| 19 |
+
|
| 20 |
+
import pandas as pd
|
| 21 |
+
from pandas import (
|
| 22 |
+
Categorical,
|
| 23 |
+
DataFrame,
|
| 24 |
+
DatetimeIndex,
|
| 25 |
+
Index,
|
| 26 |
+
Series,
|
| 27 |
+
Timestamp,
|
| 28 |
+
date_range,
|
| 29 |
+
period_range,
|
| 30 |
+
timedelta_range,
|
| 31 |
+
)
|
| 32 |
+
import pandas._testing as tm
|
| 33 |
+
from pandas.core.indexing import IndexingError
|
| 34 |
+
|
| 35 |
+
from pandas.tseries.offsets import BDay
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class TestSeriesGetitemScalars:
|
| 39 |
+
def test_getitem_object_index_float_string(self):
|
| 40 |
+
# GH#17286
|
| 41 |
+
ser = Series([1] * 4, index=Index(["a", "b", "c", 1.0]))
|
| 42 |
+
assert ser["a"] == 1
|
| 43 |
+
assert ser[1.0] == 1
|
| 44 |
+
|
| 45 |
+
def test_getitem_float_keys_tuple_values(self):
|
| 46 |
+
# see GH#13509
|
| 47 |
+
|
| 48 |
+
# unique Index
|
| 49 |
+
ser = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.1, 0.2], name="foo")
|
| 50 |
+
result = ser[0.0]
|
| 51 |
+
assert result == (1, 1)
|
| 52 |
+
|
| 53 |
+
# non-unique Index
|
| 54 |
+
expected = Series([(1, 1), (2, 2)], index=[0.0, 0.0], name="foo")
|
| 55 |
+
ser = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.0, 0.2], name="foo")
|
| 56 |
+
|
| 57 |
+
result = ser[0.0]
|
| 58 |
+
tm.assert_series_equal(result, expected)
|
| 59 |
+
|
| 60 |
+
def test_getitem_unrecognized_scalar(self):
|
| 61 |
+
# GH#32684 a scalar key that is not recognized by lib.is_scalar
|
| 62 |
+
|
| 63 |
+
# a series that might be produced via `frame.dtypes`
|
| 64 |
+
ser = Series([1, 2], index=[np.dtype("O"), np.dtype("i8")])
|
| 65 |
+
|
| 66 |
+
key = ser.index[1]
|
| 67 |
+
|
| 68 |
+
result = ser[key]
|
| 69 |
+
assert result == 2
|
| 70 |
+
|
| 71 |
+
def test_getitem_negative_out_of_bounds(self):
|
| 72 |
+
ser = Series(["a"] * 10, index=["a"] * 10)
|
| 73 |
+
|
| 74 |
+
msg = "index -11 is out of bounds for axis 0 with size 10|index out of bounds"
|
| 75 |
+
warn_msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 76 |
+
with pytest.raises(IndexError, match=msg):
|
| 77 |
+
with tm.assert_produces_warning(FutureWarning, match=warn_msg):
|
| 78 |
+
ser[-11]
|
| 79 |
+
|
| 80 |
+
def test_getitem_out_of_bounds_indexerror(self, datetime_series):
|
| 81 |
+
# don't segfault, GH#495
|
| 82 |
+
msg = r"index \d+ is out of bounds for axis 0 with size \d+"
|
| 83 |
+
warn_msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 84 |
+
with pytest.raises(IndexError, match=msg):
|
| 85 |
+
with tm.assert_produces_warning(FutureWarning, match=warn_msg):
|
| 86 |
+
datetime_series[len(datetime_series)]
|
| 87 |
+
|
| 88 |
+
def test_getitem_out_of_bounds_empty_rangeindex_keyerror(self):
|
| 89 |
+
# GH#917
|
| 90 |
+
# With a RangeIndex, an int key gives a KeyError
|
| 91 |
+
ser = Series([], dtype=object)
|
| 92 |
+
with pytest.raises(KeyError, match="-1"):
|
| 93 |
+
ser[-1]
|
| 94 |
+
|
| 95 |
+
def test_getitem_keyerror_with_integer_index(self, any_int_numpy_dtype):
|
| 96 |
+
dtype = any_int_numpy_dtype
|
| 97 |
+
ser = Series(
|
| 98 |
+
np.random.default_rng(2).standard_normal(6),
|
| 99 |
+
index=Index([0, 0, 1, 1, 2, 2], dtype=dtype),
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
with pytest.raises(KeyError, match=r"^5$"):
|
| 103 |
+
ser[5]
|
| 104 |
+
|
| 105 |
+
with pytest.raises(KeyError, match=r"^'c'$"):
|
| 106 |
+
ser["c"]
|
| 107 |
+
|
| 108 |
+
# not monotonic
|
| 109 |
+
ser = Series(
|
| 110 |
+
np.random.default_rng(2).standard_normal(6), index=[2, 2, 0, 0, 1, 1]
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
with pytest.raises(KeyError, match=r"^5$"):
|
| 114 |
+
ser[5]
|
| 115 |
+
|
| 116 |
+
with pytest.raises(KeyError, match=r"^'c'$"):
|
| 117 |
+
ser["c"]
|
| 118 |
+
|
| 119 |
+
def test_getitem_int64(self, datetime_series):
|
| 120 |
+
idx = np.int64(5)
|
| 121 |
+
msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 122 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 123 |
+
res = datetime_series[idx]
|
| 124 |
+
assert res == datetime_series.iloc[5]
|
| 125 |
+
|
| 126 |
+
def test_getitem_full_range(self):
|
| 127 |
+
# github.com/pandas-dev/pandas/commit/4f433773141d2eb384325714a2776bcc5b2e20f7
|
| 128 |
+
ser = Series(range(5), index=list(range(5)))
|
| 129 |
+
result = ser[list(range(5))]
|
| 130 |
+
tm.assert_series_equal(result, ser)
|
| 131 |
+
|
| 132 |
+
# ------------------------------------------------------------------
|
| 133 |
+
# Series with DatetimeIndex
|
| 134 |
+
|
| 135 |
+
@pytest.mark.parametrize("tzstr", ["Europe/Berlin", "dateutil/Europe/Berlin"])
|
| 136 |
+
def test_getitem_pydatetime_tz(self, tzstr):
|
| 137 |
+
tz = timezones.maybe_get_tz(tzstr)
|
| 138 |
+
|
| 139 |
+
index = date_range(
|
| 140 |
+
start="2012-12-24 16:00", end="2012-12-24 18:00", freq="h", tz=tzstr
|
| 141 |
+
)
|
| 142 |
+
ts = Series(index=index, data=index.hour)
|
| 143 |
+
time_pandas = Timestamp("2012-12-24 17:00", tz=tzstr)
|
| 144 |
+
|
| 145 |
+
dt = datetime(2012, 12, 24, 17, 0)
|
| 146 |
+
time_datetime = conversion.localize_pydatetime(dt, tz)
|
| 147 |
+
assert ts[time_pandas] == ts[time_datetime]
|
| 148 |
+
|
| 149 |
+
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
|
| 150 |
+
def test_string_index_alias_tz_aware(self, tz):
|
| 151 |
+
rng = date_range("1/1/2000", periods=10, tz=tz)
|
| 152 |
+
ser = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)
|
| 153 |
+
|
| 154 |
+
result = ser["1/3/2000"]
|
| 155 |
+
tm.assert_almost_equal(result, ser.iloc[2])
|
| 156 |
+
|
| 157 |
+
def test_getitem_time_object(self):
|
| 158 |
+
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
|
| 159 |
+
ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)
|
| 160 |
+
|
| 161 |
+
mask = (rng.hour == 9) & (rng.minute == 30)
|
| 162 |
+
result = ts[time(9, 30)]
|
| 163 |
+
expected = ts[mask]
|
| 164 |
+
result.index = result.index._with_freq(None)
|
| 165 |
+
tm.assert_series_equal(result, expected)
|
| 166 |
+
|
| 167 |
+
# ------------------------------------------------------------------
|
| 168 |
+
# Series with CategoricalIndex
|
| 169 |
+
|
| 170 |
+
def test_getitem_scalar_categorical_index(self):
|
| 171 |
+
cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
|
| 172 |
+
|
| 173 |
+
ser = Series([1, 2], index=cats)
|
| 174 |
+
|
| 175 |
+
expected = ser.iloc[0]
|
| 176 |
+
result = ser[cats[0]]
|
| 177 |
+
assert result == expected
|
| 178 |
+
|
| 179 |
+
def test_getitem_numeric_categorical_listlike_matches_scalar(self):
|
| 180 |
+
# GH#15470
|
| 181 |
+
ser = Series(["a", "b", "c"], index=pd.CategoricalIndex([2, 1, 0]))
|
| 182 |
+
|
| 183 |
+
# 0 is treated as a label
|
| 184 |
+
assert ser[0] == "c"
|
| 185 |
+
|
| 186 |
+
# the listlike analogue should also be treated as labels
|
| 187 |
+
res = ser[[0]]
|
| 188 |
+
expected = ser.iloc[-1:]
|
| 189 |
+
tm.assert_series_equal(res, expected)
|
| 190 |
+
|
| 191 |
+
res2 = ser[[0, 1, 2]]
|
| 192 |
+
tm.assert_series_equal(res2, ser.iloc[::-1])
|
| 193 |
+
|
| 194 |
+
def test_getitem_integer_categorical_not_positional(self):
|
| 195 |
+
# GH#14865
|
| 196 |
+
ser = Series(["a", "b", "c"], index=Index([1, 2, 3], dtype="category"))
|
| 197 |
+
assert ser.get(3) == "c"
|
| 198 |
+
assert ser[3] == "c"
|
| 199 |
+
|
| 200 |
+
def test_getitem_str_with_timedeltaindex(self):
|
| 201 |
+
rng = timedelta_range("1 day 10:11:12", freq="h", periods=500)
|
| 202 |
+
ser = Series(np.arange(len(rng)), index=rng)
|
| 203 |
+
|
| 204 |
+
key = "6 days, 23:11:12"
|
| 205 |
+
indexer = rng.get_loc(key)
|
| 206 |
+
assert indexer == 133
|
| 207 |
+
|
| 208 |
+
result = ser[key]
|
| 209 |
+
assert result == ser.iloc[133]
|
| 210 |
+
|
| 211 |
+
msg = r"^Timedelta\('50 days 00:00:00'\)$"
|
| 212 |
+
with pytest.raises(KeyError, match=msg):
|
| 213 |
+
rng.get_loc("50 days")
|
| 214 |
+
with pytest.raises(KeyError, match=msg):
|
| 215 |
+
ser["50 days"]
|
| 216 |
+
|
| 217 |
+
def test_getitem_bool_index_positional(self):
|
| 218 |
+
# GH#48653
|
| 219 |
+
ser = Series({True: 1, False: 0})
|
| 220 |
+
msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 221 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 222 |
+
result = ser[0]
|
| 223 |
+
assert result == 1
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
class TestSeriesGetitemSlices:
|
| 227 |
+
def test_getitem_partial_str_slice_with_datetimeindex(self):
|
| 228 |
+
# GH#34860
|
| 229 |
+
arr = date_range("1/1/2008", "1/1/2009")
|
| 230 |
+
ser = arr.to_series()
|
| 231 |
+
result = ser["2008"]
|
| 232 |
+
|
| 233 |
+
rng = date_range(start="2008-01-01", end="2008-12-31")
|
| 234 |
+
expected = Series(rng, index=rng)
|
| 235 |
+
|
| 236 |
+
tm.assert_series_equal(result, expected)
|
| 237 |
+
|
| 238 |
+
def test_getitem_slice_strings_with_datetimeindex(self):
|
| 239 |
+
idx = DatetimeIndex(
|
| 240 |
+
["1/1/2000", "1/2/2000", "1/2/2000", "1/3/2000", "1/4/2000"]
|
| 241 |
+
)
|
| 242 |
+
|
| 243 |
+
ts = Series(np.random.default_rng(2).standard_normal(len(idx)), index=idx)
|
| 244 |
+
|
| 245 |
+
result = ts["1/2/2000":]
|
| 246 |
+
expected = ts[1:]
|
| 247 |
+
tm.assert_series_equal(result, expected)
|
| 248 |
+
|
| 249 |
+
result = ts["1/2/2000":"1/3/2000"]
|
| 250 |
+
expected = ts[1:4]
|
| 251 |
+
tm.assert_series_equal(result, expected)
|
| 252 |
+
|
| 253 |
+
def test_getitem_partial_str_slice_with_timedeltaindex(self):
|
| 254 |
+
rng = timedelta_range("1 day 10:11:12", freq="h", periods=500)
|
| 255 |
+
ser = Series(np.arange(len(rng)), index=rng)
|
| 256 |
+
|
| 257 |
+
result = ser["5 day":"6 day"]
|
| 258 |
+
expected = ser.iloc[86:134]
|
| 259 |
+
tm.assert_series_equal(result, expected)
|
| 260 |
+
|
| 261 |
+
result = ser["5 day":]
|
| 262 |
+
expected = ser.iloc[86:]
|
| 263 |
+
tm.assert_series_equal(result, expected)
|
| 264 |
+
|
| 265 |
+
result = ser[:"6 day"]
|
| 266 |
+
expected = ser.iloc[:134]
|
| 267 |
+
tm.assert_series_equal(result, expected)
|
| 268 |
+
|
| 269 |
+
def test_getitem_partial_str_slice_high_reso_with_timedeltaindex(self):
|
| 270 |
+
# higher reso
|
| 271 |
+
rng = timedelta_range("1 day 10:11:12", freq="us", periods=2000)
|
| 272 |
+
ser = Series(np.arange(len(rng)), index=rng)
|
| 273 |
+
|
| 274 |
+
result = ser["1 day 10:11:12":]
|
| 275 |
+
expected = ser.iloc[0:]
|
| 276 |
+
tm.assert_series_equal(result, expected)
|
| 277 |
+
|
| 278 |
+
result = ser["1 day 10:11:12.001":]
|
| 279 |
+
expected = ser.iloc[1000:]
|
| 280 |
+
tm.assert_series_equal(result, expected)
|
| 281 |
+
|
| 282 |
+
result = ser["1 days, 10:11:12.001001"]
|
| 283 |
+
assert result == ser.iloc[1001]
|
| 284 |
+
|
| 285 |
+
def test_getitem_slice_2d(self, datetime_series):
|
| 286 |
+
# GH#30588 multi-dimensional indexing deprecated
|
| 287 |
+
with pytest.raises(ValueError, match="Multi-dimensional indexing"):
|
| 288 |
+
datetime_series[:, np.newaxis]
|
| 289 |
+
|
| 290 |
+
def test_getitem_median_slice_bug(self):
|
| 291 |
+
index = date_range("20090415", "20090519", freq="2B")
|
| 292 |
+
ser = Series(np.random.default_rng(2).standard_normal(13), index=index)
|
| 293 |
+
|
| 294 |
+
indexer = [slice(6, 7, None)]
|
| 295 |
+
msg = "Indexing with a single-item list"
|
| 296 |
+
with pytest.raises(ValueError, match=msg):
|
| 297 |
+
# GH#31299
|
| 298 |
+
ser[indexer]
|
| 299 |
+
# but we're OK with a single-element tuple
|
| 300 |
+
result = ser[(indexer[0],)]
|
| 301 |
+
expected = ser[indexer[0]]
|
| 302 |
+
tm.assert_series_equal(result, expected)
|
| 303 |
+
|
| 304 |
+
@pytest.mark.parametrize(
|
| 305 |
+
"slc, positions",
|
| 306 |
+
[
|
| 307 |
+
[slice(date(2018, 1, 1), None), [0, 1, 2]],
|
| 308 |
+
[slice(date(2019, 1, 2), None), [2]],
|
| 309 |
+
[slice(date(2020, 1, 1), None), []],
|
| 310 |
+
[slice(None, date(2020, 1, 1)), [0, 1, 2]],
|
| 311 |
+
[slice(None, date(2019, 1, 1)), [0]],
|
| 312 |
+
],
|
| 313 |
+
)
|
| 314 |
+
def test_getitem_slice_date(self, slc, positions):
|
| 315 |
+
# https://github.com/pandas-dev/pandas/issues/31501
|
| 316 |
+
ser = Series(
|
| 317 |
+
[0, 1, 2],
|
| 318 |
+
DatetimeIndex(["2019-01-01", "2019-01-01T06:00:00", "2019-01-02"]),
|
| 319 |
+
)
|
| 320 |
+
result = ser[slc]
|
| 321 |
+
expected = ser.take(positions)
|
| 322 |
+
tm.assert_series_equal(result, expected)
|
| 323 |
+
|
| 324 |
+
def test_getitem_slice_float_raises(self, datetime_series):
|
| 325 |
+
msg = (
|
| 326 |
+
"cannot do slice indexing on DatetimeIndex with these indexers "
|
| 327 |
+
r"\[{key}\] of type float"
|
| 328 |
+
)
|
| 329 |
+
with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):
|
| 330 |
+
datetime_series[4.0:10.0]
|
| 331 |
+
|
| 332 |
+
with pytest.raises(TypeError, match=msg.format(key=r"4\.5")):
|
| 333 |
+
datetime_series[4.5:10.0]
|
| 334 |
+
|
| 335 |
+
def test_getitem_slice_bug(self):
|
| 336 |
+
ser = Series(range(10), index=list(range(10)))
|
| 337 |
+
result = ser[-12:]
|
| 338 |
+
tm.assert_series_equal(result, ser)
|
| 339 |
+
|
| 340 |
+
result = ser[-7:]
|
| 341 |
+
tm.assert_series_equal(result, ser[3:])
|
| 342 |
+
|
| 343 |
+
result = ser[:-12]
|
| 344 |
+
tm.assert_series_equal(result, ser[:0])
|
| 345 |
+
|
| 346 |
+
def test_getitem_slice_integers(self):
|
| 347 |
+
ser = Series(
|
| 348 |
+
np.random.default_rng(2).standard_normal(8),
|
| 349 |
+
index=[2, 4, 6, 8, 10, 12, 14, 16],
|
| 350 |
+
)
|
| 351 |
+
|
| 352 |
+
result = ser[:4]
|
| 353 |
+
expected = Series(ser.values[:4], index=[2, 4, 6, 8])
|
| 354 |
+
tm.assert_series_equal(result, expected)
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
class TestSeriesGetitemListLike:
|
| 358 |
+
@pytest.mark.parametrize("box", [list, np.array, Index, Series])
|
| 359 |
+
def test_getitem_no_matches(self, box):
|
| 360 |
+
# GH#33462 we expect the same behavior for list/ndarray/Index/Series
|
| 361 |
+
ser = Series(["A", "B"])
|
| 362 |
+
|
| 363 |
+
key = Series(["C"], dtype=object)
|
| 364 |
+
key = box(key)
|
| 365 |
+
|
| 366 |
+
msg = (
|
| 367 |
+
r"None of \[Index\(\['C'\], dtype='object|string'\)\] are in the \[index\]"
|
| 368 |
+
)
|
| 369 |
+
with pytest.raises(KeyError, match=msg):
|
| 370 |
+
ser[key]
|
| 371 |
+
|
| 372 |
+
def test_getitem_intlist_intindex_periodvalues(self):
|
| 373 |
+
ser = Series(period_range("2000-01-01", periods=10, freq="D"))
|
| 374 |
+
|
| 375 |
+
result = ser[[2, 4]]
|
| 376 |
+
exp = Series(
|
| 377 |
+
[pd.Period("2000-01-03", freq="D"), pd.Period("2000-01-05", freq="D")],
|
| 378 |
+
index=[2, 4],
|
| 379 |
+
dtype="Period[D]",
|
| 380 |
+
)
|
| 381 |
+
tm.assert_series_equal(result, exp)
|
| 382 |
+
assert result.dtype == "Period[D]"
|
| 383 |
+
|
| 384 |
+
@pytest.mark.parametrize("box", [list, np.array, Index])
|
| 385 |
+
def test_getitem_intlist_intervalindex_non_int(self, box):
|
| 386 |
+
# GH#33404 fall back to positional since ints are unambiguous
|
| 387 |
+
dti = date_range("2000-01-03", periods=3)._with_freq(None)
|
| 388 |
+
ii = pd.IntervalIndex.from_breaks(dti)
|
| 389 |
+
ser = Series(range(len(ii)), index=ii)
|
| 390 |
+
|
| 391 |
+
expected = ser.iloc[:1]
|
| 392 |
+
key = box([0])
|
| 393 |
+
msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 394 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 395 |
+
result = ser[key]
|
| 396 |
+
tm.assert_series_equal(result, expected)
|
| 397 |
+
|
| 398 |
+
@pytest.mark.parametrize("box", [list, np.array, Index])
|
| 399 |
+
@pytest.mark.parametrize("dtype", [np.int64, np.float64, np.uint64])
|
| 400 |
+
def test_getitem_intlist_multiindex_numeric_level(self, dtype, box):
|
| 401 |
+
# GH#33404 do _not_ fall back to positional since ints are ambiguous
|
| 402 |
+
idx = Index(range(4)).astype(dtype)
|
| 403 |
+
dti = date_range("2000-01-03", periods=3)
|
| 404 |
+
mi = pd.MultiIndex.from_product([idx, dti])
|
| 405 |
+
ser = Series(range(len(mi))[::-1], index=mi)
|
| 406 |
+
|
| 407 |
+
key = box([5])
|
| 408 |
+
with pytest.raises(KeyError, match="5"):
|
| 409 |
+
ser[key]
|
| 410 |
+
|
| 411 |
+
def test_getitem_uint_array_key(self, any_unsigned_int_numpy_dtype):
|
| 412 |
+
# GH #37218
|
| 413 |
+
ser = Series([1, 2, 3])
|
| 414 |
+
key = np.array([4], dtype=any_unsigned_int_numpy_dtype)
|
| 415 |
+
|
| 416 |
+
with pytest.raises(KeyError, match="4"):
|
| 417 |
+
ser[key]
|
| 418 |
+
with pytest.raises(KeyError, match="4"):
|
| 419 |
+
ser.loc[key]
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
class TestGetitemBooleanMask:
    """Tests for Series.__getitem__/__setitem__ with boolean-mask keys."""

    def test_getitem_boolean(self, string_series):
        # A plain Python list of booleans behaves like a boolean-array mask.
        ser = string_series
        mask = ser > ser.median()

        # passing list is OK
        result = ser[list(mask)]
        expected = ser[mask]
        tm.assert_series_equal(result, expected)
        tm.assert_index_equal(result.index, ser.index[mask])

    def test_getitem_boolean_empty(self):
        # Masking an empty Series keeps dtype and index name.
        ser = Series([], dtype=np.int64)
        ser.index.name = "index_name"
        ser = ser[ser.isna()]
        assert ser.index.name == "index_name"
        assert ser.dtype == np.int64

        # GH#5877
        # indexing with empty series
        ser = Series(["A", "B"], dtype=object)
        expected = Series(dtype=object, index=Index([], dtype="int64"))
        result = ser[Series([], dtype=object)]
        tm.assert_series_equal(result, expected)

        # invalid because of the boolean indexer
        # that's empty or not-aligned
        msg = (
            r"Unalignable boolean Series provided as indexer \(index of "
            r"the boolean Series and of the indexed object do not match"
        )
        with pytest.raises(IndexingError, match=msg):
            ser[Series([], dtype=bool)]

        with pytest.raises(IndexingError, match=msg):
            ser[Series([True], dtype=bool)]

    def test_getitem_boolean_object(self, string_series):
        # using column from DataFrame
        # An object-dtype mask of Python bools works like a bool mask,
        # but NaNs inside it must raise.

        ser = string_series
        mask = ser > ser.median()
        omask = mask.astype(object)

        # getitem
        result = ser[omask]
        expected = ser[mask]
        tm.assert_series_equal(result, expected)

        # setitem
        s2 = ser.copy()
        cop = ser.copy()
        cop[omask] = 5
        s2[mask] = 5
        tm.assert_series_equal(cop, s2)

        # nans raise exception
        omask[5:10] = np.nan
        msg = "Cannot mask with non-boolean array containing NA / NaN values"
        with pytest.raises(ValueError, match=msg):
            ser[omask]
        with pytest.raises(ValueError, match=msg):
            ser[omask] = 5

    def test_getitem_boolean_dt64_copies(self):
        # GH#36210
        # Boolean-mask getitem must return a copy, not a view, for both
        # datetime64tz and plain numeric data (checked via ndarray .base).
        dti = date_range("2016-01-01", periods=4, tz="US/Pacific")
        key = np.array([True, True, False, False])

        ser = Series(dti._data)

        res = ser[key]
        assert res._values._ndarray.base is None

        # compare with numeric case for reference
        ser2 = Series(range(4))
        res2 = ser2[key]
        assert res2._values.base is None

    def test_getitem_boolean_corner(self, datetime_series):
        # A boolean Series whose index does not align with the target raises.
        ts = datetime_series
        mask_shifted = ts.shift(1, freq=BDay()) > ts.median()

        msg = (
            r"Unalignable boolean Series provided as indexer \(index of "
            r"the boolean Series and of the indexed object do not match"
        )
        with pytest.raises(IndexingError, match=msg):
            ts[mask_shifted]

        with pytest.raises(IndexingError, match=msg):
            ts.loc[mask_shifted]

    def test_getitem_boolean_different_order(self, string_series):
        # A boolean mask is aligned by label first, so the order of the
        # mask's index does not matter.
        ordered = string_series.sort_values()

        sel = string_series[ordered > 0]
        exp = string_series[string_series > 0]
        tm.assert_series_equal(sel, exp)

    def test_getitem_boolean_contiguous_preserve_freq(self):
        # A contiguous True-run preserves the DatetimeIndex freq;
        # a non-contiguous one drops it.
        rng = date_range("1/1/2000", "3/1/2000", freq="B")

        mask = np.zeros(len(rng), dtype=bool)
        mask[10:20] = True

        masked = rng[mask]
        expected = rng[10:20]
        assert expected.freq == rng.freq
        tm.assert_index_equal(masked, expected)

        mask[22] = True
        masked = rng[mask]
        assert masked.freq is None
class TestGetitemCallable:
    def test_getitem_callable(self):
        # GH#12533 - a callable key is evaluated against the Series and
        # whatever it returns is then used as the actual indexer.
        ser = Series(4, index=list("ABCD"))

        # callable returning a scalar label
        assert ser[lambda x: "A"] == ser.loc["A"]

        # callable returning a list of labels
        tm.assert_series_equal(ser[lambda x: ["A", "B"]], ser.loc[["A", "B"]])

        # callable returning a boolean mask
        tm.assert_series_equal(
            ser[lambda x: [True, False, True, True]], ser.iloc[[0, 2, 3]]
        )
def test_getitem_generator(string_series):
|
| 555 |
+
gen = (x > 0 for x in string_series)
|
| 556 |
+
result = string_series[gen]
|
| 557 |
+
result2 = string_series[iter(string_series > 0)]
|
| 558 |
+
expected = string_series[string_series > 0]
|
| 559 |
+
tm.assert_series_equal(result, expected)
|
| 560 |
+
tm.assert_series_equal(result2, expected)
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
@pytest.mark.parametrize(
    "series",
    [
        Series([0, 1]),
        Series(date_range("2012-01-01", periods=2)),
        Series(date_range("2012-01-01", periods=2, tz="CET")),
    ],
)
def test_getitem_ndim_deprecated(series):
    # Multi-dimensional keys such as ser[:, None] are rejected with a
    # ValueError for numeric, datetime and tz-aware Series alike.
    with pytest.raises(ValueError, match="Multi-dimensional indexing"):
        series[:, None]
def test_getitem_multilevel_scalar_slice_not_implemented(
    multiindex_year_month_day_dataframe_random_data,
):
    # not implementing this for now
    # A (scalar, slice) tuple key on a MultiIndexed Series raises TypeError
    # whose message contains the repr of the offending key.
    ser = multiindex_year_month_day_dataframe_random_data["A"]

    with pytest.raises(TypeError, match=r"\(2000, slice\(3, 4, None\)\)"):
        ser[2000, 3:4]
def test_getitem_dataframe_raises():
    # A boolean DataFrame is not a valid key for a Series.
    labels = list(range(10))
    ser = Series(10, index=labels)
    frame = DataFrame(labels, index=labels)

    msg = (
        "Indexing a Series with DataFrame is not supported, "
        "use the appropriate DataFrame column"
    )
    with pytest.raises(TypeError, match=msg):
        ser[frame > 5]
def test_getitem_assignment_series_alignment():
    # https://github.com/pandas-dev/pandas/issues/37427
    # with getitem, when assigning with a Series, it is not first aligned:
    # the RHS values are taken in order, not matched by label.
    ser = Series(range(10))
    positions = np.array([2, 4, 9])
    ser[positions] = Series([10, 11, 12])
    tm.assert_series_equal(ser, Series([0, 1, 10, 3, 11, 5, 6, 7, 8, 12]))
def test_getitem_duplicate_index_mistyped_key_raises_keyerror():
    # GH#29189 float_index.get_loc(None) should raise KeyError, not TypeError
    ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])

    lookups = (
        lambda: ser[None],
        lambda: ser.index.get_loc(None),
        lambda: ser.index._engine.get_loc(None),
    )
    for lookup in lookups:
        with pytest.raises(KeyError, match="None"):
            lookup()
def test_getitem_1tuple_slice_without_multiindex():
    # A 1-tuple containing a slice is unwrapped and applied as that slice,
    # even when the index is not a MultiIndex.
    ser = Series(range(5))
    tm.assert_series_equal(ser[(slice(3),)], ser[slice(3)])
def test_getitem_preserve_name(datetime_series):
|
| 633 |
+
result = datetime_series[datetime_series > 0]
|
| 634 |
+
assert result.name == datetime_series.name
|
| 635 |
+
|
| 636 |
+
msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 637 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 638 |
+
result = datetime_series[[0, 2, 4]]
|
| 639 |
+
assert result.name == datetime_series.name
|
| 640 |
+
|
| 641 |
+
result = datetime_series[5:10]
|
| 642 |
+
assert result.name == datetime_series.name
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
def test_getitem_with_integer_labels():
    # integer indexes, be careful: a list key containing labels not in the
    # index raises KeyError instead of falling back to positions.
    ser = Series(
        np.random.default_rng(2).standard_normal(10), index=list(range(0, 20, 2))
    )
    for key in ([0, 2, 5, 7, 8], np.array([0, 2, 5, 7, 8])):
        with pytest.raises(KeyError, match="not in index"):
            ser[key]
def test_getitem_missing(datetime_series):
|
| 660 |
+
# missing
|
| 661 |
+
d = datetime_series.index[0] - BDay()
|
| 662 |
+
msg = r"Timestamp\('1999-12-31 00:00:00'\)"
|
| 663 |
+
with pytest.raises(KeyError, match=msg):
|
| 664 |
+
datetime_series[d]
|
| 665 |
+
|
| 666 |
+
|
| 667 |
+
def test_getitem_fancy(string_series, object_series):
|
| 668 |
+
msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 669 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 670 |
+
slice1 = string_series[[1, 2, 3]]
|
| 671 |
+
slice2 = object_series[[1, 2, 3]]
|
| 672 |
+
assert string_series.index[2] == slice1.index[1]
|
| 673 |
+
assert object_series.index[2] == slice2.index[1]
|
| 674 |
+
assert string_series.iloc[2] == slice1.iloc[1]
|
| 675 |
+
assert object_series.iloc[2] == slice2.iloc[1]
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
def test_getitem_box_float64(datetime_series):
|
| 679 |
+
msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 680 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 681 |
+
value = datetime_series[5]
|
| 682 |
+
assert isinstance(value, np.float64)
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
def test_getitem_unordered_dup():
    # A label that is unique, even among other duplicated labels,
    # still returns a scalar.
    obj = Series(range(5), index=["c", "a", "a", "b", "b"])
    first = obj["c"]
    assert is_scalar(first)
    assert first == 0
def test_getitem_dups():
    # Duplicate labels return every matching row as a Series.
    ser = Series(range(5), index=["A", "A", "B", "C", "C"], dtype=np.int64)
    got = ser["C"]
    tm.assert_series_equal(got, Series([3, 4], index=["C", "C"], dtype=np.int64))
def test_getitem_categorical_str():
    # GH#31765 - a string key on a CategoricalIndex selects every match
    ser = Series(range(5), index=Categorical(["a", "b", "c", "a", "b"]))
    tm.assert_series_equal(ser["a"], ser.iloc[[0, 3]])
def test_slice_can_reorder_not_uniquely_indexed():
    # Reversing via a step slice is fine even with duplicate labels.
    ser = Series(1, index=["a", "a", "b", "b", "c"])
    ser[::-1]  # it works!
@pytest.mark.parametrize("index_vals", ["aabcd", "aadcb"])
|
| 712 |
+
def test_duplicated_index_getitem_positional_indexer(index_vals):
|
| 713 |
+
# GH 11747
|
| 714 |
+
s = Series(range(5), index=list(index_vals))
|
| 715 |
+
|
| 716 |
+
msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 717 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 718 |
+
result = s[3]
|
| 719 |
+
assert result == 3
|
| 720 |
+
|
| 721 |
+
|
| 722 |
+
class TestGetitemDeprecatedIndexers:
    """dict and set keys were deprecated (GH#42825) and raise since 2.0."""

    @pytest.mark.parametrize("key", [{1}, {1: 1}])
    def test_getitem_dict_and_set_deprecated(self, key):
        # GH#42825 enforced in 2.0
        ser = Series([1, 2, 3])
        with pytest.raises(TypeError, match="as an indexer is not supported"):
            ser[key]

    @pytest.mark.parametrize("key", [{1}, {1: 1}])
    def test_setitem_dict_and_set_disallowed(self, key):
        # GH#42825 enforced in 2.0
        ser = Series([1, 2, 3])
        with pytest.raises(TypeError, match="as an indexer is not supported"):
            ser[key] = 1
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_indexing.py
ADDED
|
@@ -0,0 +1,518 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" test get/set & misc """
|
| 2 |
+
from datetime import timedelta
|
| 3 |
+
import re
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pytest
|
| 7 |
+
|
| 8 |
+
from pandas.errors import IndexingError
|
| 9 |
+
|
| 10 |
+
from pandas import (
|
| 11 |
+
NA,
|
| 12 |
+
DataFrame,
|
| 13 |
+
Index,
|
| 14 |
+
IndexSlice,
|
| 15 |
+
MultiIndex,
|
| 16 |
+
NaT,
|
| 17 |
+
Series,
|
| 18 |
+
Timedelta,
|
| 19 |
+
Timestamp,
|
| 20 |
+
concat,
|
| 21 |
+
date_range,
|
| 22 |
+
isna,
|
| 23 |
+
period_range,
|
| 24 |
+
timedelta_range,
|
| 25 |
+
)
|
| 26 |
+
import pandas._testing as tm
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def test_basic_indexing():
|
| 30 |
+
s = Series(
|
| 31 |
+
np.random.default_rng(2).standard_normal(5), index=["a", "b", "a", "a", "b"]
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
warn_msg = "Series.__[sg]etitem__ treating keys as positions is deprecated"
|
| 35 |
+
msg = "index 5 is out of bounds for axis 0 with size 5"
|
| 36 |
+
with pytest.raises(IndexError, match=msg):
|
| 37 |
+
with tm.assert_produces_warning(FutureWarning, match=warn_msg):
|
| 38 |
+
s[5]
|
| 39 |
+
with pytest.raises(IndexError, match=msg):
|
| 40 |
+
with tm.assert_produces_warning(FutureWarning, match=warn_msg):
|
| 41 |
+
s[5] = 0
|
| 42 |
+
|
| 43 |
+
with pytest.raises(KeyError, match=r"^'c'$"):
|
| 44 |
+
s["c"]
|
| 45 |
+
|
| 46 |
+
s = s.sort_index()
|
| 47 |
+
|
| 48 |
+
with pytest.raises(IndexError, match=msg):
|
| 49 |
+
with tm.assert_produces_warning(FutureWarning, match=warn_msg):
|
| 50 |
+
s[5]
|
| 51 |
+
msg = r"index 5 is out of bounds for axis (0|1) with size 5|^5$"
|
| 52 |
+
with pytest.raises(IndexError, match=msg):
|
| 53 |
+
with tm.assert_produces_warning(FutureWarning, match=warn_msg):
|
| 54 |
+
s[5] = 0
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def test_getitem_numeric_should_not_fallback_to_positional(any_numeric_dtype):
|
| 58 |
+
# GH51053
|
| 59 |
+
dtype = any_numeric_dtype
|
| 60 |
+
idx = Index([1, 0, 1], dtype=dtype)
|
| 61 |
+
ser = Series(range(3), index=idx)
|
| 62 |
+
result = ser[1]
|
| 63 |
+
expected = Series([0, 2], index=Index([1, 1], dtype=dtype))
|
| 64 |
+
tm.assert_series_equal(result, expected, check_exact=True)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def test_setitem_numeric_should_not_fallback_to_positional(any_numeric_dtype):
|
| 68 |
+
# GH51053
|
| 69 |
+
dtype = any_numeric_dtype
|
| 70 |
+
idx = Index([1, 0, 1], dtype=dtype)
|
| 71 |
+
ser = Series(range(3), index=idx)
|
| 72 |
+
ser[1] = 10
|
| 73 |
+
expected = Series([10, 1, 10], index=idx)
|
| 74 |
+
tm.assert_series_equal(ser, expected, check_exact=True)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def test_basic_getitem_with_labels(datetime_series):
|
| 78 |
+
indices = datetime_series.index[[5, 10, 15]]
|
| 79 |
+
|
| 80 |
+
result = datetime_series[indices]
|
| 81 |
+
expected = datetime_series.reindex(indices)
|
| 82 |
+
tm.assert_series_equal(result, expected)
|
| 83 |
+
|
| 84 |
+
result = datetime_series[indices[0] : indices[2]]
|
| 85 |
+
expected = datetime_series.loc[indices[0] : indices[2]]
|
| 86 |
+
tm.assert_series_equal(result, expected)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def test_basic_getitem_dt64tz_values():
    # GH12089
    # with tz for values: scalar access boxes a tz-aware Timestamp through
    # .loc, .iloc and plain __getitem__ alike.
    ser = Series(
        date_range("2011-01-01", periods=3, tz="US/Eastern"), index=["a", "b", "c"]
    )
    want = Timestamp("2011-01-01", tz="US/Eastern")
    for got in (ser.loc["a"], ser.iloc[0], ser["a"]):
        assert got == want
def test_getitem_setitem_ellipsis(using_copy_on_write, warn_copy_on_write):
    # Ellipsis selects the whole Series; whether an assignment through the
    # result propagates back depends on copy-on-write mode.
    s = Series(np.random.default_rng(2).standard_normal(10))

    result = s[...]
    tm.assert_series_equal(result, s)

    with tm.assert_cow_warning(warn_copy_on_write):
        s[...] = 5
    if not using_copy_on_write:
        # without CoW, `result` is a view of `s` and sees the write
        assert (result == 5).all()
@pytest.mark.parametrize(
    "result_1, duplicate_item, expected_1",
    [
        [
            Series({1: 12, 2: [1, 2, 2, 3]}),
            Series({1: 313}),
            Series({1: 12}, dtype=object),
        ],
        [
            Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
            Series({1: [1, 2, 3]}),
            Series({1: [1, 2, 3]}),
        ],
    ],
)
def test_getitem_with_duplicates_indices(result_1, duplicate_item, expected_1):
    # GH 17610
    # After appending a duplicated label, selecting it returns every match;
    # a still-unique label keeps returning a scalar.
    combined = result_1._append(duplicate_item)
    tm.assert_series_equal(combined[1], expected_1._append(duplicate_item))
    assert combined[2] == result_1[2]
def test_getitem_setitem_integers():
    # caused bug without test
    ser = Series([1, 2, 3], ["a", "b", "c"])

    assert ser.iloc[0] == ser["a"]
    ser.iloc[0] = 5
    tm.assert_almost_equal(ser["a"], 5)
def test_series_box_timestamp():
|
| 149 |
+
rng = date_range("20090415", "20090519", freq="B")
|
| 150 |
+
ser = Series(rng)
|
| 151 |
+
assert isinstance(ser[0], Timestamp)
|
| 152 |
+
assert isinstance(ser.at[1], Timestamp)
|
| 153 |
+
assert isinstance(ser.iat[2], Timestamp)
|
| 154 |
+
assert isinstance(ser.loc[3], Timestamp)
|
| 155 |
+
assert isinstance(ser.iloc[4], Timestamp)
|
| 156 |
+
|
| 157 |
+
ser = Series(rng, index=rng)
|
| 158 |
+
msg = "Series.__getitem__ treating keys as positions is deprecated"
|
| 159 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 160 |
+
assert isinstance(ser[0], Timestamp)
|
| 161 |
+
assert isinstance(ser.at[rng[1]], Timestamp)
|
| 162 |
+
assert isinstance(ser.iat[2], Timestamp)
|
| 163 |
+
assert isinstance(ser.loc[rng[3]], Timestamp)
|
| 164 |
+
assert isinstance(ser.iloc[4], Timestamp)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def test_series_box_timedelta():
    # every scalar-access style boxes a Timedelta
    tds = timedelta_range("1 day 1 s", periods=5, freq="h")
    ser = Series(tds)
    for got in (ser[0], ser.at[1], ser.iat[2], ser.loc[3], ser.iloc[4]):
        assert isinstance(got, Timedelta)
def test_getitem_ambiguous_keyerror(indexer_sl):
|
| 178 |
+
ser = Series(range(10), index=list(range(0, 20, 2)))
|
| 179 |
+
with pytest.raises(KeyError, match=r"^1$"):
|
| 180 |
+
indexer_sl(ser)[1]
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def test_getitem_dups_with_missing(indexer_sl):
|
| 184 |
+
# breaks reindex, so need to use .loc internally
|
| 185 |
+
# GH 4246
|
| 186 |
+
ser = Series([1, 2, 3, 4], ["foo", "bar", "foo", "bah"])
|
| 187 |
+
with pytest.raises(KeyError, match=re.escape("['bam'] not in index")):
|
| 188 |
+
indexer_sl(ser)[["foo", "bar", "bah", "bam"]]
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def test_setitem_ambiguous_keyerror(indexer_sl):
|
| 192 |
+
s = Series(range(10), index=list(range(0, 20, 2)))
|
| 193 |
+
|
| 194 |
+
# equivalent of an append
|
| 195 |
+
s2 = s.copy()
|
| 196 |
+
indexer_sl(s2)[1] = 5
|
| 197 |
+
expected = concat([s, Series([5], index=[1])])
|
| 198 |
+
tm.assert_series_equal(s2, expected)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def test_setitem(datetime_series):
    # label-based, positional-list and positional-scalar setters all
    # write NaN in place
    datetime_series[datetime_series.index[5]] = np.nan
    datetime_series.iloc[[1, 2, 17]] = np.nan
    datetime_series.iloc[6] = np.nan
    assert np.isnan(datetime_series.iloc[6])
    assert np.isnan(datetime_series.iloc[2])
    # a boolean-mask setter overwrites the NaNs just written
    datetime_series[np.isnan(datetime_series)] = 5
    assert not np.isnan(datetime_series.iloc[2])
def test_setslice(datetime_series):
|
| 212 |
+
sl = datetime_series[5:20]
|
| 213 |
+
assert len(sl) == len(sl.index)
|
| 214 |
+
assert sl.index.is_unique is True
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def test_basic_getitem_setitem_corner(datetime_series):
    """Corner cases: tuple keys, weird list keys, 1-tuple-of-slice keys."""
    # invalid tuples, e.g. td.ts[:, None] vs. td.ts[:, 2]
    msg = "key of type tuple not found and not a MultiIndex"
    with pytest.raises(KeyError, match=msg):
        datetime_series[:, 2]
    with pytest.raises(KeyError, match=msg):
        datetime_series[:, 2] = 2

    # weird lists. [slice(0, 5)] raises but not two slices
    msg = "Indexing with a single-item list"
    with pytest.raises(ValueError, match=msg):
        # GH#31299
        datetime_series[[slice(None, 5)]]

    # but we're OK with a single-element tuple
    result = datetime_series[(slice(None, 5),)]
    expected = datetime_series[:5]
    tm.assert_series_equal(result, expected)

    # OK
    # a list containing an unhashable element raises TypeError on get and set
    msg = r"unhashable type(: 'slice')?"
    with pytest.raises(TypeError, match=msg):
        datetime_series[[5, [None, None]]]
    with pytest.raises(TypeError, match=msg):
        datetime_series[[5, [None, None]]] = 2
def test_slice(string_series, object_series, using_copy_on_write, warn_copy_on_write):
    # Positional slices exclude the stop position and, without CoW,
    # return views into the parent Series.
    original = string_series.copy()
    numSlice = string_series[10:20]
    numSliceEnd = string_series[-10:]
    objSlice = object_series[10:20]

    assert string_series.index[9] not in numSlice.index
    assert object_series.index[9] not in objSlice.index

    assert len(numSlice) == len(numSlice.index)
    assert string_series[numSlice.index[0]] == numSlice[numSlice.index[0]]

    assert numSlice.index[1] == string_series.index[11]
    tm.assert_numpy_array_equal(np.array(numSliceEnd), np.array(string_series)[-10:])

    # Test return view.
    sl = string_series[10:20]
    with tm.assert_cow_warning(warn_copy_on_write):
        sl[:] = 0

    if using_copy_on_write:
        # Doesn't modify parent (CoW)
        tm.assert_series_equal(string_series, original)
    else:
        # legacy behavior: the slice was a view, so the parent changed too
        assert (string_series[10:20] == 0).all()
def test_timedelta_assignment():
    # GH 8209 - enlargement with a timedelta coerces to Timedelta values
    ser = Series([], dtype=object)
    ser.loc["B"] = timedelta(1)
    tm.assert_series_equal(ser, Series(Timedelta("1 days"), index=["B"]))

    # reindexing introduces a missing slot ...
    ser = ser.reindex(ser.index.insert(0, "A"))
    tm.assert_series_equal(
        ser, Series([np.nan, Timedelta("1 days")], index=["A", "B"])
    )

    # ... which a .loc assignment fills back in
    ser.loc["A"] = timedelta(1)
    tm.assert_series_equal(ser, Series(Timedelta("1 days"), index=["A", "B"]))
def test_underlying_data_conversion(using_copy_on_write):
    # GH 4080
    # Chained assignment through df["val"].update(): under CoW it is a
    # no-op (with an error raised via the helper), otherwise it mutates
    # the frame in place after a FutureWarning.
    df = DataFrame({c: [1, 2, 3] for c in ["a", "b", "c"]})
    return_value = df.set_index(["a", "b", "c"], inplace=True)
    assert return_value is None
    s = Series([1], index=[(2, 2, 2)])
    df["val"] = 0
    df_original = df.copy()
    df  # no-op reference kept from the original test

    if using_copy_on_write:
        with tm.raises_chained_assignment_error():
            df["val"].update(s)
        expected = df_original
    else:
        with tm.assert_produces_warning(FutureWarning, match="inplace method"):
            df["val"].update(s)
        expected = DataFrame(
            {"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3], "val": [0, 1, 0]}
        )
        return_value = expected.set_index(["a", "b", "c"], inplace=True)
        assert return_value is None
    tm.assert_frame_equal(df, expected)
def test_preserve_refs(datetime_series):
    # fancy indexing with iloc returns a copy: mutating the result must
    # not write back into the parent Series
    seq = datetime_series.iloc[[5, 10, 15]]
    seq.iloc[1] = np.nan
    assert not np.isnan(datetime_series.iloc[10])
def test_multilevel_preserve_name(lexsorted_two_level_string_multiindex, indexer_sl):
|
| 317 |
+
index = lexsorted_two_level_string_multiindex
|
| 318 |
+
ser = Series(
|
| 319 |
+
np.random.default_rng(2).standard_normal(len(index)), index=index, name="sth"
|
| 320 |
+
)
|
| 321 |
+
|
| 322 |
+
result = indexer_sl(ser)["foo"]
|
| 323 |
+
assert result.name == ser.name
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
# miscellaneous methods
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
@pytest.mark.parametrize(
    "index",
    [
        date_range("2014-01-01", periods=20, freq="MS"),
        period_range("2014-01", periods=20, freq="M"),
        timedelta_range("0", periods=20, freq="h"),
    ],
)
def test_slice_with_negative_step(index):
    # Label-based negative-step slices (given as labels or their string
    # representations) must match their positional twins.
    ser = Series(np.arange(20), index)
    SLC = IndexSlice

    key9_variants = [str(index[9]), index[9]]
    key13_variants = [str(index[13]), index[13]]

    for key in key9_variants:
        tm.assert_indexing_slices_equivalent(ser, SLC[key::-1], SLC[9::-1])
        tm.assert_indexing_slices_equivalent(ser, SLC[:key:-1], SLC[:8:-1])

        for key2 in key13_variants:
            tm.assert_indexing_slices_equivalent(ser, SLC[key2:key:-1], SLC[13:8:-1])
            tm.assert_indexing_slices_equivalent(ser, SLC[key:key2:-1], SLC[0:0:-1])
def test_tuple_index():
    # GH 35534 - Selecting values when a Series has an Index of tuples
    ser = Series([1, 2], index=[("a",), ("b",)])
    assert ser[("a",)] == 1
    assert ser[("b",)] == 2
    ser[("b",)] = 3
    assert ser[("b",)] == 3
def test_frozenset_index():
    # GH35747 - Selecting values when a Series has an Index of frozenset
    key_a, key_b = frozenset("a"), frozenset("b")
    ser = Series([1, 2], index=[key_a, key_b])
    assert ser[key_a] == 1
    assert ser[key_b] == 2
    ser[key_b] = 3
    assert ser[key_b] == 3
def test_loc_setitem_all_false_indexer():
    # GH#45778 - an all-False boolean mask makes the assignment a no-op
    ser = Series([1, 2], index=["a", "b"])
    snapshot = ser.copy()
    ser.loc[ser > 100] = Series([6, 7], index=["a", "b"])
    tm.assert_series_equal(ser, snapshot)
def test_loc_boolean_indexer_non_matching_index():
    # GH#46551 - NA entries in a BooleanArray mask drop the row
    ser = Series([1])
    got = ser.loc[Series([NA, False], dtype="boolean")]
    tm.assert_series_equal(got, Series([], dtype="int64"))
def test_loc_boolean_indexer_miss_matching_index():
    # GH#46551 - a boolean mask whose index does not align raises
    ser = Series([1])
    mask = Series([NA, False], dtype="boolean", index=[1, 2])
    with pytest.raises(IndexingError, match="Unalignable"):
        ser.loc[mask]
def test_loc_setitem_nested_data_enlargement():
    # GH#48614 - enlargement works when the values are themselves DataFrames
    inner = DataFrame({"a": [1]})
    ser = Series({"label": inner})
    ser.loc["new_label"] = inner
    tm.assert_series_equal(ser, Series({"label": inner, "new_label": inner}))
def test_loc_ea_numeric_index_oob_slice_end():
    # GH#50161 - a slice end past the last label is clipped, not an error,
    # on a masked (Int64) index
    ser = Series(1, index=Index([0, 1, 2], dtype="Int64"))
    got = ser.loc[2:3]
    tm.assert_series_equal(got, Series(1, index=Index([2], dtype="Int64")))
def test_getitem_bool_int_key():
    # GH#48653 - the int 0 is not treated as the bool label False
    ser = Series({True: 1, False: 0})

    with pytest.raises(KeyError, match="0"):
        ser.loc[0]
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
@pytest.mark.parametrize("val", [{}, {"b": "x"}])
@pytest.mark.parametrize("indexer", [[], [False, False], slice(0, -1), np.array([])])
def test_setitem_empty_indexer(indexer, val):
    # GH#45981 - assignment through an empty indexer is a no-op
    df = DataFrame({"a": [1, 2], **val})
    before = df.copy()

    df.loc[indexer] = 1.5

    tm.assert_frame_equal(df, before)
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
class TestDeprecatedIndexers:
    """Dict and set indexers were deprecated (GH#42825) and disallowed in 2.0."""

    @pytest.mark.parametrize("key", [{1}, {1: 1}])
    def test_getitem_dict_and_set_deprecated(self, key):
        # GH#42825 enforced in 2.0
        obj = Series([1, 2])
        with pytest.raises(TypeError, match="as an indexer is not supported"):
            obj.loc[key]

    @pytest.mark.parametrize("key", [{1}, {1: 1}, ({1}, 2), ({1: 1}, 2)])
    def test_getitem_dict_and_set_deprecated_multiindex(self, key):
        # GH#42825 enforced in 2.0
        obj = Series([1, 2], index=MultiIndex.from_tuples([(1, 2), (3, 4)]))
        with pytest.raises(TypeError, match="as an indexer is not supported"):
            obj.loc[key]

    @pytest.mark.parametrize("key", [{1}, {1: 1}])
    def test_setitem_dict_and_set_disallowed(self, key):
        # GH#42825 enforced in 2.0
        obj = Series([1, 2])
        with pytest.raises(TypeError, match="as an indexer is not supported"):
            obj.loc[key] = 1

    @pytest.mark.parametrize("key", [{1}, {1: 1}, ({1}, 2), ({1: 1}, 2)])
    def test_setitem_dict_and_set_disallowed_multiindex(self, key):
        # GH#42825 enforced in 2.0
        obj = Series([1, 2], index=MultiIndex.from_tuples([(1, 2), (3, 4)]))
        with pytest.raises(TypeError, match="as an indexer is not supported"):
            obj.loc[key] = 1
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
class TestSetitemValidation:
    # This is adapted from pandas/tests/arrays/masked/test_indexing.py
    # but checks for warnings instead of errors.
    def _check_setitem_invalid(self, ser, invalid, indexer, warn):
        """Assert that setting ``invalid`` emits ``warn`` on every setitem path.

        Exercises ``ser[indexer]``, ``ser.iloc[indexer]``, ``ser.loc[indexer]``
        and full-slice assignment; ``ser`` is restored from a copy between
        paths so each check starts from the original data.
        """
        msg = "Setting an item of incompatible dtype is deprecated"
        msg = re.escape(msg)

        orig_ser = ser.copy()

        with tm.assert_produces_warning(warn, match=msg):
            ser[indexer] = invalid
        ser = orig_ser.copy()

        with tm.assert_produces_warning(warn, match=msg):
            ser.iloc[indexer] = invalid
        ser = orig_ser.copy()

        with tm.assert_produces_warning(warn, match=msg):
            ser.loc[indexer] = invalid
        ser = orig_ser.copy()

        with tm.assert_produces_warning(warn, match=msg):
            ser[:] = invalid

    # Scalars that are incompatible with bool/int/float Series dtypes.
    _invalid_scalars = [
        1 + 2j,
        "True",
        "1",
        "1.0",
        NaT,
        np.datetime64("NaT"),
        np.timedelta64("NaT"),
    ]
    # Positional, list, slice, boolean-mask and full-slice indexers.
    _indexers = [0, [0], slice(0, 1), [True, False, False], slice(None, None, None)]

    @pytest.mark.parametrize(
        "invalid", _invalid_scalars + [1, 1.0, np.int64(1), np.float64(1)]
    )
    @pytest.mark.parametrize("indexer", _indexers)
    def test_setitem_validation_scalar_bool(self, invalid, indexer):
        # Any non-bool scalar set into a bool Series should warn.
        ser = Series([True, False, False], dtype="bool")
        self._check_setitem_invalid(ser, invalid, indexer, FutureWarning)

    @pytest.mark.parametrize("invalid", _invalid_scalars + [True, 1.5, np.float64(1.5)])
    @pytest.mark.parametrize("indexer", _indexers)
    def test_setitem_validation_scalar_int(self, invalid, any_int_numpy_dtype, indexer):
        ser = Series([1, 2, 3], dtype=any_int_numpy_dtype)
        # Plain NA-likes (e.g. float NaN) are allowed without warning; NaT and
        # np.datetime64/timedelta64("NaT") are excluded from that carve-out.
        if isna(invalid) and invalid is not NaT and not np.isnat(invalid):
            warn = None
        else:
            warn = FutureWarning
        self._check_setitem_invalid(ser, invalid, indexer, warn)

    @pytest.mark.parametrize("invalid", _invalid_scalars + [True])
    @pytest.mark.parametrize("indexer", _indexers)
    def test_setitem_validation_scalar_float(self, invalid, float_numpy_dtype, indexer):
        # Incompatible scalars (incl. bool) set into a float Series should warn.
        ser = Series([1, 2, None], dtype=float_numpy_dtype)
        self._check_setitem_invalid(ser, invalid, indexer, FutureWarning)
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_mask.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas import Series
|
| 5 |
+
import pandas._testing as tm
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def test_mask():
    # compare with tested results in test_where
    ser = Series(np.random.default_rng(2).standard_normal(5))
    cond = ser > 0

    # mask(cond) with an explicit NaN matches where(~cond, NaN)
    expected = ser.where(~cond, np.nan)
    tm.assert_series_equal(expected, ser.mask(cond))

    # ... with the default fill value
    tm.assert_series_equal(ser.where(~cond), ser.mask(cond))

    # ... and with a Series replacement
    tm.assert_series_equal(ser.where(~cond, -ser), ser.mask(cond, -ser))

    # a truncated boolean Series aligns the same way for both methods
    cond = Series([True, False, False, True, False], index=ser.index)
    ser2 = -(ser.abs())
    tm.assert_series_equal(ser2.where(~cond[:3]), ser2.mask(cond[:3]))
    tm.assert_series_equal(ser2.where(~cond[:3], -ser2), ser2.mask(cond[:3], -ser2))

    # non-Series conditions must match the shape of the Series
    msg = "Array conditional must be same shape as self"
    with pytest.raises(ValueError, match=msg):
        ser.mask(1)
    with pytest.raises(ValueError, match=msg):
        ser.mask(cond[:3].values, -ser)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def test_mask_casts():
    # dtype changes: masking an int Series with NaN upcasts to float
    ser = Series([1, 2, 3, 4])

    result = ser.mask(ser > 2, np.nan)

    expected = Series([1, 2, np.nan, np.nan])
    tm.assert_series_equal(result, expected)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def test_mask_casts2():
    # see gh-21891 - a plain list is accepted as the condition
    ser = Series([1, 2])

    result = ser.mask([True, False])

    expected = Series([np.nan, 2])
    tm.assert_series_equal(result, expected)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def test_mask_inplace():
    ser = Series(np.random.default_rng(2).standard_normal(5))
    cond = ser > 0

    # inplace masking with the default fill leaves NaN where cond holds
    result = ser.copy()
    result.mask(cond, inplace=True)
    tm.assert_series_equal(result.dropna(), ser[~cond])
    tm.assert_series_equal(result, ser.mask(cond))

    # inplace masking with an explicit replacement Series
    result = ser.copy()
    result.mask(cond, -ser, inplace=True)
    tm.assert_series_equal(result, ser.mask(cond, -ser))
|
vllm/lib/python3.10/site-packages/pandas/tests/series/indexing/test_set_value.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from pandas import (
|
| 6 |
+
DatetimeIndex,
|
| 7 |
+
Series,
|
| 8 |
+
)
|
| 9 |
+
import pandas._testing as tm
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def test_series_set_value():
    # GH#1561 - _set_value enlarges an empty object-dtype Series label by label
    dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]

    ser = Series(dtype=object)
    ser._set_value(dates[0], 1.0)
    ser._set_value(dates[1], np.nan)

    expected = Series([1.0, np.nan], index=DatetimeIndex(dates))
    tm.assert_series_equal(ser, expected)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def test_set_value_dt64(datetime_series):
|
| 28 |
+
idx = datetime_series.index[10]
|
| 29 |
+
res = datetime_series._set_value(idx, 0)
|
| 30 |
+
assert res is None
|
| 31 |
+
assert datetime_series[idx] == 0
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def test_set_value_str_index(string_series):
|
| 35 |
+
# equiv
|
| 36 |
+
ser = string_series.copy()
|
| 37 |
+
res = ser._set_value("foobar", 0)
|
| 38 |
+
assert res is None
|
| 39 |
+
assert ser.index[-1] == "foobar"
|
| 40 |
+
assert ser["foobar"] == 0
|
| 41 |
+
|
| 42 |
+
ser2 = string_series.copy()
|
| 43 |
+
ser2.loc["foobar"] = 0
|
| 44 |
+
assert ser2.index[-1] == "foobar"
|
| 45 |
+
assert ser2["foobar"] == 0
|