title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
BUG: groupby rank computing incorrect percentiles
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 63902b53ea36d..f10b8f602ea56 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -561,6 +561,7 @@ Numeric - Bug in :func:`select_dtypes` different behavior between Windows and Linux with ``include="int"`` (:issue:`36569`) - Bug in :meth:`DataFrame.apply` and :meth:`DataFrame.agg` when passed argument ``func="size"`` would operate on the entire ``DataFrame`` instead of rows or columns (:issue:`39934`) - Bug in :meth:`DataFrame.transform` would raise ``SpecificationError`` when passed a dictionary and columns were missing; will now raise a ``KeyError`` instead (:issue:`40004`) +- Bug in :meth:`DataFrameGroupBy.rank` giving incorrect results with ``pct=True`` and equal values between consecutive groups (:issue:`40518`) - Conversion diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 040cb17578fa2..a28f4929995c6 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -947,12 +947,14 @@ def rank_1d( TiebreakEnumType tiebreak Py_ssize_t i, j, N, grp_start=0, dups=0, sum_ranks=0 Py_ssize_t grp_vals_seen=1, grp_na_count=0 - ndarray[int64_t, ndim=1] lexsort_indexer - ndarray[float64_t, ndim=1] grp_sizes, out + ndarray[int64_t, ndim=1] grp_sizes + ndarray[intp_t, ndim=1] lexsort_indexer + ndarray[float64_t, ndim=1] out ndarray[rank_t, ndim=1] masked_vals ndarray[uint8_t, ndim=1] mask bint keep_na, at_end, next_val_diff, check_labels, group_changed rank_t nan_fill_val + int64_t grp_size tiebreak = tiebreakers[ties_method] if tiebreak == TIEBREAK_FIRST: @@ -965,7 +967,7 @@ def rank_1d( # TODO Cython 3.0: cast won't be necessary (#2992) assert <Py_ssize_t>len(labels) == N out = np.empty(N) - grp_sizes = np.ones(N) + grp_sizes = np.ones(N, dtype=np.int64) # If all 0 labels, can short-circuit later label # comparisons @@ -1022,7 +1024,7 @@ def rank_1d( # each label corresponds to a different group value, # the mask helps you differentiate 
missing values before # performing sort on the actual values - lexsort_indexer = np.lexsort(order).astype(np.int64, copy=False) + lexsort_indexer = np.lexsort(order).astype(np.intp, copy=False) if not ascending: lexsort_indexer = lexsort_indexer[::-1] @@ -1093,13 +1095,15 @@ def rank_1d( for j in range(i - dups + 1, i + 1): out[lexsort_indexer[j]] = grp_vals_seen - # Look forward to the next value (using the sorting in lexsort_indexer) - # if the value does not equal the current value then we need to - # reset the dups and sum_ranks, knowing that a new value is - # coming up. The conditional also needs to handle nan equality - # and the end of iteration - if next_val_diff or (mask[lexsort_indexer[i]] - ^ mask[lexsort_indexer[i+1]]): + # Look forward to the next value (using the sorting in + # lexsort_indexer). If the value does not equal the current + # value then we need to reset the dups and sum_ranks, knowing + # that a new value is coming up. The conditional also needs + # to handle nan equality and the end of iteration. If group + # changes we do not record seeing a new value in the group + if not group_changed and (next_val_diff or + (mask[lexsort_indexer[i]] + ^ mask[lexsort_indexer[i+1]])): dups = sum_ranks = 0 grp_vals_seen += 1 @@ -1110,14 +1114,21 @@ def rank_1d( # group encountered (used by pct calculations later). 
Also be # sure to reset any of the items helping to calculate dups if group_changed: + + # If not dense tiebreak, group size used to compute + # percentile will be # of non-null elements in group if tiebreak != TIEBREAK_DENSE: - for j in range(grp_start, i + 1): - grp_sizes[lexsort_indexer[j]] = \ - (i - grp_start + 1 - grp_na_count) + grp_size = i - grp_start + 1 - grp_na_count + + # Otherwise, it will be the number of distinct values + # in the group, subtracting 1 if NaNs are present + # since that is a distinct value we shouldn't count else: - for j in range(grp_start, i + 1): - grp_sizes[lexsort_indexer[j]] = \ - (grp_vals_seen - 1 - (grp_na_count > 0)) + grp_size = grp_vals_seen - (grp_na_count > 0) + + for j in range(grp_start, i + 1): + grp_sizes[lexsort_indexer[j]] = grp_size + dups = sum_ranks = 0 grp_na_count = 0 grp_start = i + 1 @@ -1184,12 +1195,14 @@ def rank_1d( out[lexsort_indexer[j]] = grp_vals_seen # Look forward to the next value (using the sorting in - # lexsort_indexer) if the value does not equal the current + # lexsort_indexer). If the value does not equal the current # value then we need to reset the dups and sum_ranks, knowing # that a new value is coming up. The conditional also needs - # to handle nan equality and the end of iteration - if next_val_diff or (mask[lexsort_indexer[i]] - ^ mask[lexsort_indexer[i+1]]): + # to handle nan equality and the end of iteration. If group + # changes we do not record seeing a new value in the group + if not group_changed and (next_val_diff or + (mask[lexsort_indexer[i]] + ^ mask[lexsort_indexer[i+1]])): dups = sum_ranks = 0 grp_vals_seen += 1 @@ -1200,14 +1213,21 @@ def rank_1d( # group encountered (used by pct calculations later). 
Also be # sure to reset any of the items helping to calculate dups if group_changed: + + # If not dense tiebreak, group size used to compute + # percentile will be # of non-null elements in group if tiebreak != TIEBREAK_DENSE: - for j in range(grp_start, i + 1): - grp_sizes[lexsort_indexer[j]] = \ - (i - grp_start + 1 - grp_na_count) + grp_size = i - grp_start + 1 - grp_na_count + + # Otherwise, it will be the number of distinct values + # in the group, subtracting 1 if NaNs are present + # since that is a distinct value we shouldn't count else: - for j in range(grp_start, i + 1): - grp_sizes[lexsort_indexer[j]] = \ - (grp_vals_seen - 1 - (grp_na_count > 0)) + grp_size = grp_vals_seen - (grp_na_count > 0) + + for j in range(grp_start, i + 1): + grp_sizes[lexsort_indexer[j]] = grp_size + dups = sum_ranks = 0 grp_na_count = 0 grp_start = i + 1 diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py index 00641effac08d..2e666c27386b4 100644 --- a/pandas/tests/groupby/test_rank.py +++ b/pandas/tests/groupby/test_rank.py @@ -542,3 +542,28 @@ def test_rank_min_int(): ) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("use_nan", [True, False]) +def test_rank_pct_equal_values_on_group_transition(use_nan): + # GH#40518 + fill_value = np.nan if use_nan else 3 + df = DataFrame( + [ + [-1, 1], + [-1, 2], + [1, fill_value], + [-1, fill_value], + ], + columns=["group", "val"], + ) + result = df.groupby(["group"])["val"].rank( + method="dense", + pct=True, + ) + if use_nan: + expected = Series([0.5, 1, np.nan, np.nan], name="val") + else: + expected = Series([1 / 3, 2 / 3, 1, 1], name="val") + + tm.assert_series_equal(result, expected)
- [x] closes #40518 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Built on #40546, is a relatively minor change after that.
https://api.github.com/repos/pandas-dev/pandas/pulls/40575
2021-03-22T22:23:19Z
2021-04-01T15:54:05Z
2021-04-01T15:54:05Z
2021-04-01T16:16:09Z
CLN/DEPR: remove Block._holder, deprecated Block.is_categorical
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c13eb3f109354..4986822a3ed5a 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -13,6 +13,7 @@ Union, cast, ) +import warnings import numpy as np @@ -191,16 +192,6 @@ def __init__(self, values, placement: BlockPlacement, ndim: int): self._mgr_locs = placement self.values = values - @property - def _holder(self): - """ - The array-like that can hold the underlying values. - - None for 'Block', overridden by subclasses that don't - use an ndarray. - """ - return None - @final @property def _consolidate_key(self): @@ -227,7 +218,14 @@ def _can_hold_na(self) -> bool: @final @property def is_categorical(self) -> bool: - return self._holder is Categorical + warnings.warn( + "Block.is_categorical is deprecated and will be removed in a " + "future version. Use isinstance(block.values, Categorical) " + "instead. See https://github.com/pandas-dev/pandas/issues/40226", + DeprecationWarning, + stacklevel=2, + ) + return isinstance(self.values, Categorical) @final def external_values(self): @@ -797,8 +795,10 @@ def _replace_list( """ See BlockManager._replace_list docstring. 
""" + values = self.values + # TODO: dont special-case Categorical - if self.is_categorical and len(algos.unique(dest_list)) == 1: + if isinstance(values, Categorical) and len(algos.unique(dest_list)) == 1: # We likely got here by tiling value inside NDFrame.replace, # so un-tile here return self.replace(src_list, dest_list[0], inplace, regex) @@ -813,17 +813,17 @@ def _replace_list( src_len = len(pairs) - 1 - if self.is_object: + if values.dtype == _dtype_obj: # Calculate the mask once, prior to the call of comp # in order to avoid repeating the same computations - mask = ~isna(self.values) + mask = ~isna(values) masks = [ - compare_or_regex_search(self.values, s[0], regex=regex, mask=mask) + compare_or_regex_search(values, s[0], regex=regex, mask=mask) for s in pairs ] else: # GH#38086 faster if we know we dont need to check for regex - masks = [missing.mask_missing(self.values, s[0]) for s in pairs] + masks = [missing.mask_missing(values, s[0]) for s in pairs] # error: Argument 1 to "extract_bool_array" has incompatible type # "Union[ExtensionArray, ndarray, bool]"; expected "Union[ExtensionArray, @@ -1504,11 +1504,6 @@ def putmask(self, mask, new) -> List[Block]: new_values[mask] = new return [self.make_block(values=new_values)] - @property - def _holder(self): - # For extension blocks, the holder is values-dependent. 
- return type(self.values) - @property def is_view(self) -> bool: """Extension arrays are never treated as views.""" @@ -1714,7 +1709,7 @@ def where(self, other, cond, errors="raise") -> List[Block]: # NotImplementedError for class not implementing `__setitem__` # TypeError for SparseArray, which implements just to raise # a TypeError - result = self._holder._from_sequence( + result = type(self.values)._from_sequence( np.where(cond, self.values, other), dtype=dtype ) @@ -1904,10 +1899,6 @@ class DatetimeLikeBlockMixin(NDArrayBackedExtensionBlock): def array_values(self): return ensure_wrapped_if_datetimelike(self.values) - @property - def _holder(self): - return type(self.array_values()) - class DatetimeBlock(DatetimeLikeBlockMixin): __slots__ = () diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index b40e2d90869ec..d5e549ec874da 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -39,6 +39,7 @@ import pandas.core.algorithms as algos from pandas.core.arrays import ( + Categorical, DatetimeArray, ExtensionArray, ) @@ -367,7 +368,7 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: # preserve these for validation in concat_compat return self.block.values - if self.block.is_bool and not self.block.is_categorical: + if self.block.is_bool and not isinstance(self.block.values, Categorical): # External code requested filling/upcasting, bool values must # be upcasted to object to avoid being upcasted to numeric. 
values = self.block.astype(np.object_).values diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index ef1c3ec0c2860..fc06b85b1f954 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -27,11 +27,7 @@ ) import pandas._testing as tm import pandas.core.algorithms as algos -from pandas.core.arrays import ( - DatetimeArray, - SparseArray, - TimedeltaArray, -) +from pandas.core.arrays import SparseArray from pandas.core.internals import ( BlockManager, SingleBlockManager, @@ -320,6 +316,12 @@ def test_split(self): for res, exp in zip(result, expected): assert_block_equal(res, exp) + def test_is_categorical_deprecated(self): + # GH#40571 + blk = self.fblock + with tm.assert_produces_warning(DeprecationWarning): + blk.is_categorical + class TestBlockManager: def test_attrs(self): @@ -1302,21 +1304,6 @@ def test_should_store_categorical(self): assert not blk.should_store(np.asarray(cat)) -@pytest.mark.parametrize( - "typestr, holder", - [ - ("category", Categorical), - ("M8[ns]", DatetimeArray), - ("M8[ns, US/Central]", DatetimeArray), - ("m8[ns]", TimedeltaArray), - ("sparse", SparseArray), - ], -) -def test_holder(typestr, holder, block_maker): - blk = create_block(typestr, [1], maker=block_maker) - assert blk._holder is holder - - def test_validate_ndim(block_maker): values = np.array([1.0, 2.0]) placement = slice(2)
https://api.github.com/repos/pandas-dev/pandas/pulls/40571
2021-03-22T16:38:52Z
2021-03-23T18:02:26Z
2021-03-23T18:02:26Z
2021-03-24T08:03:26Z
DEPR: use DeprecationWarning instead of FutureWarning for CategoricalBlock
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index 1be8df2fabfd4..18e584575bc97 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -53,7 +53,7 @@ def __getattr__(name: str): warnings.warn( "CategoricalBlock is deprecated and will be removed in a future version. " "Use ExtensionBlock instead.", - FutureWarning, + DeprecationWarning, stacklevel=2, ) return ExtensionBlock diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 21d55e40a07fb..d882eb930137b 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -258,7 +258,9 @@ def test_read_expands_user_home_dir( ), ], ) - @pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:FutureWarning") + @pytest.mark.filterwarnings( + "ignore:CategoricalBlock is deprecated:DeprecationWarning" + ) def test_read_fspath_all(self, reader, module, path, datapath): pytest.importorskip(module) path = datapath(*path) diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index 162094428dbc0..81af799640135 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -20,7 +20,7 @@ @filter_sparse @pytest.mark.single -@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:FutureWarning") +@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:DeprecationWarning") class TestFeather: def check_error_on_write(self, df, exc, err_msg): # check that we are raising the exception diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index f3cfa033409cb..e74c915bbaf74 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -573,7 +573,7 @@ def test_write_column_index_nonstring(self, pa): self.check_error_on_write(df, engine, ValueError, msg) -@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:FutureWarning") +@pytest.mark.filterwarnings("ignore:CategoricalBlock is 
deprecated:DeprecationWarning") class TestParquetPyArrow(Base): def test_basic(self, pa, df_full):
Follow-up on https://github.com/pandas-dev/pandas/pull/40527 There is no need for end-users to see this warning if an underlying library they use generates this, so using a DeprecationWarning instead of FutureWarning (as we did before with potentially noisy or internal warnings). At a later point we can change to FutureWarning.
https://api.github.com/repos/pandas-dev/pandas/pulls/40568
2021-03-22T13:33:39Z
2021-03-22T17:06:08Z
2021-03-22T17:06:08Z
2021-03-22T23:46:57Z
CLN: a couple of minor cleanups
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 769ae52744c74..1398db6960cc8 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -49,6 +49,7 @@ from pandas.core.dtypes.common import ( ensure_int64, ensure_object, + ensure_platform_int, is_categorical_dtype, is_datetime64_dtype, is_dict_like, @@ -533,7 +534,7 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: # error: Incompatible types in assignment (expression has type "ndarray", # variable has type "Categorical") result = take_nd( # type: ignore[assignment] - new_cats, libalgos.ensure_platform_int(self._codes) + new_cats, ensure_platform_int(self._codes) ) return result diff --git a/pandas/tests/apply/test_frame_transform.py b/pandas/tests/apply/test_frame_transform.py index 1a12cbff47092..7a626ce6312c5 100644 --- a/pandas/tests/apply/test_frame_transform.py +++ b/pandas/tests/apply/test_frame_transform.py @@ -162,8 +162,6 @@ def test_transform_method_name(method): frame_kernels_raise = [x for x in frame_transform_kernels if x not in wont_fail] -# mypy doesn't allow adding lists of different types -# https://github.com/python/mypy/issues/5492 @pytest.mark.parametrize("op", [*frame_kernels_raise, lambda x: x + 1]) def test_transform_bad_dtype(op, frame_or_series): # GH 35964
https://api.github.com/repos/pandas-dev/pandas/pulls/40567
2021-03-22T12:16:47Z
2021-03-22T13:08:41Z
2021-03-22T13:08:41Z
2021-03-22T13:08:49Z
⬆️ UPGRADE: Autoupdate pre-commit config
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e3dd6b018b8aa..5bfceec6605c0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -47,11 +47,11 @@ repos: types: [text] args: [--append-config=flake8/cython-template.cfg] - repo: https://github.com/PyCQA/isort - rev: 5.7.0 + rev: 5.8.0 hooks: - id: isort - repo: https://github.com/asottile/pyupgrade - rev: v2.10.0 + rev: v2.11.0 hooks: - id: pyupgrade args: [--py37-plus, --keep-runtime-typing] diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 174ea8760b0db..800228156fcd6 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -827,7 +827,7 @@ def _replace_list( rb = [self if inplace else self.copy()] for i, (src, dest) in enumerate(pairs): - new_rb: List["Block"] = [] + new_rb: List[Block] = [] for blk in rb: m = masks[i] convert = i == src_len # only convert once at the end diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 02a723902271e..99931123b0c81 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -4058,7 +4058,7 @@ def get_blk_items(mgr): tuple(b_items.tolist()): (b, b_items) for b, b_items in zip(blocks, blk_items) } - new_blocks: List["Block"] = [] + new_blocks: List[Block] = [] new_blk_items = [] for ea in values_axes: items = tuple(ea.values) diff --git a/pandas/tests/io/parser/common/test_file_buffer_url.py b/pandas/tests/io/parser/common/test_file_buffer_url.py index f09a1f7bcc492..2a3d7328aa662 100644 --- a/pandas/tests/io/parser/common/test_file_buffer_url.py +++ b/pandas/tests/io/parser/common/test_file_buffer_url.py @@ -400,7 +400,7 @@ def test_context_manageri_user_provided(all_parsers, datapath): # make sure that user-provided handles are not closed parser = all_parsers - with open(datapath("io", "data", "csv", "iris.csv"), mode="r") as path: + with open(datapath("io", "data", "csv", "iris.csv")) as path: reader = parser.read_csv(path, chunksize=1) assert not 
reader._engine.handles.handle.closed diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py index ba6bfe9d88a03..89ece3b1a7300 100644 --- a/pandas/tests/io/parser/test_encoding.py +++ b/pandas/tests/io/parser/test_encoding.py @@ -153,7 +153,7 @@ def test_binary_mode_file_buffers( fpath = datapath(*file_path) expected = parser.read_csv(fpath, encoding=encoding) - with open(fpath, mode="r", encoding=encoding) as fa: + with open(fpath, encoding=encoding) as fa: result = parser.read_csv(fa) assert not fa.closed tm.assert_frame_equal(expected, result) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index a22f0cd8dff83..c90f6ef956a65 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -302,7 +302,7 @@ def load_iris_data(self, datapath, request): self.drop_table("iris") self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor]) - with open(iris_csv_file, mode="r", newline=None) as iris_csv: + with open(iris_csv_file, newline=None) as iris_csv: r = csv.reader(iris_csv) next(r) # skip header row ins = SQL_STRINGS["insert_iris"][self.flavor]
<!-- START pr-commits --> <!-- END pr-commits --> ## Base PullRequest default branch (https://github.com/pandas-dev/pandas/tree/master) ## Command results <details> <summary>Details: </summary> <details> <summary><em>add path</em></summary> ```Shell /home/runner/work/_actions/technote-space/create-pr-action/v2/node_modules/npm-check-updates/bin ``` </details> <details> <summary><em>pip install pre-commit</em></summary> ```Shell Collecting pre-commit Downloading pre_commit-2.11.1-py2.py3-none-any.whl (187 kB) Collecting identify>=1.0.0 Downloading identify-2.2.0-py2.py3-none-any.whl (98 kB) Collecting toml Using cached toml-0.10.2-py2.py3-none-any.whl (16 kB) Collecting cfgv>=2.0.0 Using cached cfgv-3.2.0-py2.py3-none-any.whl (7.3 kB) Collecting nodeenv>=0.11.1 Using cached nodeenv-1.5.0-py2.py3-none-any.whl (21 kB) Collecting virtualenv>=20.0.8 Downloading virtualenv-20.4.3-py2.py3-none-any.whl (7.2 MB) Collecting pyyaml>=5.1 Downloading PyYAML-5.4.1-cp39-cp39-manylinux1_x86_64.whl (630 kB) Collecting filelock<4,>=3.0.0 Using cached filelock-3.0.12-py3-none-any.whl (7.6 kB) Collecting distlib<1,>=0.3.1 Using cached distlib-0.3.1-py2.py3-none-any.whl (335 kB) Collecting six<2,>=1.9.0 Using cached six-1.15.0-py2.py3-none-any.whl (10 kB) Collecting appdirs<2,>=1.4.3 Using cached appdirs-1.4.4-py2.py3-none-any.whl (9.6 kB) Installing collected packages: six, filelock, distlib, appdirs, virtualenv, toml, pyyaml, nodeenv, identify, cfgv, pre-commit Successfully installed appdirs-1.4.4 cfgv-3.2.0 distlib-0.3.1 filelock-3.0.12 identify-2.2.0 nodeenv-1.5.0 pre-commit-2.11.1 pyyaml-5.4.1 six-1.15.0 toml-0.10.2 virtualenv-20.4.3 ``` </details> <details> <summary><em>pre-commit autoupdate || (exit 0);</em></summary> ```Shell Updating https://github.com/MarcoGorelli/absolufy-imports ... [INFO] Initializing environment for https://github.com/MarcoGorelli/absolufy-imports. already up to date. Updating https://github.com/python/black ... already up to date. 
Updating https://github.com/codespell-project/codespell ... [INFO] Initializing environment for https://github.com/codespell-project/codespell. already up to date. Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks. already up to date. Updating https://github.com/cpplint/cpplint ... [INFO] Initializing environment for https://github.com/cpplint/cpplint. =====> /home/runner/.cache/pre-commit/repo14gb2ni_/.pre-commit-hooks.yaml does not exist Updating https://gitlab.com/pycqa/flake8 ... [INFO] Initializing environment for https://gitlab.com/pycqa/flake8. already up to date. Updating https://github.com/PyCQA/isort ... [INFO] Initializing environment for https://github.com/PyCQA/isort. updating 5.7.0 -> 5.8.0. Updating https://github.com/asottile/pyupgrade ... [INFO] Initializing environment for https://github.com/asottile/pyupgrade. updating v2.10.0 -> v2.11.0. Updating https://github.com/pre-commit/pygrep-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pygrep-hooks. already up to date. Updating https://github.com/asottile/yesqa ... already up to date. ``` </details> <details> <summary><em>pre-commit run -a || (exit 0);</em></summary> ```Shell [INFO] Initializing environment for https://github.com/cpplint/cpplint. [INFO] Initializing environment for https://gitlab.com/pycqa/flake8:flake8-comprehensions>=3.1.0. [INFO] Installing environment for https://github.com/MarcoGorelli/absolufy-imports. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/python/black. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/codespell-project/codespell. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... 
[INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/cpplint/cpplint. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://gitlab.com/pycqa/flake8. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://gitlab.com/pycqa/flake8. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/PyCQA/isort. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/asottile/pyupgrade. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/asottile/yesqa. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for local. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for local. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for local. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... 
absolufy-imports....................................................................................Passed black...............................................................................................Passed codespell...........................................................................................Passed Fix End of Files....................................................................................Passed Trim Trailing Whitespace............................................................................Passed cpplint.............................................................................................Passed flake8..............................................................................................Passed flake8 (cython).....................................................................................Passed flake8 (cython template)............................................................................Passed isort...............................................................................................Passed pyupgrade...........................................................................................Failed - hook id: pyupgrade - exit code: 1 - files were modified by this hook Rewriting pandas/io/pytables.py Rewriting pandas/core/internals/blocks.py Rewriting pandas/tests/io/test_sql.py Rewriting pandas/tests/io/parser/test_encoding.py Rewriting pandas/tests/io/parser/common/test_file_buffer_url.py rst ``code`` is two backticks.......................................................................Passed rst directives end with two colons..................................................................Passed rst ``inline code`` next to normal text.............................................................Passed Strip unnecessary `# noqa`s.........................................................................Passed 
flake8-rst..........................................................................................Passed Check for use of Union[Series, DataFrame] instead of FrameOrSeriesUnion alias.......................Passed Check for inconsistent use of pandas namespace in tests.............................................Passed Check for incorrect code block or IPython directives................................................Passed Check code for instances of os.remove...............................................................Passed Check for non-standard imports......................................................................Passed Check for non-standard imports in test suite........................................................Passed Check for use of np.bool/np.object instead of np.bool_/np.object_...................................Passed Generate pip dependency from conda..................................................................Passed Validate correct capitalization among titles in documentation.......................................Passed Check for use of foo.__class__ instead of type(foo).................................................Passed Check for use of bare pytest raises.................................................................Passed Check for use of private functions across modules...................................................Passed Check for import of private attributes across modules...............................................Passed Check for use of pytest.xfail.......................................................................Passed Check for use of not concatenated strings...........................................................Passed Check for strings with wrong placed spaces..........................................................Passed Check for outdated annotation syntax and missing error codes........................................Passed ``` </details> </details> ## Changed files <details> <summary>Changed 6 
files: </summary> - .pre-commit-config.yaml - pandas/core/internals/blocks.py - pandas/io/pytables.py - pandas/tests/io/parser/common/test_file_buffer_url.py - pandas/tests/io/parser/test_encoding.py - pandas/tests/io/test_sql.py </details> <hr> [:octocat: Repo](https://github.com/technote-space/create-pr-action) | [:memo: Issues](https://github.com/technote-space/create-pr-action/issues) | [:department_store: Marketplace](https://github.com/marketplace/actions/create-pr-action)
https://api.github.com/repos/pandas-dev/pandas/pulls/40564
2021-03-22T07:29:07Z
2021-03-22T10:49:50Z
2021-03-22T10:49:50Z
2021-03-22T10:49:55Z
ENH: applymap get kwargs #39987
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 16f76651a65aa..4e7067da3cc72 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -128,6 +128,7 @@ Other enhancements - :meth:`.Rolling.sum`, :meth:`.Expanding.sum`, :meth:`.Rolling.mean`, :meth:`.Expanding.mean`, :meth:`.Rolling.median`, :meth:`.Expanding.median`, :meth:`.Rolling.max`, :meth:`.Expanding.max`, :meth:`.Rolling.min`, and :meth:`.Expanding.min` now support ``Numba`` execution with the ``engine`` keyword (:issue:`38895`) - :meth:`DataFrame.apply` can now accept NumPy unary operators as strings, e.g. ``df.apply("sqrt")``, which was already the case for :meth:`Series.apply` (:issue:`39116`) - :meth:`DataFrame.apply` can now accept non-callable DataFrame properties as strings, e.g. ``df.apply("size")``, which was already the case for :meth:`Series.apply` (:issue:`39116`) +- :meth:`DataFrame.applymap` can now accept kwargs to pass on to func (:issue:`39987`) - Disallow :class:`DataFrame` indexer for ``iloc`` for :meth:`Series.__getitem__` and :meth:`DataFrame.__getitem__`, (:issue:`39004`) - :meth:`Series.apply` can now accept list-like or dictionary-like arguments that aren't lists or dictionaries, e.g. 
``ser.apply(np.array(["sum", "mean"]))``, which was already the case for :meth:`DataFrame.apply` (:issue:`39140`) - :meth:`DataFrame.plot.scatter` can now accept a categorical column as the argument to ``c`` (:issue:`12380`, :issue:`31357`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b1f0ad8eda2aa..1b07a5b97806e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -13,6 +13,7 @@ import collections from collections import abc import datetime +import functools from io import StringIO import itertools import mmap @@ -3332,7 +3333,6 @@ def _ixs(self, i: int, axis: int = 0): # this is a cached value, mark it so result._set_as_cached(label, self) - return result def _get_column_array(self, i: int) -> ArrayLike: @@ -8440,7 +8440,7 @@ def apply( return op.apply() def applymap( - self, func: PythonFuncType, na_action: Optional[str] = None + self, func: PythonFuncType, na_action: Optional[str] = None, **kwargs ) -> DataFrame: """ Apply a function to a Dataframe elementwise. @@ -8457,6 +8457,12 @@ def applymap( .. versionadded:: 1.2 + **kwargs + Additional keyword arguments to pass as keywords arguments to + `func`. + + .. versionadded:: 1.3 + Returns ------- DataFrame @@ -8508,6 +8514,7 @@ def applymap( f"na_action must be 'ignore' or None. 
Got {repr(na_action)}" ) ignore_na = na_action == "ignore" + func = functools.partial(func, **kwargs) # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py index 227037ecba664..cee8a0218e9e8 100644 --- a/pandas/tests/apply/test_frame_apply.py +++ b/pandas/tests/apply/test_frame_apply.py @@ -560,6 +560,13 @@ def test_applymap(float_frame): tm.assert_frame_equal(result, expected) +def test_applymap_kwargs(): + # GH 40652 + result = DataFrame([[1, 2], [3, 4]]).applymap(lambda x, y: x + y, y=2) + expected = DataFrame([[3, 4], [5, 6]]) + tm.assert_frame_equal(result, expected) + + def test_applymap_na_ignore(float_frame): # GH 23803 strlen_frame = float_frame.applymap(lambda x: len(str(x)))
- [X] closes #39987 - [X] tests added / passed - [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40562
2021-03-22T04:17:36Z
2021-03-23T16:11:17Z
2021-03-23T16:11:16Z
2021-03-23T16:11:20Z
CLN: remove unused axis keyword from Block.where
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c20b2840a40ab..93c597d738501 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -8933,7 +8933,7 @@ def _where( # align the cond to same shape as myself cond = com.apply_if_callable(cond, self) if isinstance(cond, NDFrame): - cond, _ = cond.align(self, join="right", broadcast_axis=1) + cond, _ = cond.align(self, join="right", broadcast_axis=1, copy=False) else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) @@ -8961,6 +8961,7 @@ def _where( cond = cond.astype(bool) cond = -cond if inplace else cond + cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False) # try to align with other if isinstance(other, NDFrame): @@ -8997,7 +8998,7 @@ def _where( "cannot align with a higher dimensional NDFrame" ) - if not isinstance(other, (MultiIndex, NDFrame)): + elif not isinstance(other, (MultiIndex, NDFrame)): # mainly just catching Index here other = extract_array(other, extract_numpy=True) @@ -9029,11 +9030,6 @@ def _where( else: align = self._get_axis_number(axis) == 1 - if isinstance(cond, NDFrame): - cond = cond.reindex( - self._info_axis, axis=self._info_axis_number, copy=False - ) - if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager @@ -9049,7 +9045,6 @@ def _where( cond=cond, align=align, errors=errors, - axis=axis, ) result = self._constructor(new_data) return result.__finalize__(self) diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 34b3d83c066c2..435d2421ccade 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -521,7 +521,7 @@ def quantile( axes = [qs, self._axes[1]] return type(self)(new_arrs, axes) - def where(self, other, cond, align: bool, errors: str, axis: int) -> ArrayManager: + def where(self, other, cond, align: bool, errors: str) -> ArrayManager: if align: align_keys = ["other", "cond"] else: @@ 
-534,7 +534,6 @@ def where(self, other, cond, align: bool, errors: str, axis: int) -> ArrayManage other=other, cond=cond, errors=errors, - axis=axis, ) # TODO what is this used for? diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 174ea8760b0db..f38202f1e3476 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1286,7 +1286,7 @@ def shift(self, periods: int, axis: int = 0, fill_value: Any = None) -> List[Blo return [self.make_block(new_values)] - def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: + def where(self, other, cond, errors="raise") -> List[Block]: """ evaluate the block; return result block(s) from the result @@ -1297,7 +1297,6 @@ def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: errors : str, {'raise', 'ignore'}, default 'raise' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object - axis : int, default 0 Returns ------- @@ -1305,6 +1304,7 @@ def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: """ import pandas.core.computation.expressions as expressions + assert cond.ndim == self.ndim assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) assert errors in ["raise", "ignore"] @@ -1317,7 +1317,7 @@ def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: icond, noop = validate_putmask(values, ~cond) - if is_valid_na_for_dtype(other, self.dtype) and not self.is_object: + if is_valid_na_for_dtype(other, self.dtype) and self.dtype != _dtype_obj: other = self.fill_value if noop: @@ -1330,7 +1330,7 @@ def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: # we cannot coerce, return a compat dtype # we are explicitly ignoring errors block = self.coerce_to_target_dtype(other) - blocks = block.where(orig_other, cond, errors=errors, axis=axis) + blocks = block.where(orig_other, cond, errors=errors) return 
self._maybe_downcast(blocks, "infer") # error: Argument 1 to "setitem_datetimelike_compat" has incompatible type @@ -1359,7 +1359,7 @@ def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: cond = ~icond axis = cond.ndim - 1 cond = cond.swapaxes(axis, 0) - mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool) + mask = cond.all(axis=1) result_blocks: List[Block] = [] for m in [mask, ~mask]: @@ -1670,7 +1670,7 @@ def shift(self, periods: int, axis: int = 0, fill_value: Any = None) -> List[Blo new_values = self.values.shift(periods=periods, fill_value=fill_value) return [self.make_block_same_class(new_values)] - def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: + def where(self, other, cond, errors="raise") -> List[Block]: cond = extract_bool_array(cond) assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) @@ -1837,7 +1837,7 @@ def putmask(self, mask, new) -> List[Block]: arr.T.putmask(mask, new) return [self] - def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: + def where(self, other, cond, errors="raise") -> List[Block]: # TODO(EA2D): reshape unnecessary with 2D EAs arr = self.array_values().reshape(self.shape) @@ -1846,7 +1846,7 @@ def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: try: res_values = arr.T.where(cond, other).T except (ValueError, TypeError): - return super().where(other, cond, errors=errors, axis=axis) + return super().where(other, cond, errors=errors) # TODO(EA2D): reshape not needed with 2D EAs res_values = res_values.reshape(self.values.shape) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index da78fc5dfba76..f4eafd882b62b 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -574,8 +574,7 @@ def quantile( return type(self)(blocks, new_axes) - def where(self, other, cond, align: bool, errors: str, axis: int) -> BlockManager: - axis = 
self._normalize_axis(axis) + def where(self, other, cond, align: bool, errors: str) -> BlockManager: if align: align_keys = ["other", "cond"] else: @@ -588,7 +587,6 @@ def where(self, other, cond, align: bool, errors: str, axis: int) -> BlockManage other=other, cond=cond, errors=errors, - axis=axis, ) def setitem(self, indexer, value) -> BlockManager:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40561
2021-03-22T03:03:20Z
2021-03-22T13:11:24Z
2021-03-22T13:11:24Z
2021-03-22T14:25:23Z
REF/PERF: deduplicate kth_smallest
diff --git a/pandas/_libs/algos.pxd b/pandas/_libs/algos.pxd index 4bca5b33a3c62..7e87f4767c86d 100644 --- a/pandas/_libs/algos.pxd +++ b/pandas/_libs/algos.pxd @@ -1,21 +1,4 @@ from pandas._libs.util cimport numeric -cdef inline Py_ssize_t swap(numeric *a, numeric *b) nogil: - cdef: - numeric t - - # cython doesn't allow pointer dereference so use array syntax - t = a[0] - a[0] = b[0] - b[0] = t - return 0 - - -cdef enum TiebreakEnumType: - TIEBREAK_AVERAGE - TIEBREAK_MIN, - TIEBREAK_MAX - TIEBREAK_FIRST - TIEBREAK_FIRST_DESCENDING - TIEBREAK_DENSE +cdef numeric kth_smallest_c(numeric* arr, Py_ssize_t k, Py_ssize_t n) nogil diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 047eb848b7540..94bd8b49777cf 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -64,6 +64,14 @@ cdef: float64_t NaN = <float64_t>np.NaN int64_t NPY_NAT = get_nat() +cdef enum TiebreakEnumType: + TIEBREAK_AVERAGE + TIEBREAK_MIN, + TIEBREAK_MAX + TIEBREAK_FIRST + TIEBREAK_FIRST_DESCENDING + TIEBREAK_DENSE + tiebreakers = { "average": TIEBREAK_AVERAGE, "min": TIEBREAK_MIN, @@ -237,34 +245,75 @@ def groupsort_indexer(const int64_t[:] index, Py_ssize_t ngroups): return indexer, counts +cdef inline Py_ssize_t swap(numeric *a, numeric *b) nogil: + cdef: + numeric t + + # cython doesn't allow pointer dereference so use array syntax + t = a[0] + a[0] = b[0] + b[0] = t + return 0 + + +cdef inline numeric kth_smallest_c(numeric* arr, Py_ssize_t k, Py_ssize_t n) nogil: + """ + See kth_smallest.__doc__. 
The additional parameter n specifies the maximum + number of elements considered in arr, needed for compatibility with usage + in groupby.pyx + """ + cdef: + Py_ssize_t i, j, l, m + numeric x + + l = 0 + m = n - 1 + + while l < m: + x = arr[k] + i = l + j = m + + while 1: + while arr[i] < x: i += 1 + while x < arr[j]: j -= 1 + if i <= j: + swap(&arr[i], &arr[j]) + i += 1; j -= 1 + + if i > j: break + + if j < k: l = i + if k < i: m = j + return arr[k] + + @cython.boundscheck(False) @cython.wraparound(False) -def kth_smallest(numeric[:] a, Py_ssize_t k) -> numeric: +def kth_smallest(numeric[::1] arr, Py_ssize_t k) -> numeric: + """ + Compute the kth smallest value in arr. Note that the input + array will be modified. + + Parameters + ---------- + arr : numeric[::1] + Array to compute the kth smallest value for, must be + contiguous + k : Py_ssize_t + + Returns + ------- + numeric + The kth smallest value in arr + """ cdef: - Py_ssize_t i, j, l, m, n = a.shape[0] - numeric x + numeric result with nogil: - l = 0 - m = n - 1 - - while l < m: - x = a[k] - i = l - j = m - - while 1: - while a[i] < x: i += 1 - while x < a[j]: j -= 1 - if i <= j: - swap(&a[i], &a[j]) - i += 1; j -= 1 - - if i > j: break - - if j < k: l = i - if k < i: m = j - return a[k] + result = kth_smallest_c(&arr[0], k, arr.shape[0]) + + return result # ---------------------------------------------------------------------- diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 89020f2078584..f09a6c04aecbf 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -30,7 +30,7 @@ from numpy.math cimport NAN cnp.import_array() -from pandas._libs.algos cimport swap +from pandas._libs.algos cimport kth_smallest_c from pandas._libs.util cimport ( get_nat, numeric, @@ -88,7 +88,7 @@ cdef inline float64_t median_linear(float64_t* a, int n) nogil: n -= na_count if n % 2: - result = kth_smallest_c( a, n // 2, n) + result = kth_smallest_c(a, n // 2, n) else: result = 
(kth_smallest_c(a, n // 2, n) + kth_smallest_c(a, n // 2 - 1, n)) / 2 @@ -99,35 +99,6 @@ cdef inline float64_t median_linear(float64_t* a, int n) nogil: return result -# TODO: Is this redundant with algos.kth_smallest -cdef inline float64_t kth_smallest_c(float64_t* a, - Py_ssize_t k, - Py_ssize_t n) nogil: - cdef: - Py_ssize_t i, j, l, m - float64_t x, t - - l = 0 - m = n - 1 - while l < m: - x = a[k] - i = l - j = m - - while 1: - while a[i] < x: i += 1 - while x < a[j]: j -= 1 - if i <= j: - swap(&a[i], &a[j]) - i += 1; j -= 1 - - if i > j: break - - if j < k: l = i - if k < i: m = j - return a[k] - - @cython.boundscheck(False) @cython.wraparound(False) def group_median_float64(ndarray[float64_t, ndim=2] out, diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 15f54c11be0a0..1df126e024207 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1305,7 +1305,9 @@ def compute(self, method: str) -> Series: narr = len(arr) n = min(n, narr) - kth_val = algos.kth_smallest(arr.copy(), n - 1) + # arr passed into kth_smallest must be contiguous. We copy + # here because kth_smallest will modify its input + kth_val = algos.kth_smallest(arr.copy(order="C"), n - 1) (ns,) = np.nonzero(arr <= kth_val) inds = ns[arr[ns].argsort(kind="mergesort")]
Ensuring a contiguous input seems to give about a 5-10% performance improvement on the existing benchmark `gil.ParallelKth`
https://api.github.com/repos/pandas-dev/pandas/pulls/40559
2021-03-21T20:48:52Z
2021-03-23T16:10:30Z
2021-03-23T16:10:30Z
2021-03-23T16:18:52Z
TST: add test_ffill_with_string_column
diff --git a/pandas/tests/groupby/test_missing.py b/pandas/tests/groupby/test_missing.py index e53518269408a..f3149abb52291 100644 --- a/pandas/tests/groupby/test_missing.py +++ b/pandas/tests/groupby/test_missing.py @@ -43,6 +43,18 @@ def test_ffill_missing_arguments(): df.groupby("b").fillna() +@pytest.mark.parametrize( + "method, expected", [("ffill", [None, "a", "a"]), ("bfill", ["a", "a", None])] +) +def test_fillna_with_string_dtype(method, expected): + # GH 40250 + df = DataFrame({"a": pd.array([None, "a", None], dtype="string"), "b": [0, 0, 0]}) + grp = df.groupby("b") + result = grp.fillna(method=method) + expected = DataFrame({"a": pd.array(expected, dtype="string")}) + tm.assert_frame_equal(result, expected) + + def test_fill_consistency(): # GH9221
- [x] closes #40250 - [x] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry #40250 is an issue which seems to have been fixed in master already through #39446. This PR adds a test for this issue.
https://api.github.com/repos/pandas-dev/pandas/pulls/40557
2021-03-21T17:31:44Z
2021-05-13T23:27:45Z
2021-05-13T23:27:44Z
2021-05-13T23:27:48Z
ENH: Pluggable SQL performance via new SQL `engine` keyword
diff --git a/doc/source/user_guide/options.rst b/doc/source/user_guide/options.rst index 1fcaac1a91d09..278eb907102ed 100644 --- a/doc/source/user_guide/options.rst +++ b/doc/source/user_guide/options.rst @@ -456,6 +456,10 @@ io.hdf.dropna_table True drop ALL nan rows when appe io.parquet.engine None The engine to use as a default for parquet reading and writing. If None then try 'pyarrow' and 'fastparquet' +io.sql.engine None The engine to use as a default for + sql reading and writing, with SQLAlchemy + as a higher level interface. If None + then try 'sqlalchemy' mode.chained_assignment warn Controls ``SettingWithCopyWarning``: 'raise', 'warn', or None. Raise an exception, warn, or no action if diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index fd49ac0176ce4..baac872a6a466 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -652,6 +652,22 @@ def use_inf_as_na_cb(key): validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]), ) + +# Set up the io.sql specific configuration. +sql_engine_doc = """ +: string + The default sql reader/writer engine. 
Available options: + 'auto', 'sqlalchemy', the default is 'auto' +""" + +with cf.config_prefix("io.sql"): + cf.register_option( + "engine", + "auto", + sql_engine_doc, + validator=is_one_of_factory(["auto", "sqlalchemy"]), + ) + # -------- # Plotting # --------- diff --git a/pandas/io/sql.py b/pandas/io/sql.py index d797fa51984d6..04a7ccb538a67 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -27,6 +27,8 @@ import pandas._libs.lib as lib from pandas._typing import DtypeArg +from pandas.compat._optional import import_optional_dependency +from pandas.errors import AbstractMethodError from pandas.core.dtypes.common import ( is_datetime64tz_dtype, @@ -36,6 +38,7 @@ from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna +from pandas import get_option from pandas.core.api import ( DataFrame, Series, @@ -643,6 +646,8 @@ def to_sql( chunksize: int | None = None, dtype: DtypeArg | None = None, method: str | None = None, + engine: str = "auto", + **engine_kwargs, ) -> None: """ Write records stored in a DataFrame to a SQL database. @@ -689,6 +694,16 @@ def to_sql( section :ref:`insert method <io.sql.method>`. .. versionadded:: 0.24.0 + + engine : {'auto', 'sqlalchemy'}, default 'auto' + SQL engine library to use. If 'auto', then the option + ``io.sql.engine`` is used. The default ``io.sql.engine`` + behavior is 'sqlalchemy' + + .. versionadded:: 1.3.0 + + **engine_kwargs + Any additional kwargs are passed to the engine. 
""" if if_exists not in ("fail", "replace", "append"): raise ValueError(f"'{if_exists}' is not valid for if_exists") @@ -712,6 +727,8 @@ def to_sql( chunksize=chunksize, dtype=dtype, method=method, + engine=engine, + **engine_kwargs, ) @@ -1283,6 +1300,91 @@ def to_sql( ) +class BaseEngine: + def insert_records( + self, + table: SQLTable, + con, + frame, + name, + index=True, + schema=None, + chunksize=None, + method=None, + **engine_kwargs, + ): + """ + Inserts data into already-prepared table + """ + raise AbstractMethodError(self) + + +class SQLAlchemyEngine(BaseEngine): + def __init__(self): + import_optional_dependency( + "sqlalchemy", extra="sqlalchemy is required for SQL support." + ) + + def insert_records( + self, + table: SQLTable, + con, + frame, + name, + index=True, + schema=None, + chunksize=None, + method=None, + **engine_kwargs, + ): + from sqlalchemy import exc + + try: + table.insert(chunksize=chunksize, method=method) + except exc.SQLAlchemyError as err: + # GH34431 + # https://stackoverflow.com/a/67358288/6067848 + msg = r"""(\(1054, "Unknown column 'inf(e0)?' 
in 'field list'"\))(?# + )|inf can not be used with MySQL""" + err_text = str(err.orig) + if re.search(msg, err_text): + raise ValueError("inf cannot be used with MySQL") from err + else: + raise err + + +def get_engine(engine: str) -> BaseEngine: + """ return our implementation """ + if engine == "auto": + engine = get_option("io.sql.engine") + + if engine == "auto": + # try engines in this order + engine_classes = [SQLAlchemyEngine] + + error_msgs = "" + for engine_class in engine_classes: + try: + return engine_class() + except ImportError as err: + error_msgs += "\n - " + str(err) + + raise ImportError( + "Unable to find a usable engine; " + "tried using: 'sqlalchemy'.\n" + "A suitable version of " + "sqlalchemy is required for sql I/O " + "support.\n" + "Trying to import the above resulted in these errors:" + f"{error_msgs}" + ) + + elif engine == "sqlalchemy": + return SQLAlchemyEngine() + + raise ValueError("engine must be one of 'auto', 'sqlalchemy'") + + class SQLDatabase(PandasSQL): """ This class enables conversion between DataFrame and SQL databases @@ -1504,7 +1606,7 @@ def read_query( read_sql = read_query - def to_sql( + def prep_table( self, frame, name, @@ -1512,50 +1614,10 @@ def to_sql( index=True, index_label=None, schema=None, - chunksize=None, dtype: DtypeArg | None = None, - method=None, - ): + ) -> SQLTable: """ - Write records stored in a DataFrame to a SQL database. - - Parameters - ---------- - frame : DataFrame - name : string - Name of SQL table. - if_exists : {'fail', 'replace', 'append'}, default 'fail' - - fail: If table exists, do nothing. - - replace: If table exists, drop it, recreate it, and insert data. - - append: If table exists, insert data. Create if does not exist. - index : bool, default True - Write DataFrame index as a column. - index_label : string or sequence, default None - Column label for index column(s). If None is given (default) and - `index` is True, then the index names are used. 
- A sequence should be given if the DataFrame uses MultiIndex. - schema : string, default None - Name of SQL schema in database to write to (if database flavor - supports this). If specified, this overwrites the default - schema of the SQLDatabase object. - chunksize : int, default None - If not None, then rows will be written in batches of this size at a - time. If None, all rows will be written at once. - dtype : single type or dict of column name to SQL type, default None - Optional specifying the datatype for columns. The SQL type should - be a SQLAlchemy type. If all columns are of the same type, one - single value can be used. - method : {None', 'multi', callable}, default None - Controls the SQL insertion clause used: - - * None : Uses standard SQL ``INSERT`` clause (one per row). - * 'multi': Pass multiple values in a single ``INSERT`` clause. - * callable with signature ``(pd_table, conn, keys, data_iter)``. - - Details and a sample callable implementation can be found in the - section :ref:`insert method <io.sql.method>`. - - .. versionadded:: 0.24.0 + Prepares table in the database for data insertion. Creates it if needed, etc. """ if dtype: if not is_dict_like(dtype): @@ -1589,15 +1651,17 @@ def to_sql( dtype=dtype, ) table.create() + return table - from sqlalchemy.exc import SQLAlchemyError - - try: - table.insert(chunksize, method=method) - except SQLAlchemyError as err: - # GH 34431 36465 - raise ValueError("inf cannot be used with MySQL") from err - + def check_case_sensitive( + self, + name, + schema, + ): + """ + Checks table name for issues with case-sensitivity. + Method is called after data is inserted. 
+ """ if not name.isdigit() and not name.islower(): # check for potentially case sensitivity issues (GH7815) # Only check when name is not a number and name is not lower case @@ -1623,6 +1687,97 @@ def to_sql( ) warnings.warn(msg, UserWarning) + def to_sql( + self, + frame, + name, + if_exists="fail", + index=True, + index_label=None, + schema=None, + chunksize=None, + dtype: DtypeArg | None = None, + method=None, + engine="auto", + **engine_kwargs, + ): + """ + Write records stored in a DataFrame to a SQL database. + + Parameters + ---------- + frame : DataFrame + name : string + Name of SQL table. + if_exists : {'fail', 'replace', 'append'}, default 'fail' + - fail: If table exists, do nothing. + - replace: If table exists, drop it, recreate it, and insert data. + - append: If table exists, insert data. Create if does not exist. + index : boolean, default True + Write DataFrame index as a column. + index_label : string or sequence, default None + Column label for index column(s). If None is given (default) and + `index` is True, then the index names are used. + A sequence should be given if the DataFrame uses MultiIndex. + schema : string, default None + Name of SQL schema in database to write to (if database flavor + supports this). If specified, this overwrites the default + schema of the SQLDatabase object. + chunksize : int, default None + If not None, then rows will be written in batches of this size at a + time. If None, all rows will be written at once. + dtype : single type or dict of column name to SQL type, default None + Optional specifying the datatype for columns. The SQL type should + be a SQLAlchemy type. If all columns are of the same type, one + single value can be used. + method : {None', 'multi', callable}, default None + Controls the SQL insertion clause used: + + * None : Uses standard SQL ``INSERT`` clause (one per row). + * 'multi': Pass multiple values in a single ``INSERT`` clause. 
+ * callable with signature ``(pd_table, conn, keys, data_iter)``. + + Details and a sample callable implementation can be found in the + section :ref:`insert method <io.sql.method>`. + + .. versionadded:: 0.24.0 + + engine : {'auto', 'sqlalchemy'}, default 'auto' + SQL engine library to use. If 'auto', then the option + ``io.sql.engine`` is used. The default ``io.sql.engine`` + behavior is 'sqlalchemy' + + .. versionadded:: 1.3.0 + + **engine_kwargs + Any additional kwargs are passed to the engine. + """ + sql_engine = get_engine(engine) + + table = self.prep_table( + frame=frame, + name=name, + if_exists=if_exists, + index=index, + index_label=index_label, + schema=schema, + dtype=dtype, + ) + + sql_engine.insert_records( + table=table, + con=self.connectable, + frame=frame, + name=name, + index=index, + schema=schema, + chunksize=chunksize, + method=method, + **engine_kwargs, + ) + + self.check_case_sensitive(name=name, schema=schema) + @property def tables(self): return self.meta.tables @@ -2008,6 +2163,7 @@ def to_sql( chunksize=None, dtype: DtypeArg | None = None, method=None, + **kwargs, ): """ Write records stored in a DataFrame to a SQL database. 
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 04ddef57a9621..290e063a59be7 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -52,7 +52,9 @@ import pandas.io.sql as sql from pandas.io.sql import ( + SQLAlchemyEngine, _gt14, + get_engine, read_sql_query, read_sql_table, ) @@ -575,6 +577,23 @@ def sample(pd_table, conn, keys, data_iter): # Nuke table self.drop_table("test_frame1") + def _to_sql_with_sql_engine(self, engine="auto", **engine_kwargs): + """`to_sql` with the `engine` param""" + # mostly copied from this class's `_to_sql()` method + self.drop_table("test_frame1") + + self.pandasSQL.to_sql( + self.test_frame1, "test_frame1", engine=engine, **engine_kwargs + ) + assert self.pandasSQL.has_table("test_frame1") + + num_entries = len(self.test_frame1) + num_rows = self._count_rows("test_frame1") + assert num_rows == num_entries + + # Nuke table + self.drop_table("test_frame1") + def _roundtrip(self): self.drop_table("test_frame_roundtrip") self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip") @@ -2053,6 +2072,41 @@ class Temporary(Base): tm.assert_frame_equal(df, expected) + # -- SQL Engine tests (in the base class for now) + def test_invalid_engine(self): + msg = "engine must be one of 'auto', 'sqlalchemy'" + with pytest.raises(ValueError, match=msg): + self._to_sql_with_sql_engine("bad_engine") + + def test_options_sqlalchemy(self): + # use the set option + + with pd.option_context("io.sql.engine", "sqlalchemy"): + self._to_sql_with_sql_engine() + + def test_options_auto(self): + # use the set option + + with pd.option_context("io.sql.engine", "auto"): + self._to_sql_with_sql_engine() + + def test_options_get_engine(self): + assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) + + with pd.option_context("io.sql.engine", "sqlalchemy"): + assert isinstance(get_engine("auto"), SQLAlchemyEngine) + assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) + + with 
pd.option_context("io.sql.engine", "auto"): + assert isinstance(get_engine("auto"), SQLAlchemyEngine) + assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) + + def test_get_engine_auto_error_message(self): + # Expect different error messages from get_engine(engine="auto") + # if engines aren't installed vs. are installed but bad version + pass + # TODO fill this in when we add more engines + class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy): def test_transactions(self):
- [x] xref, but is first step in implementing #36893 - [x] tests added and passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry (in the docstring) - [ ] whatsnew entry (in the docs) This is the first step in implementing an `engine` backend to the SQL I/O functions, as discussed in #36893. This PR is simply to refactor the existing SQLAlchemy parts to make them extensible for an external engine. No new engines were added at this stage. No new tests were added, only verified that existing tests fail ("test behavior, not implementation" suggests that we don't need to test internal refactoring) Lots of the code was borrowed from the Parquet I/O implementation.
https://api.github.com/repos/pandas-dev/pandas/pulls/40556
2021-03-21T16:07:22Z
2021-05-03T17:31:13Z
2021-05-03T17:31:13Z
2021-05-03T19:07:13Z
BUG: Fix behavior of replace_list with mixed types.
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index 60e146b2212eb..e936519383520 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -16,7 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) - Regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`) -- +- Regression in :meth:`DataFrame.replace` and :meth:`Series.replace` when the values to replace is a NumPy float array (:issue:`40371`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index d1eb50f2702ba..0ab14df9c08c6 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -6,6 +6,8 @@ TYPE_CHECKING, Any, Callable, + Iterable, + Sequence, cast, ) import warnings @@ -763,8 +765,8 @@ def _replace_regex( @final def _replace_list( self, - src_list: list[Any], - dest_list: list[Any], + src_list: Iterable[Any], + dest_list: Sequence[Any], inplace: bool = False, regex: bool = False, ) -> list[Block]: @@ -779,6 +781,14 @@ def _replace_list( # so un-tile here return self.replace(src_list, dest_list[0], inplace, regex) + # https://github.com/pandas-dev/pandas/issues/40371 + # the following pairs check code caused a regression so we catch that case here + # until the issue is fixed properly in can_hold_element + + # error: "Iterable[Any]" has no attribute "tolist" + if hasattr(src_list, "tolist"): + src_list = src_list.tolist() # type: ignore[attr-defined] + # Exclude anything that we know we won't contain pairs = [ (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 
d2f02be43dace..46a5a47e091dd 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1428,6 +1428,25 @@ def test_replace_bytes(self, frame_or_series): obj = obj.replace({None: np.nan}) tm.assert_equal(obj, expected) + @pytest.mark.parametrize( + "data, to_replace, value, expected", + [ + ([1], [1.0], [0], [0]), + ([1], [1], [0], [0]), + ([1.0], [1.0], [0], [0.0]), + ([1.0], [1], [0], [0.0]), + ], + ) + @pytest.mark.parametrize("box", [list, tuple, np.array]) + def test_replace_list_with_mixed_type( + self, data, to_replace, value, expected, box, frame_or_series + ): + # GH#40371 + obj = frame_or_series(data) + expected = frame_or_series(expected) + result = obj.replace(box(to_replace), value) + tm.assert_equal(result, expected) + class TestDataFrameReplaceRegex: @pytest.mark.parametrize(
- [x] closes #40371 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40555
2021-03-21T15:15:44Z
2021-06-01T12:36:36Z
2021-06-01T12:36:36Z
2021-06-01T13:05:04Z
CLN: Remove unnecessary code in docs/make.py
diff --git a/doc/make.py b/doc/make.py index 76ce2aca2916c..5d2476fcdca8d 100755 --- a/doc/make.py +++ b/doc/make.py @@ -54,7 +54,6 @@ def __init__( if single_doc: single_doc = self._process_single_doc(single_doc) - include_api = False os.environ["SPHINX_PATTERN"] = single_doc elif not include_api: os.environ["SPHINX_PATTERN"] = "-api"
Simple cleaning in docs/make.py. `include_api = False` in the if block doesn't have any effect.
https://api.github.com/repos/pandas-dev/pandas/pulls/40553
2021-03-21T11:51:37Z
2021-03-23T16:12:20Z
2021-03-23T16:12:20Z
2021-03-23T16:12:24Z
DOC: Fix minimum Python version in contributing docs
diff --git a/doc/source/development/contributing_environment.rst b/doc/source/development/contributing_environment.rst index 8a457d2c210b5..bc0a3556b9ac1 100644 --- a/doc/source/development/contributing_environment.rst +++ b/doc/source/development/contributing_environment.rst @@ -189,7 +189,7 @@ Creating a Python environment (pip) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you aren't using conda for your development environment, follow these instructions. -You'll need to have at least Python 3.7.0 installed on your system. If your Python version +You'll need to have at least the :ref:`minimum Python version <install.version>` that pandas supports. If your Python version is 3.8.0 (or later), you might need to update your ``setuptools`` to version 42.0.0 (or later) in your development environment before installing the build dependencies:: diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 1184c596648fc..f56391ab568ac 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -15,6 +15,8 @@ Instructions for installing from source, `PyPI <https://pypi.org/project/pandas>`__, `ActivePython <https://www.activestate.com/activepython/downloads>`__, various Linux distributions, or a `development version <https://github.com/pandas-dev/pandas>`__ are also provided. +.. _install.version: + Python version support ----------------------
- [x] closes #40485
https://api.github.com/repos/pandas-dev/pandas/pulls/40552
2021-03-21T10:16:35Z
2021-04-04T19:57:20Z
2021-04-04T19:57:20Z
2021-04-04T19:59:26Z
TST: `test_highlight.py` convert to functional tests not class
diff --git a/pandas/tests/io/formats/style/test_highlight.py b/pandas/tests/io/formats/style/test_highlight.py index 7cf958be11392..8aca3cadff0b4 100644 --- a/pandas/tests/io/formats/style/test_highlight.py +++ b/pandas/tests/io/formats/style/test_highlight.py @@ -5,61 +5,68 @@ pytest.importorskip("jinja2") +from pandas.io.formats.style import Styler -class TestStylerHighlight: - def setup_method(self, method): - np.random.seed(24) - self.s = DataFrame({"A": np.random.permutation(range(6))}) - self.df = DataFrame({"A": [0, 1], "B": np.random.randn(2)}) - def test_highlight_null(self): - df = DataFrame({"A": [0, np.nan]}) - result = df.style.highlight_null()._compute().ctx - expected = {(1, 0): [("background-color", "red")]} - assert result == expected +@pytest.fixture +def df(): + return DataFrame({"A": [0, np.nan, 10], "B": [1, None, 2]}) - def test_highlight_null_subset(self): - # GH 31345 - df = DataFrame({"A": [0, np.nan], "B": [0, np.nan]}) - result = ( - df.style.highlight_null(null_color="red", subset=["A"]) - .highlight_null(null_color="green", subset=["B"]) - ._compute() - .ctx - ) - expected = { - (1, 0): [("background-color", "red")], - (1, 1): [("background-color", "green")], - } - assert result == expected - @pytest.mark.parametrize("f", ["highlight_min", "highlight_max"]) - def test_highlight_minmax_basic(self, f): - expected = { - (0, 0): [("background-color", "red")], - (1, 0): [("background-color", "red")], - } - if f == "highlight_min": - df = -self.df - else: - df = self.df - result = getattr(df.style, f)(axis=1, color="red")._compute().ctx - assert result == expected +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0) - @pytest.mark.parametrize("f", ["highlight_min", "highlight_max"]) - @pytest.mark.parametrize( - "kwargs", - [ - {"axis": None, "color": "red"}, # test axis - {"axis": 0, "subset": ["A"], "color": "red"}, # test subset - {"axis": None, "props": "background-color: red"}, # test props - ], + +def 
test_highlight_null(styler): + result = styler.highlight_null()._compute().ctx + expected = { + (1, 0): [("background-color", "red")], + (1, 1): [("background-color", "red")], + } + assert result == expected + + +def test_highlight_null_subset(styler): + # GH 31345 + result = ( + styler.highlight_null(null_color="red", subset=["A"]) + .highlight_null(null_color="green", subset=["B"]) + ._compute() + .ctx ) - def test_highlight_minmax_ext(self, f, kwargs): - expected = {(1, 0): [("background-color", "red")]} - if f == "highlight_min": - df = -self.df - else: - df = self.df - result = getattr(df.style, f)(**kwargs)._compute().ctx - assert result == expected + expected = { + (1, 0): [("background-color", "red")], + (1, 1): [("background-color", "green")], + } + assert result == expected + + +@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"]) +def test_highlight_minmax_basic(df, f): + expected = { + (0, 1): [("background-color", "red")], + # ignores NaN row, + (2, 0): [("background-color", "red")], + } + if f == "highlight_min": + df = -df + result = getattr(df.style, f)(axis=1, color="red")._compute().ctx + assert result == expected + + +@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"]) +@pytest.mark.parametrize( + "kwargs", + [ + {"axis": None, "color": "red"}, # test axis + {"axis": 0, "subset": ["A"], "color": "red"}, # test subset and ignores NaN + {"axis": None, "props": "background-color: red"}, # test props + ], +) +def test_highlight_minmax_ext(df, f, kwargs): + expected = {(2, 0): [("background-color", "red")]} + if f == "highlight_min": + df = -df + result = getattr(df.style, f)(**kwargs)._compute().ctx + assert result == expected
- [x] add `pytest.fixture` instead of Class setup method - [x] no test removed or added, just amended to accept new fixtures
https://api.github.com/repos/pandas-dev/pandas/pulls/40551
2021-03-21T08:57:15Z
2021-03-23T16:13:15Z
2021-03-23T16:13:15Z
2021-03-23T16:24:49Z
TST: `test_tooltip.py` convert to functional tests instead of class
diff --git a/pandas/tests/io/formats/style/test_tooltip.py b/pandas/tests/io/formats/style/test_tooltip.py index 9539780287f15..71ce496cca030 100644 --- a/pandas/tests/io/formats/style/test_tooltip.py +++ b/pandas/tests/io/formats/style/test_tooltip.py @@ -7,99 +7,79 @@ from pandas.io.formats.style import Styler -class TestStylerTooltip: - @pytest.mark.parametrize( - "ttips", - [ - DataFrame( - data=[["Min", "Max"], [np.nan, ""]], - columns=["A", "B"], - index=["a", "b"], - ), - DataFrame(data=[["Max", "Min"]], columns=["B", "A"], index=["a"]), - DataFrame( - data=[["Min", "Max", None]], columns=["A", "B", "C"], index=["a"] - ), - ], +@pytest.fixture +def df(): + return DataFrame( + data=[[0, 1, 2], [3, 4, 5], [6, 7, 8]], + columns=["A", "B", "C"], + index=["x", "y", "z"], ) - def test_tooltip_render(self, ttips): - # GH 21266 - df = DataFrame(data=[[0, 3], [1, 2]], columns=["A", "B"], index=["a", "b"]) - s = Styler(df, uuid_len=0).set_tooltips(ttips).render() - - # test tooltip table level class - assert "#T__ .pd-t {\n visibility: hidden;\n" in s - - # test 'Min' tooltip added - assert ( - "#T__ #T__row0_col0:hover .pd-t {\n visibility: visible;\n}\n" - + '#T__ #T__row0_col0 .pd-t::after {\n content: "Min";\n}' - in s - ) - assert ( - '<td id="T__row0_col0" class="data row0 col0" >0<span class="pd-t">' - + "</span></td>" - in s - ) - - # test 'Max' tooltip added - assert ( - "#T__ #T__row0_col1:hover .pd-t {\n visibility: visible;\n}\n" - + '#T__ #T__row0_col1 .pd-t::after {\n content: "Max";\n}' - in s - ) - assert ( - '<td id="T__row0_col1" class="data row0 col1" >3<span class="pd-t">' - + "</span></td>" - in s - ) - - def test_tooltip_reindex(self): - # GH 39317 - df = DataFrame( - data=[[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=[0, 1, 2], index=[0, 1, 2] - ) - ttips = DataFrame( - data=[["Mi", "Ma"], ["Mu", "Mo"]], - columns=[0, 2], - index=[0, 2], - ) - s = Styler(df, uuid_len=0).set_tooltips(DataFrame(ttips)).render() - assert '#T__ #T__row0_col0 
.pd-t::after {\n content: "Mi";\n}' in s - assert '#T__ #T__row0_col2 .pd-t::after {\n content: "Ma";\n}' in s - assert '#T__ #T__row2_col0 .pd-t::after {\n content: "Mu";\n}' in s - assert '#T__ #T__row2_col2 .pd-t::after {\n content: "Mo";\n}' in s - - def test_tooltip_ignored(self): - # GH 21266 - df = DataFrame(data=[[0, 1], [2, 3]]) - s = Styler(df).render() # no set_tooltips() creates no <span> - assert '<style type="text/css">\n</style>' in s - assert '<span class="pd-t"></span>' not in s - - def test_tooltip_css_class(self): - # GH 21266 - df = DataFrame(data=[[0, 1], [2, 3]]) - s = ( - Styler(df, uuid_len=0) - .set_tooltips( - DataFrame([["tooltip"]]), - css_class="other-class", - props=[("color", "green")], - ) - .render() - ) - assert "#T__ .other-class {\n color: green;\n" in s - assert '#T__ #T__row0_col0 .other-class::after {\n content: "tooltip";\n' in s - - # GH 39563 - s = ( - Styler(df, uuid_len=0) - .set_tooltips( - DataFrame([["tooltip"]]), - css_class="other-class", - props="color:green;color:red;", - ) - .render() - ) - assert "#T__ .other-class {\n color: green;\n color: red;\n}" in s + + +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0) + + +@pytest.mark.parametrize( + "ttips", + [ + DataFrame( # Test basic reindex and ignoring blank + data=[["Min", "Max"], [np.nan, ""]], + columns=["A", "C"], + index=["x", "y"], + ), + DataFrame( # Test non-referenced columns, reversed col names, short index + data=[["Max", "Min", "Bad-Col"]], columns=["C", "A", "D"], index=["x"] + ), + ], +) +def test_tooltip_render(ttips, styler): + # GH 21266 + result = styler.set_tooltips(ttips).render() + + # test tooltip table level class + assert "#T__ .pd-t {\n visibility: hidden;\n" in result + + # test 'Min' tooltip added + assert "#T__ #T__row0_col0:hover .pd-t {\n visibility: visible;\n}" in result + assert '#T__ #T__row0_col0 .pd-t::after {\n content: "Min";\n}' in result + assert 'class="data row0 col0" >0<span class="pd-t"></span></td>' in 
result + + # test 'Max' tooltip added + assert "#T__ #T__row0_col2:hover .pd-t {\n visibility: visible;\n}" in result + assert '#T__ #T__row0_col2 .pd-t::after {\n content: "Max";\n}' in result + assert 'class="data row0 col2" >2<span class="pd-t"></span></td>' in result + + # test Nan, empty string and bad column ignored + assert "#T__ #T__row1_col0:hover .pd-t {\n visibility: visible;\n}" not in result + assert "#T__ #T__row1_col1:hover .pd-t {\n visibility: visible;\n}" not in result + assert "#T__ #T__row0_col1:hover .pd-t {\n visibility: visible;\n}" not in result + assert "#T__ #T__row1_col2:hover .pd-t {\n visibility: visible;\n}" not in result + assert "Bad-Col" not in result + + +def test_tooltip_ignored(styler): + # GH 21266 + result = styler.render() # no set_tooltips() creates no <span> + assert '<style type="text/css">\n</style>' in result + assert '<span class="pd-t"></span>' not in result + + +def test_tooltip_css_class(styler): + # GH 21266 + result = styler.set_tooltips( + DataFrame([["tooltip"]], index=["x"], columns=["A"]), + css_class="other-class", + props=[("color", "green")], + ).render() + assert "#T__ .other-class {\n color: green;\n" in result + assert '#T__ #T__row0_col0 .other-class::after {\n content: "tooltip";\n' in result + + # GH 39563 + result = styler.set_tooltips( # set_tooltips overwrites previous + DataFrame([["tooltip"]], index=["x"], columns=["A"]), + css_class="another-class", + props="color:green;color:red;", + ).render() + assert "#T__ .another-class {\n color: green;\n color: red;\n}" in result
- [x] adds `pytest.fixture` instead of Class setup method. - [x] removes `test_tooltip_reindex` since it is already being tested by `test_tooltip_render` - [x] other tests no changes except adopting fixtures
https://api.github.com/repos/pandas-dev/pandas/pulls/40550
2021-03-21T08:04:04Z
2021-03-23T16:14:12Z
2021-03-23T16:14:12Z
2021-03-23T16:24:57Z
TYP: internals.pyi
diff --git a/pandas/_libs/internals.pyi b/pandas/_libs/internals.pyi new file mode 100644 index 0000000000000..446ee299698c5 --- /dev/null +++ b/pandas/_libs/internals.pyi @@ -0,0 +1,58 @@ +from typing import ( + Iterator, + Sequence, + overload, +) + +import numpy as np + +from pandas._typing import ArrayLike + +def slice_len(slc: slice, objlen: int = ...) -> int: ... + + +def get_blkno_indexers( + blknos: np.ndarray, # int64_t[:] + group: bool = ..., +) -> list[tuple[int, slice | np.ndarray]]: ... + + +def get_blkno_placements( + blknos: np.ndarray, + group: bool = ..., +) -> Iterator[tuple[int, BlockPlacement]]: ... + + +class BlockPlacement: + def __init__(self, val: int | slice | np.ndarray): ... + + @property + def indexer(self) -> np.ndarray | slice: ... + + @property + def as_array(self) -> np.ndarray: ... + + @property + def is_slice_like(self) -> bool: ... + + @overload + def __getitem__(self, loc: slice | Sequence[int]) -> BlockPlacement: ... + + @overload + def __getitem__(self, loc: int) -> int: ... + + def __iter__(self) -> Iterator[int]: ... + + def __len__(self) -> int: ... + + def delete(self, loc) -> BlockPlacement: ... + + def append(self, others: list[BlockPlacement]) -> BlockPlacement: ... + + +class Block: + _mgr_locs: BlockPlacement + ndim: int + values: ArrayLike + + def __init__(self, values: ArrayLike, placement: BlockPlacement, ndim: int): ...
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40549
2021-03-21T04:07:27Z
2021-03-29T18:28:14Z
2021-03-29T18:28:14Z
2021-03-29T18:32:25Z
REF: simplify BlockManager.idelete
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index cf153ddd2cbbd..3b7a6037a9715 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -5240,6 +5240,7 @@ Write to a feather file. Read from a feather file. .. ipython:: python + :okwarning: result = pd.read_feather("example.feather") result @@ -5323,6 +5324,7 @@ Write to a parquet file. Read from a parquet file. .. ipython:: python + :okwarning: result = pd.read_parquet("example_fp.parquet", engine="fastparquet") result = pd.read_parquet("example_pa.parquet", engine="pyarrow") diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c20b2840a40ab..f1cce9b94e3dc 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4063,7 +4063,7 @@ def __delitem__(self, key) -> None: # there was no match, this call should raise the appropriate # exception: loc = self.axes[-1].get_loc(key) - self._mgr.idelete(loc) + self._mgr = self._mgr.idelete(loc) # delete from the caches try: diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 34b3d83c066c2..37dae775a9e54 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -848,6 +848,7 @@ def idelete(self, indexer): self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]] self._axes = [self._axes[0], self._axes[1][to_keep]] + return self def iset(self, loc: Union[int, slice, np.ndarray], value): """ @@ -1259,7 +1260,7 @@ def apply(self, func, **kwargs): def setitem(self, indexer, value): return self.apply_with_block("setitem", indexer=indexer, value=value) - def idelete(self, indexer): + def idelete(self, indexer) -> SingleArrayManager: """ Delete selected locations in-place (new array, same ArrayManager) """ @@ -1268,6 +1269,7 @@ def idelete(self, indexer): self.arrays = [self.arrays[0][to_keep]] self._axes = [self._axes[0][to_keep]] + return self def _get_data_subset(self, predicate: Callable) -> ArrayManager: # used 
in get_numeric_data / get_bool_data diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index da78fc5dfba76..b84bbb20b09ad 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1066,41 +1066,18 @@ def iget_values(self, i: int) -> ArrayLike: values = block.iget(self.blklocs[i]) return values - def idelete(self, indexer): + def idelete(self, indexer) -> BlockManager: """ - Delete selected locations in-place (new block and array, same BlockManager) + Delete selected locations, returning a new BlockManager. """ is_deleted = np.zeros(self.shape[0], dtype=np.bool_) is_deleted[indexer] = True - ref_loc_offset = -is_deleted.cumsum() + taker = (~is_deleted).nonzero()[0] - is_blk_deleted = [False] * len(self.blocks) - - if isinstance(indexer, int): - affected_start = indexer - else: - affected_start = is_deleted.nonzero()[0][0] - - for blkno, _ in _fast_count_smallints(self.blknos[affected_start:]): - blk = self.blocks[blkno] - bml = blk.mgr_locs - blk_del = is_deleted[bml.indexer].nonzero()[0] - - if len(blk_del) == len(bml): - is_blk_deleted[blkno] = True - continue - elif len(blk_del) != 0: - blk.delete(blk_del) - bml = blk.mgr_locs - - blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer]) - - # FIXME: use Index.delete as soon as it uses fastpath=True - self.axes[0] = self.items[~is_deleted] - self.blocks = tuple( - b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno] - ) - self._rebuild_blknos_and_blklocs() + nbs = self._slice_take_blocks_ax0(taker, only_slice=True) + new_columns = self.items[~is_deleted] + axes = [new_columns, self.axes[1]] + return type(self)._simple_new(tuple(nbs), axes) def iset(self, loc: Union[int, slice, np.ndarray], value): """ @@ -1715,7 +1692,7 @@ def _consolidate_check(self): def _consolidate_inplace(self): pass - def idelete(self, indexer): + def idelete(self, indexer) -> SingleBlockManager: """ Delete single location from SingleBlockManager. 
@@ -1723,6 +1700,7 @@ def idelete(self, indexer): """ self._block.delete(indexer) self.axes[0] = self.axes[0].delete(indexer) + return self def fast_xs(self, loc): """
https://api.github.com/repos/pandas-dev/pandas/pulls/40548
2021-03-21T00:41:20Z
2021-03-30T13:13:03Z
2021-03-30T13:13:03Z
2021-03-30T14:01:35Z
CLN: remove unreachable quantile code
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index cf153ddd2cbbd..3b7a6037a9715 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -5240,6 +5240,7 @@ Write to a feather file. Read from a feather file. .. ipython:: python + :okwarning: result = pd.read_feather("example.feather") result @@ -5323,6 +5324,7 @@ Write to a parquet file. Read from a parquet file. .. ipython:: python + :okwarning: result = pd.read_parquet("example_fp.parquet", engine="fastparquet") result = pd.read_parquet("example_pa.parquet", engine="pyarrow") diff --git a/pandas/core/array_algos/quantile.py b/pandas/core/array_algos/quantile.py index eb96c14286715..5f9e67a484d24 100644 --- a/pandas/core/array_algos/quantile.py +++ b/pandas/core/array_algos/quantile.py @@ -4,13 +4,9 @@ import numpy as np -from pandas._libs import lib from pandas._typing import ArrayLike -from pandas.core.dtypes.common import ( - is_list_like, - is_sparse, -) +from pandas.core.dtypes.common import is_sparse from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, @@ -22,16 +18,15 @@ from pandas.core.arrays import ExtensionArray -def quantile_compat(values: ArrayLike, qs, interpolation: str, axis: int) -> ArrayLike: +def quantile_compat(values: ArrayLike, qs: np.ndarray, interpolation: str) -> ArrayLike: """ Compute the quantiles of the given values for each quantile in `qs`. 
Parameters ---------- values : np.ndarray or ExtensionArray - qs : a scalar or list of the quantiles to be computed + qs : np.ndarray[float64] interpolation : str - axis : int Returns ------- @@ -40,18 +35,17 @@ def quantile_compat(values: ArrayLike, qs, interpolation: str, axis: int) -> Arr if isinstance(values, np.ndarray): fill_value = na_value_for_dtype(values.dtype, compat=False) mask = isna(values) - return quantile_with_mask(values, mask, fill_value, qs, interpolation, axis) + return _quantile_with_mask(values, mask, fill_value, qs, interpolation) else: - return quantile_ea_compat(values, qs, interpolation, axis) + return _quantile_ea_compat(values, qs, interpolation) -def quantile_with_mask( +def _quantile_with_mask( values: np.ndarray, mask: np.ndarray, fill_value, - qs, + qs: np.ndarray, interpolation: str, - axis: int, ) -> np.ndarray: """ Compute the quantiles of the given values for each quantile in `qs`. @@ -66,11 +60,9 @@ def quantile_with_mask( fill_value : Scalar The value to interpret fill NA entries with For ExtensionArray, this is _values_for_factorize()[1] - qs : a scalar or list of the quantiles to be computed + qs : np.ndarray[float64] interpolation : str Type of interpolation - axis : int - Axis along which to compute quantiles. Returns ------- @@ -80,12 +72,12 @@ def quantile_with_mask( ----- Assumes values is already 2D. For ExtensionArray this means np.atleast_2d has been called on _values_for_factorize()[0] + + Quantile is computed along axis=1. 
""" - is_empty = values.shape[axis] == 0 - orig_scalar = not is_list_like(qs) - if orig_scalar: - # make list-like, unpack later - qs = [qs] + assert values.ndim == 2 + + is_empty = values.shape[1] == 0 if is_empty: # create the array of na_values @@ -97,29 +89,22 @@ def quantile_with_mask( result = nanpercentile( values, np.array(qs) * 100, - axis=axis, na_value=fill_value, mask=mask, - ndim=values.ndim, interpolation=interpolation, ) result = np.array(result, copy=False) result = result.T - if orig_scalar: - assert result.shape[-1] == 1, result.shape - result = result[..., 0] - result = lib.item_from_zerodim(result) - return result -def quantile_ea_compat( - values: ExtensionArray, qs, interpolation: str, axis: int +def _quantile_ea_compat( + values: ExtensionArray, qs: np.ndarray, interpolation: str ) -> ExtensionArray: """ - ExtensionArray compatibility layer for quantile_with_mask. + ExtensionArray compatibility layer for _quantile_with_mask. We pretend that an ExtensionArray with shape (N,) is actually (1, N,) for compatibility with non-EA code. @@ -127,9 +112,8 @@ def quantile_ea_compat( Parameters ---------- values : ExtensionArray - qs : a scalar or list of the quantiles to be computed + qs : np.ndarray[float64] interpolation: str - axis : int Returns ------- @@ -145,19 +129,12 @@ def quantile_ea_compat( arr, fill_value = values._values_for_factorize() arr = np.atleast_2d(arr) - result = quantile_with_mask(arr, mask, fill_value, qs, interpolation, axis) + result = _quantile_with_mask(arr, mask, fill_value, qs, interpolation) if not is_sparse(orig.dtype): # shape[0] should be 1 as long as EAs are 1D - - if result.ndim == 1: - # i.e. 
qs was originally a scalar - assert result.shape == (1,), result.shape - result = type(orig)._from_factorized(result, orig) - - else: - assert result.shape == (1, len(qs)), result.shape - result = type(orig)._from_factorized(result[0], orig) + assert result.shape == (1, len(qs)), result.shape + result = type(orig)._from_factorized(result[0], orig) # error: Incompatible return value type (got "ndarray", expected "ExtensionArray") return result # type: ignore[return-value] diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 34b3d83c066c2..40d7a49a1d6be 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -512,7 +512,9 @@ def quantile( arrs = [ensure_block_shape(x, 2) for x in self.arrays] assert axis == 1 - new_arrs = [quantile_compat(x, qs, interpolation, axis=axis) for x in arrs] + new_arrs = [ + quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs + ] for i, arr in enumerate(new_arrs): if arr.ndim == 2: assert arr.shape[0] == 1, arr.shape diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 174ea8760b0db..8cb5c6c56006e 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1426,7 +1426,7 @@ def quantile( assert axis == 1 # only ever called this way assert is_list_like(qs) # caller is responsible for this - result = quantile_compat(self.values, qs, interpolation, axis) + result = quantile_compat(self.values, np.asarray(qs._values), interpolation) return new_block(result, placement=self._mgr_locs, ndim=2) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 45f275664b206..2aadf5fc07f87 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1648,7 +1648,7 @@ def f(x, y): def _nanpercentile_1d( - values: np.ndarray, mask: np.ndarray, q, na_value: Scalar, interpolation + values: np.ndarray, mask: np.ndarray, q: np.ndarray, na_value: Scalar, interpolation ) -> 
Union[Scalar, np.ndarray]: """ Wrapper for np.percentile that skips missing values, specialized to @@ -1659,7 +1659,7 @@ def _nanpercentile_1d( values : array over which to find quantiles mask : ndarray[bool] locations in values that should be considered missing - q : scalar or array of quantile indices to find + q : np.ndarray[float64] of quantile indices to find na_value : scalar value to return for empty or all-null values interpolation : str @@ -1672,22 +1672,17 @@ def _nanpercentile_1d( values = values[~mask] if len(values) == 0: - if lib.is_scalar(q): - return na_value - else: - return np.array([na_value] * len(q), dtype=values.dtype) + return np.array([na_value] * len(q), dtype=values.dtype) return np.percentile(values, q, interpolation=interpolation) def nanpercentile( values: np.ndarray, - q, + q: np.ndarray, *, - axis: int, na_value, mask: np.ndarray, - ndim: int, interpolation, ): """ @@ -1695,29 +1690,26 @@ def nanpercentile( Parameters ---------- - values : array over which to find quantiles - q : scalar or array of quantile indices to find - axis : {0, 1} + values : np.ndarray[ndim=2] over which to find quantiles + q : np.ndarray[float64] of quantile indices to find na_value : scalar value to return for empty or all-null values mask : ndarray[bool] locations in values that should be considered missing - ndim : {1, 2} interpolation : str Returns ------- quantiles : scalar or array """ + if values.dtype.kind in ["m", "M"]: # need to cast to integer to avoid rounding errors in numpy result = nanpercentile( values.view("i8"), q=q, - axis=axis, na_value=na_value.view("i8"), mask=mask, - ndim=ndim, interpolation=interpolation, ) @@ -1726,25 +1718,16 @@ def nanpercentile( return result.astype(values.dtype) if not lib.is_scalar(mask) and mask.any(): - if ndim == 1: - return _nanpercentile_1d( - values, mask, q, na_value, interpolation=interpolation - ) - else: - # for nonconsolidatable blocks mask is 1D, but values 2D - if mask.ndim < values.ndim: - mask = 
mask.reshape(values.shape) - if axis == 0: - values = values.T - mask = mask.T - result = [ - _nanpercentile_1d(val, m, q, na_value, interpolation=interpolation) - for (val, m) in zip(list(values), list(mask)) - ] - result = np.array(result, dtype=values.dtype, copy=False).T - return result + # Caller is responsible for ensuring mask shape match + assert mask.shape == values.shape + result = [ + _nanpercentile_1d(val, m, q, na_value, interpolation=interpolation) + for (val, m) in zip(list(values), list(mask)) + ] + result = np.array(result, dtype=values.dtype, copy=False).T + return result else: - return np.percentile(values, q, axis=axis, interpolation=interpolation) + return np.percentile(values, q, axis=1, interpolation=interpolation) def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40547
2021-03-21T00:35:35Z
2021-03-22T13:05:03Z
2021-03-22T13:05:03Z
2021-03-22T14:17:49Z
CLN: rank_1d
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 047eb848b7540..cda20e536c11c 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -946,15 +946,19 @@ def rank_1d( cdef: TiebreakEnumType tiebreak Py_ssize_t i, j, N, grp_start=0, dups=0, sum_ranks=0 - Py_ssize_t grp_vals_seen=1, grp_na_count=0, grp_tie_count=0 + Py_ssize_t grp_vals_seen=1, grp_na_count=0 ndarray[int64_t, ndim=1] lexsort_indexer ndarray[float64_t, ndim=1] grp_sizes, out ndarray[rank_t, ndim=1] masked_vals ndarray[uint8_t, ndim=1] mask - bint keep_na, at_end, next_val_diff, check_labels + bint keep_na, at_end, next_val_diff, check_labels, group_changed rank_t nan_fill_val tiebreak = tiebreakers[ties_method] + if tiebreak == TIEBREAK_FIRST: + if not ascending: + tiebreak = TIEBREAK_FIRST_DESCENDING + keep_na = na_option == 'keep' N = len(values) @@ -962,6 +966,7 @@ def rank_1d( assert <Py_ssize_t>len(labels) == N out = np.empty(N) grp_sizes = np.ones(N) + # If all 0 labels, can short-circuit later label # comparisons check_labels = np.any(labels) @@ -983,6 +988,12 @@ def rank_1d( else: mask = np.zeros(shape=len(masked_vals), dtype=np.uint8) + # If `na_option == 'top'`, we want to assign the lowest rank + # to NaN regardless of ascending/descending. So if ascending, + # fill with lowest value of type to end up with lowest rank. + # If descending, fill with highest value since descending + # will flip the ordering to still end up with lowest rank. + # Symmetric logic applies to `na_option == 'bottom'` if ascending ^ (na_option == 'top'): if rank_t is object: nan_fill_val = Infinity() @@ -1025,36 +1036,36 @@ def rank_1d( if rank_t is object: for i in range(N): at_end = i == N - 1 + # dups and sum_ranks will be incremented each loop where # the value / group remains the same, and should be reset - # when either of those change - # Used to calculate tiebreakers + # when either of those change. 
Used to calculate tiebreakers dups += 1 sum_ranks += i - grp_start + 1 + next_val_diff = at_end or are_diff(masked_vals[lexsort_indexer[i]], + masked_vals[lexsort_indexer[i+1]]) + + # We'll need this check later anyway to determine group size, so just + # compute it here since shortcircuiting won't help + group_changed = at_end or (check_labels and + (labels[lexsort_indexer[i]] + != labels[lexsort_indexer[i+1]])) + # Update out only when there is a transition of values or labels. # When a new value or group is encountered, go back #dups steps( # the number of occurrence of current value) and assign the ranks # based on the starting index of the current group (grp_start) # and the current index - if not at_end: - next_val_diff = are_diff(masked_vals[lexsort_indexer[i]], - masked_vals[lexsort_indexer[i+1]]) - else: - next_val_diff = True - - if (next_val_diff - or (mask[lexsort_indexer[i]] ^ mask[lexsort_indexer[i+1]]) - or (check_labels - and (labels[lexsort_indexer[i]] - != labels[lexsort_indexer[i+1]])) - ): - # if keep_na, check for missing values and assign back + if (next_val_diff or group_changed + or (mask[lexsort_indexer[i]] ^ mask[lexsort_indexer[i+1]])): + + # If keep_na, check for missing values and assign back # to the result where appropriate if keep_na and mask[lexsort_indexer[i]]: + grp_na_count = dups for j in range(i - dups + 1, i + 1): out[lexsort_indexer[j]] = NaN - grp_na_count = dups elif tiebreak == TIEBREAK_AVERAGE: for j in range(i - dups + 1, i + 1): out[lexsort_indexer[j]] = sum_ranks / <float64_t>dups @@ -1064,37 +1075,41 @@ def rank_1d( elif tiebreak == TIEBREAK_MAX: for j in range(i - dups + 1, i + 1): out[lexsort_indexer[j]] = i - grp_start + 1 + + # With n as the previous rank in the group and m as the number + # of duplicates in this stretch, if TIEBREAK_FIRST and ascending, + # then rankings should be n + 1, n + 2 ... 
n + m elif tiebreak == TIEBREAK_FIRST: for j in range(i - dups + 1, i + 1): - if ascending: - out[lexsort_indexer[j]] = j + 1 - grp_start - else: - out[lexsort_indexer[j]] = 2 * i - j - dups + 2 - grp_start + out[lexsort_indexer[j]] = j + 1 - grp_start + + # If TIEBREAK_FIRST and descending, the ranking should be + # n + m, n + (m - 1) ... n + 1. This is equivalent to + # (i - dups + 1) + (i - j + 1) - grp_start + elif tiebreak == TIEBREAK_FIRST_DESCENDING: + for j in range(i - dups + 1, i + 1): + out[lexsort_indexer[j]] = 2 * i - j - dups + 2 - grp_start elif tiebreak == TIEBREAK_DENSE: for j in range(i - dups + 1, i + 1): out[lexsort_indexer[j]] = grp_vals_seen - # look forward to the next value (using the sorting in _as) + # Look forward to the next value (using the sorting in lexsort_indexer) # if the value does not equal the current value then we need to # reset the dups and sum_ranks, knowing that a new value is - # coming up. the conditional also needs to handle nan equality + # coming up. The conditional also needs to handle nan equality # and the end of iteration if next_val_diff or (mask[lexsort_indexer[i]] ^ mask[lexsort_indexer[i+1]]): dups = sum_ranks = 0 grp_vals_seen += 1 - grp_tie_count += 1 # Similar to the previous conditional, check now if we are # moving to a new group. If so, keep track of the index where # the new group occurs, so the tiebreaker calculations can - # decrement that from their position. fill in the size of each - # group encountered (used by pct calculations later). also be + # decrement that from their position. Fill in the size of each + # group encountered (used by pct calculations later). 
Also be # sure to reset any of the items helping to calculate dups - if (at_end or - (check_labels - and (labels[lexsort_indexer[i]] - != labels[lexsort_indexer[i+1]]))): + if group_changed: if tiebreak != TIEBREAK_DENSE: for j in range(grp_start, i + 1): grp_sizes[lexsort_indexer[j]] = \ @@ -1102,46 +1117,45 @@ def rank_1d( else: for j in range(grp_start, i + 1): grp_sizes[lexsort_indexer[j]] = \ - (grp_tie_count - (grp_na_count > 0)) + (grp_vals_seen - 1 - (grp_na_count > 0)) dups = sum_ranks = 0 grp_na_count = 0 - grp_tie_count = 0 grp_start = i + 1 grp_vals_seen = 1 else: with nogil: for i in range(N): at_end = i == N - 1 + # dups and sum_ranks will be incremented each loop where # the value / group remains the same, and should be reset - # when either of those change - # Used to calculate tiebreakers + # when either of those change. Used to calculate tiebreakers dups += 1 sum_ranks += i - grp_start + 1 + next_val_diff = at_end or (masked_vals[lexsort_indexer[i]] + != masked_vals[lexsort_indexer[i+1]]) + + # We'll need this check later anyway to determine group size, so just + # compute it here since shortcircuiting won't help + group_changed = at_end or (check_labels and + (labels[lexsort_indexer[i]] + != labels[lexsort_indexer[i+1]])) + # Update out only when there is a transition of values or labels. 
# When a new value or group is encountered, go back #dups steps( # the number of occurrence of current value) and assign the ranks # based on the starting index of the current group (grp_start) # and the current index - if not at_end: - next_val_diff = (masked_vals[lexsort_indexer[i]] - != masked_vals[lexsort_indexer[i+1]]) - else: - next_val_diff = True - - if (next_val_diff - or (mask[lexsort_indexer[i]] ^ mask[lexsort_indexer[i+1]]) - or (check_labels - and (labels[lexsort_indexer[i]] - != labels[lexsort_indexer[i+1]])) - ): - # if keep_na, check for missing values and assign back + if (next_val_diff or group_changed + or (mask[lexsort_indexer[i]] ^ mask[lexsort_indexer[i+1]])): + + # If keep_na, check for missing values and assign back # to the result where appropriate if keep_na and mask[lexsort_indexer[i]]: + grp_na_count = dups for j in range(i - dups + 1, i + 1): out[lexsort_indexer[j]] = NaN - grp_na_count = dups elif tiebreak == TIEBREAK_AVERAGE: for j in range(i - dups + 1, i + 1): out[lexsort_indexer[j]] = sum_ranks / <float64_t>dups @@ -1151,37 +1165,41 @@ def rank_1d( elif tiebreak == TIEBREAK_MAX: for j in range(i - dups + 1, i + 1): out[lexsort_indexer[j]] = i - grp_start + 1 + + # With n as the previous rank in the group and m as the number + # of duplicates in this stretch, if TIEBREAK_FIRST and ascending, + # then rankings should be n + 1, n + 2 ... n + m elif tiebreak == TIEBREAK_FIRST: for j in range(i - dups + 1, i + 1): - if ascending: - out[lexsort_indexer[j]] = j + 1 - grp_start - else: - out[lexsort_indexer[j]] = \ - (2 * i - j - dups + 2 - grp_start) + out[lexsort_indexer[j]] = j + 1 - grp_start + + # If TIEBREAK_FIRST and descending, the ranking should be + # n + m, n + (m - 1) ... n + 1. 
This is equivalent to + # (i - dups + 1) + (i - j + 1) - grp_start + elif tiebreak == TIEBREAK_FIRST_DESCENDING: + for j in range(i - dups + 1, i + 1): + out[lexsort_indexer[j]] = 2 * i - j - dups + 2 - grp_start elif tiebreak == TIEBREAK_DENSE: for j in range(i - dups + 1, i + 1): out[lexsort_indexer[j]] = grp_vals_seen - # look forward to the next value (using the sorting in + # Look forward to the next value (using the sorting in # lexsort_indexer) if the value does not equal the current - # value then we need to reset the dups and sum_ranks, - # knowing that a new value is coming up. the conditional - # also needs to handle nan equality and the end of iteration + # value then we need to reset the dups and sum_ranks, knowing + # that a new value is coming up. The conditional also needs + # to handle nan equality and the end of iteration if next_val_diff or (mask[lexsort_indexer[i]] ^ mask[lexsort_indexer[i+1]]): dups = sum_ranks = 0 grp_vals_seen += 1 - grp_tie_count += 1 # Similar to the previous conditional, check now if we are # moving to a new group. If so, keep track of the index where # the new group occurs, so the tiebreaker calculations can - # decrement that from their position. fill in the size of each - # group encountered (used by pct calculations later). also be + # decrement that from their position. Fill in the size of each + # group encountered (used by pct calculations later). Also be # sure to reset any of the items helping to calculate dups - if at_end or (check_labels and - (labels[lexsort_indexer[i]] - != labels[lexsort_indexer[i+1]])): + if group_changed: if tiebreak != TIEBREAK_DENSE: for j in range(grp_start, i + 1): grp_sizes[lexsort_indexer[j]] = \ @@ -1189,10 +1207,9 @@ def rank_1d( else: for j in range(grp_start, i + 1): grp_sizes[lexsort_indexer[j]] = \ - (grp_tie_count - (grp_na_count > 0)) + (grp_vals_seen - 1 - (grp_na_count > 0)) dups = sum_ranks = 0 grp_na_count = 0 - grp_tie_count = 0 grp_start = i + 1 grp_vals_seen = 1
Long overdue followup to #38744. Only half the diff is relevant since the changes are duplicated over the if rank_t is object... else with no_gil... structure.
https://api.github.com/repos/pandas-dev/pandas/pulls/40546
2021-03-21T00:22:13Z
2021-03-23T20:31:22Z
2021-03-23T20:31:22Z
2021-03-23T20:32:33Z
TYP: mostly core.arrays, some core.indexes
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index 3b472b162cdff..301644274111b 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -411,7 +411,7 @@ cdef class {{name}}HashTable(HashTable): k = kh_get_{{dtype}}(self.table, ckey) return k != self.table.n_buckets - def sizeof(self, deep=False): + def sizeof(self, deep: bool = False) -> int: """ return the size of my table in bytes """ overhead = 4 * sizeof(uint32_t) + 3 * sizeof(uint32_t*) for_flags = max(1, self.table.n_buckets >> 5) * sizeof(uint32_t) @@ -419,7 +419,7 @@ cdef class {{name}}HashTable(HashTable): sizeof(Py_ssize_t)) # vals return overhead + for_flags + for_pairs - def get_state(self): + def get_state(self) -> dict[str, int]: """ returns infos about the state of the hashtable""" return { 'n_buckets' : self.table.n_buckets, @@ -747,14 +747,14 @@ cdef class StringHashTable(HashTable): kh_destroy_str(self.table) self.table = NULL - def sizeof(self, deep=False): + def sizeof(self, deep: bool = False) -> int: overhead = 4 * sizeof(uint32_t) + 3 * sizeof(uint32_t*) for_flags = max(1, self.table.n_buckets >> 5) * sizeof(uint32_t) for_pairs = self.table.n_buckets * (sizeof(char *) + # keys sizeof(Py_ssize_t)) # vals return overhead + for_flags + for_pairs - def get_state(self): + def get_state(self) -> dict[str, int]: """ returns infos about the state of the hashtable""" return { 'n_buckets' : self.table.n_buckets, @@ -1079,7 +1079,7 @@ cdef class PyObjectHashTable(HashTable): k = kh_get_pymap(self.table, <PyObject*>key) return k != self.table.n_buckets - def sizeof(self, deep=False): + def sizeof(self, deep: bool = False) -> int: """ return the size of my table in bytes """ overhead = 4 * sizeof(uint32_t) + 3 * sizeof(uint32_t*) for_flags = max(1, self.table.n_buckets >> 5) * sizeof(uint32_t) @@ -1087,7 +1087,7 @@ cdef class PyObjectHashTable(HashTable): sizeof(Py_ssize_t)) # vals return overhead + 
for_flags + for_pairs - def get_state(self): + def get_state(self) -> dict[str, int]: """ returns infos about the current state of the hashtable like size, number of buckets and so on. diff --git a/pandas/_libs/ops.pyx b/pandas/_libs/ops.pyx index ecb7041fb2c5a..7951bb5c093ef 100644 --- a/pandas/_libs/ops.pyx +++ b/pandas/_libs/ops.pyx @@ -258,7 +258,7 @@ def vec_binop(object[:] left, object[:] right, object op) -> ndarray: def maybe_convert_bool(ndarray[object] arr, - true_values=None, false_values=None): + true_values=None, false_values=None) -> ndarray: cdef: Py_ssize_t i, n ndarray[uint8_t] result diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 1d99ebba3b9f0..f536c8dd76f0d 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -311,7 +311,9 @@ cdef convert_to_timedelta64(object ts, str unit): @cython.boundscheck(False) @cython.wraparound(False) -def array_to_timedelta64(ndarray[object] values, str unit=None, str errors="raise"): +def array_to_timedelta64( + ndarray[object] values, str unit=None, str errors="raise" +) -> ndarray: """ Convert an ndarray to an array of timedeltas. If errors == 'coerce', coerce non-convertible objects to NaT. Otherwise, raise. 
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 541dd8abee3c3..0c8a5bbc33c91 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1619,7 +1619,7 @@ def searchsorted(arr, value, side="left", sorter=None) -> np.ndarray: _diff_special = {"float64", "float32", "int64", "int32", "int16", "int8"} -def diff(arr, n: int, axis: int = 0, stacklevel=3): +def diff(arr, n: int, axis: int = 0, stacklevel: int = 3): """ difference of n between self, analogous to s-s.shift(n) @@ -1865,7 +1865,7 @@ def safe_sort( return ordered, ensure_platform_int(new_codes) -def _sort_mixed(values): +def _sort_mixed(values) -> np.ndarray: """ order ints before strings in 1d arrays, safe in py3 """ str_pos = np.array([isinstance(x, str) for x in values], dtype=bool) nums = np.sort(values[~str_pos]) diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 427b3106ea10c..08061eb1ec28c 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -157,8 +157,7 @@ def ndim(self) -> int: @cache_readonly def size(self) -> int: - # error: Incompatible return value type (got "number", expected "int") - return np.prod(self.shape) # type: ignore[return-value] + return self._ndarray.size @cache_readonly def nbytes(self) -> int: @@ -190,7 +189,7 @@ def equals(self, other) -> bool: return False return bool(array_equivalent(self._ndarray, other._ndarray)) - def _values_for_argsort(self): + def _values_for_argsort(self) -> np.ndarray: return self._ndarray # Signature of "argmin" incompatible with supertype "ExtensionArray" diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 9aafea4b998a1..32c3095c3e6ee 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -606,7 +606,9 @@ def _from_inferred_categories( if true_values is None: true_values = ["True", "TRUE", "true"] - cats = cats.isin(true_values) + # error: Incompatible types in assignment (expression 
has type + # "ndarray", variable has type "Index") + cats = cats.isin(true_values) # type: ignore[assignment] if known_categories: # Recode from observation order to dtype.categories order. @@ -1444,7 +1446,7 @@ def memory_usage(self, deep: bool = False) -> int: """ return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep) - def isna(self): + def isna(self) -> np.ndarray: """ Detect missing values @@ -1465,7 +1467,7 @@ def isna(self): isnull = isna - def notna(self): + def notna(self) -> np.ndarray: """ Inverse of isna @@ -1731,7 +1733,7 @@ def view(self, dtype=None): raise NotImplementedError(dtype) return self._from_backing_data(self._ndarray) - def to_dense(self): + def to_dense(self) -> np.ndarray: """ Return my 'dense' representation @@ -1804,14 +1806,14 @@ def __contains__(self, key) -> bool: """ # if key is a NaN, check if any NaN is in self. if is_valid_na_for_dtype(key, self.categories.dtype): - return self.isna().any() + return bool(self.isna().any()) return contains(self, key, container=self._codes) # ------------------------------------------------------------------ # Rendering Methods - def _formatter(self, boxed=False): + def _formatter(self, boxed: bool = False): # Defer to CategoricalFormatter's formatter. 
return None @@ -1889,7 +1891,7 @@ def _repr_footer(self) -> str: info = self._repr_categories_info() return f"Length: {len(self)}\n{info}" - def _get_repr(self, length=True, na_rep="NaN", footer=True) -> str: + def _get_repr(self, length: bool = True, na_rep="NaN", footer: bool = True) -> str: from pandas.io.formats import format as fmt formatter = fmt.CategoricalFormatter( diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index cefb9bfa51280..7be06fe92c418 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -327,7 +327,7 @@ def _format_native_types(self, na_rep="NaT", date_format=None): """ raise AbstractMethodError(self) - def _formatter(self, boxed=False): + def _formatter(self, boxed: bool = False): # TODO: Remove Datetime & DatetimeTZ formatters. return "'{}'".format @@ -354,7 +354,7 @@ def __getitem__( result._freq = self._get_getitem_freq(key) return result - def _get_getitem_freq(self, key): + def _get_getitem_freq(self, key) -> Optional[BaseOffset]: """ Find the `freq` attribute to assign to the result of a __getitem__ lookup. """ @@ -406,7 +406,7 @@ def _maybe_clear_freq(self): # DatetimeArray and TimedeltaArray pass - def astype(self, dtype, copy=True): + def astype(self, dtype, copy: bool = True): # Some notes on cases we don't have to handle here in the base class: # 1. PeriodArray.astype handles period -> period # 2. DatetimeArray.astype handles conversion between tz. @@ -545,7 +545,7 @@ def _values_for_factorize(self): @classmethod def _from_factorized( - cls: Type[DatetimeLikeArrayT], values, original + cls: Type[DatetimeLikeArrayT], values, original: DatetimeLikeArrayT ) -> DatetimeLikeArrayT: return cls(values, dtype=original.dtype) @@ -939,7 +939,7 @@ def freq(self, value): self._freq = value @property - def freqstr(self): + def freqstr(self) -> Optional[str]: """ Return the frequency object as a string if its set, otherwise None. 
""" @@ -948,7 +948,7 @@ def freqstr(self): return self.freq.freqstr @property # NB: override with cache_readonly in immutable subclasses - def inferred_freq(self): + def inferred_freq(self) -> Optional[str]: """ Tries to return a string representing a frequency guess, generated by infer_freq. Returns None if it can't autodetect the @@ -963,8 +963,11 @@ def inferred_freq(self): @property # NB: override with cache_readonly in immutable subclasses def _resolution_obj(self) -> Optional[Resolution]: + freqstr = self.freqstr + if freqstr is None: + return None try: - return Resolution.get_reso_from_freq(self.freqstr) + return Resolution.get_reso_from_freq(freqstr) except KeyError: return None @@ -1241,7 +1244,7 @@ def _addsub_object_array(self, other: np.ndarray, op): ) return result - def _time_shift(self, periods, freq=None): + def _time_shift(self, periods: int, freq=None): """ Shift each value by `periods`. @@ -1440,7 +1443,7 @@ def __isub__(self, other): # -------------------------------------------------------------- # Reductions - def min(self, *, axis=None, skipna=True, **kwargs): + def min(self, *, axis: Optional[int] = None, skipna: bool = True, **kwargs): """ Return the minimum value of the Array or minimum along an axis. @@ -1469,7 +1472,7 @@ def min(self, *, axis=None, skipna=True, **kwargs): result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) - def max(self, *, axis=None, skipna=True, **kwargs): + def max(self, *, axis: Optional[int] = None, skipna: bool = True, **kwargs): """ Return the maximum value of the Array or maximum along an axis. @@ -1500,7 +1503,7 @@ def max(self, *, axis=None, skipna=True, **kwargs): result = nanops.nanmax(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) - def mean(self, *, skipna=True, axis: Optional[int] = 0): + def mean(self, *, skipna: bool = True, axis: Optional[int] = 0): """ Return the mean value of the Array. 
@@ -1568,7 +1571,7 @@ class DatelikeOps(DatetimeLikeArrayMixin): URL="https://docs.python.org/3/library/datetime.html" "#strftime-and-strptime-behavior" ) - def strftime(self, date_format): + def strftime(self, date_format: str) -> np.ndarray: """ Convert to Index using specified date_format. @@ -1760,7 +1763,7 @@ def all(self, *, axis: Optional[int] = None, skipna: bool = True): # -------------------------------------------------------------- # Frequency Methods - def _maybe_clear_freq(self): + def _maybe_clear_freq(self) -> None: self._freq = None def _with_freq(self, freq): diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index d1f0f506766a8..956a93a142afe 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -84,6 +84,11 @@ if TYPE_CHECKING: from typing import Literal + from pandas.core.arrays import ( + PeriodArray, + TimedeltaArray, + ) + _midnight = time(0, 0) @@ -244,7 +249,7 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps): _dtype: Union[np.dtype, DatetimeTZDtype] _freq = None - def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy=False): + def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy: bool = False): values = extract_array(values, extract_numpy=True) if isinstance(values, IntegerArray): values = values.to_numpy("int64", na_value=iNaT) @@ -319,7 +324,7 @@ def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy=False): @classmethod def _simple_new( - cls, values, freq: Optional[BaseOffset] = None, dtype=DT64NS_DTYPE + cls, values: np.ndarray, freq: Optional[BaseOffset] = None, dtype=DT64NS_DTYPE ) -> DatetimeArray: assert isinstance(values, np.ndarray) assert values.dtype == DT64NS_DTYPE @@ -339,11 +344,11 @@ def _from_sequence_not_strict( cls, data, dtype=None, - copy=False, + copy: bool = False, tz=None, freq=lib.no_default, - dayfirst=False, - yearfirst=False, + dayfirst: bool = False, + yearfirst: bool = False, ambiguous="raise", ): explicit_none = 
freq is None @@ -492,7 +497,7 @@ def _unbox_scalar(self, value, setitem: bool = False) -> np.datetime64: self._check_compatible_with(value, setitem=setitem) return value.asm8 - def _scalar_from_string(self, value): + def _scalar_from_string(self, value) -> Union[Timestamp, NaTType]: return Timestamp(value, tz=self.tz) def _check_compatible_with(self, other, setitem: bool = False): @@ -536,7 +541,7 @@ def dtype(self) -> Union[np.dtype, DatetimeTZDtype]: # type: ignore[override] return self._dtype @property - def tz(self): + def tz(self) -> Optional[tzinfo]: """ Return timezone, if any. @@ -557,14 +562,14 @@ def tz(self, value): ) @property - def tzinfo(self): + def tzinfo(self) -> Optional[tzinfo]: """ Alias for tz attribute """ return self.tz @property # NB: override with cache_readonly in immutable subclasses - def is_normalized(self): + def is_normalized(self) -> bool: """ Returns True if all of the dates are at midnight ("no time") """ @@ -609,7 +614,7 @@ def __iter__(self): ) yield from converted - def astype(self, dtype, copy=True): + def astype(self, dtype, copy: bool = True): # We handle # --> datetime # --> period @@ -636,7 +641,9 @@ def astype(self, dtype, copy=True): # Rendering Methods @dtl.ravel_compat - def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): + def _format_native_types( + self, na_rep="NaT", date_format=None, **kwargs + ) -> np.ndarray: from pandas.io.formats.format import get_format_datetime64_from_values fmt = get_format_datetime64_from_values(self, date_format) @@ -660,7 +667,7 @@ def _has_same_tz(self, other) -> bool: other_tz = other.tzinfo return timezones.tz_compare(self.tzinfo, other_tz) - def _assert_tzawareness_compat(self, other): + def _assert_tzawareness_compat(self, other) -> None: # adapted from _Timestamp._assert_tzawareness_compat other_tz = getattr(other, "tzinfo", None) other_dtype = getattr(other, "dtype", None) @@ -708,7 +715,7 @@ def _sub_datetime_arraylike(self, other): np.putmask(new_values, 
arr_mask, iNaT) return new_values.view("timedelta64[ns]") - def _add_offset(self, offset): + def _add_offset(self, offset) -> DatetimeArray: if self.ndim == 2: return self.ravel()._add_offset(offset).reshape(self.shape) @@ -756,7 +763,7 @@ def _sub_datetimelike_scalar(self, other): # ----------------------------------------------------------------- # Timezone Conversion and Localization Methods - def _local_timestamps(self): + def _local_timestamps(self) -> np.ndarray: """ Convert to an i8 (unix-like nanosecond timestamp) representation while keeping the local timezone and not using UTC. @@ -767,7 +774,7 @@ def _local_timestamps(self): return self.asi8 return tzconversion.tz_convert_from_utc(self.asi8, self.tz) - def tz_convert(self, tz): + def tz_convert(self, tz) -> DatetimeArray: """ Convert tz-aware Datetime Array/Index from one time zone to another. @@ -844,7 +851,7 @@ def tz_convert(self, tz): return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq) @dtl.ravel_compat - def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"): + def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArray: """ Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index. @@ -1035,7 +1042,7 @@ def to_pydatetime(self) -> np.ndarray: """ return ints_to_pydatetime(self.asi8, tz=self.tz) - def normalize(self): + def normalize(self) -> DatetimeArray: """ Convert times to midnight. @@ -1077,7 +1084,7 @@ def normalize(self): return type(self)(new_values)._with_freq("infer").tz_localize(self.tz) @dtl.ravel_compat - def to_period(self, freq=None): + def to_period(self, freq=None) -> PeriodArray: """ Cast to PeriodArray/Index at a particular frequency. 
@@ -1148,7 +1155,7 @@ def to_period(self, freq=None): return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz) - def to_perioddelta(self, freq): + def to_perioddelta(self, freq) -> TimedeltaArray: """ Calculate TimedeltaArray of difference between index values and index converted to PeriodArray at specified diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 45656459792ba..a824e27e3e36a 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -205,38 +205,60 @@ def _values_for_factorize(self) -> Tuple[np.ndarray, int]: # ------------------------------------------------------------------------ # Reductions - def any(self, *, axis=None, out=None, keepdims=False, skipna=True): + def any( + self, + *, + axis: Optional[int] = None, + out=None, + keepdims: bool = False, + skipna: bool = True, + ): nv.validate_any((), {"out": out, "keepdims": keepdims}) result = nanops.nanany(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) - def all(self, *, axis=None, out=None, keepdims=False, skipna=True): + def all( + self, + *, + axis: Optional[int] = None, + out=None, + keepdims: bool = False, + skipna: bool = True, + ): nv.validate_all((), {"out": out, "keepdims": keepdims}) result = nanops.nanall(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) - def min(self, *, axis=None, skipna: bool = True, **kwargs) -> Scalar: + def min( + self, *, axis: Optional[int] = None, skipna: bool = True, **kwargs + ) -> Scalar: nv.validate_min((), kwargs) result = nanops.nanmin( values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna ) return self._wrap_reduction_result(axis, result) - def max(self, *, axis=None, skipna: bool = True, **kwargs) -> Scalar: + def max( + self, *, axis: Optional[int] = None, skipna: bool = True, **kwargs + ) -> Scalar: nv.validate_max((), kwargs) result = nanops.nanmax( values=self._ndarray, axis=axis, mask=self.isna(), 
skipna=skipna ) return self._wrap_reduction_result(axis, result) - def sum(self, *, axis=None, skipna=True, min_count=0, **kwargs) -> Scalar: + def sum( + self, *, axis: Optional[int] = None, skipna: bool = True, min_count=0, **kwargs + ) -> Scalar: nv.validate_sum((), kwargs) result = nanops.nansum( self._ndarray, axis=axis, skipna=skipna, min_count=min_count ) return self._wrap_reduction_result(axis, result) - def prod(self, *, axis=None, skipna=True, min_count=0, **kwargs) -> Scalar: + def prod( + self, *, axis: Optional[int] = None, skipna: bool = True, min_count=0, **kwargs + ) -> Scalar: nv.validate_prod((), kwargs) result = nanops.nanprod( self._ndarray, axis=axis, skipna=skipna, min_count=min_count @@ -246,18 +268,24 @@ def prod(self, *, axis=None, skipna=True, min_count=0, **kwargs) -> Scalar: def mean( self, *, - axis=None, + axis: Optional[int] = None, dtype: Optional[NpDtype] = None, out=None, - keepdims=False, - skipna=True, + keepdims: bool = False, + skipna: bool = True, ): nv.validate_mean((), {"dtype": dtype, "out": out, "keepdims": keepdims}) result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) def median( - self, *, axis=None, out=None, overwrite_input=False, keepdims=False, skipna=True + self, + *, + axis: Optional[int] = None, + out=None, + overwrite_input: bool = False, + keepdims: bool = False, + skipna: bool = True, ): nv.validate_median( (), {"out": out, "overwrite_input": overwrite_input, "keepdims": keepdims} @@ -268,12 +296,12 @@ def median( def std( self, *, - axis=None, + axis: Optional[int] = None, dtype: Optional[NpDtype] = None, out=None, ddof=1, - keepdims=False, - skipna=True, + keepdims: bool = False, + skipna: bool = True, ): nv.validate_stat_ddof_func( (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std" @@ -284,12 +312,12 @@ def std( def var( self, *, - axis=None, + axis: Optional[int] = None, dtype: Optional[NpDtype] = None, out=None, ddof=1, - 
keepdims=False, - skipna=True, + keepdims: bool = False, + skipna: bool = True, ): nv.validate_stat_ddof_func( (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="var" @@ -300,12 +328,12 @@ def var( def sem( self, *, - axis=None, + axis: Optional[int] = None, dtype: Optional[NpDtype] = None, out=None, ddof=1, - keepdims=False, - skipna=True, + keepdims: bool = False, + skipna: bool = True, ): nv.validate_stat_ddof_func( (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="sem" @@ -316,11 +344,11 @@ def sem( def kurt( self, *, - axis=None, + axis: Optional[int] = None, dtype: Optional[NpDtype] = None, out=None, - keepdims=False, - skipna=True, + keepdims: bool = False, + skipna: bool = True, ): nv.validate_stat_ddof_func( (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="kurt" @@ -331,11 +359,11 @@ def kurt( def skew( self, *, - axis=None, + axis: Optional[int] = None, dtype: Optional[NpDtype] = None, out=None, - keepdims=False, - skipna=True, + keepdims: bool = False, + skipna: bool = True, ): nv.validate_stat_ddof_func( (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="skew" @@ -368,7 +396,7 @@ def to_numpy( # type: ignore[override] # ------------------------------------------------------------------------ # Ops - def __invert__(self): + def __invert__(self) -> PandasArray: return type(self)(~self._ndarray) def _cmp_method(self, other, op): diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index d91522a9e1bb6..2355999933a7a 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -3,6 +3,7 @@ from datetime import timedelta import operator from typing import ( + TYPE_CHECKING, Any, Callable, List, @@ -76,6 +77,9 @@ from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com +if TYPE_CHECKING: + from pandas.core.arrays import DatetimeArray + _shared_doc_kwargs = { "klass": "PeriodArray", } @@ -186,7 +190,9 @@ class PeriodArray(PeriodMixin, 
dtl.DatelikeOps): # -------------------------------------------------------------------- # Constructors - def __init__(self, values, dtype: Optional[Dtype] = None, freq=None, copy=False): + def __init__( + self, values, dtype: Optional[Dtype] = None, freq=None, copy: bool = False + ): freq = validate_dtype_freq(dtype, freq) if freq is not None: @@ -250,7 +256,7 @@ def _from_sequence( @classmethod def _from_sequence_of_strings( - cls, strings, *, dtype: Optional[Dtype] = None, copy=False + cls, strings, *, dtype: Optional[Dtype] = None, copy: bool = False ) -> PeriodArray: return cls._from_sequence(strings, dtype=dtype, copy=copy) @@ -448,7 +454,7 @@ def is_leap_year(self) -> np.ndarray: """ return isleapyear_arr(np.asarray(self.year)) - def to_timestamp(self, freq=None, how="start"): + def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray: """ Cast to DatetimeArray/Index. @@ -492,7 +498,7 @@ def to_timestamp(self, freq=None, how="start"): # -------------------------------------------------------------------- - def _time_shift(self, periods, freq=None): + def _time_shift(self, periods: int, freq=None) -> PeriodArray: """ Shift each value by `periods`. 
@@ -597,7 +603,9 @@ def _formatter(self, boxed: bool = False): return "'{}'".format @dtl.ravel_compat - def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): + def _format_native_types( + self, na_rep="NaT", date_format=None, **kwargs + ) -> np.ndarray: """ actually format my specific types """ diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 7251faee333bb..e1262d691128f 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -222,7 +222,7 @@ def _chk_pyarrow_available(cls) -> None: raise ImportError(msg) @classmethod - def _from_sequence(cls, scalars, dtype: Optional[Dtype] = None, copy=False): + def _from_sequence(cls, scalars, dtype: Optional[Dtype] = None, copy: bool = False): cls._chk_pyarrow_available() # convert non-na-likes to str, and nan-likes to ArrowStringDtype.na_value scalars = lib.ensure_string_array(scalars, copy=False) @@ -230,7 +230,7 @@ def _from_sequence(cls, scalars, dtype: Optional[Dtype] = None, copy=False): @classmethod def _from_sequence_of_strings( - cls, strings, dtype: Optional[Dtype] = None, copy=False + cls, strings, dtype: Optional[Dtype] = None, copy: bool = False ): return cls._from_sequence(strings, dtype=dtype, copy=copy) @@ -431,7 +431,7 @@ def fillna(self, value=None, method=None, limit=None): new_values = self.copy() return new_values - def _reduce(self, name, skipna=True, **kwargs): + def _reduce(self, name: str, skipna: bool = True, **kwargs): if name in ["min", "max"]: return getattr(self, name)(skipna=skipna) diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 1bf822c1ae3e5..59077bfceaa4a 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -2,6 +2,7 @@ from datetime import timedelta from typing import ( + TYPE_CHECKING, List, Optional, Tuple, @@ -35,7 +36,10 @@ ints_to_pytimedelta, parse_timedelta_unit, ) -from pandas._typing import NpDtype +from 
pandas._typing import ( + DtypeObj, + NpDtype, +) from pandas.compat.numpy import function as nv from pandas.core.dtypes.cast import astype_td64_unit_conversion @@ -70,6 +74,12 @@ from pandas.core.construction import extract_array from pandas.core.ops.common import unpack_zerodim_and_defer +if TYPE_CHECKING: + from pandas.core.arrays import ( + DatetimeArray, + PeriodArray, + ) + def _field_accessor(name: str, alias: str, docstring: str): def f(self) -> np.ndarray: @@ -171,7 +181,9 @@ def dtype(self) -> np.dtype: # type: ignore[override] _freq = None - def __init__(self, values, dtype=TD64NS_DTYPE, freq=lib.no_default, copy=False): + def __init__( + self, values, dtype=TD64NS_DTYPE, freq=lib.no_default, copy: bool = False + ): values = extract_array(values, extract_numpy=True) if isinstance(values, IntegerArray): values = values.to_numpy("int64", na_value=tslibs.iNaT) @@ -230,7 +242,7 @@ def __init__(self, values, dtype=TD64NS_DTYPE, freq=lib.no_default, copy=False): @classmethod def _simple_new( - cls, values, freq: Optional[BaseOffset] = None, dtype=TD64NS_DTYPE + cls, values: np.ndarray, freq: Optional[BaseOffset] = None, dtype=TD64NS_DTYPE ) -> TimedeltaArray: assert dtype == TD64NS_DTYPE, dtype assert isinstance(values, np.ndarray), type(values) @@ -331,10 +343,10 @@ def _unbox_scalar(self, value, setitem: bool = False) -> np.timedelta64: self._check_compatible_with(value, setitem=setitem) return np.timedelta64(value.value, "ns") - def _scalar_from_string(self, value): + def _scalar_from_string(self, value) -> Union[Timedelta, NaTType]: return Timedelta(value) - def _check_compatible_with(self, other, setitem: bool = False): + def _check_compatible_with(self, other, setitem: bool = False) -> None: # we don't have anything to validate. 
pass @@ -375,7 +387,7 @@ def __iter__(self): def sum( self, *, - axis=None, + axis: Optional[int] = None, dtype: Optional[NpDtype] = None, out=None, keepdims: bool = False, @@ -395,7 +407,7 @@ def sum( def std( self, *, - axis=None, + axis: Optional[int] = None, dtype: Optional[NpDtype] = None, out=None, ddof: int = 1, @@ -414,13 +426,15 @@ def std( # ---------------------------------------------------------------- # Rendering Methods - def _formatter(self, boxed=False): + def _formatter(self, boxed: bool = False): from pandas.io.formats.format import get_format_timedelta64 return get_format_timedelta64(self, box=True) @dtl.ravel_compat - def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): + def _format_native_types( + self, na_rep="NaT", date_format=None, **kwargs + ) -> np.ndarray: from pandas.io.formats.format import get_format_timedelta64 formatter = get_format_timedelta64(self._ndarray, na_rep) @@ -435,7 +449,7 @@ def _add_offset(self, other): f"cannot add the type {type(other).__name__} to a {type(self).__name__}" ) - def _add_period(self, other: Period): + def _add_period(self, other: Period) -> PeriodArray: """ Add a Period object. 
""" @@ -459,7 +473,7 @@ def _add_datetime_arraylike(self, other): # defer to implementation in DatetimeArray return other + self - def _add_datetimelike_scalar(self, other): + def _add_datetimelike_scalar(self, other) -> DatetimeArray: # adding a timedeltaindex to a datetimelike from pandas.core.arrays import DatetimeArray @@ -919,7 +933,7 @@ def f(x): def sequence_to_td64ns( - data, copy=False, unit=None, errors="raise" + data, copy: bool = False, unit=None, errors="raise" ) -> Tuple[np.ndarray, Optional[Tick]]: """ Parameters @@ -1095,7 +1109,7 @@ def objects_to_td64ns(data, unit=None, errors="raise"): return result.view("timedelta64[ns]") -def _validate_td64_dtype(dtype): +def _validate_td64_dtype(dtype) -> DtypeObj: dtype = pandas_dtype(dtype) if is_dtype_equal(dtype, np.dtype("timedelta64")): # no precision disallowed GH#24806 diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 29c2f7cfcf00d..23632922adc29 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2376,7 +2376,7 @@ def __reduce__(self): """The expected NA value to use with this index.""" @cache_readonly - def _isnan(self): + def _isnan(self) -> np.ndarray: """ Return if each value is NaN. """ @@ -5238,7 +5238,7 @@ def get_indexer_non_unique(self, target): return ensure_platform_int(indexer), missing @final - def get_indexer_for(self, target, **kwargs): + def get_indexer_for(self, target, **kwargs) -> np.ndarray: """ Guaranteed return of an indexer even when non-unique. @@ -5306,7 +5306,7 @@ def _index_as_unique(self) -> bool: _requires_unique_msg = "Reindexing only valid with uniquely valued Index objects" @final - def _maybe_promote(self, other: Index): + def _maybe_promote(self, other: Index) -> Tuple[Index, Index]: """ When dealing with an object-dtype Index and a non-object Index, see if we can upcast the object-dtype one to improve performance. 
@@ -5481,7 +5481,7 @@ def _transform_index(self, func, level=None) -> Index: items = [func(x) for x in self] return Index(items, name=self.name, tupleize_cols=False) - def isin(self, values, level=None): + def isin(self, values, level=None) -> np.ndarray: """ Return a boolean array where the index values are in `values`. @@ -5849,7 +5849,7 @@ def slice_locs(self, start=None, end=None, step=None, kind=None): return start_slice, end_slice - def delete(self, loc): + def delete(self, loc) -> Index: """ Make new Index with passed location(-s) deleted. @@ -5881,7 +5881,7 @@ def delete(self, loc): res_values = np.delete(self._data, loc) return type(self)._simple_new(res_values, name=self.name) - def insert(self, loc: int, item): + def insert(self, loc: int, item) -> Index: """ Make new Index inserting new item at location. @@ -5916,7 +5916,7 @@ def insert(self, loc: int, item): idx = np.concatenate((arr[:loc], item, arr[loc:])) return Index(idx, name=self.name) - def drop(self: _IndexT, labels, errors: str_t = "raise") -> _IndexT: + def drop(self, labels, errors: str_t = "raise") -> Index: """ Make new Index with passed list of labels deleted. @@ -5929,6 +5929,7 @@ def drop(self: _IndexT, labels, errors: str_t = "raise") -> _IndexT: Returns ------- dropped : Index + Will be same type as self, except for RangeIndex. 
Raises ------ diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index f37faa4ab844b..c1ed00820a376 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -402,7 +402,7 @@ def _get_indexer( method: Optional[str] = None, limit: Optional[int] = None, tolerance=None, - ): + ) -> np.ndarray: if com.any_not_none(method, tolerance, limit): return super()._get_indexer( target, method=method, tolerance=tolerance, limit=limit @@ -436,10 +436,11 @@ def repeat(self, repeats, axis=None) -> Int64Index: return self._int64index.repeat(repeats, axis=axis) def delete(self, loc) -> Int64Index: - return self._int64index.delete(loc) + # error: Incompatible return value type (got "Index", expected "Int64Index") + return self._int64index.delete(loc) # type: ignore[return-value] def take( - self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs + self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs ) -> Int64Index: with rewrite_exception("Int64Index", type(self).__name__): return self._int64index.take( @@ -471,7 +472,13 @@ def _view(self: RangeIndex) -> RangeIndex: return result @doc(Int64Index.copy) - def copy(self, name=None, deep=False, dtype: Optional[Dtype] = None, names=None): + def copy( + self, + name: Hashable = None, + deep: bool = False, + dtype: Optional[Dtype] = None, + names=None, + ): name = self._validate_names(name=name, names=names, deep=deep)[0] new_index = self._rename(name=name) diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 292c459919398..5550da7421e00 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1413,7 +1413,10 @@ def hide_columns(self, subset) -> Styler: """ subset = _non_reducing_slice(subset) hidden_df = self.data.loc[subset] - self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns) + hcols = self.columns.get_indexer_for(hidden_df.columns) + # error: Incompatible types in assignment (expression has type + 
# "ndarray", variable has type "Sequence[int]") + self.hidden_columns = hcols # type: ignore[assignment] return self # -----------------------------------------------------------------------
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40545
2021-03-20T20:44:23Z
2021-04-02T01:51:44Z
2021-04-02T01:51:44Z
2021-04-02T18:25:07Z
DOC: Fixing indentation of example of DataFrame
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b1f0ad8eda2aa..039c287189a6c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -528,7 +528,7 @@ class DataFrame(NDFrame, OpsMixin): >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) - x y + x y 0 0 0 1 0 3 2 2 3
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40544
2021-03-20T18:07:14Z
2021-03-28T21:03:33Z
2021-03-28T21:03:33Z
2021-03-28T21:03:39Z
CLN: Don't catch TypeError in FrameApply.agg
diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 3a2c2d7124963..cb20f6c69e6f1 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -633,25 +633,18 @@ def agg(self): obj = self.obj axis = self.axis - try: - if axis == 1: - result = FrameRowApply( - obj.T, - self.orig_f, - self.raw, - self.result_type, - self.args, - self.kwargs, - ).agg() - result = result.T if result is not None else result - else: - result = super().agg() - except TypeError as err: - exc = TypeError( - "DataFrame constructor called with " - f"incompatible data and dtype: {err}" - ) - raise exc from err + if axis == 1: + result = FrameRowApply( + obj.T, + self.orig_f, + self.raw, + self.result_type, + self.args, + self.kwargs, + ).agg() + result = result.T if result is not None else result + else: + result = super().agg() if result is None: result = obj.apply(self.orig_f, axis, args=self.args, **self.kwargs) diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py index 690d6bed0cb9b..73bc5b14335d4 100644 --- a/pandas/tests/apply/test_invalid_arg.py +++ b/pandas/tests/apply/test_invalid_arg.py @@ -272,6 +272,14 @@ def test_agg_cython_table_raises_series(series, func, expected): series.agg(func) +def test_agg_none_to_type(): + # GH 40543 + df = DataFrame({"a": [None]}) + msg = re.escape("int() argument must be a string") + with pytest.raises(TypeError, match=msg): + df.agg({"a": int}) + + def test_transform_none_to_type(): # GH#34377 df = DataFrame({"a": [None]})
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them Ref: #34224. Back when transform and agg shared code, this was introduced to stop transform from having an infinite recursive loop. Now they do not, it is no longer necessary and only serves to obfusticate the error message.
https://api.github.com/repos/pandas-dev/pandas/pulls/40543
2021-03-20T17:52:48Z
2021-03-26T14:46:59Z
2021-03-26T14:46:59Z
2021-03-26T14:54:27Z
DOC: suppress warnings from CategoricalBlock deprecation
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index cf153ddd2cbbd..3b7a6037a9715 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -5240,6 +5240,7 @@ Write to a feather file. Read from a feather file. .. ipython:: python + :okwarning: result = pd.read_feather("example.feather") result @@ -5323,6 +5324,7 @@ Write to a parquet file. Read from a parquet file. .. ipython:: python + :okwarning: result = pd.read_parquet("example_fp.parquet", engine="fastparquet") result = pd.read_parquet("example_pa.parquet", engine="pyarrow")
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40542
2021-03-20T17:26:44Z
2021-03-21T20:59:07Z
2021-03-21T20:59:07Z
2021-03-21T21:45:23Z
Revert "PERF: increase the minimum number of elements to use numexpr for ops from 1e4 to 1e6"
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 4f14ea73d5a88..0dbe5e8d83741 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -40,7 +40,7 @@ } # the minimum prod shape that we will use numexpr -_MIN_ELEMENTS = 1_000_000 +_MIN_ELEMENTS = 10000 def set_use_numexpr(v=True): diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 96347ba5a733f..30f88ba5e76f6 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -12,7 +12,7 @@ ) from pandas.core.computation import expressions as expr -_frame = DataFrame(np.random.randn(1000000, 4), columns=list("ABCD"), dtype="float64") +_frame = DataFrame(np.random.randn(10000, 4), columns=list("ABCD"), dtype="float64") _frame2 = DataFrame(np.random.randn(100, 4), columns=list("ABCD"), dtype="float64") _mixed = DataFrame( {
Reverts pandas-dev/pandas#40502 Troubleshooting the CI
https://api.github.com/repos/pandas-dev/pandas/pulls/40541
2021-03-20T16:48:37Z
2021-03-20T18:17:10Z
2021-03-20T18:17:10Z
2021-03-20T18:18:15Z
TYP: make typing DatetimeLikeScalar a Union instead of TypeVar
diff --git a/pandas/_typing.py b/pandas/_typing.py index e95dff2e69ff0..f90ef33434773 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -80,7 +80,7 @@ # scalars PythonScalar = Union[str, int, float, bool] -DatetimeLikeScalar = TypeVar("DatetimeLikeScalar", "Period", "Timestamp", "Timedelta") +DatetimeLikeScalar = Union["Period", "Timestamp", "Timedelta"] PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"] Scalar = Union[PythonScalar, PandasScalar] diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 0900688e04374..67241a866ef35 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -563,8 +563,7 @@ def _validate_comparison_value(self, other): raise InvalidComparison(other) if isinstance(other, self._recognized_scalars) or other is NaT: - # error: Too many arguments for "object" - other = self._scalar_type(other) # type: ignore[call-arg] + other = self._scalar_type(other) try: self._check_compatible_with(other) except (TypeError, IncompatibleFrequency) as err: @@ -614,16 +613,14 @@ def _validate_shift_value(self, fill_value): if is_valid_na_for_dtype(fill_value, self.dtype): fill_value = NaT elif isinstance(fill_value, self._recognized_scalars): - # error: Too many arguments for "object" - fill_value = self._scalar_type(fill_value) # type: ignore[call-arg] + fill_value = self._scalar_type(fill_value) else: # only warn if we're not going to raise if self._scalar_type is Period and lib.is_integer(fill_value): # kludge for #31971 since Period(integer) tries to cast to str new_fill = Period._from_ordinal(fill_value, freq=self.freq) else: - # error: Too many arguments for "object" - new_fill = self._scalar_type(fill_value) # type: ignore[call-arg] + new_fill = self._scalar_type(fill_value) # stacklevel here is chosen to be correct when called from # DataFrame.shift or Series.shift @@ -684,8 +681,7 @@ def _validate_scalar( raise TypeError(msg) elif isinstance(value, 
self._recognized_scalars): - # error: Too many arguments for "object" - value = self._scalar_type(value) # type: ignore[call-arg] + value = self._scalar_type(value) else: msg = self._validation_error_message(value, allow_listlike)
In `_typing.py` change `DatetimeLikeScalar` from `TypeVar` to `Union` Allows removal of 4 `#type: ignore` lines Per split out from #39501
https://api.github.com/repos/pandas-dev/pandas/pulls/40540
2021-03-20T16:21:51Z
2021-03-21T21:01:24Z
2021-03-21T21:01:24Z
2021-03-22T11:59:32Z
TYP: make EA registry private
diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 78a7f1890b5de..b5a17e1ef882e 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -33,7 +33,7 @@ from pandas.core.dtypes.base import ( ExtensionDtype, - registry, + _registry as registry, ) from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index d83405803753a..129c6c061d11c 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -388,7 +388,7 @@ def register_extension_dtype(cls: Type[ExtensionDtype]) -> Type[ExtensionDtype]: ... class MyExtensionDtype(ExtensionDtype): ... name = "myextension" """ - registry.register(cls) + _registry.register(cls) return cls @@ -452,4 +452,4 @@ def find( return None -registry = Registry() +_registry = Registry() diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 32ea82d9c0402..b9e785ff2f887 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -23,7 +23,7 @@ Optional, ) -from pandas.core.dtypes.base import registry +from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index 5d2b7c43f6765..bfe588883d9f3 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -5,7 +5,7 @@ import pytest import pytz -from pandas.core.dtypes.base import registry +from pandas.core.dtypes.base import _registry as registry import pandas as pd import pandas._testing as tm diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py index e7f3e8c659316..2592a0263c585 100644 --- a/pandas/tests/arrays/test_period.py +++ b/pandas/tests/arrays/test_period.py @@ -4,7 +4,7 @@ from pandas._libs.tslibs import iNaT from pandas._libs.tslibs.period import IncompatibleFrequency -from 
pandas.core.dtypes.base import registry +from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import PeriodDtype import pandas as pd diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index ca311768dc2d9..51a7969162abf 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -4,7 +4,7 @@ import pytest import pytz -from pandas.core.dtypes.base import registry +from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.common import ( is_bool_dtype, is_categorical, diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index e8cdcfcaafa86..199347383b171 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -5,7 +5,7 @@ import pandas.util._test_decorators as td -from pandas.core.dtypes.base import registry as ea_registry +from pandas.core.dtypes.base import _registry as ea_registry from pandas.core.dtypes.common import ( is_categorical_dtype, is_interval_dtype,
From #39501 splitting out the part to make the EA `registry` private. This will make it so that `pyright --verifytypes` accepts pandas public API.
https://api.github.com/repos/pandas-dev/pandas/pulls/40538
2021-03-20T16:11:26Z
2021-03-21T21:00:40Z
2021-03-21T21:00:40Z
2021-03-22T11:58:59Z
REF: move shift logic from BlockManager to DataFrame
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index cf153ddd2cbbd..3b7a6037a9715 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -5240,6 +5240,7 @@ Write to a feather file. Read from a feather file. .. ipython:: python + :okwarning: result = pd.read_feather("example.feather") result @@ -5323,6 +5324,7 @@ Write to a parquet file. Read from a parquet file. .. ipython:: python + :okwarning: result = pd.read_parquet("example_fp.parquet", engine="fastparquet") result = pd.read_parquet("example_pa.parquet", engine="pyarrow") diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b1f0ad8eda2aa..7d5c56b63ff86 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5080,28 +5080,45 @@ def shift( axis = self._get_axis_number(axis) ncols = len(self.columns) - if axis == 1 and periods != 0 and fill_value is lib.no_default and ncols > 0: - # We will infer fill_value to match the closest column - # Use a column that we know is valid for our column's dtype GH#38434 - label = self.columns[0] + if ( + axis == 1 + and periods != 0 + and ncols > 0 + and (fill_value is lib.no_default or len(self._mgr.arrays) > 1) + ): + # Exclude single-array-with-fill_value case so we issue a FutureWarning + # if an integer is passed with datetimelike dtype GH#31971 + from pandas import concat + # tail: the data that is still in our shifted DataFrame if periods > 0: - result = self.iloc[:, :-periods] - for col in range(min(ncols, abs(periods))): - # TODO(EA2D): doing this in a loop unnecessary with 2D EAs - # Define filler inside loop so we get a copy - filler = self.iloc[:, 0].shift(len(self)) - result.insert(0, label, filler, allow_duplicates=True) + tail = self.iloc[:, :-periods] else: - result = self.iloc[:, -periods:] - for col in range(min(ncols, abs(periods))): - # Define filler inside loop so we get a copy - filler = self.iloc[:, -1].shift(len(self)) - result.insert( - len(result.columns), label, filler, 
allow_duplicates=True - ) + tail = self.iloc[:, -periods:] + # pin a simple Index to avoid costly casting + tail.columns = range(len(tail.columns)) + + if fill_value is not lib.no_default: + # GH#35488 + # TODO(EA2D): with 2D EAs we could construct other directly + ser = Series(fill_value, index=self.index) + else: + # We infer fill_value to match the closest column + if periods > 0: + ser = self.iloc[:, 0].shift(len(self)) + else: + ser = self.iloc[:, -1].shift(len(self)) + + width = min(abs(periods), ncols) + other = concat([ser] * width, axis=1) + + if periods > 0: + result = concat([other, tail], axis=1) + else: + result = concat([tail, other], axis=1) + result = cast(DataFrame, result) result.columns = self.columns.copy() return result diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index da78fc5dfba76..806054dbf8295 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -621,25 +621,6 @@ def shift(self, periods: int, axis: int, fill_value) -> BlockManager: if fill_value is lib.no_default: fill_value = None - if axis == 0 and self.ndim == 2 and self.nblocks > 1: - # GH#35488 we need to watch out for multi-block cases - # We only get here with fill_value not-lib.no_default - ncols = self.shape[0] - if periods > 0: - indexer = [-1] * periods + list(range(ncols - periods)) - else: - nper = abs(periods) - indexer = list(range(nper, ncols)) + [-1] * nper - result = self.reindex_indexer( - self.items, - indexer, - axis=0, - fill_value=fill_value, - allow_dups=True, - consolidate=False, - ) - return result - return self.apply("shift", periods=periods, axis=axis, fill_value=fill_value) def fillna(self, value, limit, inplace: bool, downcast) -> BlockManager: diff --git a/pandas/tests/apply/test_frame_transform.py b/pandas/tests/apply/test_frame_transform.py index 1a12cbff47092..bf53e41442182 100644 --- a/pandas/tests/apply/test_frame_transform.py +++ b/pandas/tests/apply/test_frame_transform.py @@ 
-39,15 +39,8 @@ def test_transform_ufunc(axis, float_frame, frame_or_series): @pytest.mark.parametrize("op", frame_transform_kernels) -def test_transform_groupby_kernel(axis, float_frame, op, using_array_manager, request): +def test_transform_groupby_kernel(axis, float_frame, op, request): # GH 35964 - if using_array_manager and op == "pct_change" and axis in (1, "columns"): - # TODO(ArrayManager) shift with axis=1 - request.node.add_marker( - pytest.mark.xfail( - reason="shift axis=1 not yet implemented for ArrayManager" - ) - ) args = [0.0] if op == "fillna" else [] if axis == 0 or axis == "index":
Partially-fixes one of the currently-disabled ArrayManager tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/40536
2021-03-20T15:07:43Z
2021-03-23T16:47:15Z
2021-03-23T16:47:15Z
2021-06-26T07:44:15Z
TYP: first_valid_index & last_valid_index
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c20b2840a40ab..2e34bc0563210 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -11143,7 +11143,7 @@ def __ixor__(self, other): # Misc methods @final - def _find_valid_index(self, how: str): + def _find_valid_index(self, *, how: str) -> Optional[Hashable]: """ Retrieves the index of the first valid value. @@ -11156,16 +11156,16 @@ def _find_valid_index(self, how: str): ------- idx_first_valid : type of index """ - idxpos = find_valid_index(self._values, how) + idxpos = find_valid_index(self._values, how=how) if idxpos is None: return None return self.index[idxpos] @final @doc(position="first", klass=_shared_doc_kwargs["klass"]) - def first_valid_index(self): + def first_valid_index(self) -> Optional[Hashable]: """ - Return index for {position} non-NA/null value. + Return index for {position} non-NA value or None, if no NA value is found. Returns ------- @@ -11176,12 +11176,12 @@ def first_valid_index(self): If all elements are non-NA/null, returns None. Also returns None for empty {klass}. """ - return self._find_valid_index("first") + return self._find_valid_index(how="first") @final @doc(first_valid_index, position="last", klass=_shared_doc_kwargs["klass"]) - def last_valid_index(self): - return self._find_valid_index("last") + def last_valid_index(self) -> Optional[Hashable]: + return self._find_valid_index(how="last") def _doc_params(cls): diff --git a/pandas/core/missing.py b/pandas/core/missing.py index c2193056cc974..41d7fed66469d 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -158,7 +158,7 @@ def clean_interp_method(method: str, **kwargs) -> str: return method -def find_valid_index(values, how: str): +def find_valid_index(values, *, how: str) -> Optional[int]: """ Retrieves the index of the first valid value. @@ -256,8 +256,17 @@ def interpolate_1d( # These are sets of index pointers to invalid values... i.e. {0, 1, etc... 
all_nans = set(np.flatnonzero(invalid)) - start_nans = set(range(find_valid_index(yvalues, "first"))) - end_nans = set(range(1 + find_valid_index(yvalues, "last"), len(valid))) + + first_valid_index = find_valid_index(yvalues, how="first") + if first_valid_index is None: # no nan found in start + first_valid_index = 0 + start_nans = set(range(first_valid_index)) + + last_valid_index = find_valid_index(yvalues, how="last") + if last_valid_index is None: # no nan found in end + last_valid_index = len(yvalues) + end_nans = set(range(1 + last_valid_index, len(valid))) + mid_nans = all_nans - start_nans - end_nans # Like the sets above, preserve_nans contains indices of invalid values, @@ -595,8 +604,12 @@ def _interpolate_with_limit_area( invalid = isna(values) if not invalid.all(): - first = find_valid_index(values, "first") - last = find_valid_index(values, "last") + first = find_valid_index(values, how="first") + if first is None: + first = 0 + last = find_valid_index(values, how="last") + if last is None: + last = len(values) values = interpolate_2d( values,
Typing of `first_valid_index` & `last_valid_index` + make the `how` param in internal functions a named parameter for better readability.
https://api.github.com/repos/pandas-dev/pandas/pulls/40535
2021-03-20T14:57:52Z
2021-03-23T16:23:02Z
2021-03-23T16:23:02Z
2021-03-23T17:08:08Z
CLN: Optional[Hashable] in dict type hints
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c20b2840a40ab..c9cbc858fae29 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -231,7 +231,7 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin): _metadata: List[str] = [] _is_copy: Optional[weakref.ReferenceType[NDFrame]] = None _mgr: Manager - _attrs: Dict[Optional[Hashable], Any] + _attrs: Dict[Hashable, Any] _typ: str # ---------------------------------------------------------------------- @@ -241,7 +241,7 @@ def __init__( self, data: Manager, copy: bool = False, - attrs: Optional[Mapping[Optional[Hashable], Any]] = None, + attrs: Optional[Mapping[Hashable, Any]] = None, ): # copy kwarg is retained for mypy compat, is not used @@ -320,7 +320,7 @@ def _as_manager(self: FrameOrSeries, typ: str) -> FrameOrSeries: # attrs and flags @property - def attrs(self) -> Dict[Optional[Hashable], Any]: + def attrs(self) -> Dict[Hashable, Any]: """ Dictionary of global attributes of this dataset. 
@@ -337,7 +337,7 @@ def attrs(self) -> Dict[Optional[Hashable], Any]: return self._attrs @attrs.setter - def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None: + def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) @final @@ -803,7 +803,7 @@ def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries: # error: Argument 2 to "NDFrame" has incompatible type "*Generator[Index, # None, None]"; expected "bool" [arg-type] # error: Argument 2 to "NDFrame" has incompatible type "*Generator[Index, - # None, None]"; expected "Optional[Mapping[Optional[Hashable], Any]]" + # None, None]"; expected "Optional[Mapping[Hashable, Any]]" new_values, # type: ignore[arg-type] *new_axes, # type: ignore[arg-type] ).__finalize__(self, method="swapaxes") diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 7c83beca1ae71..b7493ebeadf34 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -572,7 +572,7 @@ def read_json( if dtype is None and orient != "table": # error: Incompatible types in assignment (expression has type "bool", variable # has type "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], - # Type[int], Type[complex], Type[bool], Type[object], Dict[Optional[Hashable], + # Type[int], Type[complex], Type[bool], Type[object], Dict[Hashable, # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float], # Type[int], Type[complex], Type[bool], Type[object]]], None]") dtype = True # type: ignore[assignment] @@ -921,7 +921,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True): # error: Non-overlapping identity check (left operand type: # "Union[ExtensionDtype, str, dtype[Any], Type[object], - # Dict[Optional[Hashable], Union[ExtensionDtype, Union[str, dtype[Any]], + # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]], # Type[str], Type[float], Type[int], Type[complex], Type[bool], # Type[object]]]]", right operand type: "Literal[True]") elif 
self.dtype is True: # type: ignore[comparison-overlap] diff --git a/pandas/io/sql.py b/pandas/io/sql.py index e3347468828d1..200565b837dea 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -1563,7 +1563,7 @@ def to_sql( if not is_dict_like(dtype): # error: Value expression in dictionary comprehension has incompatible # type "Union[ExtensionDtype, str, dtype[Any], Type[object], - # Dict[Optional[Hashable], Union[ExtensionDtype, Union[str, dtype[Any]], + # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]], # Type[str], Type[float], Type[int], Type[complex], Type[bool], # Type[object]]]]"; expected type "Union[ExtensionDtype, str, # dtype[Any], Type[object]]" @@ -2060,7 +2060,7 @@ def to_sql( if not is_dict_like(dtype): # error: Value expression in dictionary comprehension has incompatible # type "Union[ExtensionDtype, str, dtype[Any], Type[object], - # Dict[Optional[Hashable], Union[ExtensionDtype, Union[str, dtype[Any]], + # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]], # Type[str], Type[float], Type[int], Type[complex], Type[bool], # Type[object]]]]"; expected type "Union[ExtensionDtype, str, # dtype[Any], Type[object]]" diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index b355cba6354da..b649368a80820 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -609,8 +609,8 @@ def _make_legend(self): self.legend_handles ) # error: Incompatible types in assignment (expression has type - # "Iterator[Optional[Hashable]]", variable has type - # "List[Optional[Hashable]]") + # "Iterator[Hashable]", variable has type + # "List[Hashable]") self.legend_labels = reversed( # type: ignore[assignment] self.legend_labels )
Minor clean-up. ```python >>> from typing import Hashable >>> isinstance(None, Hashable) True ``` In old versions of mypy, mypy had a bug and didn't see this, but is has been fixed now. There are many instances of `Optional[Hashable]` instead of just `Hashable` in the code base, other than in these dicts. I've assumed those should be left as-is for readability and not because it's strictly needed.
https://api.github.com/repos/pandas-dev/pandas/pulls/40534
2021-03-20T14:40:45Z
2021-03-23T01:33:04Z
2021-03-23T01:33:04Z
2021-03-23T10:10:17Z
STYLE loosen inconsistent namespace check
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b8524a302f4c9..8fd9bc3424ed5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -86,11 +86,10 @@ repos: types: [python] exclude: ^pandas/_typing\.py$ - id: inconsistent-namespace-usage - name: 'Check for inconsistent use of pandas namespace in tests' + name: 'Check for inconsistent use of pandas namespace' entry: python scripts/check_for_inconsistent_pandas_namespace.py language: python types: [python] - files: ^pandas/tests/ - id: incorrect-code-directives name: Check for incorrect code block or IPython directives language: pygrep diff --git a/asv_bench/benchmarks/arithmetic.py b/asv_bench/benchmarks/arithmetic.py index 488237a6f5a8b..bfb1be8705495 100644 --- a/asv_bench/benchmarks/arithmetic.py +++ b/asv_bench/benchmarks/arithmetic.py @@ -140,9 +140,7 @@ def setup(self, op, shape): # construct dataframe with 2 blocks arr1 = np.random.randn(n_rows, n_cols // 2).astype("f8") arr2 = np.random.randn(n_rows, n_cols // 2).astype("f4") - df = pd.concat( - [pd.DataFrame(arr1), pd.DataFrame(arr2)], axis=1, ignore_index=True - ) + df = pd.concat([DataFrame(arr1), DataFrame(arr2)], axis=1, ignore_index=True) # should already be the case, but just to be sure df._consolidate_inplace() @@ -151,7 +149,7 @@ def setup(self, op, shape): arr2 = np.random.randn(n_rows, n_cols // 2).astype("i8") arr3 = np.random.randn(n_rows, n_cols // 4).astype("f8") df2 = pd.concat( - [pd.DataFrame(arr1), pd.DataFrame(arr2), pd.DataFrame(arr3)], + [DataFrame(arr1), DataFrame(arr2), DataFrame(arr3)], axis=1, ignore_index=True, ) @@ -459,9 +457,9 @@ class OffsetArrayArithmetic: def setup(self, offset): N = 10000 - rng = pd.date_range(start="1/1/2000", periods=N, freq="T") + rng = date_range(start="1/1/2000", periods=N, freq="T") self.rng = rng - self.ser = pd.Series(rng) + self.ser = Series(rng) def time_add_series_offset(self, offset): with warnings.catch_warnings(record=True): @@ -478,7 +476,7 @@ class 
ApplyIndex: def setup(self, offset): N = 10000 - rng = pd.date_range(start="1/1/2000", periods=N, freq="T") + rng = date_range(start="1/1/2000", periods=N, freq="T") self.rng = rng def time_apply_index(self, offset): @@ -490,17 +488,17 @@ class BinaryOpsMultiIndex: param_names = ["func"] def setup(self, func): - date_range = pd.date_range("20200101 00:00", "20200102 0:00", freq="S") + array = date_range("20200101 00:00", "20200102 0:00", freq="S") level_0_names = [str(i) for i in range(30)] - index = pd.MultiIndex.from_product([level_0_names, date_range]) + index = pd.MultiIndex.from_product([level_0_names, array]) column_names = ["col_1", "col_2"] - self.df = pd.DataFrame( + self.df = DataFrame( np.random.rand(len(index), 2), index=index, columns=column_names ) - self.arg_df = pd.DataFrame( + self.arg_df = DataFrame( np.random.randint(1, 10, (len(level_0_names), 2)), index=level_0_names, columns=column_names, diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py index 5006a0dbf1f98..35e5818cd3b2b 100644 --- a/asv_bench/benchmarks/sparse.py +++ b/asv_bench/benchmarks/sparse.py @@ -28,7 +28,7 @@ def setup(self): data = np.random.randn(N)[:-i] idx = rng[:-i] data[100:] = np.nan - self.series[i] = pd.Series(pd.SparseArray(data), index=idx) + self.series[i] = Series(SparseArray(data), index=idx) def time_series_to_frame(self): pd.DataFrame(self.series) @@ -63,7 +63,7 @@ def setup(self): ) def time_sparse_series_from_coo(self): - pd.Series.sparse.from_coo(self.matrix) + Series.sparse.from_coo(self.matrix) class ToCoo: diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py index adea9f6c19996..9bacb30b78a64 100644 --- a/pandas/_testing/__init__.py +++ b/pandas/_testing/__init__.py @@ -207,12 +207,12 @@ def box_expected(expected, box_cls, transpose=True): """ if box_cls is pd.array: expected = pd.array(expected) - elif box_cls is pd.Index: - expected = pd.Index(expected) - elif box_cls is pd.Series: - expected = pd.Series(expected) - 
elif box_cls is pd.DataFrame: - expected = pd.Series(expected).to_frame() + elif box_cls is Index: + expected = Index(expected) + elif box_cls is Series: + expected = Series(expected) + elif box_cls is DataFrame: + expected = Series(expected).to_frame() if transpose: # for vector operations, we need a DataFrame to be a single-row, # not a single-column, in order to operate against non-DataFrame @@ -400,7 +400,7 @@ def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None) "x": state.rand(n) * 2 - 1, "y": state.rand(n) * 2 - 1, } - df = pd.DataFrame(columns, index=index, columns=sorted(columns)) + df = DataFrame(columns, index=index, columns=sorted(columns)) if df.index[-1] == end: df = df.iloc[:-1] return df diff --git a/pandas/conftest.py b/pandas/conftest.py index aa43746d0e7d5..3fdde3261bd68 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -328,7 +328,7 @@ def unique_nulls_fixture(request): # ---------------------------------------------------------------- -@pytest.fixture(params=[pd.DataFrame, pd.Series]) +@pytest.fixture(params=[DataFrame, Series]) def frame_or_series(request): """ Fixture to parametrize over DataFrame and Series. 
@@ -338,7 +338,7 @@ def frame_or_series(request): # error: List item 0 has incompatible type "Type[Index]"; expected "Type[IndexOpsMixin]" @pytest.fixture( - params=[pd.Index, pd.Series], ids=["index", "series"] # type: ignore[list-item] + params=[Index, Series], ids=["index", "series"] # type: ignore[list-item] ) def index_or_series(request): """ @@ -356,9 +356,7 @@ def index_or_series(request): index_or_series2 = index_or_series -@pytest.fixture( - params=[pd.Index, pd.Series, pd.array], ids=["index", "series", "array"] -) +@pytest.fixture(params=[Index, Series, pd.array], ids=["index", "series", "array"]) def index_or_series_or_array(request): """ Fixture to parametrize over Index, Series, and ExtensionArray @@ -559,7 +557,7 @@ def index_with_missing(request): # ---------------------------------------------------------------- @pytest.fixture def empty_series(): - return pd.Series([], index=[], dtype=np.float64) + return Series([], index=[], dtype=np.float64) @pytest.fixture @@ -596,7 +594,7 @@ def _create_series(index): """ Helper for the _series dict """ size = len(index) data = np.random.randn(size) - return pd.Series(data, index=index, name="a") + return Series(data, index=index, name="a") _series = { @@ -1437,7 +1435,7 @@ def any_numpy_dtype(request): ("boolean", [True, np.nan, False]), ("boolean", [True, pd.NA, False]), ("datetime64", [np.datetime64("2013-01-01"), np.nan, np.datetime64("2018-01-01")]), - ("datetime", [pd.Timestamp("20130101"), np.nan, pd.Timestamp("20180101")]), + ("datetime", [Timestamp("20130101"), np.nan, Timestamp("20180101")]), ("date", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]), # The following two dtypes are commented out due to GH 23554 # ('complex', [1 + 1j, np.nan, 2 + 2j]), @@ -1445,8 +1443,8 @@ def any_numpy_dtype(request): # np.nan, np.timedelta64(2, 'D')]), ("timedelta", [timedelta(1), np.nan, timedelta(2)]), ("time", [time(1), np.nan, time(2)]), - ("period", [pd.Period(2013), pd.NaT, pd.Period(2018)]), - ("interval", 
[pd.Interval(0, 1), np.nan, pd.Interval(0, 2)]), + ("period", [Period(2013), pd.NaT, Period(2018)]), + ("interval", [Interval(0, 1), np.nan, Interval(0, 2)]), ] ids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 5550da7421e00..a9c1de8a382ea 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -186,8 +186,8 @@ def __init__( if not data.index.is_unique or not data.columns.is_unique: raise ValueError("style is not supported for non-unique indices.") self.data: DataFrame = data - self.index: pd.Index = data.index - self.columns: pd.Index = data.columns + self.index: Index = data.index + self.columns: Index = data.columns self.table_styles = table_styles if not isinstance(uuid_len, int) or not uuid_len >= 0: raise TypeError("``uuid_len`` must be an integer in range [0, 32].") @@ -913,7 +913,7 @@ def _apply( result.columns = data.columns else: result = func(data, **kwargs) - if not isinstance(result, pd.DataFrame): + if not isinstance(result, DataFrame): if not isinstance(result, np.ndarray): raise TypeError( f"Function {repr(func)} must return a DataFrame or ndarray " @@ -1565,7 +1565,7 @@ def css(rgba) -> str: if s.ndim == 1: return [css(rgba) for rgba in rgbas] else: - return pd.DataFrame( + return DataFrame( [[css(rgba) for rgba in row] for row in rgbas], index=s.index, columns=s.columns, @@ -1655,7 +1655,7 @@ def css(x): if s.ndim == 1: return [css(x) for x in normed] else: - return pd.DataFrame( + return DataFrame( [[css(x) for x in row] for row in normed], index=s.index, columns=s.columns, diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 1a7e2d1d820f7..62d368264752b 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -1372,9 +1372,9 @@ def array_likes(request): data = memoryview(arr) elif name == "array": # stdlib array - from array import 
array as array_stdlib + import array - data = array_stdlib("i", arr) + data = array.array("i", arr) elif name == "dask": import dask.array diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 869255505eb74..ca68885fdc470 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1236,14 +1236,14 @@ def __len__(self, n): def test_constructor_stdlib_array(self): # GH 4297 # support Array - from array import array as stdlib_array + import array - result = DataFrame({"A": stdlib_array("i", range(10))}) + result = DataFrame({"A": array.array("i", range(10))}) expected = DataFrame({"A": list(range(10))}) tm.assert_frame_equal(result, expected, check_dtype=False) expected = DataFrame([list(range(10)), list(range(10))]) - result = DataFrame([stdlib_array("i", range(10)), stdlib_array("i", range(10))]) + result = DataFrame([array.array("i", range(10)), array.array("i", range(10))]) tm.assert_frame_equal(result, expected, check_dtype=False) def test_constructor_range(self): diff --git a/scripts/check_for_inconsistent_pandas_namespace.py b/scripts/check_for_inconsistent_pandas_namespace.py index c84a92324f976..3c21821e794a9 100644 --- a/scripts/check_for_inconsistent_pandas_namespace.py +++ b/scripts/check_for_inconsistent_pandas_namespace.py @@ -2,7 +2,7 @@ Check that test suite file doesn't use the pandas namespace inconsistently. We check for cases of ``Series`` and ``pd.Series`` appearing in the same file -(likewise for some other common classes). +(likewise for other pandas objects). This is meant to be run as a pre-commit hook - to run it manually, you can do: @@ -15,43 +15,50 @@ though note that you may need to manually fixup some imports and that you will also need the additional dependency `tokenize-rt` (which is left out from the pre-commit hook so that it uses the same virtualenv as the other local ones). 
+ +The general structure is similar to that of some plugins from +https://github.com/asottile/pyupgrade . """ import argparse import ast +import sys from typing import ( MutableMapping, + NamedTuple, Optional, Sequence, Set, - Tuple, ) -ERROR_MESSAGE = "Found both `pd.{name}` and `{name}` in {path}" -EXCLUDE = { - "eval", # built-in, different from `pd.eval` - "np", # pd.np is deprecated but still tested -} -Offset = Tuple[int, int] +ERROR_MESSAGE = ( + "{path}:{lineno}:{col_offset}: " + "Found both '{prefix}.{name}' and '{name}' in {path}" +) + + +class OffsetWithNamespace(NamedTuple): + lineno: int + col_offset: int + namespace: str class Visitor(ast.NodeVisitor): def __init__(self) -> None: - self.pandas_namespace: MutableMapping[Offset, str] = {} - self.no_namespace: Set[str] = set() + self.pandas_namespace: MutableMapping[OffsetWithNamespace, str] = {} + self.imported_from_pandas: Set[str] = set() def visit_Attribute(self, node: ast.Attribute) -> None: - if ( - isinstance(node.value, ast.Name) - and node.value.id == "pd" - and node.attr not in EXCLUDE - ): - self.pandas_namespace[(node.lineno, node.col_offset)] = node.attr + if isinstance(node.value, ast.Name) and node.value.id in {"pandas", "pd"}: + offset_with_namespace = OffsetWithNamespace( + node.lineno, node.col_offset, node.value.id + ) + self.pandas_namespace[offset_with_namespace] = node.attr self.generic_visit(node) - def visit_Name(self, node: ast.Name) -> None: - if node.id not in EXCLUDE: - self.no_namespace.add(node.id) + def visit_ImportFrom(self, node: ast.ImportFrom) -> None: + if node.module is not None and "pandas" in node.module: + self.imported_from_pandas.update(name.name for name in node.names) self.generic_visit(node) @@ -64,9 +71,11 @@ def replace_inconsistent_pandas_namespace(visitor: Visitor, content: str) -> str tokens = src_to_tokens(content) for n, i in reversed_enumerate(tokens): + offset_with_namespace = OffsetWithNamespace(i.offset[0], i.offset[1], i.src) if ( - i.offset in 
visitor.pandas_namespace - and visitor.pandas_namespace[i.offset] in visitor.no_namespace + offset_with_namespace in visitor.pandas_namespace + and visitor.pandas_namespace[offset_with_namespace] + in visitor.imported_from_pandas ): # Replace `pd` tokens[n] = i._replace(src="") @@ -85,16 +94,28 @@ def check_for_inconsistent_pandas_namespace( visitor = Visitor() visitor.visit(tree) - inconsistencies = visitor.no_namespace.intersection( + inconsistencies = visitor.imported_from_pandas.intersection( visitor.pandas_namespace.values() ) + if not inconsistencies: # No inconsistent namespace usage, nothing to replace. - return content + return None if not replace: - msg = ERROR_MESSAGE.format(name=inconsistencies.pop(), path=path) - raise RuntimeError(msg) + inconsistency = inconsistencies.pop() + lineno, col_offset, prefix = next( + key for key, val in visitor.pandas_namespace.items() if val == inconsistency + ) + msg = ERROR_MESSAGE.format( + lineno=lineno, + col_offset=col_offset, + prefix=prefix, + name=inconsistency, + path=path, + ) + sys.stdout.write(msg) + sys.exit(1) return replace_inconsistent_pandas_namespace(visitor, content) diff --git a/scripts/tests/test_inconsistent_namespace_check.py b/scripts/tests/test_inconsistent_namespace_check.py index 9562a30ba0ffd..eb995158d8cb4 100644 --- a/scripts/tests/test_inconsistent_namespace_check.py +++ b/scripts/tests/test_inconsistent_namespace_check.py @@ -4,35 +4,58 @@ check_for_inconsistent_pandas_namespace, ) -BAD_FILE_0 = "cat_0 = Categorical()\ncat_1 = pd.Categorical()" -BAD_FILE_1 = "cat_0 = pd.Categorical()\ncat_1 = Categorical()" -GOOD_FILE_0 = "cat_0 = Categorical()\ncat_1 = Categorical()" +BAD_FILE_0 = ( + "from pandas import Categorical\n" + "cat_0 = Categorical()\n" + "cat_1 = pd.Categorical()" +) +BAD_FILE_1 = ( + "from pandas import Categorical\n" + "cat_0 = pd.Categorical()\n" + "cat_1 = Categorical()" +) +BAD_FILE_2 = ( + "from pandas import Categorical\n" + "cat_0 = pandas.Categorical()\n" + "cat_1 = 
Categorical()" +) +GOOD_FILE_0 = ( + "from pandas import Categorical\ncat_0 = Categorical()\ncat_1 = Categorical()" +) GOOD_FILE_1 = "cat_0 = pd.Categorical()\ncat_1 = pd.Categorical()" +GOOD_FILE_2 = "from array import array\nimport pandas as pd\narr = pd.array([])" PATH = "t.py" -@pytest.mark.parametrize("content", [BAD_FILE_0, BAD_FILE_1]) -def test_inconsistent_usage(content): - msg = r"Found both `pd\.Categorical` and `Categorical` in t\.py" - with pytest.raises(RuntimeError, match=msg): +@pytest.mark.parametrize( + "content, expected", + [ + (BAD_FILE_0, "t.py:3:8: Found both 'pd.Categorical' and 'Categorical' in t.py"), + (BAD_FILE_1, "t.py:2:8: Found both 'pd.Categorical' and 'Categorical' in t.py"), + ( + BAD_FILE_2, + "t.py:2:8: Found both 'pandas.Categorical' and 'Categorical' in t.py", + ), + ], +) +def test_inconsistent_usage(content, expected, capsys): + with pytest.raises(SystemExit): check_for_inconsistent_pandas_namespace(content, PATH, replace=False) + result, _ = capsys.readouterr() + assert result == expected -@pytest.mark.parametrize("content", [GOOD_FILE_0, GOOD_FILE_1]) -def test_consistent_usage(content): +@pytest.mark.parametrize("content", [GOOD_FILE_0, GOOD_FILE_1, GOOD_FILE_2]) +@pytest.mark.parametrize("replace", [True, False]) +def test_consistent_usage(content, replace): # should not raise - check_for_inconsistent_pandas_namespace(content, PATH, replace=False) + check_for_inconsistent_pandas_namespace(content, PATH, replace=replace) -@pytest.mark.parametrize("content", [BAD_FILE_0, BAD_FILE_1]) +@pytest.mark.parametrize("content", [BAD_FILE_0, BAD_FILE_1, BAD_FILE_2]) def test_inconsistent_usage_with_replace(content): result = check_for_inconsistent_pandas_namespace(content, PATH, replace=True) - expected = "cat_0 = Categorical()\ncat_1 = Categorical()" - assert result == expected - - -@pytest.mark.parametrize("content", [GOOD_FILE_0, GOOD_FILE_1]) -def test_consistent_usage_with_replace(content): - result = 
check_for_inconsistent_pandas_namespace(content, PATH, replace=True) - expected = content + expected = ( + "from pandas import Categorical\ncat_0 = Categorical()\ncat_1 = Categorical()" + ) assert result == expected
For context, see #40468, where the error which was fixed was arguably a false-positive. I've loosened the check so that now it only errors / replaces if: - you imported an object from somewhere in pandas `e.g. from pandas.io.formats import info` - you also accessed that object as `pd.` or `pandas.` (e.g. `pd.info` or `pandas.info`) Before, it just checked whether an object appeared both with and without the `pd.` prefix, which was too tight a check - sorry about that :flushed: I've also added line number and column offset of the inconsistency for ease of checking/fixing
https://api.github.com/repos/pandas-dev/pandas/pulls/40532
2021-03-20T12:29:02Z
2021-04-02T14:50:54Z
2021-04-02T14:50:54Z
2021-04-02T16:30:17Z
REF: share external_values ArrayManager/BlockManager
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index cf153ddd2cbbd..3b7a6037a9715 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -5240,6 +5240,7 @@ Write to a feather file. Read from a feather file. .. ipython:: python + :okwarning: result = pd.read_feather("example.feather") result @@ -5323,6 +5324,7 @@ Write to a parquet file. Read from a parquet file. .. ipython:: python + :okwarning: result = pd.read_parquet("example_fp.parquet", engine="fastparquet") result = pd.read_parquet("example_pa.parquet", engine="pyarrow") diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 34b3d83c066c2..acadfcbbdf302 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -61,9 +61,7 @@ from pandas.core.arrays import ( DatetimeArray, ExtensionArray, - IntervalArray, PandasArray, - PeriodArray, TimedeltaArray, ) from pandas.core.arrays.sparse import SparseDtype @@ -87,6 +85,7 @@ ) from pandas.core.internals.blocks import ( ensure_block_shape, + external_values, new_block, to_native_types, ) @@ -1203,12 +1202,7 @@ def dtype(self): def external_values(self): """The array that Series.values returns""" - if isinstance(self.array, (PeriodArray, IntervalArray)): - return self.array.astype(object) - elif isinstance(self.array, (DatetimeArray, TimedeltaArray)): - return self.array._data - else: - return self.array + return external_values(self.array) def internal_values(self): """The array that Series._values returns""" diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 174ea8760b0db..01660e34300ce 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -96,7 +96,9 @@ ExtensionArray, FloatingArray, IntegerArray, + IntervalArray, PandasArray, + PeriodArray, TimedeltaArray, ) from pandas.core.base import PandasObject @@ -217,16 +219,9 @@ def is_view(self) -> bool: def 
is_categorical(self) -> bool: return self._holder is Categorical + @final def external_values(self): - """ - The array that Series.values returns (public attribute). - - This has some historical constraints, and is overridden in block - subclasses to return the correct array (e.g. period returns - object ndarray and datetimetz a datetime64[ns] ndarray instead of - proper extension array). - """ - return self.values + return external_values(self.values) def internal_values(self): """ @@ -1770,8 +1765,7 @@ class ObjectValuesExtensionBlock(HybridMixin, ExtensionBlock): Series[T].values is an ndarray of objects. """ - def external_values(self): - return self.values.astype(object) + pass class NumericBlock(Block): @@ -1956,12 +1950,6 @@ def is_view(self) -> bool: # check the ndarray values of the DatetimeIndex values return self.values._data.base is not None - def external_values(self): - # NB: this is different from np.asarray(self.values), since that - # return an object-dtype ndarray of Timestamps. - # Avoid FutureWarning in .astype in casting from dt64tz to dt64 - return self.values._data - class TimeDeltaBlock(DatetimeLikeBlockMixin): __slots__ = () @@ -2296,3 +2284,23 @@ def to_native_types( values[mask] = na_rep values = values.astype(object, copy=False) return values + + +def external_values(values: ArrayLike) -> ArrayLike: + """ + The array that Series.values returns (public attribute). + + This has some historical constraints, and is overridden in block + subclasses to return the correct array (e.g. period returns + object ndarray and datetimetz a datetime64[ns] ndarray instead of + proper extension array). + """ + if isinstance(values, (PeriodArray, IntervalArray)): + return values.astype(object) + elif isinstance(values, (DatetimeArray, TimedeltaArray)): + # NB: for datetime64tz this is different from np.asarray(values), since + # that returns an object-dtype ndarray of Timestamps. 
+ # Avoid FutureWarning in .astype in casting from dt64tz to dt64 + return values._data + else: + return values
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40529
2021-03-20T03:52:13Z
2021-03-22T13:07:03Z
2021-03-22T13:07:03Z
2021-03-22T14:24:10Z
CLN: ensure_platform_int earlier
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 94bd8b49777cf..a6bfff9bb00fe 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -191,7 +191,7 @@ def is_lexsorted(list_of_arrays: list) -> bint: @cython.boundscheck(False) @cython.wraparound(False) -def groupsort_indexer(const int64_t[:] index, Py_ssize_t ngroups): +def groupsort_indexer(const intp_t[:] index, Py_ssize_t ngroups): """ Compute a 1-d indexer. @@ -200,7 +200,7 @@ def groupsort_indexer(const int64_t[:] index, Py_ssize_t ngroups): Parameters ---------- - index: int64 ndarray + index: np.ndarray[np.intp] Mappings from group -> position. ngroups: int64 Number of groups. @@ -209,7 +209,7 @@ def groupsort_indexer(const int64_t[:] index, Py_ssize_t ngroups): ------- ndarray[intp_t, ndim=1] Indexer - ndarray[int64_t, ndim=1] + ndarray[intp_t, ndim=1] Group Counts Notes @@ -218,13 +218,12 @@ def groupsort_indexer(const int64_t[:] index, Py_ssize_t ngroups): """ cdef: Py_ssize_t i, loc, label, n - ndarray[int64_t] counts, where - ndarray[intp_t] indexer + ndarray[intp_t] indexer, where, counts - counts = np.zeros(ngroups + 1, dtype=np.int64) + counts = np.zeros(ngroups + 1, dtype=np.intp) n = len(index) indexer = np.zeros(n, dtype=np.intp) - where = np.zeros(ngroups + 1, dtype=np.int64) + where = np.zeros(ngroups + 1, dtype=np.intp) with nogil: diff --git a/pandas/_libs/algos_take_helper.pxi.in b/pandas/_libs/algos_take_helper.pxi.in index cdf4ef3b119d2..929cb86c41036 100644 --- a/pandas/_libs/algos_take_helper.pxi.in +++ b/pandas/_libs/algos_take_helper.pxi.in @@ -8,6 +8,32 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in # take_1d, take_2d # ---------------------------------------------------------------------- + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_1d_intp_intp( + const intp_t[:] values, + const intp_t[:] indexer, + intp_t[::1] out, + intp_t fill_value=-1, +): + cdef: + Py_ssize_t i, n, idx + intp_t fv + + n = 
indexer.shape[0] + + fv = fill_value + + with nogil: + for i in range(n): + idx = indexer[i] + if idx == -1: + out[i] = fv + else: + out[i] = values[idx] + + {{py: # c_type_in, c_type_out diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index f09a6c04aecbf..11e08bfd181b0 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -37,6 +37,7 @@ from pandas._libs.util cimport ( ) from pandas._libs.algos import ( + ensure_platform_int, groupsort_indexer, rank_1d, take_2d_axis1_float64_float64, @@ -111,7 +112,7 @@ def group_median_float64(ndarray[float64_t, ndim=2] out, """ cdef: Py_ssize_t i, j, N, K, ngroups, size - ndarray[int64_t] _counts + ndarray[intp_t] _counts ndarray[float64_t, ndim=2] data ndarray[intp_t] indexer float64_t* ptr @@ -121,7 +122,7 @@ def group_median_float64(ndarray[float64_t, ndim=2] out, ngroups = len(counts) N, K = (<object>values).shape - indexer, _counts = groupsort_indexer(labels, ngroups) + indexer, _counts = groupsort_indexer(ensure_platform_int(labels), ngroups) counts[:] = _counts[1:] data = np.empty((K, N), dtype=np.float64) diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx index c2947de943e1a..7888a15a7cb26 100644 --- a/pandas/_libs/join.pyx +++ b/pandas/_libs/join.pyx @@ -21,10 +21,9 @@ from numpy cimport ( cnp.import_array() from pandas._libs.algos import ( - ensure_int64, - ensure_platform_int, groupsort_indexer, take_1d_int64_int64, + take_1d_intp_intp, ) @@ -34,16 +33,16 @@ def inner_join(const intp_t[:] left, const intp_t[:] right, cdef: Py_ssize_t i, j, k, count = 0 ndarray[intp_t] left_sorter, right_sorter - ndarray[int64_t] left_count, right_count - ndarray[int64_t] left_indexer, right_indexer - int64_t lc, rc + ndarray[intp_t] left_count, right_count + ndarray[intp_t] left_indexer, right_indexer + intp_t lc, rc Py_ssize_t loc, left_pos = 0, right_pos = 0, position = 0 Py_ssize_t offset # NA group in location 0 - left_sorter, left_count = groupsort_indexer(ensure_int64(left), max_groups) 
- right_sorter, right_count = groupsort_indexer(ensure_int64(right), max_groups) + left_sorter, left_count = groupsort_indexer(left, max_groups) + right_sorter, right_count = groupsort_indexer(right, max_groups) with nogil: # First pass, determine size of result set, do not use the NA group @@ -58,8 +57,8 @@ def inner_join(const intp_t[:] left, const intp_t[:] right, left_pos = left_count[0] right_pos = right_count[0] - left_indexer = np.empty(count, dtype=np.int64) - right_indexer = np.empty(count, dtype=np.int64) + left_indexer = np.empty(count, dtype=np.intp) + right_indexer = np.empty(count, dtype=np.intp) with nogil: for i in range(1, max_groups + 1): @@ -85,17 +84,17 @@ def left_outer_join(const intp_t[:] left, const intp_t[:] right, Py_ssize_t max_groups, bint sort=True): cdef: Py_ssize_t i, j, k, count = 0 - ndarray[int64_t] left_count, right_count + ndarray[intp_t] left_count, right_count ndarray[intp_t] rev, left_sorter, right_sorter - ndarray[int64_t] left_indexer, right_indexer - int64_t lc, rc + ndarray[intp_t] left_indexer, right_indexer + intp_t lc, rc Py_ssize_t loc, left_pos = 0, right_pos = 0, position = 0 Py_ssize_t offset # NA group in location 0 - left_sorter, left_count = groupsort_indexer(ensure_int64(left), max_groups) - right_sorter, right_count = groupsort_indexer(ensure_int64(right), max_groups) + left_sorter, left_count = groupsort_indexer(left, max_groups) + right_sorter, right_count = groupsort_indexer(right, max_groups) with nogil: # First pass, determine size of result set, do not use the NA group @@ -109,8 +108,8 @@ def left_outer_join(const intp_t[:] left, const intp_t[:] right, left_pos = left_count[0] right_pos = right_count[0] - left_indexer = np.empty(count, dtype=np.int64) - right_indexer = np.empty(count, dtype=np.int64) + left_indexer = np.empty(count, dtype=np.intp) + right_indexer = np.empty(count, dtype=np.intp) with nogil: for i in range(1, max_groups + 1): @@ -142,11 +141,10 @@ def left_outer_join(const intp_t[:] left, 
const intp_t[:] right, # this is a short-cut to avoid groupsort_indexer # otherwise, the `else` path also works in this case rev = np.empty(len(left), dtype=np.intp) - rev.put(ensure_platform_int(left_sorter), np.arange(len(left))) + rev.put(left_sorter, np.arange(len(left))) else: rev, _ = groupsort_indexer(left_indexer, len(left)) - rev = ensure_platform_int(rev) right_indexer = right_indexer.take(rev) left_indexer = left_indexer.take(rev) @@ -159,16 +157,16 @@ def full_outer_join(const intp_t[:] left, const intp_t[:] right, cdef: Py_ssize_t i, j, k, count = 0 ndarray[intp_t] left_sorter, right_sorter - ndarray[int64_t] left_count, right_count - ndarray[int64_t] left_indexer, right_indexer - int64_t lc, rc - int64_t left_pos = 0, right_pos = 0 + ndarray[intp_t] left_count, right_count + ndarray[intp_t] left_indexer, right_indexer + intp_t lc, rc + intp_t left_pos = 0, right_pos = 0 Py_ssize_t offset, position = 0 # NA group in location 0 - left_sorter, left_count = groupsort_indexer(ensure_int64(left), max_groups) - right_sorter, right_count = groupsort_indexer(ensure_int64(right), max_groups) + left_sorter, left_count = groupsort_indexer(left, max_groups) + right_sorter, right_count = groupsort_indexer(right, max_groups) with nogil: # First pass, determine size of result set, do not use the NA group @@ -185,8 +183,8 @@ def full_outer_join(const intp_t[:] left, const intp_t[:] right, left_pos = left_count[0] right_pos = right_count[0] - left_indexer = np.empty(count, dtype=np.int64) - right_indexer = np.empty(count, dtype=np.int64) + left_indexer = np.empty(count, dtype=np.intp) + right_indexer = np.empty(count, dtype=np.intp) with nogil: for i in range(1, max_groups + 1): @@ -217,31 +215,29 @@ def full_outer_join(const intp_t[:] left, const intp_t[:] right, _get_result_indexer(right_sorter, right_indexer)) -cdef ndarray[int64_t] _get_result_indexer( - ndarray[intp_t] sorter, ndarray[int64_t] indexer +cdef ndarray[intp_t] _get_result_indexer( + ndarray[intp_t] 
sorter, ndarray[intp_t] indexer ): if len(sorter) > 0: # cython-only equivalent to # `res = algos.take_nd(sorter, indexer, fill_value=-1)` - res = np.empty(len(indexer), dtype=np.int64) - take_1d_int64_int64(ensure_int64(sorter), ensure_platform_int(indexer), res, -1) - # FIXME: sorter is intp_t, not int64_t, opposite for indexer; - # will this break on 32bit builds? + res = np.empty(len(indexer), dtype=np.intp) + take_1d_intp_intp(sorter, indexer, res, -1) else: # length-0 case - res = np.empty(len(indexer), dtype=np.int64) + res = np.empty(len(indexer), dtype=np.intp) res[:] = -1 return res -def ffill_indexer(const int64_t[:] indexer): +def ffill_indexer(const intp_t[:] indexer): cdef: Py_ssize_t i, n = len(indexer) - ndarray[int64_t] result - int64_t val, last_obs + ndarray[intp_t] result + intp_t val, last_obs - result = np.empty(n, dtype=np.int64) + result = np.empty(n, dtype=np.intp) last_obs = -1 for i in range(n): diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 1398db6960cc8..3c88590991d77 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1981,9 +1981,9 @@ def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]: """ categories = self.categories r, counts = libalgos.groupsort_indexer( - self.codes.astype("int64", copy=False), categories.size + ensure_platform_int(self.codes), categories.size ) - counts = counts.cumsum() + counts = ensure_int64(counts).cumsum() _result = (r[start:end] for start, end in zip(counts, counts[1:])) return dict(zip(categories, _result)) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index e5e7b446d9cb2..094f4a67d2e61 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4154,7 +4154,7 @@ def _get_leaf_sorter(labels: List[np.ndarray]) -> np.ndarray: return np.empty(0, dtype=np.intp) if len(labels) == 1: - return get_group_index_sorter(labels[0]) + return 
get_group_index_sorter(ensure_platform_int(labels[0])) # find indexers of beginning of each set of # same-key labels w.r.t all but last level @@ -4224,7 +4224,7 @@ def _get_leaf_sorter(labels: List[np.ndarray]) -> np.ndarray: if level == 0: # outer most level, take the fast route ngroups = 1 + new_lev_codes.max() left_indexer, counts = libalgos.groupsort_indexer( - ensure_int64(new_lev_codes), ngroups + new_lev_codes, ngroups ) # missing values are placed first; drop them! diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 88fcc13502439..02c41538ca123 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -604,7 +604,7 @@ def get_group_index_sorter( (alpha + beta * ngroups) < (count * np.log(count)) # type: ignore[operator] ) if do_groupsort: - sorter, _ = algos.groupsort_indexer(ensure_int64(group_index), ngroups) + sorter, _ = algos.groupsort_indexer(ensure_platform_int(group_index), ngroups) # sorter _should_ already be intp, but mypy is not yet able to verify else: sorter = group_index.argsort(kind="mergesort") diff --git a/pandas/tests/libs/test_join.py b/pandas/tests/libs/test_join.py index f5426c71511bb..eeb66f8941260 100644 --- a/pandas/tests/libs/test_join.py +++ b/pandas/tests/libs/test_join.py @@ -264,8 +264,8 @@ def test_left_outer_join_bug(): lidx, ridx = libjoin.left_outer_join(left, right, max_groups, sort=False) - exp_lidx = np.arange(len(left), dtype=np.int64) - exp_ridx = -np.ones(len(left), dtype=np.int64) + exp_lidx = np.arange(len(left), dtype=np.intp) + exp_ridx = -np.ones(len(left), dtype=np.intp) exp_ridx[left == 1] = 1 exp_ridx[left == 3] = 0 diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index c8df18ddaeebe..cd800b3f3a452 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -2116,8 +2116,8 @@ def test_is_lexsorted(): def test_groupsort_indexer(): - a = np.random.randint(0, 1000, 100).astype(np.int64) - b = np.random.randint(0, 1000, 100).astype(np.int64) + a = 
np.random.randint(0, 1000, 100).astype(np.intp) + b = np.random.randint(0, 1000, 100).astype(np.intp) result = libalgos.groupsort_indexer(a, 1000)[0]
https://api.github.com/repos/pandas-dev/pandas/pulls/40528
2021-03-20T03:48:52Z
2021-03-23T20:31:50Z
2021-03-23T20:31:50Z
2021-03-23T20:39:48Z
DEPR: CategoricalBlock; combine Block.replace methods
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index ea9f9abc4a4c7..da2fa304e5b07 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -9,7 +9,6 @@ ) from pandas.core.internals.blocks import ( # io.pytables, io.packers Block, - CategoricalBlock, DatetimeBlock, DatetimeTZBlock, ExtensionBlock, @@ -28,7 +27,6 @@ __all__ = [ "Block", - "CategoricalBlock", "NumericBlock", "DatetimeBlock", "DatetimeTZBlock", @@ -48,3 +46,18 @@ "create_block_manager_from_arrays", "create_block_manager_from_blocks", ] + + +def __getattr__(name: str): + import warnings + + if name == "CategoricalBlock": + warnings.warn( + "CategoricalBlock is deprecated and will be removed in a future version. " + "Use ExtensionBlock instead.", + FutureWarning, + stacklevel=2, + ) + return ExtensionBlock + + raise AttributeError(f"module 'pandas.core.internals' has no attribute '{name}'") diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 99e54bace8915..174ea8760b0db 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -673,6 +673,7 @@ def copy(self, deep: bool = True): # --------------------------------------------------------------------- # Replace + @final def replace( self, to_replace, @@ -687,6 +688,23 @@ def replace( """ inplace = validate_bool_kwarg(inplace, "inplace") + # Note: the checks we do in NDFrame.replace ensure we never get + # here with listlike to_replace or value, as those cases + # go through _replace_list + + values = self.values + + if isinstance(values, Categorical): + # TODO: avoid special-casing + blk = self if inplace else self.copy() + blk.values.replace(to_replace, value, inplace=True) + return [blk] + + regex = should_use_regex(regex, to_replace) + + if regex: + return self._replace_regex(to_replace, value, inplace=inplace) + if not self._can_hold_element(to_replace): # We cannot hold `to_replace`, so we know immediately that # replacing 
it is a no-op. @@ -694,8 +712,6 @@ def replace( # replace_list instead of replace. return [self] if inplace else [self.copy()] - values = self.values - mask = missing.mask_missing(values, to_replace) if not mask.any(): # Note: we get here with test_replace_extension_other incorrectly @@ -720,7 +736,7 @@ def replace( else: # split so that we only upcast where necessary return self.split_and_operate( - type(self).replace, to_replace, value, inplace=inplace, regex=regex + type(self).replace, to_replace, value, inplace=True, regex=regex ) @final @@ -1223,7 +1239,7 @@ def take_nd( Take values according to indexer and return them as a block.bb """ - # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock + # algos.take_nd dispatches for DatetimeTZBlock # so need to preserve types # sparse is treated like an ndarray, but needs .get_values() shaping @@ -1422,7 +1438,7 @@ class ExtensionBlock(Block): Notes ----- This holds all 3rd-party extension array types. It's also the immediate - parent class for our internal extension types' blocks, CategoricalBlock. + parent class for our internal extension types' blocks. ExtensionArrays are limited to 1-D. """ @@ -1579,7 +1595,6 @@ def take_nd( def _can_hold_element(self, element: Any) -> bool: # TODO: We may need to think about pushing this onto the array. - # We're doing the same as CategoricalBlock here. 
return True def _slice(self, slicer): @@ -2019,41 +2034,6 @@ def _maybe_downcast(self, blocks: List[Block], downcast=None) -> List[Block]: def _can_hold_element(self, element: Any) -> bool: return True - def replace( - self, - to_replace, - value, - inplace: bool = False, - regex: bool = False, - ) -> List[Block]: - # Note: the checks we do in NDFrame.replace ensure we never get - # here with listlike to_replace or value, as those cases - # go through _replace_list - - regex = should_use_regex(regex, to_replace) - - if regex: - return self._replace_regex(to_replace, value, inplace=inplace) - else: - return super().replace(to_replace, value, inplace=inplace, regex=False) - - -class CategoricalBlock(ExtensionBlock): - __slots__ = () - - def replace( - self, - to_replace, - value, - inplace: bool = False, - regex: bool = False, - ) -> List[Block]: - inplace = validate_bool_kwarg(inplace, "inplace") - result = self if inplace else self.copy() - - result.values.replace(to_replace, value, inplace=True) - return [result] - # ----------------------------------------------------------------- # Constructor Helpers @@ -2116,7 +2096,7 @@ def get_block_type(values, dtype: Optional[Dtype] = None): # Need this first(ish) so that Sparse[datetime] is sparse cls = ExtensionBlock elif isinstance(dtype, CategoricalDtype): - cls = CategoricalBlock + cls = ExtensionBlock elif vtype is Timestamp: cls = DatetimeTZBlock elif vtype is Interval or vtype is Period: diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index ea264da4c7b5f..da78fc5dfba76 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -67,7 +67,6 @@ ) from pandas.core.internals.blocks import ( Block, - CategoricalBlock, DatetimeTZBlock, ExtensionBlock, ObjectValuesExtensionBlock, @@ -1867,13 +1866,6 @@ def _form_blocks( object_blocks = _simple_blockify(items_dict["ObjectBlock"], np.object_) blocks.extend(object_blocks) - if 
len(items_dict["CategoricalBlock"]) > 0: - cat_blocks = [ - new_block(array, klass=CategoricalBlock, placement=i, ndim=2) - for i, array in items_dict["CategoricalBlock"] - ] - blocks.extend(cat_blocks) - if len(items_dict["ExtensionBlock"]): external_blocks = [ new_block(array, klass=ExtensionBlock, placement=i, ndim=2) diff --git a/pandas/tests/internals/test_api.py b/pandas/tests/internals/test_api.py index d4630b20db85f..0665a07c482f9 100644 --- a/pandas/tests/internals/test_api.py +++ b/pandas/tests/internals/test_api.py @@ -26,7 +26,6 @@ def test_namespace(): ] expected = [ "Block", - "CategoricalBlock", "NumericBlock", "DatetimeBlock", "DatetimeTZBlock", diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index e530f3e37883a..21d55e40a07fb 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -258,6 +258,7 @@ def test_read_expands_user_home_dir( ), ], ) + @pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:FutureWarning") def test_read_fspath_all(self, reader, module, path, datapath): pytest.importorskip(module) path = datapath(*path) diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index ab0b3b08a11e8..162094428dbc0 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -20,6 +20,7 @@ @filter_sparse @pytest.mark.single +@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:FutureWarning") class TestFeather: def check_error_on_write(self, df, exc, err_msg): # check that we are raising the exception diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 3ef77d2fbacd0..f3cfa033409cb 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -573,6 +573,7 @@ def test_write_column_index_nonstring(self, pa): self.check_error_on_write(df, engine, ValueError, msg) +@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:FutureWarning") class 
TestParquetPyArrow(Base): def test_basic(self, pa, df_full):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40527
2021-03-20T01:59:54Z
2021-03-20T15:41:37Z
2021-03-20T15:41:37Z
2021-03-20T16:53:03Z
CLN: remove FloatBlock, share _can_hold_na
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index cf153ddd2cbbd..3b7a6037a9715 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -5240,6 +5240,7 @@ Write to a feather file. Read from a feather file. .. ipython:: python + :okwarning: result = pd.read_feather("example.feather") result @@ -5323,6 +5324,7 @@ Write to a parquet file. Read from a parquet file. .. ipython:: python + :okwarning: result = pd.read_parquet("example_fp.parquet", engine="fastparquet") result = pd.read_parquet("example_pa.parquet", engine="pyarrow") diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index da2fa304e5b07..1be8df2fabfd4 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -12,7 +12,6 @@ DatetimeBlock, DatetimeTZBlock, ExtensionBlock, - FloatBlock, NumericBlock, ObjectBlock, TimeDeltaBlock, @@ -31,7 +30,6 @@ "DatetimeBlock", "DatetimeTZBlock", "ExtensionBlock", - "FloatBlock", "ObjectBlock", "TimeDeltaBlock", "make_block", diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 174ea8760b0db..c2d7dd28c6d80 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -157,7 +157,6 @@ class Block(PandasObject): is_bool = False is_object = False is_extension = False - _can_hold_na = False _can_consolidate = True _validate_ndim = True @@ -212,6 +211,17 @@ def is_view(self) -> bool: values = cast(np.ndarray, values) return values.base is not None + @final + @property + def _can_hold_na(self) -> bool: + """ + Can we store NA values in this Block? + """ + values = self.values + if isinstance(values, np.ndarray): + return values.dtype.kind not in ["b", "i", "u"] + return values._can_hold_na + @final @property def is_categorical(self) -> bool: @@ -1504,11 +1514,6 @@ def _holder(self): # For extension blocks, the holder is values-dependent. 
return type(self.values) - @property - def _can_hold_na(self): - # The default ExtensionArray._can_hold_na is True - return self._holder._can_hold_na - @property def is_view(self) -> bool: """Extension arrays are never treated as views.""" @@ -1787,19 +1792,11 @@ def _can_hold_element(self, element: Any) -> bool: # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]" return can_hold_element(self.dtype, element) # type: ignore[arg-type] - @property - def _can_hold_na(self): - return self.dtype.kind not in ["b", "i", "u"] - @property def is_bool(self): return self.dtype.kind == "b" -class FloatBlock(NumericBlock): - __slots__ = () - - class NDArrayBackedExtensionBlock(HybridMixin, Block): """ Block backed by an NDArrayBackedExtensionArray @@ -1909,7 +1906,6 @@ class DatetimeLikeBlockMixin(NDArrayBackedExtensionBlock): """Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock.""" is_numeric = False - _can_hold_na = True def array_values(self): return ensure_wrapped_if_datetimelike(self.values) @@ -1938,7 +1934,6 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): __slots__ = () is_extension = True - _can_hold_na = True is_numeric = False internal_values = Block.internal_values @@ -1970,7 +1965,6 @@ class TimeDeltaBlock(DatetimeLikeBlockMixin): class ObjectBlock(Block): __slots__ = () is_object = True - _can_hold_na = True values: np.ndarray @@ -2109,9 +2103,7 @@ def get_block_type(values, dtype: Optional[Dtype] = None): cls = DatetimeBlock elif kind == "m": cls = TimeDeltaBlock - elif kind == "f": - cls = FloatBlock - elif kind in ["c", "i", "u", "b"]: + elif kind in ["f", "c", "i", "u", "b"]: cls = NumericBlock else: cls = ObjectBlock diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index da78fc5dfba76..35c21fb8eefa8 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1839,13 +1839,9 @@ def _form_blocks( items_dict[block_type.__name__].append((i, v)) blocks: List[Block] = 
[] - if len(items_dict["FloatBlock"]): - float_blocks = _multi_blockify(items_dict["FloatBlock"]) - blocks.extend(float_blocks) - if len(items_dict["NumericBlock"]): - complex_blocks = _multi_blockify(items_dict["NumericBlock"]) - blocks.extend(complex_blocks) + numeric_blocks = _multi_blockify(items_dict["NumericBlock"]) + blocks.extend(numeric_blocks) if len(items_dict["TimeDeltaBlock"]): timedelta_blocks = _multi_blockify(items_dict["TimeDeltaBlock"]) diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py index ee46d13055010..2402c70a166b7 100644 --- a/pandas/tests/extension/test_external_block.py +++ b/pandas/tests/extension/test_external_block.py @@ -14,7 +14,9 @@ class CustomBlock(ExtensionBlock): _holder = np.ndarray - _can_hold_na = False + # error: Cannot override final attribute "_can_hold_na" + # (previously declared in base class "Block") + _can_hold_na = False # type: ignore[misc] @pytest.fixture diff --git a/pandas/tests/internals/test_api.py b/pandas/tests/internals/test_api.py index 0665a07c482f9..60fbd2da70e79 100644 --- a/pandas/tests/internals/test_api.py +++ b/pandas/tests/internals/test_api.py @@ -30,7 +30,6 @@ def test_namespace(): "DatetimeBlock", "DatetimeTZBlock", "ExtensionBlock", - "FloatBlock", "ObjectBlock", "TimeDeltaBlock", "make_block",
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40526
2021-03-20T01:49:55Z
2021-03-22T13:03:21Z
2021-03-22T13:03:21Z
2021-03-22T14:28:26Z
BUG: to_json failing on PyPy
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index edf23bf89d7e1..c7bc337239faf 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -16,6 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in :meth:`DataFrame.sum` when ``min_count`` greater than the :class:`DataFrame` shape was passed resulted in a ``ValueError`` (:issue:`39738`) +- Fixed regression in :meth:`DataFrame.to_json` raising ``AttributeError`` when run on PyPy (:issue:`39837`) - .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index f9fc5c301b3b2..bbcee479aeb5a 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -272,18 +272,6 @@ static PyObject *get_sub_attr(PyObject *obj, char *attr, char *subAttr) { return ret; } -static int is_simple_frame(PyObject *obj) { - PyObject *check = get_sub_attr(obj, "_mgr", "is_mixed_type"); - int ret = (check == Py_False); - - if (!check) { - return 0; - } - - Py_DECREF(check); - return ret; -} - static Py_ssize_t get_attr_length(PyObject *obj, char *attr) { PyObject *tmp = PyObject_GetAttrString(obj, attr); Py_ssize_t ret; @@ -301,6 +289,17 @@ static Py_ssize_t get_attr_length(PyObject *obj, char *attr) { return ret; } +static int is_simple_frame(PyObject *obj) { + PyObject *mgr = PyObject_GetAttrString(obj, "_mgr"); + if (!mgr) { + return 0; + } + int ret = (get_attr_length(mgr, "blocks") <= 1); + + Py_DECREF(mgr); + return ret; +} + static npy_int64 get_long_attr(PyObject *o, const char *attr) { npy_int64 long_val; PyObject *value = PyObject_GetAttrString(o, attr);
- [x] closes #39837 - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Not sure how best to test this - confirmed this now works when run on PyPy, but since we don't test against PyPy the examples in the issue (or any other simple case) which fail on PyPy will still pass on master. Benchmarks look similar, might be minor performance improvement for the single block case: ``` before after ratio [b524462e] [b1f4bcc8] <master> <bug/is_mixed_type> 194±5ms 190±2ms 0.98 io.json.ToJSONISO.time_iso_format('columns') 208±5ms 198±6ms 0.95 io.json.ToJSONISO.time_iso_format('index') 186±4ms 173±3ms 0.93 io.json.ToJSONISO.time_iso_format('records') 193±10ms 187±3ms 0.97 io.json.ToJSONISO.time_iso_format('split') 164±4ms 165±3ms 1.00 io.json.ToJSONISO.time_iso_format('values') 115±4ms 114±3ms 0.99 io.json.ToJSONLines.time_delta_int_tstamp_lines 142±4ms 132±6ms 0.93 io.json.ToJSONLines.time_float_int_lines 140±4ms 142±4ms 1.02 io.json.ToJSONLines.time_float_int_str_lines 152±2ms 153±3ms 1.01 io.json.ToJSONLines.time_float_longint_str_lines 110±3ms 107±3ms 0.97 io.json.ToJSONLines.time_floats_with_dt_index_lines 112±3ms 104±3ms 0.93 io.json.ToJSONLines.time_floats_with_int_idex_lines 54.3M 55M 1.01 io.json.ToJSONMem.peakmem_float 55.4M 55.2M 1.00 io.json.ToJSONMem.peakmem_int 88.6±2ms 78.8±2ms 0.89 io.json.ToJSON.time_to_json('index', 'df') 70.7±2ms 61.7±2ms 0.87 io.json.ToJSON.time_to_json('values', 'df') 94.9±3ms 81.4±1ms 0.86 io.json.ToJSON.time_to_json('index', 'df_date_idx') 76.3±0.9ms 65.1±1ms 0.85 io.json.ToJSON.time_to_json('records', 'df_date_idx') ```
https://api.github.com/repos/pandas-dev/pandas/pulls/40525
2021-03-20T00:20:37Z
2021-03-23T15:17:56Z
2021-03-23T15:17:56Z
2021-03-23T15:19:21Z
TYP: io.sas
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 392dfa22ee67b..05cc742b45d83 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -13,6 +13,8 @@ Reference for binary data compression: http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm """ +from __future__ import annotations + from collections import abc from datetime import ( datetime, @@ -34,7 +36,10 @@ ) import pandas as pd -from pandas import isna +from pandas import ( + DataFrame, + isna, +) from pandas.io.common import get_handle from pandas.io.sas._sas import Parser @@ -150,6 +155,9 @@ class SAS7BDATReader(ReaderBase, abc.Iterator): bytes. """ + _int_length: int + _cached_page: bytes | None + def __init__( self, path_or_buf, @@ -198,29 +206,29 @@ def __init__( self.close() raise - def column_data_lengths(self): + def column_data_lengths(self) -> np.ndarray: """Return a numpy int64 array of the column data lengths""" return np.asarray(self._column_data_lengths, dtype=np.int64) - def column_data_offsets(self): + def column_data_offsets(self) -> np.ndarray: """Return a numpy int64 array of the column offsets""" return np.asarray(self._column_data_offsets, dtype=np.int64) - def column_types(self): + def column_types(self) -> np.ndarray: """ Returns a numpy character array of the column types: s (string) or d (double) """ return np.asarray(self._column_types, dtype=np.dtype("S1")) - def close(self): + def close(self) -> None: self.handles.close() - def _get_properties(self): + def _get_properties(self) -> None: # Check magic number self._path_or_buf.seek(0) - self._cached_page = self._path_or_buf.read(288) + self._cached_page = cast(bytes, self._path_or_buf.read(288)) if self._cached_page[0 : len(const.magic)] != const.magic: raise ValueError("magic number mismatch (not a SAS file?)") @@ -294,9 +302,11 @@ def _get_properties(self): ) # Read the rest of the header into cached_page. 
- buf = self._path_or_buf.read(self.header_length - 288) + buf = cast(bytes, self._path_or_buf.read(self.header_length - 288)) self._cached_page += buf - if len(self._cached_page) != self.header_length: + # error: Argument 1 to "len" has incompatible type "Optional[bytes]"; + # expected "Sized" + if len(self._cached_page) != self.header_length: # type: ignore[arg-type] raise ValueError("The SAS7BDAT file appears to be truncated.") self._page_length = self._read_int( @@ -355,7 +365,7 @@ def __next__(self): return da # Read a single float of the given width (4 or 8). - def _read_float(self, offset, width): + def _read_float(self, offset: int, width: int): if width not in (4, 8): self.close() raise ValueError("invalid float width") @@ -388,24 +398,24 @@ def _read_bytes(self, offset: int, length: int): raise ValueError("The cached page is too small.") return self._cached_page[offset : offset + length] - def _parse_metadata(self): + def _parse_metadata(self) -> None: done = False while not done: - self._cached_page = self._path_or_buf.read(self._page_length) + self._cached_page = cast(bytes, self._path_or_buf.read(self._page_length)) if len(self._cached_page) <= 0: break if len(self._cached_page) != self._page_length: raise ValueError("Failed to read a meta data page from the SAS file.") done = self._process_page_meta() - def _process_page_meta(self): + def _process_page_meta(self) -> bool: self._read_page_header() pt = [const.page_meta_type, const.page_amd_type] + const.page_mix_types if self._current_page_type in pt: self._process_page_metadata() is_data_page = self._current_page_type & const.page_data_type is_mix_page = self._current_page_type in const.page_mix_types - return ( + return bool( is_data_page or is_mix_page or self._current_page_data_subheader_pointers != [] @@ -422,7 +432,7 @@ def _read_page_header(self): tx, const.subheader_count_length ) - def _process_page_metadata(self): + def _process_page_metadata(self) -> None: bit_offset = self._page_bit_offset 
for i in range(self._current_page_subheaders_count): @@ -439,7 +449,8 @@ def _process_page_metadata(self): ) self._process_subheader(subheader_index, pointer) - def _get_subheader_index(self, signature, compression, ptype): + def _get_subheader_index(self, signature: bytes, compression, ptype) -> int: + # TODO: return here could be made an enum index = const.subheader_signature_to_index.get(signature) if index is None: f1 = (compression == const.compressed_subheader_id) or (compression == 0) @@ -451,7 +462,9 @@ def _get_subheader_index(self, signature, compression, ptype): raise ValueError("Unknown subheader signature") return index - def _process_subheader_pointers(self, offset: int, subheader_pointer_index: int): + def _process_subheader_pointers( + self, offset: int, subheader_pointer_index: int + ) -> _SubheaderPointer: subheader_pointer_length = self._subheader_pointer_length total_offset = offset + subheader_pointer_length * subheader_pointer_index @@ -473,11 +486,13 @@ def _process_subheader_pointers(self, offset: int, subheader_pointer_index: int) return x - def _read_subheader_signature(self, offset): + def _read_subheader_signature(self, offset: int) -> bytes: subheader_signature = self._read_bytes(offset, self._int_length) return subheader_signature - def _process_subheader(self, subheader_index, pointer): + def _process_subheader( + self, subheader_index: int, pointer: _SubheaderPointer + ) -> None: offset = pointer.offset length = pointer.length @@ -505,7 +520,7 @@ def _process_subheader(self, subheader_index, pointer): processor(offset, length) - def _process_rowsize_subheader(self, offset, length): + def _process_rowsize_subheader(self, offset: int, length: int) -> None: int_len = self._int_length lcs_offset = offset @@ -534,7 +549,7 @@ def _process_rowsize_subheader(self, offset, length): self._lcs = self._read_int(lcs_offset, 2) self._lcp = self._read_int(lcp_offset, 2) - def _process_columnsize_subheader(self, offset, length): + def 
_process_columnsize_subheader(self, offset: int, length: int) -> None: int_len = self._int_length offset += int_len self.column_count = self._read_int(offset, int_len) @@ -545,10 +560,10 @@ def _process_columnsize_subheader(self, offset, length): ) # Unknown purpose - def _process_subheader_counts(self, offset, length): + def _process_subheader_counts(self, offset: int, length: int) -> None: pass - def _process_columntext_subheader(self, offset, length): + def _process_columntext_subheader(self, offset: int, length: int) -> None: offset += self._int_length text_block_size = self._read_int(offset, const.text_block_size_length) @@ -600,7 +615,7 @@ def _process_columntext_subheader(self, offset, length): self.encoding or self.default_encoding ) - def _process_columnname_subheader(self, offset, length): + def _process_columnname_subheader(self, offset: int, length: int) -> None: int_len = self._int_length offset += int_len column_name_pointers_count = (length - 2 * int_len - 12) // 8 @@ -632,7 +647,7 @@ def _process_columnname_subheader(self, offset, length): name_str = self.column_names_strings[idx] self.column_names.append(name_str[col_offset : col_offset + col_len]) - def _process_columnattributes_subheader(self, offset, length): + def _process_columnattributes_subheader(self, offset: int, length: int) -> None: int_len = self._int_length column_attributes_vectors_count = (length - 2 * int_len - 12) // (int_len + 8) for i in range(column_attributes_vectors_count): @@ -658,11 +673,11 @@ def _process_columnattributes_subheader(self, offset, length): x = self._read_int(col_types, const.column_type_length) self._column_types.append(b"d" if x == 1 else b"s") - def _process_columnlist_subheader(self, offset, length): + def _process_columnlist_subheader(self, offset: int, length: int) -> None: # unknown purpose pass - def _process_format_subheader(self, offset, length): + def _process_format_subheader(self, offset: int, length: int) -> None: int_len = self._int_length 
text_subheader_format = ( offset + const.column_format_text_subheader_index_offset + 3 * int_len @@ -711,7 +726,7 @@ def _process_format_subheader(self, offset, length): self.column_formats.append(column_format) self.columns.append(col) - def read(self, nrows=None): + def read(self, nrows: int | None = None) -> DataFrame | None: if (nrows is None) and (self.chunksize is not None): nrows = self.chunksize @@ -747,7 +762,7 @@ def read(self, nrows=None): def _read_next_page(self): self._current_page_data_subheader_pointers = [] - self._cached_page = self._path_or_buf.read(self._page_length) + self._cached_page = cast(bytes, self._path_or_buf.read(self._page_length)) if len(self._cached_page) <= 0: return True elif len(self._cached_page) != self._page_length: @@ -770,12 +785,12 @@ def _read_next_page(self): return False - def _chunk_to_dataframe(self): + def _chunk_to_dataframe(self) -> DataFrame: n = self._current_row_in_chunk_index m = self._current_row_in_file_index ix = range(m - n, m) - rslt = pd.DataFrame(index=ix) + rslt = DataFrame(index=ix) js, jb = 0, 0 for j in range(self.column_count):
The way we pass state back and forth is a bit nutty, punting on that.
https://api.github.com/repos/pandas-dev/pandas/pulls/40524
2021-03-19T22:23:55Z
2021-03-23T16:35:07Z
2021-03-23T16:35:07Z
2021-03-23T16:36:18Z
DOC/MAINT: Clean up docs build warnings
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 864bd0684d445..53929ad9eadc8 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -305,7 +305,7 @@ class Categorical(NDArrayBackedExtensionArray, PandasObject, ObjectStringArrayMi Notes ----- See the `user guide - <https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_ + <https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`__ for more. Examples diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index ea561dca9a090..1cc0465121335 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -154,7 +154,7 @@ Notes ----- See the `user guide -<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`_ +<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`__ for more. %(examples)s\ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7138995d1b018..c20b2840a40ab 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7958,7 +7958,7 @@ def resample( Notes ----- See the `user guide - <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_ + <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__ for more. To learn more about the offset strings, please see `this link diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index f372db5287604..c132712020cac 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -150,7 +150,7 @@ class CategoricalIndex(NDArrayBackedExtensionIndex, accessor.PandasDelegate): Notes ----- See the `user guide - <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`_ + <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`__ for more. 
Examples diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 97492f35232e3..ea2667246b4d3 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -267,7 +267,7 @@ class MultiIndex(Index): Notes ----- See the `user guide - <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`_ + <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`__ for more. Examples diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index 49eb87a3bc8ba..66d84ef85880c 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -143,7 +143,7 @@ Notes ----- See the `user guide -<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more. +<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`__ for more. """ _shared_docs[
For example: `Duplicate explicit target name: "user guide"` I was seeing these warnings in the dask docs build because dask pulls docstrings from pandas. I think using anonomous links like this should fix it based on https://github.com/sphinx-doc/sphinx/issues/3921
https://api.github.com/repos/pandas-dev/pandas/pulls/40523
2021-03-19T22:16:08Z
2021-03-20T01:24:05Z
2021-03-20T01:24:05Z
2021-03-25T20:52:44Z
DOC/TYP: index.take return val
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 44743aa0dffcf..50c1a7924bd4a 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -929,19 +929,20 @@ def astype(self, dtype, copy=True): Parameters ---------- - indices : list + indices : array-like Indices to be taken. axis : int, optional The axis over which to select values, always 0. allow_fill : bool, default True - fill_value : bool, default None + fill_value : scalar, default None If allow_fill=True and fill_value is not None, indices specified by - -1 is regarded as NA. If Index doesn't hold NA, raise ValueError. + -1 are regarded as NA. If Index doesn't hold NA, raise ValueError. Returns ------- - numpy.ndarray - Elements of given indices. + Index + An index formed of elements at the given indices. Will be the same + type as self, except for RangeIndex. See Also -------- @@ -950,7 +951,9 @@ def astype(self, dtype, copy=True): """ @Appender(_index_shared_docs["take"] % _index_doc_kwargs) - def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): + def take( + self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs + ): if kwargs: nv.validate_take((), kwargs) indices = ensure_platform_int(indices)
xref https://github.com/pandas-dev/pandas/pull/40513#discussion_r597541594
https://api.github.com/repos/pandas-dev/pandas/pulls/40521
2021-03-19T18:52:21Z
2021-05-23T11:41:58Z
2021-05-23T11:41:58Z
2021-05-23T11:42:13Z
STYLE no no-string-hints, combine some hooks
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index aa8c2b74d7a7e..e3dd6b018b8aa 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -50,10 +50,6 @@ repos: rev: 5.7.0 hooks: - id: isort -- repo: https://github.com/MarcoGorelli/no-string-hints - rev: v0.1.7 - hooks: - - id: no-string-hints - repo: https://github.com/asottile/pyupgrade rev: v2.10.0 hooks: @@ -111,12 +107,6 @@ repos: pandas/tests/io/excel/test_writers\.py |pandas/tests/io/pytables/common\.py |pandas/tests/io/pytables/test_store\.py$ - - id: no-pandas-api-types - name: Check code for instances of pd.api.types - entry: (pd|pandas)\.api\.types\. - language: pygrep - types: [python] - files: ^pandas/tests/ - id: non-standard-imports name: Check for non-standard imports language: pygrep @@ -128,6 +118,11 @@ repos: # Check for imports from collections.abc instead of `from collections import abc` |from\ collections\.abc\ import + + # Numpy + |from\ numpy\ import\ random + |from\ numpy\.random\ import + types: [python] - id: non-standard-imports-in-tests name: Check for non-standard imports in test suite language: pygrep @@ -143,26 +138,17 @@ repos: # Check for use of pandas.testing instead of tm |pd\.testing\. + + # Check for pd.api.types instead of from pandas.api.types import ... + |(pd|pandas)\.api\.types\. 
types: [python] files: ^pandas/tests/ - - id: non-standard-numpy-random-related-imports - name: Check for non-standard numpy.random-related imports excluding pandas/_testing.py - language: pygrep - exclude: pandas/_testing.py + - id: np-bool-and-np-object + name: Check for use of np.bool/np.object instead of np.bool_/np.object_ entry: | (?x) - # Check for imports from np.random.<method> instead of `from numpy import random` or `from numpy.random import <method>` - from\ numpy\ import\ random - |from\ numpy.random\ import - types: [python] - - id: np-bool - name: Check for use of np.bool instead of np.bool_ - entry: np\.bool[^_8] - language: pygrep - types_or: [python, cython, rst] - - id: np-object - name: Check for use of np.object instead of np.object_ - entry: np\.object[^_8] + np\.bool[^_8] + |np\.object[^_8] language: pygrep types_or: [python, cython, rst] - id: pip-to-conda
I got a PR into [pyupgrade](https://github.com/asottile/pyupgrade/blob/c90a455fceacdecc7484dab8519688353b6b2931/pyupgrade/_plugins/typing_pep563.py) to do what no-string-hints did (and in a much better way, their maintainer is amazing), so my little tool can be archived / removed from here :tada: Also combining some other hooks while I'm here
https://api.github.com/repos/pandas-dev/pandas/pulls/40516
2021-03-19T08:46:09Z
2021-03-19T14:47:39Z
2021-03-19T14:47:39Z
2021-03-19T15:01:36Z
CLN/PERF: remove catching of numpy deprecation warning in comparison_op
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 04737d91c0d4e..333bdbf57bab3 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -6,7 +6,6 @@ from functools import partial import operator from typing import Any -import warnings import numpy as np @@ -232,7 +231,7 @@ def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike: """ # NB: We assume extract_array has already been called on left and right lvalues = ensure_wrapped_if_datetimelike(left) - rvalues = right + rvalues = ensure_wrapped_if_datetimelike(right) rvalues = lib.item_from_zerodim(rvalues) if isinstance(rvalues, list): @@ -267,10 +266,7 @@ def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike: res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues) else: - with warnings.catch_warnings(): - # suppress warnings from numpy about element-wise comparison - warnings.simplefilter("ignore", DeprecationWarning) - res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True) + res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True) return res_values
xref https://github.com/pandas-dev/pandas/issues/39146 In some of the `FrameWithFrameWide` benchmarks, this takes up around 15% of the time. While I don't think this should be needed (but let's see if warnings appear in the CI builds).
https://api.github.com/repos/pandas-dev/pandas/pulls/40515
2021-03-19T08:35:33Z
2021-03-19T17:15:26Z
2021-03-19T17:15:26Z
2021-03-19T17:15:29Z
CLN/TYP: remove unused arguments in merge
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index c01bf3931b27a..5ceac80c340ba 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -10,6 +10,8 @@ import string from typing import ( TYPE_CHECKING, + Hashable, + List, Optional, Tuple, cast, @@ -124,14 +126,13 @@ def merge( merge.__doc__ = _merge_doc % "\nleft : DataFrame" -def _groupby_and_merge(by, on, left: DataFrame, right: DataFrame, merge_pieces): +def _groupby_and_merge(by, left: DataFrame, right: DataFrame, merge_pieces): """ groupby & merge; we are always performing a left-by type operation Parameters ---------- by: field to group - on: duplicates field left: DataFrame right: DataFrame merge_pieces: function for merging @@ -307,9 +308,7 @@ def _merger(x, y): check = set(left_by).difference(left.columns) if len(check) != 0: raise KeyError(f"{check} not found in left columns") - result, _ = _groupby_and_merge( - left_by, on, left, right, lambda x, y: _merger(x, y) - ) + result, _ = _groupby_and_merge(left_by, left, right, lambda x, y: _merger(x, y)) elif right_by is not None: if isinstance(right_by, str): right_by = [right_by] @@ -317,7 +316,7 @@ def _merger(x, y): if len(check) != 0: raise KeyError(f"{check} not found in right columns") result, _ = _groupby_and_merge( - right_by, on, right, left, lambda x, y: _merger(y, x) + right_by, right, left, lambda x, y: _merger(y, x) ) else: result = _merger(left, right) @@ -708,7 +707,7 @@ def __init__( if validate is not None: self._validate(validate) - def get_result(self): + def get_result(self) -> DataFrame: if self.indicator: self.left, self.right = self._indicator_pre_merge(self.left, self.right) @@ -774,7 +773,7 @@ def _indicator_pre_merge( return left, right - def _indicator_post_merge(self, result): + def _indicator_post_merge(self, result: DataFrame) -> DataFrame: result["_left_indicator"] = result["_left_indicator"].fillna(0) result["_right_indicator"] = result["_right_indicator"].fillna(0) @@ -790,7 
+789,7 @@ def _indicator_post_merge(self, result): result = result.drop(labels=["_left_indicator", "_right_indicator"], axis=1) return result - def _maybe_restore_index_levels(self, result): + def _maybe_restore_index_levels(self, result: DataFrame) -> None: """ Restore index levels specified as `on` parameters @@ -949,7 +948,6 @@ def _get_join_info(self): self.left.index, self.right.index, left_indexer, - right_indexer, how="right", ) else: @@ -961,7 +959,6 @@ def _get_join_info(self): self.right.index, self.left.index, right_indexer, - left_indexer, how="left", ) else: @@ -979,9 +976,8 @@ def _create_join_index( index: Index, other_index: Index, indexer, - other_indexer, how: str = "left", - ): + ) -> Index: """ Create a join index by rearranging one index to match another @@ -1126,7 +1122,7 @@ def _get_merge_keys(self): return left_keys, right_keys, join_names - def _maybe_coerce_merge_keys(self): + def _maybe_coerce_merge_keys(self) -> None: # we have valid merges but we may have to further # coerce these if they are originally incompatible types # @@ -1285,7 +1281,7 @@ def _create_cross_configuration( cross_col, ) - def _validate_specification(self): + def _validate_specification(self) -> None: if self.how == "cross": if ( self.left_index @@ -1372,7 +1368,7 @@ def _validate_specification(self): if self.how != "cross" and len(self.right_on) != len(self.left_on): raise ValueError("len(right_on) must equal len(left_on)") - def _validate(self, validate: str): + def _validate(self, validate: str) -> None: # Check uniqueness of each if self.left_index: @@ -1479,10 +1475,10 @@ def restore_dropped_levels_multijoin( left: MultiIndex, right: MultiIndex, dropped_level_names, - join_index, - lindexer, - rindexer, -): + join_index: Index, + lindexer: np.ndarray, + rindexer: np.ndarray, +) -> Tuple[List[Index], np.ndarray, List[Hashable]]: """ *this is an internal non-public method* @@ -1500,7 +1496,7 @@ def restore_dropped_levels_multijoin( right index dropped_level_names 
: str array list of non-common level names - join_index : MultiIndex + join_index : Index the index of the join between the common levels of left and right lindexer : intp array @@ -1514,8 +1510,8 @@ def restore_dropped_levels_multijoin( levels of combined multiindexes labels : intp array labels of combined multiindexes - names : str array - names of combined multiindexes + names : List[Hashable] + names of combined multiindex levels """ @@ -1604,7 +1600,7 @@ def __init__( sort=True, # factorize sorts ) - def get_result(self): + def get_result(self) -> DataFrame: join_index, left_indexer, right_indexer = self._get_join_info() llabels, rlabels = _items_overlap_with_suffix( @@ -1653,7 +1649,7 @@ def _asof_by_function(direction: str): } -def _get_cython_type_upcast(dtype): +def _get_cython_type_upcast(dtype) -> str: """ Upcast a dtype to 'int64_t', 'double', or 'object' """ if is_integer_dtype(dtype): return "int64_t"
https://api.github.com/repos/pandas-dev/pandas/pulls/40513
2021-03-19T05:11:09Z
2021-03-21T21:12:06Z
2021-03-21T21:12:06Z
2021-03-21T21:48:22Z
CLN/PERF: remove unused out kwd in take_nd
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index cf153ddd2cbbd..3b7a6037a9715 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -5240,6 +5240,7 @@ Write to a feather file. Read from a feather file. .. ipython:: python + :okwarning: result = pd.read_feather("example.feather") result @@ -5323,6 +5324,7 @@ Write to a parquet file. Read from a parquet file. .. ipython:: python + :okwarning: result = pd.read_parquet("example_fp.parquet", engine="fastparquet") result = pd.read_parquet("example_pa.parquet", engine="pyarrow") diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py index ba1b2a0f0e76e..2a6080e38a732 100644 --- a/pandas/core/array_algos/take.py +++ b/pandas/core/array_algos/take.py @@ -3,7 +3,6 @@ import functools from typing import ( TYPE_CHECKING, - Optional, overload, ) @@ -33,7 +32,6 @@ def take_nd( arr: np.ndarray, indexer, axis: int = ..., - out: Optional[np.ndarray] = ..., fill_value=..., allow_fill: bool = ..., ) -> np.ndarray: @@ -45,7 +43,6 @@ def take_nd( arr: ExtensionArray, indexer, axis: int = ..., - out: Optional[np.ndarray] = ..., fill_value=..., allow_fill: bool = ..., ) -> ArrayLike: @@ -56,7 +53,6 @@ def take_nd( arr: ArrayLike, indexer, axis: int = 0, - out: Optional[np.ndarray] = None, fill_value=lib.no_default, allow_fill: bool = True, ) -> ArrayLike: @@ -79,10 +75,6 @@ def take_nd( indices are filed with fill_value axis : int, default 0 Axis to take from - out : ndarray or None, default None - Optional output array, must be appropriate type to hold input and - fill_value together, if indexer has any -1 value entries; call - maybe_promote to determine this type for any fill_value fill_value : any, default np.nan Fill value to replace -1 values with allow_fill : boolean, default True @@ -104,14 +96,13 @@ def take_nd( return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) arr = np.asarray(arr) - return _take_nd_ndarray(arr, indexer, axis, 
out, fill_value, allow_fill) + return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill) def _take_nd_ndarray( arr: np.ndarray, indexer, axis: int, - out: Optional[np.ndarray], fill_value, allow_fill: bool, ) -> np.ndarray: @@ -121,8 +112,12 @@ def _take_nd_ndarray( dtype, fill_value = arr.dtype, arr.dtype.type() else: indexer = ensure_platform_int(indexer) - indexer, dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( - arr, indexer, out, fill_value, allow_fill + + if not allow_fill: + return arr.take(indexer, axis=axis) + + dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( + arr, indexer, fill_value ) flip_order = False @@ -132,23 +127,20 @@ def _take_nd_ndarray( if flip_order: arr = arr.T axis = arr.ndim - axis - 1 - if out is not None: - out = out.T # at this point, it's guaranteed that dtype can hold both the arr values # and the fill_value - if out is None: - out_shape_ = list(arr.shape) - out_shape_[axis] = len(indexer) - out_shape = tuple(out_shape_) - if arr.flags.f_contiguous and axis == arr.ndim - 1: - # minor tweak that can make an order-of-magnitude difference - # for dataframes initialized directly from 2-d ndarrays - # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its - # f-contiguous transpose) - out = np.empty(out_shape, dtype=dtype, order="F") - else: - out = np.empty(out_shape, dtype=dtype) + out_shape_ = list(arr.shape) + out_shape_[axis] = len(indexer) + out_shape = tuple(out_shape_) + if arr.flags.f_contiguous and axis == arr.ndim - 1: + # minor tweak that can make an order-of-magnitude difference + # for dataframes initialized directly from 2-d ndarrays + # (s.t. 
df.values is c-contiguous and df._mgr.blocks[0] is its + # f-contiguous transpose) + out = np.empty(out_shape, dtype=dtype, order="F") + else: + out = np.empty(out_shape, dtype=dtype) func = _get_take_nd_function( arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info @@ -192,8 +184,8 @@ def take_1d( if not allow_fill: return arr.take(indexer) - indexer, dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( - arr, indexer, None, fill_value, allow_fill + dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( + arr, indexer, fill_value ) # at this point, it's guaranteed that dtype can hold both the arr values @@ -517,32 +509,22 @@ def _take_2d_multi_object( def _take_preprocess_indexer_and_fill_value( arr: np.ndarray, indexer: np.ndarray, - out: Optional[np.ndarray], fill_value, - allow_fill: bool, ): mask_info = None - if not allow_fill: - dtype, fill_value = arr.dtype, arr.dtype.type() - mask_info = None, False - else: - # check for promotion based on types only (do this first because - # it's faster than computing a mask) - dtype, fill_value = maybe_promote(arr.dtype, fill_value) - if dtype != arr.dtype and (out is None or out.dtype != dtype): - # check if promotion is actually required based on indexer - mask = indexer == -1 - needs_masking = mask.any() - mask_info = mask, needs_masking - if needs_masking: - if out is not None and out.dtype != dtype: - raise TypeError("Incompatible type for fill_value") - else: - # if not, then depromote, set fill_value to dummy - # (it won't be used but we don't want the cython code - # to crash when trying to cast it to dtype) - dtype, fill_value = arr.dtype, arr.dtype.type() - - indexer = ensure_platform_int(indexer) - return indexer, dtype, fill_value, mask_info + # check for promotion based on types only (do this first because + # it's faster than computing a mask) + dtype, fill_value = maybe_promote(arr.dtype, fill_value) + if dtype != arr.dtype: + # check if promotion is actually 
required based on indexer + mask = indexer == -1 + needs_masking = mask.any() + mask_info = mask, needs_masking + if not needs_masking: + # if not, then depromote, set fill_value to dummy + # (it won't be used but we don't want the cython code + # to crash when trying to cast it to dtype) + dtype, fill_value = arr.dtype, arr.dtype.type() + + return dtype, fill_value, mask_info diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 979c7aa990184..1f79823746a87 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1155,7 +1155,7 @@ def _transform_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs) group_keys = self.grouper._get_group_keys() labels, _, n_groups = self.grouper.group_info sorted_index = get_group_index_sorter(labels, n_groups) - sorted_labels = algorithms.take_nd(labels, sorted_index, allow_fill=False) + sorted_labels = labels.take(sorted_index) sorted_data = data.take(sorted_index, axis=self.axis).to_numpy() starts, ends = lib.generate_slices(sorted_labels, n_groups) @@ -1190,7 +1190,7 @@ def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs) group_keys = self.grouper._get_group_keys() labels, _, n_groups = self.grouper.group_info sorted_index = get_group_index_sorter(labels, n_groups) - sorted_labels = algorithms.take_nd(labels, sorted_index, allow_fill=False) + sorted_labels = labels.take(sorted_index) sorted_data = data.take(sorted_index, axis=self.axis).to_numpy() starts, ends = lib.generate_slices(sorted_labels, n_groups) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index a222a8cc464fb..c5d36d1588a5f 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -71,7 +71,6 @@ maybe_fill, ) -import pandas.core.algorithms as algorithms from pandas.core.base import SelectionMixin import pandas.core.common as com from pandas.core.frame import DataFrame @@ -756,7 +755,7 @@ def _aggregate_series_fast(self, obj: Series, 
func: F): # avoids object / Series creation overhead indexer = get_group_index_sorter(group_index, ngroups) obj = obj.take(indexer) - group_index = algorithms.take_nd(group_index, indexer, allow_fill=False) + group_index = group_index.take(indexer) grouper = libreduction.SeriesGrouper(obj, func, group_index, ngroups) result, counts = grouper.get_result() return result, counts @@ -989,9 +988,9 @@ def __init__(self, data: FrameOrSeries, labels, ngroups: int, axis: int = 0): assert isinstance(axis, int), axis @cache_readonly - def slabels(self): + def slabels(self) -> np.ndarray: # Sorted labels - return algorithms.take_nd(self.labels, self._sort_idx, allow_fill=False) + return self.labels.take(self._sort_idx) @cache_readonly def _sort_idx(self) -> np.ndarray: # np.ndarray[np.intp] diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index e8b83af16254a..c50dd06f6a96f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2979,7 +2979,7 @@ def _union(self, other: Index, sort): missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) if len(missing) > 0: - other_diff = algos.take_nd(rvals, missing, allow_fill=False) + other_diff = rvals.take(missing) result = concat_compat((lvals, other_diff)) else: # error: Incompatible types in assignment (expression has type @@ -4253,9 +4253,7 @@ def _get_leaf_sorter(labels: List[np.ndarray]) -> np.ndarray: ) if right_lev_indexer is not None: - right_indexer = algos.take_nd( - right_lev_indexer, join_index.codes[level], allow_fill=False - ) + right_indexer = right_lev_indexer.take(join_index.codes[level]) else: right_indexer = join_index.codes[level] diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 244fcb9f49ec6..76878d0a0b82a 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3532,14 +3532,10 @@ def equals(self, other: object) -> bool: if not np.array_equal(self_mask, other_mask): return False self_codes = 
self_codes[~self_mask] - self_values = algos.take_nd( - np.asarray(self.levels[i]._values), self_codes, allow_fill=False - ) + self_values = self.levels[i]._values.take(self_codes) other_codes = other_codes[~other_mask] - other_values = algos.take_nd( - np.asarray(other.levels[i]._values), other_codes, allow_fill=False - ) + other_values = other.levels[i]._values.take(other_codes) # since we use NaT both datetime64 and timedelta64 we can have a # situation where a level is typed say timedelta64 in self (IOW it diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index da78fc5dfba76..9286976713f9f 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -306,7 +306,7 @@ def items(self) -> Index: def get_dtypes(self): dtypes = np.array([blk.dtype for blk in self.blocks]) - return algos.take_nd(dtypes, self.blknos, allow_fill=False) + return dtypes.take(self.blknos) @property def arrays(self) -> List[ArrayLike]: @@ -1179,7 +1179,7 @@ def value_getitem(placement): is_deleted = np.zeros(self.nblocks, dtype=np.bool_) is_deleted[removed_blknos] = True - new_blknos = np.empty(self.nblocks, dtype=np.int64) + new_blknos = np.empty(self.nblocks, dtype=np.intp) new_blknos.fill(-1) new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos)) self._blknos = new_blknos[self._blknos] diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 3aa4d26f7dc8f..88fcc13502439 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -33,7 +33,6 @@ from pandas.core.dtypes.generic import ABCMultiIndex from pandas.core.dtypes.missing import isna -import pandas.core.algorithms as algorithms from pandas.core.construction import extract_array if TYPE_CHECKING: @@ -643,10 +642,10 @@ def _reorder_by_uniques(uniques, labels): mask = labels < 0 # move labels to right locations (ie, unsort ascending labels) - labels = algorithms.take_nd(reverse_indexer, labels, allow_fill=False) + labels = 
reverse_indexer.take(labels) np.putmask(labels, mask, -1) # sort observed ids - uniques = algorithms.take_nd(uniques, sorter, allow_fill=False) + uniques = uniques.take(sorter) return uniques, labels diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py index 1c7c02ea41f2c..4a2e3f971670e 100644 --- a/pandas/tests/test_take.py +++ b/pandas/tests/test_take.py @@ -72,34 +72,6 @@ class TestTake: # Standard incompatible fill error. fill_error = re.compile("Incompatible type for fill_value") - def test_1d_with_out(self, dtype_can_hold_na, writeable): - dtype, can_hold_na = dtype_can_hold_na - - data = np.random.randint(0, 2, 4).astype(dtype) - data.flags.writeable = writeable - - indexer = [2, 1, 0, 1] - out = np.empty(4, dtype=dtype) - algos.take_nd(data, indexer, out=out) - - expected = data.take(indexer) - tm.assert_almost_equal(out, expected) - - indexer = [2, 1, 0, -1] - out = np.empty(4, dtype=dtype) - - if can_hold_na: - algos.take_nd(data, indexer, out=out) - expected = data.take(indexer) - expected[3] = np.nan - tm.assert_almost_equal(out, expected) - else: - with pytest.raises(TypeError, match=self.fill_error): - algos.take_nd(data, indexer, out=out) - - # No Exception otherwise. 
- data.take(indexer, out=out) - def test_1d_fill_nonna(self, dtype_fill_out_dtype): dtype, fill_value, out_dtype = dtype_fill_out_dtype data = np.random.randint(0, 2, 4).astype(dtype) @@ -116,46 +88,6 @@ def test_1d_fill_nonna(self, dtype_fill_out_dtype): assert (result[[0, 1, 2, 3]] == data[indexer]).all() assert result.dtype == dtype - def test_2d_with_out(self, dtype_can_hold_na, writeable): - dtype, can_hold_na = dtype_can_hold_na - - data = np.random.randint(0, 2, (5, 3)).astype(dtype) - data.flags.writeable = writeable - - indexer = [2, 1, 0, 1] - out0 = np.empty((4, 3), dtype=dtype) - out1 = np.empty((5, 4), dtype=dtype) - algos.take_nd(data, indexer, out=out0, axis=0) - algos.take_nd(data, indexer, out=out1, axis=1) - - expected0 = data.take(indexer, axis=0) - expected1 = data.take(indexer, axis=1) - tm.assert_almost_equal(out0, expected0) - tm.assert_almost_equal(out1, expected1) - - indexer = [2, 1, 0, -1] - out0 = np.empty((4, 3), dtype=dtype) - out1 = np.empty((5, 4), dtype=dtype) - - if can_hold_na: - algos.take_nd(data, indexer, out=out0, axis=0) - algos.take_nd(data, indexer, out=out1, axis=1) - - expected0 = data.take(indexer, axis=0) - expected1 = data.take(indexer, axis=1) - expected0[3, :] = np.nan - expected1[:, 3] = np.nan - - tm.assert_almost_equal(out0, expected0) - tm.assert_almost_equal(out1, expected1) - else: - for i, out in enumerate([out0, out1]): - with pytest.raises(TypeError, match=self.fill_error): - algos.take_nd(data, indexer, out=out, axis=i) - - # No Exception otherwise. 
- data.take(indexer, out=out, axis=i) - def test_2d_fill_nonna(self, dtype_fill_out_dtype): dtype, fill_value, out_dtype = dtype_fill_out_dtype data = np.random.randint(0, 2, (5, 3)).astype(dtype) @@ -180,57 +112,6 @@ def test_2d_fill_nonna(self, dtype_fill_out_dtype): assert (result[:, [0, 1, 2, 3]] == data[:, indexer]).all() assert result.dtype == dtype - def test_3d_with_out(self, dtype_can_hold_na): - dtype, can_hold_na = dtype_can_hold_na - - data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype) - indexer = [2, 1, 0, 1] - - out0 = np.empty((4, 4, 3), dtype=dtype) - out1 = np.empty((5, 4, 3), dtype=dtype) - out2 = np.empty((5, 4, 4), dtype=dtype) - - algos.take_nd(data, indexer, out=out0, axis=0) - algos.take_nd(data, indexer, out=out1, axis=1) - algos.take_nd(data, indexer, out=out2, axis=2) - - expected0 = data.take(indexer, axis=0) - expected1 = data.take(indexer, axis=1) - expected2 = data.take(indexer, axis=2) - - tm.assert_almost_equal(out0, expected0) - tm.assert_almost_equal(out1, expected1) - tm.assert_almost_equal(out2, expected2) - - indexer = [2, 1, 0, -1] - out0 = np.empty((4, 4, 3), dtype=dtype) - out1 = np.empty((5, 4, 3), dtype=dtype) - out2 = np.empty((5, 4, 4), dtype=dtype) - - if can_hold_na: - algos.take_nd(data, indexer, out=out0, axis=0) - algos.take_nd(data, indexer, out=out1, axis=1) - algos.take_nd(data, indexer, out=out2, axis=2) - - expected0 = data.take(indexer, axis=0) - expected1 = data.take(indexer, axis=1) - expected2 = data.take(indexer, axis=2) - - expected0[3, :, :] = np.nan - expected1[:, 3, :] = np.nan - expected2[:, :, 3] = np.nan - - tm.assert_almost_equal(out0, expected0) - tm.assert_almost_equal(out1, expected1) - tm.assert_almost_equal(out2, expected2) - else: - for i, out in enumerate([out0, out1, out2]): - with pytest.raises(TypeError, match=self.fill_error): - algos.take_nd(data, indexer, out=out, axis=i) - - # No Exception otherwise. 
- data.take(indexer, out=out, axis=i) - def test_3d_fill_nonna(self, dtype_fill_out_dtype): dtype, fill_value, out_dtype = dtype_fill_out_dtype @@ -321,24 +202,13 @@ def test_2d_float32(self): # axis=0 result = algos.take_nd(arr, indexer, axis=0) - result2 = np.empty_like(result) - algos.take_nd(arr, indexer, axis=0, out=result2) - tm.assert_almost_equal(result, result2) expected = arr.take(indexer, axis=0) expected[[2, 4], :] = np.nan tm.assert_almost_equal(result, expected) - # this now accepts a float32! # test with float64 out buffer - out = np.empty((len(indexer), arr.shape[1]), dtype="float32") - algos.take_nd(arr, indexer, out=out) # it works! - # axis=1 result = algos.take_nd(arr, indexer, axis=1) - result2 = np.empty_like(result) - algos.take_nd(arr, indexer, axis=1, out=result2) - tm.assert_almost_equal(result, result2) - expected = arr.take(indexer, axis=1) expected[:, [2, 4]] = np.nan tm.assert_almost_equal(result, expected) @@ -351,42 +221,22 @@ def test_2d_datetime64(self): # axis=0 result = algos.take_nd(arr, indexer, axis=0) - result2 = np.empty_like(result) - algos.take_nd(arr, indexer, axis=0, out=result2) - tm.assert_almost_equal(result, result2) - expected = arr.take(indexer, axis=0) expected.view(np.int64)[[2, 4], :] = iNaT tm.assert_almost_equal(result, expected) result = algos.take_nd(arr, indexer, axis=0, fill_value=datetime(2007, 1, 1)) - result2 = np.empty_like(result) - algos.take_nd( - arr, indexer, out=result2, axis=0, fill_value=datetime(2007, 1, 1) - ) - tm.assert_almost_equal(result, result2) - expected = arr.take(indexer, axis=0) expected[[2, 4], :] = datetime(2007, 1, 1) tm.assert_almost_equal(result, expected) # axis=1 result = algos.take_nd(arr, indexer, axis=1) - result2 = np.empty_like(result) - algos.take_nd(arr, indexer, axis=1, out=result2) - tm.assert_almost_equal(result, result2) - expected = arr.take(indexer, axis=1) expected.view(np.int64)[:, [2, 4]] = iNaT tm.assert_almost_equal(result, expected) result = 
algos.take_nd(arr, indexer, axis=1, fill_value=datetime(2007, 1, 1)) - result2 = np.empty_like(result) - algos.take_nd( - arr, indexer, out=result2, axis=1, fill_value=datetime(2007, 1, 1) - ) - tm.assert_almost_equal(result, result2) - expected = arr.take(indexer, axis=1) expected[:, [2, 4]] = datetime(2007, 1, 1) tm.assert_almost_equal(result, expected)
And in a few places where we know allow_fill=False, just call the numpy version directly.
https://api.github.com/repos/pandas-dev/pandas/pulls/40510
2021-03-19T01:39:03Z
2021-03-22T14:32:36Z
2021-03-22T14:32:36Z
2021-04-06T19:19:10Z
TYP: parsers.pyi
diff --git a/pandas/_libs/parsers.pyi b/pandas/_libs/parsers.pyi new file mode 100644 index 0000000000000..1051c319b769b --- /dev/null +++ b/pandas/_libs/parsers.pyi @@ -0,0 +1,77 @@ +from typing import ( + Hashable, + Literal, +) + +import numpy as np + +from pandas._typing import ( + ArrayLike, + Dtype, +) + +STR_NA_VALUES: set[str] + + +def sanitize_objects( + values: np.ndarray, # ndarray[object] + na_values: set, + convert_empty: bool = ..., +) -> int: ... + + +class TextReader: + unnamed_cols: set[str] + table_width: int # int64_t + leading_cols: int # int64_t + header: list[list[int]] # non-negative integers + + def __init__( + self, + source, + delimiter: bytes | str = ..., # single-character only + header=..., + header_start=..., + header_end=..., + index_col=..., + names=..., + tokenize_chunksize: int = ..., # int64_t + delim_whitespace: bool = ..., + converters=..., + skipinitialspace: bool = ..., + escapechar: bytes | str | None = ..., # single-character only + doublequote: bool = ..., + quotechar: str | bytes | None = ..., # at most 1 character + quoting: int = ..., + lineterminator: bytes | str | None = ..., # at most 1 character + comment=..., + decimal: bytes | str = ..., # single-character only + thousands: bytes | str | None = ..., # single-character only + dtype: Dtype | dict[Hashable, Dtype] = ..., + usecols=..., + error_bad_lines: bool = ..., + warn_bad_lines: bool = ..., + na_filter: bool = ..., + na_values=..., + na_fvalues=..., + keep_default_na: bool = ..., + true_values=..., + false_values=..., + allow_leading_cols: bool = ..., + low_memory: bool = ..., + skiprows=..., + skipfooter: int = ..., # int64_t + verbose: bool = ..., + mangle_dupe_cols: bool = ..., + float_precision: Literal["round_trip", "legacy", "high"] | None = ..., + skip_blank_lines: bool = ..., + encoding_errors: bytes | str = ... + ): ... + + def set_error_bad_lines(self, status: int) -> None: ... + def set_noconvert(self, i: int) -> None: ... 
+ def remove_noconvert(self, i: int) -> None: ... + + def close(self) -> None: ... + + def read(self, rows: int | None = ...) -> dict[int, ArrayLike]: ... diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index a11bf370412d2..153ac4b5f0893 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -319,19 +319,21 @@ cdef class TextReader: int64_t leading_cols, table_width, skipfooter, buffer_lines bint allow_leading_cols, mangle_dupe_cols, low_memory bint delim_whitespace - object delimiter, converters + object delimiter # bytes or str + object converters object na_values - object header, orig_header, names, header_start, header_end + object orig_header, names, header_start, header_end + list header # list[list[non-negative integers]] object index_col object skiprows object dtype object usecols list dtype_cast_order # list[np.dtype] - set unnamed_cols - set noconvert + set unnamed_cols # set[str] + set noconvert # set[int] def __cinit__(self, source, - delimiter=b',', + delimiter=b',', # bytes | str header=0, header_start=0, header_end=0, @@ -341,14 +343,14 @@ cdef class TextReader: bint delim_whitespace=False, converters=None, bint skipinitialspace=False, - escapechar=None, + escapechar=None, # bytes | str bint doublequote=True, quotechar=b'"', - quoting=0, - lineterminator=None, + quoting=0, # int + lineterminator=None, # bytes | str comment=None, - decimal=b'.', - thousands=None, + decimal=b'.', # bytes | str + thousands=None, # bytes | str dtype=None, usecols=None, bint error_bad_lines=True, @@ -362,7 +364,7 @@ cdef class TextReader: bint allow_leading_cols=True, bint low_memory=False, skiprows=None, - skipfooter=0, + skipfooter=0, # int64_t bint verbose=False, bint mangle_dupe_cols=True, float_precision=None, @@ -518,7 +520,7 @@ cdef class TextReader: self.parser.header_end = -1 self.parser.header = -1 self.parser_start = 0 - self.header = [] + prelim_header = [] else: if isinstance(header, list): if len(header) > 1: @@ -534,16 
+536,19 @@ cdef class TextReader: self.parser_start = header[-1] + 1 self.parser.header_start = header[0] self.parser.header = header[0] - self.header = header + prelim_header = header else: self.parser.header_start = header self.parser.header_end = header self.parser_start = header + 1 self.parser.header = header - self.header = [ header ] + prelim_header = [ header ] self.names = names - self.header, self.table_width, self.unnamed_cols = self._get_header() + header, table_width, unnamed_cols = self._get_header(prelim_header) + self.header = header + self.table_width = table_width + self.unnamed_cols = unnamed_cols if not self.table_width: raise EmptyDataError("No columns to parse from file") @@ -561,7 +566,7 @@ cdef class TextReader: self.close() parser_del(self.parser) - def close(self): + def close(self) -> None: # also preemptively free all allocated memory parser_free(self.parser) if self.true_set: @@ -571,10 +576,10 @@ cdef class TextReader: kh_destroy_str_starts(self.false_set) self.false_set = NULL - def set_error_bad_lines(self, int status): + def set_error_bad_lines(self, int status) -> None: self.parser.error_bad_lines = status - def _set_quoting(self, quote_char, quoting): + def _set_quoting(self, quote_char: str | bytes | None, quoting: int): if not isinstance(quoting, int): raise TypeError('"quoting" must be an integer') @@ -618,13 +623,13 @@ cdef class TextReader: self.parser.cb_io = &buffer_rd_bytes self.parser.cb_cleanup = &del_rd_source - cdef _get_header(self): + cdef _get_header(self, list prelim_header): # header is now a list of lists, so field_count should use header[0] cdef: Py_ssize_t i, start, field_count, passed_count, unnamed_count, level char *word - object name, old_name + str name, old_name uint64_t hr, data_line = 0 list header = [] set unnamed_cols = set() @@ -632,7 +637,7 @@ cdef class TextReader: if self.parser.header_start >= 0: # Header is in the file - for level, hr in enumerate(self.header): + for level, hr in 
enumerate(prelim_header): this_header = [] @@ -697,7 +702,7 @@ cdef class TextReader: # If we have grabbed an extra line, but it's not in our # format, save in the buffer, and create an blank extra # line for the rest of the parsing code. - if hr == self.header[-1]: + if hr == prelim_header[-1]: lc = len(this_header) ic = (len(self.index_col) if self.index_col is not None else 0) @@ -764,7 +769,7 @@ cdef class TextReader: return header, field_count, unnamed_cols - def read(self, rows=None): + def read(self, rows: int | None = None) -> dict[int, "ArrayLike"]: """ rows=None --> read all rows """ @@ -777,6 +782,7 @@ cdef class TextReader: return columns + # -> dict[int, "ArrayLike"] cdef _read_low_memory(self, rows): cdef: size_t rows_read = 0 @@ -830,6 +836,7 @@ cdef class TextReader: if status < 0: raise_parser_error('Error tokenizing data', self.parser) + # -> dict[int, "ArrayLike"] cdef _read_rows(self, rows, bint trim): cdef: int64_t buffered_lines @@ -889,13 +896,16 @@ cdef class TextReader: elapsed = time.time() - self.clocks.pop(-1) print(f'{what} took: {elapsed * 1000:.2f} ms') - def set_noconvert(self, i): + def set_noconvert(self, i: int) -> None: self.noconvert.add(i) - def remove_noconvert(self, i): + def remove_noconvert(self, i: int) -> None: self.noconvert.remove(i) - def _convert_column_data(self, rows=None, upcast_na=False, footer=0): + # TODO: upcast_na only ever False, footer never passed + def _convert_column_data( + self, rows: int | None = None, upcast_na: bool = False, footer: int = 0 + ) -> dict[int, "ArrayLike"]: cdef: int64_t i int nused @@ -904,6 +914,7 @@ cdef class TextReader: object name, na_flist, col_dtype = None bint na_filter = 0 int64_t num_cols + dict result start = self.parser_start @@ -1020,6 +1031,7 @@ cdef class TextReader: return results + # -> tuple["ArrayLike", int]: cdef inline _convert_tokens(self, Py_ssize_t i, int start, int end, object name, bint na_filter, kh_str_starts_t *na_hashset, @@ -1181,13 +1193,14 @@ cdef class 
TextReader: else: raise TypeError(f"the dtype {dtype} is not supported for parsing") + # -> tuple[ndarray[object], int] cdef _string_convert(self, Py_ssize_t i, int64_t start, int64_t end, bint na_filter, kh_str_starts_t *na_hashset): return _string_box_utf8(self.parser, i, start, end, na_filter, na_hashset, self.encoding_errors) - def _get_converter(self, i, name): + def _get_converter(self, i: int, name): if self.converters is None: return None @@ -1197,7 +1210,7 @@ cdef class TextReader: # Converter for position, if any return self.converters.get(i) - cdef _get_na_list(self, i, name): + cdef _get_na_list(self, Py_ssize_t i, name): if self.na_values is None: return None, set() @@ -1319,6 +1332,7 @@ def _maybe_upcast(arr): # Type conversions / inference support code +# -> tuple[ndarray[object], int] cdef _string_box_utf8(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, kh_str_starts_t *na_hashset, @@ -1432,6 +1446,7 @@ cdef _categorical_convert(parser_t *parser, int64_t col, return np.asarray(codes), result, na_count +# -> ndarray[f'|S{width}'] cdef _to_fw_string(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, int64_t width): cdef: @@ -1473,6 +1488,7 @@ cdef: char* cneginfty = b'-Infinity' +# -> tuple[ndarray[float64_t], int] | tuple[None, None] cdef _try_double(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, kh_str_starts_t *na_hashset, object na_flist): @@ -1482,7 +1498,7 @@ cdef _try_double(parser_t *parser, int64_t col, float64_t *data float64_t NA = na_values[np.float64] kh_float64_t *na_fset - ndarray result + ndarray[float64_t] result bint use_na_flist = len(na_flist) > 0 lines = line_end - line_start @@ -1712,6 +1728,7 @@ cdef inline int _try_int64_nogil(parser_t *parser, int64_t col, return 0 +# -> tuple[ndarray[bool], int] cdef _try_bool_flex(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, const kh_str_starts_t *na_hashset, 
@@ -1890,7 +1907,9 @@ cdef raise_parser_error(object base, parser_t *parser): raise ParserError(message) -def _concatenate_chunks(list chunks): +# chunks: list[dict[int, "ArrayLike"]] +# -> dict[int, "ArrayLike"] +def _concatenate_chunks(list chunks) -> dict: cdef: list names = list(chunks[0].keys()) object name @@ -1964,6 +1983,7 @@ for k in list(na_values): na_values[np.dtype(k)] = na_values[k] +# -> ArrayLike cdef _apply_converter(object f, parser_t *parser, int64_t col, int64_t line_start, int64_t line_end): cdef: @@ -1986,14 +2006,15 @@ cdef _apply_converter(object f, parser_t *parser, int64_t col, return lib.maybe_convert_objects(result) -def _maybe_encode(values): +cdef list _maybe_encode(list values): if values is None: return [] return [x.encode('utf-8') if isinstance(x, str) else x for x in values] +# TODO: only ever called with convert_empty=False def sanitize_objects(ndarray[object] values, set na_values, - bint convert_empty=True): + bint convert_empty=True) -> int: """ Convert specified values, including the given set na_values and empty strings if convert_empty is True, to np.nan. @@ -2003,6 +2024,10 @@ def sanitize_objects(ndarray[object] values, set na_values, values : ndarray[object] na_values : set convert_empty : bool, default True + + Returns + ------- + na_count : int """ cdef: Py_ssize_t i, n diff --git a/pandas/_libs/window/indexers.pyi b/pandas/_libs/window/indexers.pyi new file mode 100644 index 0000000000000..a32fe2f0f8b03 --- /dev/null +++ b/pandas/_libs/window/indexers.pyi @@ -0,0 +1,13 @@ +import numpy as np + +def calculate_variable_window_bounds( + num_values: int, # int64_t + window_size: int, # int64_t + min_periods, + center: bool, + closed: str | None, + index: np.ndarray, # const int64_t[:] +) -> tuple[ + np.ndarray, # np.ndarray[np.int64] + np.ndarray, # np.ndarray[np.int64] +]: ... 
diff --git a/pandas/_libs/window/indexers.pyx b/pandas/_libs/window/indexers.pyx index 5e2b137db64a6..d188770576e05 100644 --- a/pandas/_libs/window/indexers.pyx +++ b/pandas/_libs/window/indexers.pyx @@ -15,7 +15,7 @@ def calculate_variable_window_bounds( int64_t window_size, object min_periods, # unused but here to match get_window_bounds signature bint center, - object closed, + str closed, const int64_t[:] index ): """ diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py index 2cf68fc8995ee..1ad80b2e4c908 100644 --- a/pandas/core/window/indexers.py +++ b/pandas/core/window/indexers.py @@ -113,8 +113,17 @@ def get_window_bounds( closed: Optional[str] = None, ) -> Tuple[np.ndarray, np.ndarray]: + # error: Argument 4 to "calculate_variable_window_bounds" has incompatible + # type "Optional[bool]"; expected "bool" + # error: Argument 6 to "calculate_variable_window_bounds" has incompatible + # type "Optional[ndarray]"; expected "ndarray" return calculate_variable_window_bounds( - num_values, self.window_size, min_periods, center, closed, self.index_array + num_values, + self.window_size, + min_periods, + center, # type: ignore[arg-type] + closed, + self.index_array, # type: ignore[arg-type] ) diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 8ba38a44ecd2e..915a17fc702c3 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -12,6 +12,7 @@ Hashable, Iterator, Sequence, + cast, ) import numpy as np @@ -42,6 +43,8 @@ class CSVFormatter: + cols: np.ndarray + def __init__( self, formatter: DataFrameFormatter, @@ -136,9 +139,7 @@ def _initialize_quotechar(self, quotechar: str | None) -> str | None: def has_mi_columns(self) -> bool: return bool(isinstance(self.obj.columns, ABCMultiIndex)) - def _initialize_columns( - self, cols: Sequence[Hashable] | None - ) -> Sequence[Hashable]: + def _initialize_columns(self, cols: Sequence[Hashable] | None) -> np.ndarray: # validate mi options if self.has_mi_columns: if 
cols is not None: @@ -155,10 +156,7 @@ def _initialize_columns( # update columns to include possible multiplicity of dupes # and make sure cols is just a list of labels new_cols = self.obj.columns - if isinstance(new_cols, ABCIndex): - return new_cols._format_native_types(**self._number_format) - else: - return list(new_cols) + return new_cols._format_native_types(**self._number_format) def _initialize_chunksize(self, chunksize: int | None) -> int: if chunksize is None: @@ -214,7 +212,9 @@ def write_cols(self) -> Sequence[Hashable]: else: return self.header else: - return self.cols + # self.cols is an ndarray derived from Index._format_native_types, + # so its entries are strings, i.e. hashable + return cast(Sequence[Hashable], self.cols) @property def encoded_labels(self) -> list[Hashable]: @@ -308,12 +308,10 @@ def _save_chunk(self, start_i: int, end_i: int) -> None: data = [res.iget_values(i) for i in range(len(res.items))] ix = self.data_index[slicer]._format_native_types(**self._number_format) - # error: Argument 4 to "write_csv_rows" has incompatible type - # "Sequence[Hashable]"; expected "ndarray" libwriters.write_csv_rows( data, ix, self.nlevels, - self.cols, # type: ignore[arg-type] + self.cols, self.writer, ) diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py index 135e093cdc1e0..8305ff64c42c6 100644 --- a/pandas/io/parsers/c_parser_wrapper.py +++ b/pandas/io/parsers/c_parser_wrapper.py @@ -213,14 +213,14 @@ def read(self, nrows=None): names = self._maybe_dedup_names(names) # rename dict keys - data = sorted(data.items()) - data = {k: v for k, (i, v) in zip(names, data)} + data_tups = sorted(data.items()) + data = {k: v for k, (i, v) in zip(names, data_tups)} names, data = self._do_date_conversions(names, data) else: # rename dict keys - data = sorted(data.items()) + data_tups = sorted(data.items()) # ugh, mutation @@ -233,9 +233,9 @@ def read(self, nrows=None): names = self._filter_usecols(names) # columns as list 
- alldata = [x[1] for x in data] + alldata = [x[1] for x in data_tups] - data = {k: v for k, (i, v) in zip(names, data)} + data = {k: v for k, (i, v) in zip(names, data_tups)} names, data = self._do_date_conversions(names, data) index, names = self._make_index(data, alldata, names)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40508
2021-03-18T23:20:59Z
2021-04-14T14:25:21Z
2021-04-14T14:25:21Z
2021-04-14T14:27:55Z
BUG: Respect ignore_index in Series.explode
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 9c8968f7f8223..77fdf38416f8c 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -630,6 +630,7 @@ Reshaping - Allow :class:`Index` to be passed to the :func:`numpy.all` function (:issue:`40180`) - Bug in :meth:`DataFrame.stack` not preserving ``CategoricalDtype`` in a ``MultiIndex`` (:issue:`36991`) - Bug in :func:`to_datetime` raising error when input sequence contains unhashable items (:issue:`39756`) +- Bug in :meth:`Series.explode` preserving index when ``ignore_index`` was ``True`` and values were scalars (:issue:`40487`) Sparse ^^^^^^ diff --git a/pandas/core/series.py b/pandas/core/series.py index 83eb4c38bc163..27042f7de9dc1 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3857,7 +3857,8 @@ def explode(self, ignore_index: bool = False) -> Series: dtype: object """ if not len(self) or not is_object_dtype(self): - return self.copy() + result = self.copy() + return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) diff --git a/pandas/tests/series/methods/test_explode.py b/pandas/tests/series/methods/test_explode.py index 1f0fbd1cc5ecb..c73737dad89aa 100644 --- a/pandas/tests/series/methods/test_explode.py +++ b/pandas/tests/series/methods/test_explode.py @@ -134,3 +134,11 @@ def test_explode_sets(): result = s.explode().sort_values() expected = pd.Series(["a", "b", "c"], index=[1, 1, 1]) tm.assert_series_equal(result, expected) + + +def test_explode_scalars_can_ignore_index(): + # https://github.com/pandas-dev/pandas/issues/40487 + s = pd.Series([1, 2, 3], index=["a", "b", "c"]) + result = s.explode(ignore_index=True) + expected = pd.Series([1, 2, 3]) + tm.assert_series_equal(result, expected)
- [x] closes https://github.com/pandas-dev/pandas/issues/40487 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40507
2021-03-18T23:06:09Z
2021-03-23T20:44:38Z
2021-03-23T20:44:38Z
2021-03-23T21:00:13Z
DOC: Fix documentation for DataFrame.groupby.transform
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 1f79823746a87..f33833193e4e0 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -119,19 +119,19 @@ class providing the base-class of operations. _apply_docs = { "template": """ - Apply function `func` group-wise and combine the results together. + Apply function ``func`` group-wise and combine the results together. - The function passed to `apply` must take a {input} as its first - argument and return a DataFrame, Series or scalar. `apply` will + The function passed to ``apply`` must take a {input} as its first + argument and return a DataFrame, Series or scalar. ``apply`` will then take care of combining the results back together into a single - dataframe or series. `apply` is therefore a highly flexible + dataframe or series. ``apply`` is therefore a highly flexible grouping method. - While `apply` is a very flexible method, its downside is that + While ``apply`` is a very flexible method, its downside is that using it can be quite a bit slower than using more specific methods - like `agg` or `transform`. Pandas offers a wide range of method that will - be much faster than using `apply` for their specific purposes, so try to - use them before reaching for `apply`. + like ``agg`` or ``transform``. Pandas offers a wide range of method that will + be much faster than using ``apply`` for their specific purposes, so try to + use them before reaching for ``apply``. Parameters ---------- @@ -140,7 +140,7 @@ class providing the base-class of operations. returns a dataframe, a series or a scalar. In addition the callable may take positional and keyword arguments. args, kwargs : tuple and dict - Optional positional and keyword arguments to pass to `func`. + Optional positional and keyword arguments to pass to ``func``. Returns ------- @@ -157,9 +157,9 @@ class providing the base-class of operations. 
Notes ----- - In the current implementation `apply` calls `func` twice on the + In the current implementation ``apply`` calls ``func`` twice on the first group to decide whether it can take a fast or slow code - path. This can lead to unexpected behavior if `func` has + path. This can lead to unexpected behavior if ``func`` has side-effects, as they will take effect twice for the first group. @@ -350,12 +350,12 @@ class providing the base-class of operations. See Also -------- -%(klass)s.groupby.apply : Apply function func group-wise - and combine the results together. +%(klass)s.groupby.apply : Apply function ``func`` group-wise and combine + the results together. %(klass)s.groupby.aggregate : Aggregate using one or more operations over the specified axis. -%(klass)s.transform : Transforms the Series on each group - based on the given function. +%(klass)s.transform : Call ``func`` on self producing a %(klass)s with + transformed values. Notes -----
- The doc template should no longer reference "Series" transforms explicitly. - Make consistent the formatting for references to function argument names.
https://api.github.com/repos/pandas-dev/pandas/pulls/40506
2021-03-18T20:10:06Z
2021-03-25T01:09:12Z
2021-03-25T01:09:12Z
2021-03-25T01:45:18Z
BUG: Remove artificial precision limit in rolling var & std
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 9c8968f7f8223..8190c80d774bc 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -299,6 +299,24 @@ cast to ``dtype=object`` (:issue:`38709`) ser ser2 + +.. _whatsnew_130.notable_bug_fixes.rolling_var_precision: + +Removed artificial truncation in rolling variance and standard deviation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:meth:`core.window.Rolling.std` and :meth:`core.window.Rolling.var` will no longer +artificially truncate results that are less than ``~1e-8`` and ``~1e-15`` respectively to +zero (:issue:`37051`, :issue:`40448`, :issue:`39872`). + +However, floating point artifacts may now exist in the results when rolling over larger values. + +.. ipython:: python + + s = pd.Series([7, 5, 5, 5]) + s.rolling(3).var() + + .. _whatsnew_130.api_breaking.deps: Increased minimum versions for dependencies diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index efacfad40ef82..46041b6a37a17 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -283,10 +283,6 @@ cdef inline float64_t calc_var(int64_t minp, int ddof, float64_t nobs, result = 0 else: result = ssqdm_x / (nobs - <float64_t>ddof) - # Fix for numerical imprecision. - # Can be result < 0 once Kahan Summation is implemented - if result < 1e-14: - result = 0 else: result = NaN diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 6db86b940737e..0fa49dccda573 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -1882,7 +1882,10 @@ def median( The default ``ddof`` of 1 used in :meth:`Series.std` is different than the default ``ddof`` of 0 in :func:`numpy.std`. - A minimum of one period is required for the rolling calculation.\n + A minimum of one period is required for the rolling calculation. 
+ + The implementation is susceptible to floating point imprecision as + shown in the example below.\n """ ).replace("\n", "", 1), create_section_header("Examples"), @@ -1890,13 +1893,13 @@ def median( """ >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) >>> s.rolling(3).std() - 0 NaN - 1 NaN - 2 0.577350 - 3 1.000000 - 4 1.000000 - 5 1.154701 - 6 0.000000 + 0 NaN + 1 NaN + 2 5.773503e-01 + 3 1.000000e+00 + 4 1.000000e+00 + 5 1.154701e+00 + 6 2.580957e-08 dtype: float64 """ ).replace("\n", "", 1), @@ -1931,7 +1934,10 @@ def std(self, ddof: int = 1, *args, **kwargs): The default ``ddof`` of 1 used in :meth:`Series.var` is different than the default ``ddof`` of 0 in :func:`numpy.var`. - A minimum of one period is required for the rolling calculation.\n + A minimum of one period is required for the rolling calculation. + + The implementation is susceptible to floating point imprecision as + shown in the example below.\n """ ).replace("\n", "", 1), create_section_header("Examples"), @@ -1939,13 +1945,13 @@ def std(self, ddof: int = 1, *args, **kwargs): """ >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) >>> s.rolling(3).var() - 0 NaN - 1 NaN - 2 0.333333 - 3 1.000000 - 4 1.000000 - 5 1.333333 - 6 0.000000 + 0 NaN + 1 NaN + 2 3.333333e-01 + 3 1.000000e+00 + 4 1.000000e+00 + 5 1.333333e+00 + 6 6.661338e-16 dtype: float64 """ ).replace("\n", "", 1), diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 70c076e086fb7..0af0bba5f5f8c 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -1150,3 +1150,25 @@ def test_rolling_descending_date_order_with_offset(window, frame_or_series): idx = date_range(start="2020-01-03", end="2020-01-01", freq="-1d") expected = frame_or_series([np.nan, 3, 2], index=idx) tm.assert_equal(result, expected) + + +def test_rolling_var_floating_artifact_precision(): + # GH 37051 + s = Series([7, 5, 5, 5]) + result = s.rolling(3).var() + expected = Series([np.nan, np.nan, 4 / 3, 0]) + 
tm.assert_series_equal(result, expected, atol=1.0e-15, rtol=1.0e-15) + + +def test_rolling_std_small_values(): + # GH 37051 + s = Series( + [ + 0.00000054, + 0.00000053, + 0.00000054, + ] + ) + result = s.rolling(2).std() + expected = Series([np.nan, 7.071068e-9, 7.071068e-9]) + tm.assert_series_equal(result, expected, atol=1.0e-15, rtol=1.0e-15)
- [x] closes #37051, xref #40448, xref #39872 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40505
2021-03-18T19:41:14Z
2021-03-21T21:06:58Z
2021-03-21T21:06:58Z
2021-03-21T22:02:18Z
TYP: nattype.pyi
diff --git a/pandas/_libs/tslibs/nattype.pyi b/pandas/_libs/tslibs/nattype.pyi new file mode 100644 index 0000000000000..0f81dcb4b2df1 --- /dev/null +++ b/pandas/_libs/tslibs/nattype.pyi @@ -0,0 +1,135 @@ + +from datetime import datetime + +import numpy as np + +NaT: NaTType +iNaT: int +nat_strings: set[str] + +def is_null_datetimelike(val: object, inat_is_null: bool = ...) -> bool: ... + +class NaTType(datetime): + value: np.int64 + + def asm8(self) -> np.datetime64: ... + def to_datetime64(self) -> np.datetime64: ... + def to_numpy(self, dtype=..., copy: bool = ...) -> np.datetime64: ... + + @property + def is_leap_year(self) -> bool: ... + @property + def is_month_start(self) -> bool: ... + @property + def is_quarter_start(self) -> bool: ... + @property + def is_year_start(self) -> bool: ... + @property + def is_month_end(self) -> bool: ... + @property + def is_quarter_end(self) -> bool: ... + @property + def is_year_end(self) -> bool: ... + + @property + def day_of_year(self) -> float: ... + @property + def dayofyear(self) -> float: ... + @property + def days_in_month(self) -> float: ... + @property + def daysinmonth(self) -> float: ... + @property + def day_of_week(self) -> float: ... + @property + def dayofweek(self) -> float: ... + @property + def week(self) -> float: ... + @property + def weekofyear(self) -> float: ... + + def day_name(self) -> float: ... + def month_name(self) -> float: ... + + # error: Return type "float" of "weekday" incompatible with return + # type "int" in supertype "date" + def weekday(self) -> float: ... # type: ignore[override] + + # error: Return type "float" of "isoweekday" incompatible with return + # type "int" in supertype "date" + def isoweekday(self) -> float: ... # type: ignore[override] + + def total_seconds(self) -> float: ... + + # error: Signature of "today" incompatible with supertype "datetime" + def today(self, *args, **kwargs) -> NaTType: ... 
# type: ignore[override] + # error: Signature of "today" incompatible with supertype "datetime" + def now(self, *args, **kwargs) -> NaTType: ... # type: ignore[override] + + def to_pydatetime(self) -> NaTType: ... + def date(self) -> NaTType: ... + + def round(self) -> NaTType: ... + def floor(self) -> NaTType: ... + def ceil(self) -> NaTType: ... + + def tz_convert(self) -> NaTType: ... + def tz_localize(self) -> NaTType: ... + + def replace(self, *args, **kwargs) -> NaTType: ... + + # error: Return type "float" of "year" incompatible with return + # type "int" in supertype "date" + @property + def year(self) -> float: ... # type: ignore[override] + + @property + def quarter(self) -> float: ... + + # error: Return type "float" of "month" incompatible with return + # type "int" in supertype "date" + @property + def month(self) -> float: ... # type: ignore[override] + + # error: Return type "float" of "day" incompatible with return + # type "int" in supertype "date" + @property + def day(self) -> float: ... # type: ignore[override] + + # error: Return type "float" of "hour" incompatible with return + # type "int" in supertype "date" + @property + def hour(self) -> float: ... # type: ignore[override] + + # error: Return type "float" of "minute" incompatible with return + # type "int" in supertype "date" + @property + def minute(self) -> float: ... # type: ignore[override] + + # error: Return type "float" of "second" incompatible with return + # type "int" in supertype "date" + @property + def second(self) -> float: ... # type: ignore[override] + + @property + def millisecond(self) -> float: ... + + # error: Return type "float" of "microsecond" incompatible with return + # type "int" in supertype "date" + @property + def microsecond(self) -> float: ... # type: ignore[override] + + @property + def nanosecond(self) -> float: ... + + # inject Timedelta properties + @property + def days(self) -> float: ... + @property + def microseconds(self) -> float: ... 
+ @property + def nanoseconds(self) -> float: ... + + # inject Period properties + @property + def qyear(self) -> float: ... diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 15f54c11be0a0..963777a0ddc3c 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -33,6 +33,7 @@ ArrayLike, DtypeObj, FrameOrSeriesUnion, + Scalar, ) from pandas.util._decorators import doc @@ -763,6 +764,7 @@ def factorize( dtype = original.dtype else: values, dtype = _ensure_data(values) + na_value: Scalar if original.dtype.kind in ["m", "M"]: # Note: factorize_array will cast NaT bc it has a __int__ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index e8b83af16254a..59b6bccbba496 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -38,6 +38,7 @@ ) from pandas._libs.tslibs import ( IncompatibleFrequency, + NaTType, OutOfBoundsDatetime, Timestamp, tz_compare, @@ -2371,7 +2372,7 @@ def __reduce__(self): # -------------------------------------------------------------------- # Null Handling Methods - _na_value = np.nan + _na_value: Union[float, NaTType] = np.nan """The expected NA value to use with this index.""" @cache_readonly diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 31ad8b7d8a295..e194148f0fc24 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -24,6 +24,7 @@ ) from pandas._libs.tslibs import ( BaseOffset, + NaTType, Resolution, Tick, ) @@ -218,7 +219,7 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): _can_hold_na = True - _na_value = NaT + _na_value: NaTType = NaT """The expected NA value to use with this index.""" def _convert_tolerance(self, tolerance, target): diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 4a08e733b770c..9959174373034 100644 --- a/pandas/core/internals/construction.py +++ 
b/pandas/core/internals/construction.py @@ -368,7 +368,10 @@ def maybe_squeeze_dt64tz(dta: ArrayLike) -> ArrayLike: # TODO(EA2D): kludge not needed with 2D EAs if isinstance(dta, DatetimeArray) and dta.ndim == 2 and dta.tz is not None: assert dta.shape[0] == 1 - dta = dta[0] + # error: Incompatible types in assignment (expression has type + # "Union[DatetimeLikeArrayMixin, Union[Any, NaTType]]", variable has + # type "Union[ExtensionArray, ndarray]") + dta = dta[0] # type: ignore[assignment] return dta diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 45f275664b206..5fadf7752b049 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -18,6 +18,7 @@ from pandas._libs import ( NaT, + NaTType, Timedelta, iNaT, lib, @@ -414,11 +415,8 @@ def new_func( if datetimelike: result = _wrap_results(result, orig_values.dtype, fill_value=iNaT) if not skipna: - # error: Argument 3 to "_mask_datetimelike_result" has incompatible type - # "Optional[ndarray]"; expected "ndarray" - result = _mask_datetimelike_result( - result, axis, mask, orig_values # type: ignore[arg-type] - ) + assert mask is not None # checked above + result = _mask_datetimelike_result(result, axis, mask, orig_values) return result @@ -601,7 +599,7 @@ def _mask_datetimelike_result( axis: Optional[int], mask: np.ndarray, orig_values: np.ndarray, -): +) -> Union[np.ndarray, np.datetime64, np.timedelta64, NaTType]: if isinstance(result, np.ndarray): # we need to apply the mask result = result.astype("i8").view(orig_values.dtype) @@ -609,7 +607,7 @@ def _mask_datetimelike_result( result[axis_mask] = iNaT else: if mask.any(): - result = NaT + return NaT return result @@ -1435,19 +1433,19 @@ def _get_counts( def _maybe_null_out( - result: np.ndarray, + result: np.ndarray | float | NaTType, axis: Optional[int], mask: Optional[np.ndarray], shape: Tuple[int, ...], min_count: int = 1, -) -> np.ndarray | float: +) -> np.ndarray | float | NaTType: """ Returns ------- Dtype The product of all elements 
on a given axis. ( NaNs are treated as 1) """ - if mask is not None and axis is not None and getattr(result, "ndim", False): + if mask is not None and axis is not None and isinstance(result, np.ndarray): null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0 if np.any(null_mask): if is_numeric_dtype(result): @@ -1461,9 +1459,7 @@ def _maybe_null_out( result[null_mask] = None elif result is not NaT: if check_below_min_count(shape, mask, min_count): - # error: Incompatible types in assignment (expression has type - # "float", variable has type "ndarray") - result = np.nan # type: ignore[assignment] + result = np.nan return result diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 2fd91c07ff4ac..5f33d00530361 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -701,7 +701,7 @@ def to_datetime( infer_datetime_format: bool = False, origin="unix", cache: bool = True, -) -> Union[DatetimeIndex, Series, DatetimeScalar, NaTType]: +) -> Optional[Union[DatetimeIndex, Series, DatetimeScalar, NaTType]]: """ Convert argument to datetime. 
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index c5aa4a061a05b..0278b22995089 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -202,7 +202,8 @@ def _get_cell_value(self, cell, convert_float: bool) -> Scalar: elif cell_type == "time": result = pd.to_datetime(str(cell)) result = cast(pd.Timestamp, result) - return result.time() + # error: Item "str" of "Union[float, str, NaTType]" has no attribute "time" + return result.time() # type: ignore[union-attr] else: self.close() raise ValueError(f"Unrecognized type {cell_type}") diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 3514fbc8c6293..6c13350df2fa3 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1723,7 +1723,10 @@ def _format_datetime64_dateonly( if date_format: return x.strftime(date_format) else: - return x._date_repr + # error: Item "NaTType" of "Union[NaTType, Any]" has no attribute "_date_repr" + # The underlying problem here is that mypy doesn't understand that NaT + # is a singleton, so that the check above excludes it here. 
+ return x._date_repr # type: ignore[union-attr] def get_format_datetime64( @@ -1801,13 +1804,15 @@ def get_format_timedelta64( consider_values = values_int != iNaT one_day_nanos = 86400 * 10 ** 9 - even_days = ( - # error: Unsupported operand types for % ("ExtensionArray" and "int") - np.logical_and( - consider_values, values_int % one_day_nanos != 0 # type: ignore[operator] - ).sum() - == 0 - ) + # error: Unsupported operand types for % ("ExtensionArray" and "int") + not_midnight = values_int % one_day_nanos != 0 # type: ignore[operator] + # error: Argument 1 to "__call__" of "ufunc" has incompatible type + # "Union[Any, ExtensionArray, ndarray]"; expected + # "Union[Union[int, float, complex, str, bytes, generic], + # Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], _SupportsArray]" + both = np.logical_and(consider_values, not_midnight) # type: ignore[arg-type] + even_days = both.sum() == 0 if even_days: format = None
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40503
2021-03-18T18:27:03Z
2021-03-22T12:43:14Z
2021-03-22T12:43:14Z
2021-03-22T14:30:26Z
PERF: increase the minimum number of elements to use numexpr for ops from 1e4 to 1e6
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 0dbe5e8d83741..4f14ea73d5a88 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -40,7 +40,7 @@ } # the minimum prod shape that we will use numexpr -_MIN_ELEMENTS = 10000 +_MIN_ELEMENTS = 1_000_000 def set_use_numexpr(v=True): diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 30f88ba5e76f6..96347ba5a733f 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -12,7 +12,7 @@ ) from pandas.core.computation import expressions as expr -_frame = DataFrame(np.random.randn(10000, 4), columns=list("ABCD"), dtype="float64") +_frame = DataFrame(np.random.randn(1000000, 4), columns=list("ABCD"), dtype="float64") _frame2 = DataFrame(np.random.randn(100, 4), columns=list("ABCD"), dtype="float64") _mixed = DataFrame( {
See https://github.com/pandas-dev/pandas/issues/40500 for the analysis.
https://api.github.com/repos/pandas-dev/pandas/pulls/40502
2021-03-18T16:27:29Z
2021-03-20T01:14:14Z
2021-03-20T01:14:13Z
2021-03-24T08:28:12Z
PERF: optimize is_numeric_v_string_like
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 7a2d6468f1b63..32ea82d9c0402 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1100,7 +1100,7 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype) -> bool: # This exists to silence numpy deprecation warnings, see GH#29553 -def is_numeric_v_string_like(a, b): +def is_numeric_v_string_like(a: ArrayLike, b): """ Check if we are comparing a string-like object to a numeric ndarray. NumPy doesn't like to compare such objects, especially numeric arrays @@ -1108,7 +1108,7 @@ def is_numeric_v_string_like(a, b): Parameters ---------- - a : array-like, scalar + a : array-like The first object to check. b : array-like, scalar The second object to check. @@ -1120,16 +1120,8 @@ def is_numeric_v_string_like(a, b): Examples -------- - >>> is_numeric_v_string_like(1, 1) - False - >>> is_numeric_v_string_like("foo", "foo") - False - >>> is_numeric_v_string_like(1, "foo") # non-array numeric - False >>> is_numeric_v_string_like(np.array([1]), "foo") True - >>> is_numeric_v_string_like("foo", np.array([1])) # symmetric check - True >>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"])) True >>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2])) @@ -1142,17 +1134,15 @@ def is_numeric_v_string_like(a, b): is_a_array = isinstance(a, np.ndarray) is_b_array = isinstance(b, np.ndarray) - is_a_numeric_array = is_a_array and is_numeric_dtype(a) - is_b_numeric_array = is_b_array and is_numeric_dtype(b) - is_a_string_array = is_a_array and is_string_like_dtype(a) - is_b_string_array = is_b_array and is_string_like_dtype(b) + is_a_numeric_array = is_a_array and a.dtype.kind in ("u", "i", "f", "c", "b") + is_b_numeric_array = is_b_array and b.dtype.kind in ("u", "i", "f", "c", "b") + is_a_string_array = is_a_array and a.dtype.kind in ("S", "U") + is_b_string_array = is_b_array and b.dtype.kind in ("S", "U") - is_a_scalar_string_like = not is_a_array and isinstance(a, 
str) is_b_scalar_string_like = not is_b_array and isinstance(b, str) return ( (is_a_numeric_array and is_b_scalar_string_like) - or (is_b_numeric_array and is_a_scalar_string_like) or (is_a_numeric_array and is_b_string_array) or (is_b_numeric_array and is_a_string_array) ) @@ -1305,37 +1295,6 @@ def is_numeric_dtype(arr_or_dtype) -> bool: ) -def is_string_like_dtype(arr_or_dtype) -> bool: - """ - Check whether the provided array or dtype is of a string-like dtype. - - Unlike `is_string_dtype`, the object dtype is excluded because it - is a mixed dtype. - - Parameters - ---------- - arr_or_dtype : array-like - The array or dtype to check. - - Returns - ------- - boolean - Whether or not the array or dtype is of the string dtype. - - Examples - -------- - >>> is_string_like_dtype(str) - True - >>> is_string_like_dtype(object) - False - >>> is_string_like_dtype(np.array(['a', 'b'])) - True - >>> is_string_like_dtype(pd.Series([1, 2])) - False - """ - return _is_dtype(arr_or_dtype, lambda dtype: dtype.kind in ("S", "U")) - - def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. 
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 59d6f9a51ed43..8c2cff21c114e 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -35,7 +35,6 @@ is_object_dtype, is_scalar, is_string_dtype, - is_string_like_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import ExtensionDtype @@ -258,7 +257,7 @@ def _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> np.ndarray: dtype = values.dtype shape = values.shape - if is_string_like_dtype(dtype): + if dtype.kind in ("S", "U"): result = np.zeros(values.shape, dtype=bool) else: result = np.empty(shape, dtype=bool) diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 248798408381e..406aec9d4c16e 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -469,14 +469,11 @@ def test_is_datetime_or_timedelta_dtype(): def test_is_numeric_v_string_like(): - assert not com.is_numeric_v_string_like(1, 1) - assert not com.is_numeric_v_string_like(1, "foo") - assert not com.is_numeric_v_string_like("foo", "foo") + assert not com.is_numeric_v_string_like(np.array([1]), 1) assert not com.is_numeric_v_string_like(np.array([1]), np.array([2])) assert not com.is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"])) assert com.is_numeric_v_string_like(np.array([1]), "foo") - assert com.is_numeric_v_string_like("foo", np.array([1])) assert com.is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"])) assert com.is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2])) @@ -521,14 +518,6 @@ def test_is_numeric_dtype(): assert com.is_numeric_dtype(pd.Index([1, 2.0])) -def test_is_string_like_dtype(): - assert not com.is_string_like_dtype(object) - assert not com.is_string_like_dtype(pd.Series([1, 2])) - - assert com.is_string_like_dtype(str) - assert com.is_string_like_dtype(np.array(["a", "b"])) - - def test_is_float_dtype(): assert not com.is_float_dtype(str) assert not 
com.is_float_dtype(int)
In some of the arithmetic benchmarks (xref https://github.com/pandas-dev/pandas/issues/39146#issuecomment-799482699), just this `is_numeric_v_string_like` check takes up 15-35% of the overall time. This improves the performance of this check by using some ndarray-specialized dtype checks (checking the kind instead of the generic functions): ``` In [1]: from pandas.core.dtypes.common import is_numeric_v_string_like In [2]: arr = np.array([1, 2, 3]) In [3]: %timeit is_numeric_v_string_like(arr, 2.0) 2.3 µs ± 46.9 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) # <-- master 482 ns ± 41.8 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) # <-- PR ```
https://api.github.com/repos/pandas-dev/pandas/pulls/40501
2021-03-18T16:17:55Z
2021-03-19T16:51:29Z
2021-03-19T16:51:29Z
2021-03-19T16:51:32Z
[ArrayManager] TST: include subset of ArrayManager tests in all CI builds
diff --git a/ci/run_tests.sh b/ci/run_tests.sh index ec4c87e8c91b0..d73940c1010ad 100755 --- a/ci/run_tests.sh +++ b/ci/run_tests.sh @@ -29,3 +29,8 @@ fi echo $PYTEST_CMD sh -c "$PYTEST_CMD" + +PYTEST_AM_CMD="PANDAS_DATA_MANAGER=array pytest -m \"$PATTERN and arraymanager\" -n $PYTEST_WORKERS --dist=loadfile -s --strict-markers --durations=30 --junitxml=test-data.xml $TEST_ARGS $COVERAGE pandas" + +echo $PYTEST_AM_CMD +sh -c "$PYTEST_AM_CMD" diff --git a/pandas/conftest.py b/pandas/conftest.py index 688ad6dcc5e48..f3356d2998ff8 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -85,6 +85,9 @@ def pytest_configure(config): config.addinivalue_line( "markers", "arm_slow: mark a test as slow for arm64 architecture" ) + config.addinivalue_line( + "markers", "arraymanager: mark a test to run with ArrayManager enabled" + ) def pytest_addoption(parser): @@ -121,6 +124,13 @@ def pytest_runtest_setup(item): pytest.skip("skipping high memory test since --run-high-memory was not set") +def pytest_collection_modifyitems(items): + for item in items: + # mark all tests in the pandas/tests/frame directory with "arraymanager" + if "/frame/" in item.nodeid: + item.add_marker(pytest.mark.arraymanager) + + # Hypothesis hypothesis.settings.register_profile( "ci", diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index 199347383b171..1d41426b93db6 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -926,7 +926,9 @@ def test_setitem_duplicate_columns_not_inplace(self): tm.assert_frame_equal(df_view, df_copy) tm.assert_frame_equal(df, expected) - @pytest.mark.parametrize("value", [1, np.array([[1], [1]]), [[1], [1]]]) + @pytest.mark.parametrize( + "value", [1, np.array([[1], [1]], dtype="int64"), [[1], [1]]] + ) def test_setitem_same_dtype_not_inplace(self, value, using_array_manager, request): # GH#39510 if not using_array_manager: diff --git 
a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 672ab20fb9791..b294c97409951 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -382,7 +382,8 @@ def kurt(x): pass # TODO: Ensure warning isn't emitted in the first place - @pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") + # ignore mean of empty slice and all-NaN + @pytest.mark.filterwarnings("ignore::RuntimeWarning") def test_median(self, float_frame_with_na, int_frame): def wrapper(x): if isna(x).any(): diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py index 253f416bd6f18..cc4042822bc8b 100644 --- a/pandas/tests/frame/test_stack_unstack.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -905,7 +905,7 @@ def test_unstack_nan_index3(self, using_array_manager): if using_array_manager: # INFO(ArrayManager) with ArrayManager preserve dtype where possible cols = right.columns[[1, 2, 3, 5]] - right[cols] = right[cols].astype("int64") + right[cols] = right[cols].astype(df["C"].dtype) tm.assert_frame_equal(left, right) def test_unstack_nan_index4(self):
xref #39146
https://api.github.com/repos/pandas-dev/pandas/pulls/40496
2021-03-18T08:48:04Z
2021-03-25T08:28:21Z
2021-03-25T08:28:21Z
2021-03-31T20:00:30Z
[ArrayManager] TST: run all tests with separate not slow / slow build
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 014c666a17084..d6744f578560c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -141,6 +141,9 @@ jobs: data_manager: name: Test experimental data manager runs-on: ubuntu-latest + strategy: + matrix: + pattern: ["not slow and not network and not clipboard", "slow"] steps: - name: Checkout @@ -152,43 +155,11 @@ jobs: - name: Run tests env: PANDAS_DATA_MANAGER: array + PATTERN: ${{ matrix.pattern }} + PYTEST_WORKERS: "auto" run: | source activate pandas-dev + ci/run_tests.sh - pytest pandas/tests/frame/ - pytest pandas/tests/reductions/ - pytest pandas/tests/generic/test_generic.py - pytest pandas/tests/arithmetic/ - pytest pandas/tests/groupby/ - pytest pandas/tests/resample/ - pytest pandas/tests/reshape/merge - pytest pandas/tests/series/ - pytest pandas/tests/indexing/ - - pytest pandas/tests/test_* - pytest pandas/tests/api/ - pytest pandas/tests/apply/ - pytest pandas/tests/arrays/ - pytest pandas/tests/base/ - pytest pandas/tests/computation/ - pytest pandas/tests/config/ - pytest pandas/tests/dtypes/ - pytest pandas/tests/extension/ - pytest pandas/tests/generic/ - pytest pandas/tests/indexes/ - pytest pandas/tests/internals/ - pytest pandas/tests/io/test_* -m "not slow and not clipboard" - pytest pandas/tests/io/excel/ -m "not slow and not clipboard" - pytest pandas/tests/io/formats/ -m "not slow and not clipboard" - pytest pandas/tests/io/parser/ -m "not slow and not clipboard" - pytest pandas/tests/io/sas/ -m "not slow and not clipboard" - pytest pandas/tests/io/xml/ -m "not slow and not clipboard" - pytest pandas/tests/libs/ - pytest pandas/tests/plotting/ - pytest pandas/tests/scalar/ - pytest pandas/tests/strings/ - pytest pandas/tests/tools/ - pytest pandas/tests/tseries/ - pytest pandas/tests/tslibs/ - pytest pandas/tests/util/ - pytest pandas/tests/window/ + - name: Print skipped tests + run: python ci/print_skipped.py diff --git 
a/pandas/tests/reshape/concat/__init__.py b/pandas/tests/reshape/concat/__init__.py index e69de29bb2d1d..777923be02398 100644 --- a/pandas/tests/reshape/concat/__init__.py +++ b/pandas/tests/reshape/concat/__init__.py @@ -0,0 +1,4 @@ +import pandas.util._test_decorators as td + +# TODO(ArrayManager) concat axis=0 +pytestmark = td.skip_array_manager_not_yet_implemented diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py index e467dbb7d49b6..5cc65feee869b 100644 --- a/pandas/tests/reshape/test_crosstab.py +++ b/pandas/tests/reshape/test_crosstab.py @@ -1,6 +1,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas.core.dtypes.common import is_categorical_dtype from pandas import ( @@ -438,6 +440,7 @@ def test_crosstab_normalize_arrays(self): ) tm.assert_frame_equal(test_case, norm_sum) + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) concat axis=0 def test_crosstab_with_empties(self): # Check handling of empties df = DataFrame( diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index c50886ba43019..8d8a83c233444 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -8,6 +8,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import ( Categorical, @@ -1197,6 +1199,7 @@ def test_pivot_table_with_margins_set_margin_name(self, margin_name): margins_name=margin_name, ) + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) concat axis=0 def test_pivot_timegrouper(self): df = DataFrame( { diff --git a/pandas/tests/reshape/test_pivot_multilevel.py b/pandas/tests/reshape/test_pivot_multilevel.py index df2ae0d52c660..ab41a94d1ff25 100644 --- a/pandas/tests/reshape/test_pivot_multilevel.py +++ b/pandas/tests/reshape/test_pivot_multilevel.py @@ -1,6 +1,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import 
pandas as pd from pandas import ( Index, @@ -196,6 +198,7 @@ def test_pivot_list_like_columns( tm.assert_frame_equal(result, expected) +@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) concat axis=0 def test_pivot_multiindexed_rows_and_cols(): # GH 36360
xref #39146
https://api.github.com/repos/pandas-dev/pandas/pulls/40495
2021-03-18T08:10:16Z
2021-03-20T01:15:23Z
2021-03-20T01:15:23Z
2021-03-20T19:55:21Z
[ArrayManager] TST: enable remaining top-level tests
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b40670d743e18..014c666a17084 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -165,6 +165,7 @@ jobs: pytest pandas/tests/series/ pytest pandas/tests/indexing/ + pytest pandas/tests/test_* pytest pandas/tests/api/ pytest pandas/tests/apply/ pytest pandas/tests/arrays/ @@ -175,6 +176,7 @@ jobs: pytest pandas/tests/extension/ pytest pandas/tests/generic/ pytest pandas/tests/indexes/ + pytest pandas/tests/internals/ pytest pandas/tests/io/test_* -m "not slow and not clipboard" pytest pandas/tests/io/excel/ -m "not slow and not clipboard" pytest pandas/tests/io/formats/ -m "not slow and not clipboard" diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index ba85ff1a044d6..c63d5271f1fae 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -9,6 +9,7 @@ import pytest from pandas._libs.internals import BlockPlacement +import pandas.util._test_decorators as td from pandas.core.dtypes.common import is_scalar @@ -38,6 +39,10 @@ ) from pandas.core.internals.blocks import new_block +# this file contains BlockManager specific tests +# TODO(ArrayManager) factor out interleave_dtype tests +pytestmark = td.skip_array_manager_invalid_test + @pytest.fixture(params=[new_block, make_block]) def block_maker(request): diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 911f1c7ebe31c..ea95f90d3a2cb 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -29,6 +29,9 @@ def df(): return DataFrame({"A": [1, 2, 3]}) +# TODO(ArrayManager) dask is still accessing the blocks +# https://github.com/dask/dask/pull/7318 +@td.skip_array_manager_not_yet_implemented def test_dask(df): toolz = import_module("toolz") # noqa
xref https://github.com/pandas-dev/pandas/issues/39146
https://api.github.com/repos/pandas-dev/pandas/pulls/40494
2021-03-18T07:46:20Z
2021-03-18T10:09:19Z
2021-03-18T10:09:18Z
2021-03-18T10:09:24Z
DOC: Styler docs - split PR from #39720
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 3abb39d2194c0..5ec2141028fa4 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -217,8 +217,10 @@ def set_tooltips( css_class: Optional[str] = None, ) -> Styler: """ - Add string based tooltips that will appear in the `Styler` HTML result. These - tooltips are applicable only to`<td>` elements. + Set the DataFrame of strings on ``Styler`` generating ``:hover`` tooltips. + + These string based tooltips are only applicable to ``<td>`` HTML elements, + and cannot be used for column or index headers. .. versionadded:: 1.3.0 @@ -227,7 +229,7 @@ def set_tooltips( ttips : DataFrame DataFrame containing strings that will be translated to tooltips, mapped by identical column and index values that must exist on the underlying - `Styler` data. None, NaN values, and empty strings will be ignored and + Styler data. None, NaN values, and empty strings will be ignored and not affect the rendered HTML. props : list-like or str, optional List of (attr, value) tuples or a valid CSS string. If ``None`` adopts @@ -671,21 +673,33 @@ def format( def set_td_classes(self, classes: DataFrame) -> Styler: """ - Add string based CSS class names to data cells that will appear within the - `Styler` HTML result. These classes are added within specified `<td>` elements. + Set the DataFrame of strings added to the ``class`` attribute of ``<td>`` + HTML elements. Parameters ---------- classes : DataFrame DataFrame containing strings that will be translated to CSS classes, - mapped by identical column and index values that must exist on the - underlying `Styler` data. None, NaN values, and empty strings will + mapped by identical column and index key values that must exist on the + underlying Styler data. None, NaN values, and empty strings will be ignored and not affect the rendered HTML. 
Returns ------- self : Styler + See Also + -------- + Styler.set_table_styles: Set the table styles included within the ``<style>`` + HTML element. + Styler.set_table_attributes: Set the table attributes added to the ``<table>`` + HTML element. + + Notes + ----- + Can be used in combination with ``Styler.set_table_styles`` to define an + internal CSS solution without reference to external CSS files. + Examples -------- >>> df = pd.DataFrame(data=[[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"]) @@ -707,16 +721,16 @@ def set_td_classes(self, classes: DataFrame) -> Styler: Form of the output with new additional css classes, >>> df = pd.DataFrame([[1]]) - >>> css = pd.DataFrame(["other-class"]) + >>> css = pd.DataFrame([["other-class"]]) >>> s = Styler(df, uuid="_", cell_ids=False).set_td_classes(css) >>> s.hide_index().render() - '<style type="text/css" ></style>' - '<table id="T__" >' + '<style type="text/css"></style>' + '<table id="T__">' ' <thead>' ' <tr><th class="col_heading level0 col0" >0</th></tr>' ' </thead>' ' <tbody>' - ' <tr><td class="data row0 col0 other-class" >1</td></tr>' + ' <tr><td class="data row0 col0 other-class" >1</td></tr>' ' </tbody>' '</table>' """ @@ -736,7 +750,7 @@ def set_td_classes(self, classes: DataFrame) -> Styler: def render(self, **kwargs) -> str: """ - Render the built up styles to HTML. + Render the ``Styler`` including all applied styles to HTML. Parameters ---------- @@ -753,7 +767,7 @@ def render(self, **kwargs) -> str: Notes ----- - ``Styler`` objects have defined the ``_repr_html_`` method + Styler objects have defined the ``_repr_html_`` method which automatically calls ``self.render()`` when it's the last item in a Notebook cell. When calling ``Styler.render()`` directly, wrap the result in ``IPython.display.HTML`` to view @@ -779,7 +793,7 @@ def render(self, **kwargs) -> str: def _update_ctx(self, attrs: DataFrame) -> None: """ - Update the state of the Styler for data cells. 
+ Update the state of the ``Styler`` for data cells. Collects a mapping of {index_label: [('<property>', '<value>'), ..]}. @@ -839,7 +853,7 @@ def __deepcopy__(self, memo) -> Styler: def clear(self) -> None: """ - Reset the styler, removing any previously applied styles. + Reset the ``Styler``, removing any previously applied styles. Returns None. """ @@ -923,10 +937,11 @@ def apply( Parameters ---------- func : function - ``func`` should take a Series or DataFrame (depending - on ``axis``), and return an object with the same shape. - Must return a DataFrame with identical index and - column labels or an ndarray with same shape as input when ``axis=None``. + ``func`` should take a Series if ``axis`` in [0,1] and return an object + of same length, also with identical index if the object is a Series. + ``func`` should take a DataFrame if ``axis`` is ``None`` and return either + an ndarray with the same shape or a DataFrame with identical columns and + index. .. versionchanged:: 1.3.0 @@ -944,13 +959,16 @@ def apply( ------- self : Styler + See Also + -------- + Styler.where: Apply CSS-styles based on a conditional function elementwise. + Styler.applymap: Apply a CSS-styling function elementwise. + Notes ----- - The output of ``func`` should be elements having CSS style as string or, + The elements of the output of ``func`` should be CSS styles as strings, in the + format 'attribute: value; attribute2: value2; ...' or, if nothing is to be applied to that element, an empty string or ``None``. - The output shape must match the input, i.e. if - ``x`` is the input row, column, or table (depending on ``axis``), - then ``func(x).shape == x.shape`` should be ``True``. 
This is similar to ``DataFrame.apply``, except that ``axis=None`` applies the function to the entire DataFrame at once, @@ -1001,13 +1019,14 @@ def applymap(self, func: Callable, subset=None, **kwargs) -> Styler: See Also -------- - Styler.where: Updates the HTML representation with a style which is - selected in accordance with the return value of a function. + Styler.where: Apply CSS-styles based on a conditional function elementwise. + Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise. Notes ----- - The output of ``func`` should be a CSS style as string or, if nothing is to be - applied, an empty string or ``None``. + The elements of the output of ``func`` should be CSS styles as strings, in the + format 'attribute: value; attribute2: value2; ...' or, + if nothing is to be applied to that element, an empty string or ``None``. Examples -------- @@ -1030,7 +1049,7 @@ def where( **kwargs, ) -> Styler: """ - Apply a function elementwise. + Apply CSS-styles based on a conditional function elementwise. Updates the HTML representation with a style which is selected in accordance with the return value of a function. @@ -1055,7 +1074,15 @@ def where( See Also -------- - Styler.applymap: Updates the HTML representation with the result. + Styler.applymap: Apply a CSS-styling function elementwise. + Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise. + + Examples + -------- + >>> def cond(v): + ... return v > 1 and v != 4 + >>> df = pd.DataFrame([[1, 2], [3, 4]]) + >>> df.style.where(cond, value='color:red;', other='font-size:2em;') """ if other is None: other = "" @@ -1092,10 +1119,9 @@ def set_precision(self, precision: int) -> Styler: def set_table_attributes(self, attributes: str) -> Styler: """ - Set the table attributes. + Set the table attributes added to the ``<table>`` HTML element. - These are the items that show up in the opening ``<table>`` tag - in addition to automatic (by default) id. 
+ These are items in addition to automatic (by default) ``id`` attribute. Parameters ---------- @@ -1105,6 +1131,13 @@ def set_table_attributes(self, attributes: str) -> Styler: ------- self : Styler + See Also + -------- + Styler.set_table_styles: Set the table styles included within the ``<style>`` + HTML element. + Styler.set_td_classes: Set the DataFrame of strings added to the ``class`` + attribute of ``<td>`` HTML elements. + Examples -------- >>> df = pd.DataFrame(np.random.randn(10, 4)) @@ -1116,9 +1149,9 @@ def set_table_attributes(self, attributes: str) -> Styler: def export(self) -> List[Tuple[Callable, Tuple, Dict]]: """ - Export the styles to applied to the current Styler. + Export the styles applied to the current ``Styler``. - Can be applied to a second style with ``Styler.use``. + Can be applied to a second Styler with ``Styler.use``. Returns ------- @@ -1126,13 +1159,13 @@ def export(self) -> List[Tuple[Callable, Tuple, Dict]]: See Also -------- - Styler.use: Set the styles on the current Styler. + Styler.use: Set the styles on the current ``Styler``. """ return self._todo def use(self, styles: List[Tuple[Callable, Tuple, Dict]]) -> Styler: """ - Set the styles on the current Styler. + Set the styles on the current ``Styler``. Possibly uses styles from ``Styler.export``. @@ -1147,14 +1180,14 @@ def use(self, styles: List[Tuple[Callable, Tuple, Dict]]) -> Styler: See Also -------- - Styler.export : Export the styles to applied to the current Styler. + Styler.export : Export the styles to applied to the current ``Styler``. """ self._todo.extend(styles) return self def set_uuid(self, uuid: str) -> Styler: """ - Set the uuid for a Styler. + Set the uuid applied to ``id`` attributes of HTML elements. Parameters ---------- @@ -1163,13 +1196,19 @@ def set_uuid(self, uuid: str) -> Styler: Returns ------- self : Styler + + Notes + ----- + Almost all HTML elements within the table, and including the ``<table>`` element + are assigned ``id`` attributes. 
The format is ``T_uuid_<extra>`` where + ``<extra>`` is typically a more specific identifier, such as ``row1_col2``. """ self.uuid = uuid return self def set_caption(self, caption: str) -> Styler: """ - Set the caption on a Styler. + Set the text added to a ``<caption>`` HTML element. Parameters ---------- @@ -1189,9 +1228,7 @@ def set_table_styles( overwrite: bool = True, ) -> Styler: """ - Set the table styles on a Styler. - - These are placed in a ``<style>`` tag before the generated HTML table. + Set the table styles included within the ``<style>`` HTML element. This function can be used to style the entire table, columns, rows or specific HTML selectors. @@ -1232,6 +1269,13 @@ def set_table_styles( ------- self : Styler + See Also + -------- + Styler.set_td_classes: Set the DataFrame of strings added to the ``class`` + attribute of ``<td>`` HTML elements. + Styler.set_table_attributes: Set the table attributes added to the ``<table>`` + HTML element. + Examples -------- >>> df = pd.DataFrame(np.random.randn(10, 4), @@ -1295,7 +1339,7 @@ def set_table_styles( def set_na_rep(self, na_rep: str) -> Styler: """ - Set the missing data representation on a Styler. + Set the missing data representation on a ``Styler``. .. versionadded:: 1.0.0 @@ -1505,7 +1549,8 @@ def css(rgba) -> str: def set_properties(self, subset=None, **kwargs) -> Styler: """ - Method to set one or more non-data dependent properties or each cell. + Set defined CSS-properties to each ``<td>`` HTML element within the given + subset. Parameters ---------- @@ -1518,6 +1563,11 @@ def set_properties(self, subset=None, **kwargs) -> Styler: ------- self : Styler + Notes + ----- + This is a convenience methods which wraps the :meth:`Styler.applymap` calling a + function returning the CSS-properties independently of the data. 
+ Examples -------- >>> df = pd.DataFrame(np.random.randn(10, 4)) @@ -1865,8 +1915,8 @@ def pipe(self, func: Callable, *args, **kwargs): See Also -------- DataFrame.pipe : Analogous method for DataFrame. - Styler.apply : Apply a function row-wise, column-wise, or table-wise to - modify the dataframe's styling. + Styler.apply : Apply a CSS-styling function column-wise, row-wise, or + table-wise. Notes ----- @@ -1915,7 +1965,7 @@ def pipe(self, func: Callable, *args, **kwargs): class _Tooltips: """ An extension to ``Styler`` that allows for and manipulates tooltips on hover - of table data-cells in the HTML result. + of ``<td>`` cells in the HTML result. Parameters ---------- @@ -1924,7 +1974,7 @@ class _Tooltips: css_props: list-like, default; see Notes List of (attr, value) tuples defining properties of the CSS class. tooltips: DataFrame, default empty - DataFrame of strings aligned with underlying ``Styler`` data for tooltip + DataFrame of strings aligned with underlying Styler data for tooltip display. Notes @@ -2025,7 +2075,7 @@ def _translate(self, styler_data: FrameOrSeriesUnion, uuid: str, d: Dict): """ Mutate the render dictionary to allow for tooltips: - - Add `<span>` HTML element to each data cells `display_value`. Ignores + - Add ``<span>`` HTML element to each data cells ``display_value``. Ignores headers. - Add table level CSS styles to control pseudo classes.
Split the docs upgrade in #39720 into these smaller changes just to `style.py` and will focus *only* on the user guide in that PR after this.
https://api.github.com/repos/pandas-dev/pandas/pulls/40493
2021-03-18T07:40:03Z
2021-03-20T01:17:05Z
2021-03-20T01:17:05Z
2021-03-20T08:08:54Z
CLN: sqlalchemy deprecation, annotations, share shape properties
diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in index a8728050f8071..1af5b23e3393f 100644 --- a/pandas/_libs/intervaltree.pxi.in +++ b/pandas/_libs/intervaltree.pxi.in @@ -238,6 +238,8 @@ NODE_CLASSES = {} {{for dtype, dtype_title, closed, closed_title, cmp_left, cmp_right, cmp_left_converse, cmp_right_converse, fused_prefix in nodes}} + +@cython.internal cdef class {{dtype_title}}Closed{{closed_title}}IntervalNode: """Non-terminal node for an IntervalTree diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1ff481553e413..87d155953f514 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1120,6 +1120,7 @@ except AttributeError: pass +@cython.internal cdef class Seen: """ Class for keeping track of the types of elements @@ -2580,7 +2581,7 @@ def tuples_to_object_array(ndarray[object] tuples): return result -def to_object_array_tuples(rows: object): +def to_object_array_tuples(rows: object) -> np.ndarray: """ Convert a list of tuples into an object array. Any subclass of tuple in `rows` will be casted to tuple. @@ -2592,7 +2593,7 @@ def to_object_array_tuples(rows: object): Returns ------- - numpy array of the object dtype. + np.ndarray[object, ndim=2] """ cdef: Py_ssize_t i, j, n, k, tmp diff --git a/pandas/_libs/ops.pyx b/pandas/_libs/ops.pyx index 1e51a578c44ea..ecb7041fb2c5a 100644 --- a/pandas/_libs/ops.pyx +++ b/pandas/_libs/ops.pyx @@ -32,7 +32,7 @@ from pandas._libs.util cimport ( @cython.wraparound(False) @cython.boundscheck(False) -def scalar_compare(object[:] values, object val, object op): +def scalar_compare(object[:] values, object val, object op) -> ndarray: """ Compare each element of `values` array with the scalar `val`, with the comparison operation described by `op`. 
@@ -114,7 +114,7 @@ def scalar_compare(object[:] values, object val, object op): @cython.wraparound(False) @cython.boundscheck(False) -def vec_compare(ndarray[object] left, ndarray[object] right, object op): +def vec_compare(ndarray[object] left, ndarray[object] right, object op) -> ndarray: """ Compare the elements of `left` with the elements of `right` pointwise, with the comparison operation described by `op`. @@ -180,7 +180,7 @@ def vec_compare(ndarray[object] left, ndarray[object] right, object op): @cython.wraparound(False) @cython.boundscheck(False) -def scalar_binop(object[:] values, object val, object op): +def scalar_binop(object[:] values, object val, object op) -> ndarray: """ Apply the given binary operator `op` between each element of the array `values` and the scalar `val`. @@ -217,7 +217,7 @@ def scalar_binop(object[:] values, object val, object op): @cython.wraparound(False) @cython.boundscheck(False) -def vec_binop(object[:] left, object[:] right, object op): +def vec_binop(object[:] left, object[:] right, object op) -> ndarray: """ Apply the given binary operator `op` pointwise to the elements of arrays `left` and `right`. diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index 75dbb4b74aabd..05b255c40f4b2 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -41,7 +41,7 @@ ctypedef fused reshape_t: @cython.boundscheck(False) def unstack(reshape_t[:, :] values, const uint8_t[:] mask, Py_ssize_t stride, Py_ssize_t length, Py_ssize_t width, - reshape_t[:, :] new_values, uint8_t[:, :] new_mask): + reshape_t[:, :] new_values, uint8_t[:, :] new_mask) -> None: """ Transform long values to wide new_values. 
@@ -111,7 +111,10 @@ def explode(ndarray[object] values): Returns ------- - tuple(values, counts) + ndarray[object] + result + ndarray[int64_t] + counts """ cdef: Py_ssize_t i, j, count, n diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 2879528b2c501..d86d3261d404e 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -286,7 +286,7 @@ cdef class _NaT(datetime): # This allows Timestamp(ts.isoformat()) to always correctly roundtrip. return "NaT" - def __hash__(self): + def __hash__(self) -> int: return NPY_NAT @property diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx index 30d9f5e64b282..02bdae3a8dbac 100644 --- a/pandas/_libs/tslibs/vectorized.pyx +++ b/pandas/_libs/tslibs/vectorized.pyx @@ -90,7 +90,7 @@ def ints_to_pydatetime( object freq=None, bint fold=False, str box="datetime" -): +) -> np.ndarray: """ Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp. @@ -116,7 +116,7 @@ def ints_to_pydatetime( Returns ------- - ndarray of dtype specified by box + ndarray[object] of type specified by box """ cdef: Py_ssize_t i, n = len(arr) @@ -223,7 +223,7 @@ cdef inline int _reso_stamp(npy_datetimestruct *dts): return RESO_DAY -def get_resolution(const int64_t[:] stamps, tzinfo tz=None): +def get_resolution(const int64_t[:] stamps, tzinfo tz=None) -> Resolution: cdef: Py_ssize_t i, n = len(stamps) npy_datetimestruct dts @@ -332,7 +332,7 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t @cython.wraparound(False) @cython.boundscheck(False) -def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None): +def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None) -> bool: """ Check if all of the given (nanosecond) timestamps are normalized to midnight, i.e. hour == minute == second == 0. 
If the optional timezone diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx index 6adda1fe92044..9fbeb67aa35e9 100644 --- a/pandas/_libs/writers.pyx +++ b/pandas/_libs/writers.pyx @@ -77,7 +77,7 @@ def write_csv_rows( @cython.boundscheck(False) @cython.wraparound(False) -def convert_json_to_lines(arr: object) -> str: +def convert_json_to_lines(arr: str) -> str: """ replace comma separated json with line feeds, paying special attention to quotes & brackets diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 848e467afb7b6..678e532f05772 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -8,12 +8,16 @@ Type, TypeVar, Union, + cast, ) import numpy as np from pandas._libs import lib -from pandas._typing import Shape +from pandas._typing import ( + F, + Shape, +) from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import ( @@ -41,7 +45,7 @@ ) -def ravel_compat(meth): +def ravel_compat(meth: F) -> F: """ Decorator to ravel a 2D array before passing it to a cython operation, then reshape the result to our own shape. @@ -58,7 +62,7 @@ def method(self, *args, **kwargs): order = "F" if flags.f_contiguous else "C" return result.reshape(self.shape, order=order) - return method + return cast(F, method) class NDArrayBackedExtensionArray(ExtensionArray): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 3a468758ab3fd..0590b2d585401 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6130,15 +6130,14 @@ def _maybe_disable_logical_methods(self, opname: str_t): # This call will raise make_invalid_op(opname)(self) + @final @property def shape(self) -> Shape: """ Return a tuple of the shape of the underlying data. 
""" - # not using "(len(self), )" to return "correct" shape if the values - # consists of a >1 D array (see GH-27775) - # overridden in MultiIndex.shape to avoid materializing the values - return self._values.shape + # See GH#27775, GH#27384 for history/reasoning in how this is defined. + return (len(self),) def ensure_index_from_sequences(sequences, names=None): diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 97492f35232e3..587e2ab232efb 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -752,15 +752,6 @@ def dtypes(self) -> Series: } ) - @property - def shape(self) -> Shape: - """ - Return a tuple of the shape of the underlying data. - """ - # overriding the base Index.shape definition to avoid materializing - # the values (GH-27384, GH-27775) - return (len(self),) - def __len__(self) -> int: return len(self.codes[0]) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 456d87766bdb7..f37faa4ab844b 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -450,7 +450,7 @@ def take( **kwargs, ) - def tolist(self): + def tolist(self) -> list[int]: return list(self._range) @doc(Int64Index.__iter__) @@ -494,13 +494,13 @@ def _minmax(self, meth: str): return self.start + self.step * no_steps - def min(self, axis=None, skipna=True, *args, **kwargs) -> int: + def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: """The minimum value of the RangeIndex""" nv.validate_minmax_axis(axis) nv.validate_min(args, kwargs) return self._minmax("min") - def max(self, axis=None, skipna=True, *args, **kwargs) -> int: + def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: """The maximum value of the RangeIndex""" nv.validate_minmax_axis(axis) nv.validate_max(args, kwargs) diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index 87be5c0997072..375901bc3fb58 100644 --- a/pandas/core/util/hashing.py +++ 
b/pandas/core/util/hashing.py @@ -56,7 +56,7 @@ def combine_hash_arrays(arrays: Iterator[np.ndarray], num_items: int) -> np.ndar Returns ------- - np.ndarray[int64] + np.ndarray[uint64] Should be the same as CPython's tupleobject.c """ @@ -184,7 +184,7 @@ def hash_tuples( Returns ------- - ndarray of hashed values array + ndarray[np.uint64] of hashed values """ if not is_list_like(vals): raise TypeError("must be convertible to a list-of-tuples") @@ -227,7 +227,7 @@ def _hash_categorical(cat: Categorical, encoding: str, hash_key: str) -> np.ndar Returns ------- - ndarray of hashed values array, same size as len(c) + ndarray[np.uint64] of hashed values, same size as len(c) """ # Convert ExtensionArrays to ndarrays values = np.asarray(cat.categories._values) @@ -274,7 +274,8 @@ def hash_array( Returns ------- - 1d uint64 numpy array of hash values, same length as the vals + ndarray[np.uint64, ndim=1] + Hashed values, same length as the vals. """ if not hasattr(vals, "dtype"): raise TypeError("must pass a ndarray-like") diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index 75f133745e3a2..39d12c5b05c2f 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -27,7 +27,7 @@ from pandas import DataFrame -def convert_to_line_delimits(s): +def convert_to_line_delimits(s: str) -> str: """ Helper function that converts JSON lists to line delimited JSON. 
""" diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 7d923e57834ea..a22f0cd8dff83 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -1991,8 +1991,14 @@ def bar(connection, data): def main(connectable): with connectable.connect() as conn: with conn.begin(): - foo_data = conn.run_callable(foo) - conn.run_callable(bar, foo_data) + if _gt14(): + # https://github.com/sqlalchemy/sqlalchemy/commit/ + # 00b5c10846e800304caa86549ab9da373b42fa5d#r48323973 + foo_data = foo(conn) + bar(conn, foo_data) + else: + foo_data = conn.run_callable(foo) + conn.run_callable(bar, foo_data) DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn) main(self.conn)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40491
2021-03-18T00:42:26Z
2021-03-20T01:18:39Z
2021-03-20T01:18:39Z
2021-03-20T01:47:46Z
REF: share to_native_types with ArrayManager
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index ef9981f40efe1..460d19290d56f 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -88,6 +88,7 @@ from pandas.core.internals.blocks import ( ensure_block_shape, new_block, + to_native_types, ) if TYPE_CHECKING: @@ -634,7 +635,7 @@ def replace_list( ) def to_native_types(self, **kwargs): - return self.apply_with_block("to_native_types", **kwargs) + return self.apply(to_native_types, **kwargs) @property def is_mixed_type(self) -> bool: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 3fd1ebaca19f0..99e54bace8915 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -97,6 +97,7 @@ FloatingArray, IntegerArray, PandasArray, + TimedeltaArray, ) from pandas.core.base import PandasObject import pandas.core.common as com @@ -260,9 +261,11 @@ def get_block_values_for_json(self) -> np.ndarray: # TODO(EA2D): reshape will be unnecessary with 2D EAs return np.asarray(self.values).reshape(self.shape) + @final @property def fill_value(self): - return np.nan + # Used in reindex_indexer + return na_value_for_dtype(self.dtype, compat=False) @property def mgr_locs(self) -> BlockPlacement: @@ -652,24 +655,11 @@ def should_store(self, value: ArrayLike) -> bool: """ return is_dtype_equal(value.dtype, self.dtype) + @final def to_native_types(self, na_rep="nan", quoting=None, **kwargs): """ convert to our native types format """ - values = self.values - - mask = isna(values) - itemsize = writers.word_len(na_rep) - - if not self.is_object and not quoting and itemsize: - values = values.astype(str) - if values.dtype.itemsize / np.dtype("U1").itemsize < itemsize: - # enlarge for the na_rep - values = values.astype(f"<U{itemsize}") - else: - values = np.array(values, dtype="object") - - values[mask] = na_rep - values = values.astype(object, copy=False) - return self.make_block(values) 
+ result = to_native_types(self.values, na_rep=na_rep, quoting=quoting, **kwargs) + return self.make_block(result) # block actions # @final @@ -1498,11 +1488,6 @@ def _holder(self): # For extension blocks, the holder is values-dependent. return type(self.values) - @property - def fill_value(self): - # Used in reindex_indexer - return self.values.dtype.na_value - @property def _can_hold_na(self): # The default ExtensionArray._can_hold_na is True @@ -1565,15 +1550,6 @@ def get_values(self, dtype: Optional[DtypeObj] = None) -> np.ndarray: def array_values(self) -> ExtensionArray: return self.values - def to_native_types(self, na_rep="nan", quoting=None, **kwargs): - """override to use ExtensionArray astype for the conversion""" - values = self.values - mask = isna(values) - - new_values = np.asarray(values.astype(object)) - new_values[mask] = na_rep - return self.make_block(new_values) - def take_nd( self, indexer, @@ -1808,41 +1784,6 @@ def is_bool(self): class FloatBlock(NumericBlock): __slots__ = () - def to_native_types( - self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs - ): - """ convert to our native types format """ - values = self.values - - # see gh-13418: no special formatting is desired at the - # output (important for appropriate 'quoting' behaviour), - # so do not pass it through the FloatArrayFormatter - if float_format is None and decimal == ".": - mask = isna(values) - - if not quoting: - values = values.astype(str) - else: - values = np.array(values, dtype="object") - - values[mask] = na_rep - values = values.astype(object, copy=False) - return self.make_block(values) - - from pandas.io.formats.format import FloatArrayFormatter - - formatter = FloatArrayFormatter( - values, - na_rep=na_rep, - float_format=float_format, - decimal=decimal, - quoting=quoting, - fixed_width=False, - ) - res = formatter.get_result_as_array() - res = res.astype(object, copy=False) - return self.make_block(res) - class 
NDArrayBackedExtensionBlock(HybridMixin, Block): """ @@ -1962,18 +1903,6 @@ def array_values(self): def _holder(self): return type(self.array_values()) - @property - def fill_value(self): - return na_value_for_dtype(self.dtype) - - def to_native_types(self, na_rep="NaT", **kwargs): - """ convert to our native types format """ - arr = self.array_values() - - result = arr._format_native_types(na_rep=na_rep, **kwargs) - result = result.astype(object, copy=False) - return self.make_block(result) - class DatetimeBlock(DatetimeLikeBlockMixin): __slots__ = () @@ -1999,7 +1928,6 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): internal_values = Block.internal_values _can_hold_element = DatetimeBlock._can_hold_element - to_native_types = DatetimeBlock.to_native_types diff = DatetimeBlock.diff where = DatetimeBlock.where putmask = DatetimeLikeBlockMixin.putmask @@ -2316,3 +2244,75 @@ def ensure_block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike: # We can't, and don't need to, reshape. 
values = np.asarray(values).reshape(1, -1) return values + + +def to_native_types( + values: ArrayLike, + *, + na_rep="nan", + quoting=None, + float_format=None, + decimal=".", + **kwargs, +) -> np.ndarray: + """ convert to our native types format """ + values = ensure_wrapped_if_datetimelike(values) + + if isinstance(values, (DatetimeArray, TimedeltaArray)): + result = values._format_native_types(na_rep=na_rep, **kwargs) + result = result.astype(object, copy=False) + return result + + elif isinstance(values, ExtensionArray): + mask = isna(values) + + new_values = np.asarray(values.astype(object)) + new_values[mask] = na_rep + return new_values + + elif values.dtype.kind == "f": + # see GH#13418: no special formatting is desired at the + # output (important for appropriate 'quoting' behaviour), + # so do not pass it through the FloatArrayFormatter + if float_format is None and decimal == ".": + mask = isna(values) + + if not quoting: + values = values.astype(str) + else: + values = np.array(values, dtype="object") + + values[mask] = na_rep + values = values.astype(object, copy=False) + return values + + from pandas.io.formats.format import FloatArrayFormatter + + formatter = FloatArrayFormatter( + values, + na_rep=na_rep, + float_format=float_format, + decimal=decimal, + quoting=quoting, + fixed_width=False, + ) + res = formatter.get_result_as_array() + res = res.astype(object, copy=False) + return res + + else: + + mask = isna(values) + itemsize = writers.word_len(na_rep) + + if values.dtype != _dtype_obj and not quoting and itemsize: + values = values.astype(str) + if values.dtype.itemsize / np.dtype("U1").itemsize < itemsize: + # enlarge for the na_rep + values = values.astype(f"<U{itemsize}") + else: + values = np.array(values, dtype="object") + + values[mask] = na_rep + values = values.astype(object, copy=False) + return values
also combine implementations of Block.fill_value
https://api.github.com/repos/pandas-dev/pandas/pulls/40490
2021-03-17T22:50:04Z
2021-03-19T16:46:37Z
2021-03-19T16:46:37Z
2021-03-19T17:01:12Z
TYP: index.pyi
diff --git a/pandas/_libs/index.pyi b/pandas/_libs/index.pyi new file mode 100644 index 0000000000000..979619c3d14c4 --- /dev/null +++ b/pandas/_libs/index.pyi @@ -0,0 +1,86 @@ +import numpy as np + +class IndexEngine: + over_size_threshold: bool + + def __init__(self, vgetter, n: int): ... + + def __contains__(self, val: object) -> bool: ... + + # -> int | slice | np.ndarray[bool] + def get_loc(self, val: object) -> int | slice | np.ndarray: ... + + def sizeof(self, deep: bool = False) -> int: ... + def __sizeof__(self) -> int: ... + + @property + def is_unique(self) -> bool: ... + + @property + def is_monotonic_increasing(self) -> bool: ... + + @property + def is_monotonic_decreasing(self) -> bool: ... + + def get_backfill_indexer(self, other: np.ndarray, limit: int | None =...) -> np.ndarray: ... + def get_pad_indexer(self, other: np.ndarray, limit: int | None =...) -> np.ndarray: ... + + @property + def is_mapping_populated(self) -> bool: ... + + def clear_mapping(self): ... + def get_indexer(self, values: np.ndarray) -> np.ndarray: ... # np.ndarray[np.intp] + def get_indexer_non_unique( + self, + targets: np.ndarray, + ) -> tuple[ + np.ndarray, # np.ndarray[np.intp] + np.ndarray, # np.ndarray[np.intp] + ]: ... + + +class Float64Engine(IndexEngine): ... +class Float32Engine(IndexEngine): ... + +class Int64Engine(IndexEngine): ... +class Int32Engine(IndexEngine): ... +class Int16Engine(IndexEngine): ... +class Int8Engine(IndexEngine): ... + +class UInt64Engine(IndexEngine): ... +class UInt32Engine(IndexEngine): ... +class UInt16Engine(IndexEngine): ... +class UInt8Engine(IndexEngine): ... + +class ObjectEngine(IndexEngine): ... + +class DatetimeEngine(Int64Engine): ... +class TimedeltaEngine(DatetimeEngine): ... +class PeriodEngine(Int64Engine): ... 
+ + +class BaseMultiIndexCodesEngine: + levels: list[np.ndarray] + offsets: np.ndarray # ndarray[uint64_t, ndim=1] + + def __init__( + self, + levels: list[np.ndarray], # all entries hashable + labels: list[np.ndarray], # all entries integer-dtyped + offsets: np.ndarray, # np.ndarray[np.uint64, ndim=1] + ): ... + + def get_indexer( + self, + target: np.ndarray, # np.ndarray[object] + ) -> np.ndarray: ... # np.ndarray[np.intp] + + def _extract_level_codes(self, target: object): ... + + def get_indexer_with_fill( + self, + target: np.ndarray, # np.ndarray[object] of tuples + values: np.ndarray, # np.ndarray[object] of tuples + method: str, + limit: int | None, + ) -> np.ndarray: ... # np.ndarray[np.int64] diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 71f4b0c0ae18f..47e6d417bb925 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -259,7 +259,7 @@ cdef class IndexEngine: self.monotonic_inc = 0 self.monotonic_dec = 0 - def get_indexer(self, ndarray values): + def get_indexer(self, ndarray values) -> np.ndarray: self._ensure_mapping_populated() return self.mapping.lookup(values) @@ -269,6 +269,11 @@ cdef class IndexEngine: return the labels in the same order as the target and a missing indexer into the targets (which correspond to the -1 indices in the results + + Returns + ------- + indexer : np.ndarray[np.intp] + missing : np.ndarray[np.intp] """ cdef: ndarray values, x @@ -455,7 +460,7 @@ cdef class DatetimeEngine(Int64Engine): # we may get datetime64[ns] or timedelta64[ns], cast these to int64 return super().get_indexer_non_unique(targets.view("i8")) - def get_indexer(self, ndarray values): + def get_indexer(self, ndarray values) -> np.ndarray: self._ensure_mapping_populated() if values.dtype != self._get_box_dtype(): return np.repeat(-1, len(values)).astype(np.intp) @@ -572,17 +577,17 @@ cdef class BaseMultiIndexCodesEngine: # integers representing labels: we will use its get_loc and get_indexer self._base.__init__(self, lambda: 
lab_ints, len(lab_ints)) - def _codes_to_ints(self, codes): + def _codes_to_ints(self, ndarray[uint64_t] codes) -> np.ndarray: raise NotImplementedError("Implemented by subclass") - def _extract_level_codes(self, object target): + def _extract_level_codes(self, ndarray[object] target) -> np.ndarray: """ Map the requested list of (tuple) keys to their integer representations for searching in the underlying integer index. Parameters ---------- - target : list-like of keys + target : ndarray[object] Each key is a tuple, with a label for each level of the index. Returns @@ -607,7 +612,7 @@ cdef class BaseMultiIndexCodesEngine: Returns ------- - np.ndarray[int64_t, ndim=1] of the indexer of `target` into + np.ndarray[intp_t, ndim=1] of the indexer of `target` into `self.values` """ lab_ints = self._extract_level_codes(target) @@ -635,7 +640,7 @@ cdef class BaseMultiIndexCodesEngine: the same as the length of all tuples in `values` values : ndarray[object] of tuples must be sorted and all have the same length. Should be the set of - the MultiIndex's values. Needed only if `method` is not None + the MultiIndex's values. method: string "backfill" or "pad" limit: int or None @@ -643,7 +648,7 @@ cdef class BaseMultiIndexCodesEngine: Returns ------- - np.ndarray[int64_t, ndim=1] of the indexer of `target` into `values`, + np.ndarray[intp_t, ndim=1] of the indexer of `target` into `values`, filled with the `method` (and optionally `limit`) specified """ assert method in ("backfill", "pad") @@ -714,9 +719,7 @@ cdef class BaseMultiIndexCodesEngine: return self._base.get_loc(self, lab_int) - def get_indexer_non_unique(self, ndarray target): - # This needs to be overridden just because the default one works on - # target._values, and target can be itself a MultiIndex. 
+ def get_indexer_non_unique(self, ndarray[object] target): lab_ints = self._extract_level_codes(target) indexer = self._base.get_indexer_non_unique(self, lab_ints) diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 67cd6c63c1faa..0a2893ac49a49 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -405,12 +405,7 @@ def _cmp_method(self, other, op): _str_na_value = StringDtype.na_value def _str_map(self, f, na_value=None, dtype: Optional[Dtype] = None): - from pandas.arrays import ( - BooleanArray, - IntegerArray, - StringArray, - ) - from pandas.core.arrays.string_ import StringDtype + from pandas.arrays import BooleanArray if dtype is None: dtype = StringDtype() diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index fc4eeebc86642..e85d09a479d16 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -320,7 +320,7 @@ def _outer_indexer( # would we like our indexing holder to defer to us _defer_to_indexing = False - _engine_type = libindex.ObjectEngine + _engine_type: Type[libindex.IndexEngine] = libindex.ObjectEngine # whether we support partial string indexing. Overridden # in DatetimeIndex and PeriodIndex _supports_partial_string_indexing = False @@ -723,8 +723,8 @@ def _cleanup(self) -> None: self._engine.clear_mapping() @cache_readonly - def _engine(self) -> libindex.ObjectEngine: - # property, for now, slow to look up + def _engine(self) -> libindex.IndexEngine: + # For base class (object dtype) we get ObjectEngine # to avoid a reference cycle, bind `target_values` to a local variable, so # `self` is not passed into the lambda. 
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index e194148f0fc24..bac00b2399121 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -7,6 +7,7 @@ Any, List, Optional, + Sequence, Tuple, TypeVar, Union, @@ -536,7 +537,7 @@ def shift(self: _T, periods: int = 1, freq=None) -> _T: # -------------------------------------------------------------------- # List-like Methods - def _get_delete_freq(self, loc: int): + def _get_delete_freq(self, loc: Union[int, slice, Sequence[int]]): """ Find the `freq` for self.delete(loc). """ diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index fedb955ce83b9..9751e12c373cd 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -557,7 +557,7 @@ def from_tuples( arrays = [[]] * len(names) elif isinstance(tuples, (np.ndarray, Index)): if isinstance(tuples, Index): - tuples = tuples._values + tuples = np.asarray(tuples._values) arrays = list(lib.tuples_to_object_array(tuples).T) elif isinstance(tuples, list): @@ -2689,11 +2689,16 @@ def _get_indexer( target, method=method, limit=limit, tolerance=tolerance ) + # TODO: explicitly raise here? we only have one test that + # gets here, and it is checking that we raise with method="nearest" + if method == "pad" or method == "backfill": if tolerance is not None: raise NotImplementedError( "tolerance not implemented yet for MultiIndex" ) + # TODO: get_indexer_with_fill docstring says values must be _sorted_ + # but that doesn't appear to be enforced indexer = self._engine.get_indexer_with_fill( target=target._values, values=self._values, method=method, limit=limit ) @@ -2705,6 +2710,8 @@ def _get_indexer( else: indexer = self._engine.get_indexer(target._values) + # Note: we only get here (in extant tests at least) with + # target.nlevels == self.nlevels return ensure_platform_int(indexer) def get_slice_bound(
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40486
2021-03-17T19:19:52Z
2021-03-30T21:21:28Z
2021-03-30T21:21:28Z
2021-03-30T21:56:43Z
Backport PR #40471: compat: sqlalchemy deprecations
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 02b06b164a2a1..b93f097b93441 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -5,6 +5,7 @@ from contextlib import contextmanager from datetime import date, datetime, time +from distutils.version import LooseVersion from functools import partial import re from typing import Iterator, List, Optional, Union, overload @@ -55,6 +56,16 @@ def _is_sqlalchemy_connectable(con): return False +def _gt14() -> bool: + """ + Check if sqlalchemy.__version__ is at least 1.4.0, when several + deprecations were made. + """ + import sqlalchemy + + return LooseVersion(sqlalchemy.__version__) >= LooseVersion("1.4.0") + + def _convert_params(sql, params): """Convert SQL and params args to DBAPI2.0 compliant format.""" args = [sql] @@ -715,7 +726,10 @@ def sql_schema(self): def _execute_create(self): # Inserting table into database, add to MetaData object - self.table = self.table.tometadata(self.pd_sql.meta) + if _gt14(): + self.table = self.table.to_metadata(self.pd_sql.meta) + else: + self.table = self.table.tometadata(self.pd_sql.meta) self.table.create() def create(self): @@ -1409,9 +1423,17 @@ def to_sql( # Only check when name is not a number and name is not lower case engine = self.connectable.engine with self.connectable.connect() as conn: - table_names = engine.table_names( - schema=schema or self.meta.schema, connection=conn - ) + if _gt14(): + from sqlalchemy import inspect + + insp = inspect(conn) + table_names = insp.get_table_names( + schema=schema or self.meta.schema + ) + else: + table_names = engine.table_names( + schema=schema or self.meta.schema, connection=conn + ) if name not in table_names: msg = ( f"The provided table name '{name}' is not found exactly as " @@ -1426,9 +1448,15 @@ def tables(self): return self.meta.tables def has_table(self, name, schema=None): - return self.connectable.run_callable( - self.connectable.dialect.has_table, name, schema or self.meta.schema - ) + if _gt14(): + import 
sqlalchemy as sa + + insp = sa.inspect(self.connectable) + return insp.has_table(name, schema or self.meta.schema) + else: + return self.connectable.run_callable( + self.connectable.dialect.has_table, name, schema or self.meta.schema + ) def get_table(self, table_name, schema=None): schema = schema or self.meta.schema diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 16d4bc65094f8..f8a8b662f2652 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -44,10 +44,11 @@ import pandas._testing as tm import pandas.io.sql as sql -from pandas.io.sql import read_sql_query, read_sql_table +from pandas.io.sql import _gt14, read_sql_query, read_sql_table try: import sqlalchemy + from sqlalchemy import inspect from sqlalchemy.ext import declarative from sqlalchemy.orm import session as sa_session import sqlalchemy.schema @@ -1331,7 +1332,11 @@ def test_create_table(self): pandasSQL = sql.SQLDatabase(temp_conn) pandasSQL.to_sql(temp_frame, "temp_frame") - assert temp_conn.has_table("temp_frame") + if _gt14(): + insp = inspect(temp_conn) + assert insp.has_table("temp_frame") + else: + assert temp_conn.has_table("temp_frame") def test_drop_table(self): temp_conn = self.connect() @@ -1343,11 +1348,18 @@ def test_drop_table(self): pandasSQL = sql.SQLDatabase(temp_conn) pandasSQL.to_sql(temp_frame, "temp_frame") - assert temp_conn.has_table("temp_frame") + if _gt14(): + insp = inspect(temp_conn) + assert insp.has_table("temp_frame") + else: + assert temp_conn.has_table("temp_frame") pandasSQL.drop_table("temp_frame") - assert not temp_conn.has_table("temp_frame") + if _gt14(): + assert not insp.has_table("temp_frame") + else: + assert not temp_conn.has_table("temp_frame") def test_roundtrip(self): self._roundtrip() @@ -1689,9 +1701,10 @@ def test_nan_string(self): tm.assert_frame_equal(result, df) def _get_index_columns(self, tbl_name): - from sqlalchemy.engine import reflection + from sqlalchemy import inspect + + insp = 
inspect(self.conn) - insp = reflection.Inspector.from_engine(self.conn) ixs = insp.get_indexes(tbl_name) ixs = [i["column_names"] for i in ixs] return ixs
Backport PR #40471
https://api.github.com/repos/pandas-dev/pandas/pulls/40481
2021-03-17T11:57:30Z
2021-03-17T13:09:35Z
2021-03-17T13:09:34Z
2021-03-25T10:37:14Z
REF: prepare (upcast) scalar before dispatching to arithmetic array ops
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 6e4aa1a5efacf..52900d9b62dc2 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -395,6 +395,7 @@ def _cmp_method(self, other, op): if isinstance(other, PandasArray): other = other._ndarray + other = ops.maybe_prepare_scalar_for_op(other, (len(self),)) pd_op = ops.get_array_op(op) other = ensure_wrapped_if_datetimelike(other) with np.errstate(all="ignore"): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7f970a72cb12c..8ac94111cca56 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6804,6 +6804,7 @@ def _arith_method(self, other, op): return ops.frame_arith_method_with_reindex(self, other, op) axis = 1 # only relevant for Series other case + other = ops.maybe_prepare_scalar_for_op(other, (self.shape[axis],)) self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None) diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index f6bde348888a1..9cccf1cff60a1 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -35,6 +35,7 @@ comparison_op, get_array_op, logical_op, + maybe_prepare_scalar_for_op, ) from pandas.core.ops.common import ( # noqa:F401 get_op_result_name, @@ -428,6 +429,7 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): axis = self._get_axis_number(axis) if axis is not None else 1 + other = maybe_prepare_scalar_for_op(other, self.shape) self, other = align_method_FRAME(self, other, axis, flex=True, level=level) if isinstance(other, ABCDataFrame): diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 2ff93b203a001..39c6fa13f79a4 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -202,12 +202,11 @@ def arithmetic_op(left: ArrayLike, right: Any, op): ndarray or ExtensionArray Or a 2-tuple of these in the case of divmod or rdivmod. 
""" - # NB: We assume that extract_array and ensure_wrapped_if_datetimelike - # has already been called on `left` and `right`. + # have already been called on `left` and `right`, + # and `maybe_prepare_scalar_for_op` has already been called on `right` # We need to special-case datetime64/timedelta64 dtypes (e.g. because numpy # casts integer dtypes to timedelta64 when operating with timedelta64 - GH#22390) - right = _maybe_upcast_for_op(right, left.shape) if ( should_extension_dispatch(left, right) @@ -439,7 +438,7 @@ def get_array_op(op): raise NotImplementedError(op_name) -def _maybe_upcast_for_op(obj, shape: Shape): +def maybe_prepare_scalar_for_op(obj, shape: Shape): """ Cast non-pandas objects to pandas types to unify behavior of arithmetic and comparison operations. diff --git a/pandas/core/series.py b/pandas/core/series.py index 0715d892d7ed3..da25ca8e3f653 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -5315,6 +5315,7 @@ def _arith_method(self, other, op): lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) + rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape) rvalues = ensure_wrapped_if_datetimelike(rvalues) with np.errstate(all="ignore"):
This moves the potential upcasting of python/numpy scalars to pandas scalars out of the actual array op (`arithmetic_op`), but moves it up to the Series / DataFrame level before calling the array op. This avoids calling it repeatedly on the same scalar for each column.
https://api.github.com/repos/pandas-dev/pandas/pulls/40479
2021-03-17T09:48:45Z
2021-04-28T14:49:50Z
2021-04-28T14:49:50Z
2021-04-28T14:50:46Z
BUG: frame.any/all with bool_only=True and Categorical[bool]
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index 97bffb35c28d9..58da2570015b5 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -8,6 +8,7 @@ import numpy as np from pandas._libs import lib +from pandas._typing import ArrayLike is_bool = lib.is_bool @@ -420,3 +421,28 @@ def is_dataclass(item): return is_dataclass(item) and not isinstance(item, type) except ImportError: return False + + +def is_inferred_bool_dtype(arr: ArrayLike) -> bool: + """ + Check if this is a ndarray[bool] or an ndarray[object] of bool objects. + + Parameters + ---------- + arr : np.ndarray or ExtensionArray + + Returns + ------- + bool + + Notes + ----- + This does not include the special treatment is_bool_dtype uses for + Categorical. + """ + dtype = arr.dtype + if dtype == np.dtype(bool): + return True + elif dtype == np.dtype("object"): + return lib.is_bool_array(arr.ravel("K")) + return False diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 9da019fb2ef95..99a1706c671b1 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -33,7 +33,6 @@ ) from pandas.core.dtypes.common import ( ensure_int64, - is_bool_dtype, is_datetime64_ns_dtype, is_dtype_equal, is_extension_array_dtype, @@ -50,6 +49,7 @@ ABCPandasArray, ABCSeries, ) +from pandas.core.dtypes.inference import is_inferred_bool_dtype from pandas.core.dtypes.missing import ( array_equals, isna, @@ -676,10 +676,7 @@ def get_bool_data(self, copy: bool = False) -> ArrayManager: copy : bool, default False Whether to copy the blocks """ - return self._get_data_subset( - lambda arr: is_bool_dtype(arr.dtype) - or (is_object_dtype(arr.dtype) and lib.is_bool_array(arr)) - ) + return self._get_data_subset(is_inferred_bool_dtype) def get_numeric_data(self, copy: bool = False) -> ArrayManager: """ diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 
32cecec01b8be..97f10207ee555 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -68,6 +68,7 @@ ABCPandasArray, ABCSeries, ) +from pandas.core.dtypes.inference import is_inferred_bool_dtype from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, @@ -157,7 +158,6 @@ class Block(PandasObject): __slots__ = ["_mgr_locs", "values", "ndim"] is_numeric = False - is_bool = False is_object = False is_extension = False _can_consolidate = True @@ -230,6 +230,14 @@ def _can_hold_na(self) -> bool: def is_categorical(self) -> bool: return self._holder is Categorical + @final + @property + def is_bool(self) -> bool: + """ + We can be bool if a) we are bool dtype or b) object dtype with bool objects. + """ + return is_inferred_bool_dtype(self.values) + @final def external_values(self): return external_values(self.values) @@ -1785,10 +1793,6 @@ def _can_hold_element(self, element: Any) -> bool: # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]" return can_hold_element(self.dtype, element) # type: ignore[arg-type] - @property - def is_bool(self): - return self.dtype.kind == "b" - class NDArrayBackedExtensionBlock(HybridMixin, Block): """ @@ -1955,14 +1959,6 @@ class ObjectBlock(Block): values: np.ndarray - @property - def is_bool(self): - """ - we can be a bool if we have only bool values but are of type - object - """ - return lib.is_bool_array(self.values.ravel("K")) - @maybe_split def reduce(self, func, ignore_failures: bool = False) -> List[Block]: """ diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index d24320ad17709..672ab20fb9791 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1163,6 +1163,9 @@ def test_any_all_object_bool_only(self): df._consolidate_inplace() df["C"] = Series([True, True]) + # Categorical of bools is _not_ considered booly + df["D"] = df["C"].astype("category") + # The underlying bug is in 
DataFrame._get_bool_data, so we check # that while we're here res = df._get_bool_data()
Started with sharing some code, unearthed a bug along the way. Nothing user-facing since it is only in ArrayManager.
https://api.github.com/repos/pandas-dev/pandas/pulls/40477
2021-03-17T03:37:05Z
2021-03-23T22:50:18Z
2021-03-23T22:50:18Z
2021-03-23T23:10:42Z
TYP: get_reverse_indexer, get_group_index_sorter
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 8b643c03b6a19..5352ca53e1b54 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -13,14 +13,16 @@ import numpy as np cimport numpy as cnp from numpy cimport ( - NPY_INT64, + NPY_INTP, int64_t, + intp_t, ndarray, ) cnp.import_array() from pandas._libs.algos import ensure_int64 + from pandas._libs.util cimport is_integer_object @@ -30,7 +32,7 @@ cdef class BlockPlacement: # __slots__ = '_as_slice', '_as_array', '_len' cdef: slice _as_slice - ndarray _as_array # Note: this still allows `None` + ndarray _as_array # Note: this still allows `None`; will be intp_t bint _has_slice, _has_array, _is_known_slice_like def __cinit__(self, val): @@ -53,12 +55,12 @@ cdef class BlockPlacement: self._as_slice = slc self._has_slice = True else: - arr = np.empty(0, dtype=np.int64) + arr = np.empty(0, dtype=np.intp) self._as_array = arr self._has_array = True else: # Cython memoryview interface requires ndarray to be writeable. 
- arr = np.require(val, dtype=np.int64, requirements='W') + arr = np.require(val, dtype=np.intp, requirements='W') assert arr.ndim == 1, arr.shape self._as_array = arr self._has_array = True @@ -125,8 +127,8 @@ cdef class BlockPlacement: if not self._has_array: start, stop, step, _ = slice_get_indices_ex(self._as_slice) # NOTE: this is the C-optimized equivalent of - # `np.arange(start, stop, step, dtype=np.int64)` - self._as_array = cnp.PyArray_Arange(start, stop, step, NPY_INT64) + # `np.arange(start, stop, step, dtype=np.intp)` + self._as_array = cnp.PyArray_Arange(start, stop, step, NPY_INTP) self._has_array = True return self._as_array @@ -325,13 +327,13 @@ cdef slice_getitem(slice slc, ind): else: # NOTE: # this is the C-optimized equivalent of - # `np.arange(s_start, s_stop, s_step, dtype=np.int64)[ind]` - return cnp.PyArray_Arange(s_start, s_stop, s_step, NPY_INT64)[ind] + # `np.arange(s_start, s_stop, s_step, dtype=np.intp)[ind]` + return cnp.PyArray_Arange(s_start, s_stop, s_step, NPY_INTP)[ind] @cython.boundscheck(False) @cython.wraparound(False) -cdef slice indexer_as_slice(int64_t[:] vals): +cdef slice indexer_as_slice(intp_t[:] vals): cdef: Py_ssize_t i, n, start, stop int64_t d diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1ff481553e413..fc3e1ecfb55c1 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -451,7 +451,7 @@ def fast_zip(list ndarrays) -> ndarray[object]: return result -def get_reverse_indexer(const int64_t[:] indexer, Py_ssize_t length): +def get_reverse_indexer(const intp_t[:] indexer, Py_ssize_t length) -> ndarray: """ Reverse indexing operation. @@ -459,14 +459,25 @@ def get_reverse_indexer(const int64_t[:] indexer, Py_ssize_t length): indexer_inv[indexer[x]] = x - .. note:: If indexer is not unique, only first occurrence is accounted. 
+ Parameters + ---------- + indexer : np.ndarray[np.intp] + length : int + + Returns + ------- + np.ndarray[np.intp] + + Notes + ----- + If indexer is not unique, only first occurrence is accounted. """ cdef: Py_ssize_t i, n = len(indexer) - ndarray[int64_t] rev_indexer - int64_t idx + ndarray[intp_t] rev_indexer + intp_t idx - rev_indexer = np.empty(length, dtype=np.int64) + rev_indexer = np.empty(length, dtype=np.intp) rev_indexer[:] = -1 for i in range(n): idx = indexer[i] @@ -808,23 +819,32 @@ def generate_bins_dt64(ndarray[int64_t] values, const int64_t[:] binner, @cython.boundscheck(False) @cython.wraparound(False) -def get_level_sorter(const int64_t[:] label, const int64_t[:] starts): +def get_level_sorter( + ndarray[int64_t, ndim=1] codes, const intp_t[:] starts +) -> ndarray: """ Argsort for a single level of a multi-index, keeping the order of higher levels unchanged. `starts` points to starts of same-key indices w.r.t to leading levels; equivalent to: - np.hstack([label[starts[i]:starts[i+1]].argsort(kind='mergesort') + np.hstack([codes[starts[i]:starts[i+1]].argsort(kind='mergesort') + starts[i] for i in range(len(starts) - 1)]) + + Parameters + ---------- + codes : np.ndarray[int64_t, ndim=1] + starts : np.ndarray[intp, ndim=1] + + Returns + ------- + np.ndarray[np.int, ndim=1] """ cdef: - int64_t l, r - Py_ssize_t i - ndarray[int64_t, ndim=1] out = np.empty(len(label), dtype=np.int64) - ndarray[int64_t, ndim=1] label_arr = np.asarray(label) + Py_ssize_t i, l, r + ndarray[intp_t, ndim=1] out = np.empty(len(codes), dtype=np.intp) for i in range(len(starts) - 1): l, r = starts[i], starts[i + 1] - out[l:r] = l + label_arr[l:r].argsort(kind='mergesort') + out[l:r] = l + codes[l:r].argsort(kind='mergesort') return out diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 74e96015b4544..a222a8cc464fb 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -991,10 +991,10 @@ def __init__(self, data: FrameOrSeries, 
labels, ngroups: int, axis: int = 0): @cache_readonly def slabels(self): # Sorted labels - return algorithms.take_nd(self.labels, self.sort_idx, allow_fill=False) + return algorithms.take_nd(self.labels, self._sort_idx, allow_fill=False) @cache_readonly - def sort_idx(self): + def _sort_idx(self) -> np.ndarray: # np.ndarray[np.intp] # Counting sort indexer return get_group_index_sorter(self.labels, self.ngroups) @@ -1013,7 +1013,7 @@ def __iter__(self): @cache_readonly def sorted_data(self) -> FrameOrSeries: - return self.data.take(self.sort_idx, axis=self.axis) + return self.data.take(self._sort_idx, axis=self.axis) def _chop(self, sdata, slice_obj: slice) -> NDFrame: raise AbstractMethodError(self) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 3a468758ab3fd..a5c0a5c6694e5 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4135,13 +4135,22 @@ def _join_level( """ from pandas.core.indexes.multi import MultiIndex - def _get_leaf_sorter(labels): + def _get_leaf_sorter(labels: List[np.ndarray]) -> np.ndarray: """ Returns sorter for the inner most level while preserving the order of higher levels. + + Parameters + ---------- + labels : list[np.ndarray] + Each ndarray has signed integer dtype, not necessarily identical. 
+ + Returns + ------- + np.ndarray[np.intp] """ if labels[0].size == 0: - return np.empty(0, dtype="int64") + return np.empty(0, dtype=np.intp) if len(labels) == 1: return get_group_index_sorter(labels[0]) @@ -4154,7 +4163,7 @@ def _get_leaf_sorter(labels): starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = ensure_int64(labels[-1]) - return lib.get_level_sorter(lab, ensure_int64(starts)) + return lib.get_level_sorter(lab, ensure_platform_int(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError("Join on level between two MultiIndex objects is ambiguous") @@ -4189,12 +4198,12 @@ def _get_leaf_sorter(labels): join_index = left[left_indexer] else: - left_lev_indexer = ensure_int64(left_lev_indexer) + left_lev_indexer = ensure_platform_int(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) old_codes = left.codes[level] - new_lev_codes = algos.take_nd( - rev_indexer, old_codes[old_codes != -1], allow_fill=False - ) + + taker = old_codes[old_codes != -1] + new_lev_codes = rev_indexer.take(taker) new_codes = list(left.codes) new_codes[level] = new_lev_codes @@ -4204,6 +4213,7 @@ def _get_leaf_sorter(labels): if keep_order: # just drop missing values. o.w. keep order left_indexer = np.arange(len(left), dtype=np.intp) + left_indexer = cast(np.ndarray, left_indexer) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] @@ -4213,11 +4223,12 @@ def _get_leaf_sorter(labels): if level == 0: # outer most level, take the fast route ngroups = 1 + new_lev_codes.max() left_indexer, counts = libalgos.groupsort_indexer( - new_lev_codes, ngroups + ensure_int64(new_lev_codes), ngroups ) # missing values are placed first; drop them! 
- left_indexer = left_indexer[counts[0] :] + # error: Value of type "Optional[ndarray]" is not indexable + left_indexer = left_indexer[counts[0] :] # type: ignore[index] new_codes = [lab[left_indexer] for lab in new_codes] else: # sort the leaves diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 97492f35232e3..31aa5e301d17c 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1952,7 +1952,7 @@ def _sort_levels_monotonic(self) -> MultiIndex: lev = lev.take(indexer) # indexer to reorder the level codes - indexer = ensure_int64(indexer) + indexer = ensure_platform_int(indexer) ri = lib.get_reverse_indexer(indexer, len(indexer)) level_codes = algos.take_nd(ri, level_codes) diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 6a0286b1c40ef..613669b8cc1d8 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -145,7 +145,7 @@ def _indexer_and_to_sort(self): ngroups = len(obs_ids) indexer = get_group_index_sorter(comp_index, ngroups) - + indexer = ensure_platform_int(indexer) return indexer, to_sort @cache_readonly diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 720643d3d98aa..10c13327c79d3 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -582,6 +582,16 @@ def get_group_index_sorter( Both algorithms are `stable` sort and that is necessary for correctness of groupby operations. e.g. 
consider: df.groupby(key)[col].transform('first') + + Parameters + ---------- + group_index : np.ndarray + signed integer dtype + ngroups : int or None, default None + + Returns + ------- + np.ndarray[np.intp] """ if ngroups is None: # error: Incompatible types in assignment (expression has type "number[Any]", @@ -596,9 +606,9 @@ def get_group_index_sorter( ) if do_groupsort: sorter, _ = algos.groupsort_indexer(ensure_int64(group_index), ngroups) - return ensure_platform_int(sorter) else: - return group_index.argsort(kind="mergesort") + sorter = group_index.argsort(kind="mergesort") + return ensure_platform_int(sorter) def compress_group_index(group_index, sort: bool = True): diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index c63d5271f1fae..ef1c3ec0c2860 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -259,7 +259,7 @@ def _check(blk): def test_mgr_locs(self): assert isinstance(self.fblock.mgr_locs, BlockPlacement) tm.assert_numpy_array_equal( - self.fblock.mgr_locs.as_array, np.array([0, 2, 4], dtype=np.int64) + self.fblock.mgr_locs.as_array, np.array([0, 2, 4], dtype=np.intp) ) def test_attrs(self): @@ -277,7 +277,7 @@ def test_delete(self): newb.delete(0) assert isinstance(newb.mgr_locs, BlockPlacement) tm.assert_numpy_array_equal( - newb.mgr_locs.as_array, np.array([2, 4], dtype=np.int64) + newb.mgr_locs.as_array, np.array([2, 4], dtype=np.intp) ) assert (newb.values[0] == 1).all() @@ -285,14 +285,14 @@ def test_delete(self): newb.delete(1) assert isinstance(newb.mgr_locs, BlockPlacement) tm.assert_numpy_array_equal( - newb.mgr_locs.as_array, np.array([0, 4], dtype=np.int64) + newb.mgr_locs.as_array, np.array([0, 4], dtype=np.intp) ) assert (newb.values[1] == 2).all() newb = self.fblock.copy() newb.delete(2) tm.assert_numpy_array_equal( - newb.mgr_locs.as_array, np.array([0, 2], dtype=np.int64) + newb.mgr_locs.as_array, np.array([0, 2], dtype=np.intp) ) 
assert (newb.values[1] == 1).all() @@ -665,7 +665,7 @@ def test_consolidate_ordering_issues(self, mgr): assert cons.nblocks == 1 assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement) tm.assert_numpy_array_equal( - cons.blocks[0].mgr_locs.as_array, np.arange(len(cons.items), dtype=np.int64) + cons.blocks[0].mgr_locs.as_array, np.arange(len(cons.items), dtype=np.intp) ) def test_reindex_items(self): @@ -1095,7 +1095,7 @@ def test_slice_iter(self, slc, expected): ) def test_slice_to_array_conversion(self, slc, arr): tm.assert_numpy_array_equal( - BlockPlacement(slc).as_array, np.asarray(arr, dtype=np.int64) + BlockPlacement(slc).as_array, np.asarray(arr, dtype=np.intp) ) def test_blockplacement_add(self): diff --git a/pandas/tests/libs/test_lib.py b/pandas/tests/libs/test_lib.py index 60c42878497c2..0532de9998c5f 100644 --- a/pandas/tests/libs/test_lib.py +++ b/pandas/tests/libs/test_lib.py @@ -197,9 +197,9 @@ def test_maybe_booleans_to_slice(self): assert result == slice(0, 0) def test_get_reverse_indexer(self): - indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64) + indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.intp) result = lib.get_reverse_indexer(indexer, 5) - expected = np.array([4, 2, 3, 6, 7], dtype=np.int64) + expected = np.array([4, 2, 3, 6, 7], dtype=np.intp) tm.assert_numpy_array_equal(result, expected)
nailing down all the "np.intp"s bit by little bit. AFAICT we're not going to be able to annotate ndarrays with ndim/dtype any time soon. Might be worth looking into https://pypi.org/project/nptyping/
https://api.github.com/repos/pandas-dev/pandas/pulls/40476
2021-03-16T23:51:56Z
2021-03-20T01:09:25Z
2021-03-20T01:09:25Z
2021-03-20T01:38:19Z
CLN: intp_t instead of int64_t for indexers in libs funcs
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 5783d3c2353aa..047eb848b7540 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -199,8 +199,10 @@ def groupsort_indexer(const int64_t[:] index, Py_ssize_t ngroups): Returns ------- - tuple - 1-d indexer ordered by groups, group counts. + ndarray[intp_t, ndim=1] + Indexer + ndarray[int64_t, ndim=1] + Group Counts Notes ----- @@ -208,11 +210,12 @@ def groupsort_indexer(const int64_t[:] index, Py_ssize_t ngroups): """ cdef: Py_ssize_t i, loc, label, n - ndarray[int64_t] counts, where, result + ndarray[int64_t] counts, where + ndarray[intp_t] indexer counts = np.zeros(ngroups + 1, dtype=np.int64) n = len(index) - result = np.zeros(n, dtype=np.int64) + indexer = np.zeros(n, dtype=np.intp) where = np.zeros(ngroups + 1, dtype=np.int64) with nogil: @@ -228,10 +231,10 @@ def groupsort_indexer(const int64_t[:] index, Py_ssize_t ngroups): # this is our indexer for i in range(n): label = index[i] + 1 - result[where[label]] = i + indexer[where[label]] = i where[label] += 1 - return result, counts + return indexer, counts @cython.boundscheck(False) diff --git a/pandas/_libs/algos_take_helper.pxi.in b/pandas/_libs/algos_take_helper.pxi.in index 4eefd9d1f7267..cdf4ef3b119d2 100644 --- a/pandas/_libs/algos_take_helper.pxi.in +++ b/pandas/_libs/algos_take_helper.pxi.in @@ -66,7 +66,7 @@ def take_1d_{{name}}_{{dest}}(const {{c_type_in}}[:] values, {{else}} def take_1d_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=1] values, {{endif}} - const int64_t[:] indexer, + const intp_t[:] indexer, {{c_type_out}}[:] out, fill_value=np.nan): @@ -102,7 +102,7 @@ def take_2d_axis0_{{name}}_{{dest}}(const {{c_type_in}}[:, :] values, {{else}} def take_2d_axis0_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values, {{endif}} - ndarray[int64_t] indexer, + ndarray[intp_t] indexer, {{c_type_out}}[:, :] out, fill_value=np.nan): cdef: @@ -156,7 +156,7 @@ def take_2d_axis1_{{name}}_{{dest}}(const {{c_type_in}}[:, :] 
values, {{else}} def take_2d_axis1_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values, {{endif}} - ndarray[int64_t] indexer, + ndarray[intp_t] indexer, {{c_type_out}}[:, :] out, fill_value=np.nan): diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 1bfb66cbf21ac..89020f2078584 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -19,6 +19,7 @@ from numpy cimport ( int16_t, int32_t, int64_t, + intp_t, ndarray, uint8_t, uint16_t, @@ -141,6 +142,7 @@ def group_median_float64(ndarray[float64_t, ndim=2] out, Py_ssize_t i, j, N, K, ngroups, size ndarray[int64_t] _counts ndarray[float64_t, ndim=2] data + ndarray[intp_t] indexer float64_t* ptr assert min_count == -1, "'min_count' only used in add and prod" diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx index 511b373bc7e1f..c2947de943e1a 100644 --- a/pandas/_libs/join.pyx +++ b/pandas/_libs/join.pyx @@ -33,7 +33,8 @@ def inner_join(const intp_t[:] left, const intp_t[:] right, Py_ssize_t max_groups): cdef: Py_ssize_t i, j, k, count = 0 - ndarray[int64_t] left_count, right_count, left_sorter, right_sorter + ndarray[intp_t] left_sorter, right_sorter + ndarray[int64_t] left_count, right_count ndarray[int64_t] left_indexer, right_indexer int64_t lc, rc Py_ssize_t loc, left_pos = 0, right_pos = 0, position = 0 @@ -84,8 +85,8 @@ def left_outer_join(const intp_t[:] left, const intp_t[:] right, Py_ssize_t max_groups, bint sort=True): cdef: Py_ssize_t i, j, k, count = 0 - ndarray[int64_t] left_count, right_count, left_sorter, right_sorter - ndarray rev + ndarray[int64_t] left_count, right_count + ndarray[intp_t] rev, left_sorter, right_sorter ndarray[int64_t] left_indexer, right_indexer int64_t lc, rc Py_ssize_t loc, left_pos = 0, right_pos = 0, position = 0 @@ -157,7 +158,8 @@ def full_outer_join(const intp_t[:] left, const intp_t[:] right, Py_ssize_t max_groups): cdef: Py_ssize_t i, j, k, count = 0 - ndarray[int64_t] left_count, right_count, left_sorter, right_sorter + 
ndarray[intp_t] left_sorter, right_sorter + ndarray[int64_t] left_count, right_count ndarray[int64_t] left_indexer, right_indexer int64_t lc, rc int64_t left_pos = 0, right_pos = 0 @@ -215,12 +217,16 @@ def full_outer_join(const intp_t[:] left, const intp_t[:] right, _get_result_indexer(right_sorter, right_indexer)) -cdef _get_result_indexer(ndarray[int64_t] sorter, ndarray[int64_t] indexer): +cdef ndarray[int64_t] _get_result_indexer( + ndarray[intp_t] sorter, ndarray[int64_t] indexer +): if len(sorter) > 0: # cython-only equivalent to # `res = algos.take_nd(sorter, indexer, fill_value=-1)` res = np.empty(len(indexer), dtype=np.int64) - take_1d_int64_int64(sorter, indexer, res, -1) + take_1d_int64_int64(ensure_int64(sorter), ensure_platform_int(indexer), res, -1) + # FIXME: sorter is intp_t, not int64_t, opposite for indexer; + # will this break on 32bit builds? else: # length-0 case res = np.empty(len(indexer), dtype=np.int64) diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py index c1abd8bbf39d0..ba1b2a0f0e76e 100644 --- a/pandas/core/array_algos/take.py +++ b/pandas/core/array_algos/take.py @@ -117,10 +117,10 @@ def _take_nd_ndarray( ) -> np.ndarray: if indexer is None: - indexer = np.arange(arr.shape[axis], dtype=np.int64) + indexer = np.arange(arr.shape[axis], dtype=np.intp) dtype, fill_value = arr.dtype, arr.dtype.type() else: - indexer = ensure_int64(indexer, copy=False) + indexer = ensure_platform_int(indexer) indexer, dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( arr, indexer, out, fill_value, allow_fill ) @@ -317,7 +317,7 @@ def _get_take_nd_function( if func is None: def func(arr, indexer, out, fill_value=np.nan): - indexer = ensure_int64(indexer) + indexer = ensure_platform_int(indexer) _take_nd_object( arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info ) @@ -468,7 +468,7 @@ def wrapper( def _take_nd_object( arr: np.ndarray, - indexer: np.ndarray, + indexer: np.ndarray, # 
np.ndarray[np.intp] out: np.ndarray, axis: int, fill_value, @@ -544,4 +544,5 @@ def _take_preprocess_indexer_and_fill_value( # to crash when trying to cast it to dtype) dtype, fill_value = arr.dtype, arr.dtype.type() + indexer = ensure_platform_int(indexer) return indexer, dtype, fill_value, mask_info diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 864bd0684d445..00667aae5c9ff 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1961,7 +1961,8 @@ def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]: Returns ------- - dict of categories -> indexers + Dict[Hashable, np.ndarray[np.intp]] + dict of categories -> indexers Examples -------- @@ -1979,7 +1980,7 @@ def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]: """ categories = self.categories r, counts = libalgos.groupsort_indexer( - self.codes.astype("int64"), categories.size + self.codes.astype("int64", copy=False), categories.size ) counts = counts.cumsum() _result = (r[start:end] for start, end in zip(counts, counts[1:])) diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 10c13327c79d3..3aa4d26f7dc8f 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -606,6 +606,7 @@ def get_group_index_sorter( ) if do_groupsort: sorter, _ = algos.groupsort_indexer(ensure_int64(group_index), ngroups) + # sorter _should_ already be intp, but mypy is not yet able to verify else: sorter = group_index.argsort(kind="mergesort") return ensure_platform_int(sorter) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index f685680515a8f..da438826a939a 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -1717,9 +1717,9 @@ def test_groupby_categorical_indices_unused_categories(): grouped = df.groupby("key", sort=False) result = grouped.indices expected = { - "b": np.array([0, 1], dtype="int64"), - "a": np.array([2], 
dtype="int64"), - "c": np.array([], dtype="int64"), + "b": np.array([0, 1], dtype="intp"), + "a": np.array([2], dtype="intp"), + "c": np.array([], dtype="intp"), } assert result.keys() == expected.keys() for key in result.keys(): diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 876df69ae7f63..c8df18ddaeebe 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -2123,19 +2123,19 @@ def test_groupsort_indexer(): # need to use a stable sort # np.argsort returns int, groupsort_indexer - # always returns int64 + # always returns intp expected = np.argsort(a, kind="mergesort") - expected = expected.astype(np.int64) + expected = expected.astype(np.intp) tm.assert_numpy_array_equal(result, expected) # compare with lexsort # np.lexsort returns int, groupsort_indexer - # always returns int64 + # always returns intp key = a * 1000 + b result = libalgos.groupsort_indexer(key, 1000000)[0] expected = np.lexsort((b, a)) - expected = expected.astype(np.int64) + expected = expected.astype(np.intp) tm.assert_numpy_array_equal(result, expected)
The places where this will need updating proliferate, so right now im just looking to see how the 32bit builds handle this.
https://api.github.com/repos/pandas-dev/pandas/pulls/40475
2021-03-16T22:18:14Z
2021-03-20T02:34:22Z
2021-03-20T02:34:22Z
2021-03-20T03:05:32Z
CLN: factorize returns ndarray[intp], not int64
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 1bbffaa7bb5d2..e402a4b7c0ccc 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -66,13 +66,18 @@ cdef class Factorizer: self.uniques = ObjectVector() self.count = 0 - def get_count(self): + def get_count(self) -> int: return self.count def factorize( self, ndarray[object] values, sort=False, na_sentinel=-1, na_value=None - ): + ) -> np.ndarray: """ + + Returns + ------- + np.ndarray[np.intp] + Examples -------- Factorize values with nans replaced by na_sentinel @@ -80,6 +85,9 @@ cdef class Factorizer: >>> factorize(np.array([1,2,np.nan], dtype='O'), na_sentinel=20) array([ 0, 1, 20]) """ + cdef: + ndarray[intp_t] labels + if self.uniques.external_view_exists: uniques = ObjectVector() uniques.extend(self.uniques.to_array()) @@ -89,8 +97,6 @@ cdef class Factorizer: mask = (labels == na_sentinel) # sort on if sort: - if labels.dtype != np.intp: - labels = labels.astype(np.intp) sorter = self.uniques.to_array().argsort() reverse_indexer = np.empty(len(sorter), dtype=np.intp) reverse_indexer.put(sorter, np.arange(len(sorter))) @@ -119,8 +125,12 @@ cdef class Int64Factorizer: return self.count def factorize(self, const int64_t[:] values, sort=False, - na_sentinel=-1, na_value=None): + na_sentinel=-1, na_value=None) -> np.ndarray: """ + Returns + ------- + ndarray[intp_t] + Examples -------- Factorize values with nans replaced by na_sentinel @@ -128,6 +138,9 @@ cdef class Int64Factorizer: >>> factorize(np.array([1,2,np.nan], dtype='O'), na_sentinel=20) array([ 0, 1, 20]) """ + cdef: + ndarray[intp_t] labels + if self.uniques.external_view_exists: uniques = Int64Vector() uniques.extend(self.uniques.to_array()) @@ -138,9 +151,6 @@ cdef class Int64Factorizer: # sort on if sort: - if labels.dtype != np.intp: - labels = labels.astype(np.intp) - sorter = self.uniques.to_array().argsort() reverse_indexer = np.empty(len(sorter), dtype=np.intp) reverse_indexer.put(sorter, 
np.arange(len(sorter))) diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index 0b6bb170cc531..6ace327ca3599 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -539,12 +539,12 @@ cdef class {{name}}HashTable(HashTable): ------- uniques : ndarray[{{dtype}}] Unique values of input, not sorted - labels : ndarray[int64] (if return_inverse=True) + labels : ndarray[intp_t] (if return_inverse=True) The labels from values to uniques """ cdef: Py_ssize_t i, idx, count = count_prior, n = len(values) - int64_t[:] labels + intp_t[:] labels int ret = 0 {{c_type}} val, na_value2 khiter_t k @@ -553,7 +553,7 @@ cdef class {{name}}HashTable(HashTable): uint8_t[:] mask_values if return_inverse: - labels = np.empty(n, dtype=np.int64) + labels = np.empty(n, dtype=np.intp) ud = uniques.data use_na_value = na_value is not None use_mask = mask is not None @@ -614,7 +614,7 @@ cdef class {{name}}HashTable(HashTable): labels[i] = idx if return_inverse: - return uniques.to_array(), np.asarray(labels) + return uniques.to_array(), labels.base # .base -> underlying ndarray return uniques.to_array() def unique(self, const {{dtype}}_t[:] values, bint return_inverse=False): @@ -633,7 +633,7 @@ cdef class {{name}}HashTable(HashTable): ------- uniques : ndarray[{{dtype}}] Unique values of input, not sorted - labels : ndarray[int64] (if return_inverse) + labels : ndarray[intp_t] (if return_inverse) The labels from values to uniques """ uniques = {{name}}Vector() @@ -668,7 +668,7 @@ cdef class {{name}}HashTable(HashTable): ------- uniques : ndarray[{{dtype}}] Unique values of input, not sorted - labels : ndarray[int64] + labels : ndarray[intp_t] The labels from values to uniques """ uniques_vector = {{name}}Vector() @@ -918,12 +918,12 @@ cdef class StringHashTable(HashTable): ------- uniques : ndarray[object] Unique values of input, not sorted - labels : ndarray[int64] (if return_inverse=True) + 
labels : ndarray[intp_t] (if return_inverse=True) The labels from values to uniques """ cdef: Py_ssize_t i, idx, count = count_prior, n = len(values) - int64_t[:] labels + intp_t[:] labels int64_t[:] uindexer int ret = 0 object val @@ -933,7 +933,7 @@ cdef class StringHashTable(HashTable): bint use_na_value if return_inverse: - labels = np.zeros(n, dtype=np.int64) + labels = np.zeros(n, dtype=np.intp) uindexer = np.empty(n, dtype=np.int64) use_na_value = na_value is not None @@ -972,13 +972,13 @@ cdef class StringHashTable(HashTable): uindexer[count] = i if return_inverse: self.table.vals[k] = count - labels[i] = <int64_t>count + labels[i] = count count += 1 elif return_inverse: # k falls into a previous bucket # only relevant in case we need to construct the inverse idx = self.table.vals[k] - labels[i] = <int64_t>idx + labels[i] = idx free(vecs) @@ -987,7 +987,7 @@ cdef class StringHashTable(HashTable): uniques.append(values[uindexer[i]]) if return_inverse: - return uniques.to_array(), np.asarray(labels) + return uniques.to_array(), labels.base # .base -> underlying ndarray return uniques.to_array() def unique(self, ndarray[object] values, bint return_inverse=False): @@ -1193,19 +1193,19 @@ cdef class PyObjectHashTable(HashTable): ------- uniques : ndarray[object] Unique values of input, not sorted - labels : ndarray[int64] (if return_inverse=True) + labels : ndarray[intp_t] (if return_inverse=True) The labels from values to uniques """ cdef: Py_ssize_t i, idx, count = count_prior, n = len(values) - int64_t[:] labels + intp_t[:] labels int ret = 0 object val khiter_t k bint use_na_value if return_inverse: - labels = np.empty(n, dtype=np.int64) + labels = np.empty(n, dtype=np.intp) use_na_value = na_value is not None for i in range(n): @@ -1240,7 +1240,7 @@ cdef class PyObjectHashTable(HashTable): labels[i] = idx if return_inverse: - return uniques.to_array(), np.asarray(labels) + return uniques.to_array(), labels.base # .base -> underlying ndarray return 
uniques.to_array() def unique(self, ndarray[object] values, bint return_inverse=False): @@ -1259,7 +1259,7 @@ cdef class PyObjectHashTable(HashTable): ------- uniques : ndarray[object] Unique values of input, not sorted - labels : ndarray[int64] (if return_inverse) + labels : ndarray[intp_t] (if return_inverse) The labels from values to uniques """ uniques = ObjectVector() @@ -1292,7 +1292,7 @@ cdef class PyObjectHashTable(HashTable): ------- uniques : ndarray[object] Unique values of input, not sorted - labels : ndarray[int64] + labels : ndarray[intp_t] The labels from values to uniques """ uniques_vector = ObjectVector() diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx index 1b79d68c13570..511b373bc7e1f 100644 --- a/pandas/_libs/join.pyx +++ b/pandas/_libs/join.pyx @@ -10,6 +10,7 @@ from numpy cimport ( int16_t, int32_t, int64_t, + intp_t, ndarray, uint8_t, uint16_t, @@ -20,6 +21,7 @@ from numpy cimport ( cnp.import_array() from pandas._libs.algos import ( + ensure_int64, ensure_platform_int, groupsort_indexer, take_1d_int64_int64, @@ -27,7 +29,7 @@ from pandas._libs.algos import ( @cython.boundscheck(False) -def inner_join(const int64_t[:] left, const int64_t[:] right, +def inner_join(const intp_t[:] left, const intp_t[:] right, Py_ssize_t max_groups): cdef: Py_ssize_t i, j, k, count = 0 @@ -39,8 +41,8 @@ def inner_join(const int64_t[:] left, const int64_t[:] right, # NA group in location 0 - left_sorter, left_count = groupsort_indexer(left, max_groups) - right_sorter, right_count = groupsort_indexer(right, max_groups) + left_sorter, left_count = groupsort_indexer(ensure_int64(left), max_groups) + right_sorter, right_count = groupsort_indexer(ensure_int64(right), max_groups) with nogil: # First pass, determine size of result set, do not use the NA group @@ -78,7 +80,7 @@ def inner_join(const int64_t[:] left, const int64_t[:] right, @cython.boundscheck(False) -def left_outer_join(const int64_t[:] left, const int64_t[:] right, +def left_outer_join(const 
intp_t[:] left, const intp_t[:] right, Py_ssize_t max_groups, bint sort=True): cdef: Py_ssize_t i, j, k, count = 0 @@ -91,8 +93,8 @@ def left_outer_join(const int64_t[:] left, const int64_t[:] right, # NA group in location 0 - left_sorter, left_count = groupsort_indexer(left, max_groups) - right_sorter, right_count = groupsort_indexer(right, max_groups) + left_sorter, left_count = groupsort_indexer(ensure_int64(left), max_groups) + right_sorter, right_count = groupsort_indexer(ensure_int64(right), max_groups) with nogil: # First pass, determine size of result set, do not use the NA group @@ -151,7 +153,7 @@ def left_outer_join(const int64_t[:] left, const int64_t[:] right, @cython.boundscheck(False) -def full_outer_join(const int64_t[:] left, const int64_t[:] right, +def full_outer_join(const intp_t[:] left, const intp_t[:] right, Py_ssize_t max_groups): cdef: Py_ssize_t i, j, k, count = 0 @@ -163,8 +165,8 @@ def full_outer_join(const int64_t[:] left, const int64_t[:] right, # NA group in location 0 - left_sorter, left_count = groupsort_indexer(left, max_groups) - right_sorter, right_count = groupsort_indexer(right, max_groups) + left_sorter, left_count = groupsort_indexer(ensure_int64(left), max_groups) + right_sorter, right_count = groupsort_indexer(ensure_int64(right), max_groups) with nogil: # First pass, determine size of result set, do not use the NA group diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index c01bf3931b27a..3c1279d62b126 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1973,7 +1973,7 @@ def _get_single_indexer(join_key, index, sort: bool = False): left_key, right_key, count = _factorize_keys(join_key, index, sort=sort) left_indexer, right_indexer = libjoin.left_outer_join( - ensure_int64(left_key), ensure_int64(right_key), count, sort=sort + left_key, right_key, count, sort=sort ) return left_indexer, right_indexer @@ -2029,9 +2029,9 @@ def _factorize_keys( Returns ------- - array + 
np.ndarray[np.intp] Left (resp. right if called with `key='right'`) labels, as enumerated type. - array + np.ndarray[np.intp] Right (resp. left if called with `key='right'`) labels, as enumerated type. int Number of unique elements in union of left and right labels. @@ -2117,6 +2117,8 @@ def _factorize_keys( llab = rizer.factorize(lk) rlab = rizer.factorize(rk) + assert llab.dtype == np.intp, llab.dtype + assert rlab.dtype == np.intp, rlab.dtype count = rizer.get_count() @@ -2142,13 +2144,16 @@ def _factorize_keys( return llab, rlab, count -def _sort_labels(uniques: np.ndarray, left, right): +def _sort_labels( + uniques: np.ndarray, left: np.ndarray, right: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: + # Both returned ndarrays are np.intp llength = len(left) labels = np.concatenate([left, right]) _, new_labels = algos.safe_sort(uniques, labels, na_sentinel=-1) - new_labels = ensure_int64(new_labels) + assert new_labels.dtype == np.intp new_left, new_right = new_labels[:llength], new_labels[llength:] return new_left, new_right diff --git a/pandas/tests/libs/test_join.py b/pandas/tests/libs/test_join.py index 0bdb7b0e71e2d..f5426c71511bb 100644 --- a/pandas/tests/libs/test_join.py +++ b/pandas/tests/libs/test_join.py @@ -46,8 +46,8 @@ def test_outer_join_indexer(self, dtype): tm.assert_numpy_array_equal(rindexer, exp) def test_cython_left_outer_join(self): - left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64) - right = np.array([1, 1, 0, 4, 2, 2, 1], dtype=np.int64) + left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.intp) + right = np.array([1, 1, 0, 4, 2, 2, 1], dtype=np.intp) max_group = 5 ls, rs = left_outer_join(left, right, max_group) @@ -70,8 +70,8 @@ def test_cython_left_outer_join(self): tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False) def test_cython_right_outer_join(self): - left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64) - right = np.array([1, 1, 0, 4, 2, 2, 1], dtype=np.int64) + left = 
np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.intp) + right = np.array([1, 1, 0, 4, 2, 2, 1], dtype=np.intp) max_group = 5 rs, ls = left_outer_join(right, left, max_group) @@ -116,8 +116,8 @@ def test_cython_right_outer_join(self): tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False) def test_cython_inner_join(self): - left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64) - right = np.array([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64) + left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.intp) + right = np.array([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.intp) max_group = 5 ls, rs = inner_join(left, right, max_group) @@ -256,10 +256,10 @@ def test_left_outer_join_bug(): 0, 2, ], - dtype=np.int64, + dtype=np.intp, ) - right = np.array([3, 1], dtype=np.int64) + right = np.array([3, 1], dtype=np.intp) max_groups = 4 lidx, ridx = libjoin.left_outer_join(left, right, max_groups, sort=False)
https://api.github.com/repos/pandas-dev/pandas/pulls/40474
2021-03-16T21:55:59Z
2021-03-20T01:10:03Z
2021-03-20T01:10:02Z
2021-03-20T01:36:48Z
TYP: tslib.pyi
diff --git a/pandas/_libs/tslib.pyi b/pandas/_libs/tslib.pyi new file mode 100644 index 0000000000000..641e62e7c8973 --- /dev/null +++ b/pandas/_libs/tslib.pyi @@ -0,0 +1,29 @@ +from datetime import tzinfo + +import numpy as np + +def format_array_from_datetime( + values: np.ndarray, # np.ndarray[np.int64] + tz: tzinfo | None = ..., + format: str | None = ..., + na_rep: object = ... +) -> np.ndarray: ... # np.ndarray[object] + + +def array_with_unit_to_datetime( + values: np.ndarray, + unit: str, + errors: str = ..., +) -> tuple[np.ndarray, tzinfo | None]: ... + + +def array_to_datetime( + values: np.ndarray, # np.ndarray[object] + errors: str = ..., + dayfirst: bool = ..., + yearfirst: bool = ..., + utc: bool = ..., + require_iso8601: bool = ..., + allow_mixed: bool = ..., +) -> tuple[np.ndarray, tzinfo | None]: ... +# returned ndarray may be object dtype or datetime64[ns] diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 337e131f0a2c9..02c64fac0c009 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -100,7 +100,7 @@ def format_array_from_datetime( tzinfo tz=None, str format=None, object na_rep=None -): +) -> np.ndarray: """ return a np object array of the string formatted values @@ -113,6 +113,9 @@ def format_array_from_datetime( na_rep : optional, default is None a nat format + Returns + ------- + np.ndarray[object] """ cdef: int64_t val, ns, N = len(values) @@ -200,7 +203,7 @@ def array_with_unit_to_datetime( Parameters ---------- - values : ndarray of object + values : ndarray Date-like objects to convert. unit : str Time unit to use during conversion. 
@@ -411,7 +414,9 @@ cpdef array_to_datetime( Returns ------- - tuple (ndarray, tzoffset) + np.ndarray + May be datetime64[ns] or object dtype + tzinfo or None """ cdef: Py_ssize_t i, n = len(values) @@ -635,7 +640,7 @@ cpdef array_to_datetime( return result, tz_out -cdef ignore_errors_out_of_bounds_fallback(ndarray[object] values): +cdef ndarray[object] ignore_errors_out_of_bounds_fallback(ndarray[object] values): """ Fallback for array_to_datetime if an OutOfBoundsDatetime is raised and errors == "ignore" @@ -689,7 +694,7 @@ cdef _array_to_datetime_object( Parameters ---------- - values : ndarray of object + values : ndarray[object] date-like objects to convert errors : str error behavior when parsing @@ -700,7 +705,8 @@ cdef _array_to_datetime_object( Returns ------- - tuple (ndarray, None) + np.ndarray[object] + Literal[None] """ cdef: Py_ssize_t i, n = len(values) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 4a032c60d386d..c0a8c20832fa8 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -2154,7 +2154,7 @@ def objects_to_datetime64ns( data = np.array(data, copy=False, dtype=np.object_) flags = data.flags - order = "F" if flags.f_contiguous else "C" + order: Literal["F", "C"] = "F" if flags.f_contiguous else "C" try: result, tz_parsed = tslib.array_to_datetime( data.ravel("K"), diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index c1c14ade320a3..2fd91c07ff4ac 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -307,9 +307,9 @@ def _convert_listlike_datetimes( None or string for the Index name tz : object None or 'utc' - unit : string + unit : str None or string of the frequency of the passed data - errors : string + errors : str error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore' infer_datetime_format : bool, default False inferring format behavior from to_datetime @@ -529,7 +529,7 @@ def 
_to_datetime_with_format( return result # type: ignore[return-value] -def _to_datetime_with_unit(arg, unit, name, tz, errors: Optional[str]) -> Index: +def _to_datetime_with_unit(arg, unit, name, tz, errors: str) -> Index: """ to_datetime specalized to the case where a 'unit' is passed. """ @@ -1035,7 +1035,7 @@ def coerce(values): return values -def _attempt_YYYYMMDD(arg: np.ndarray, errors: Optional[str]) -> Optional[np.ndarray]: +def _attempt_YYYYMMDD(arg: np.ndarray, errors: str) -> Optional[np.ndarray]: """ try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like, arg is a passed in as an object dtype, but could really be ints/strings diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py index 79a60c14f5eac..63b0bd235e57c 100644 --- a/pandas/tests/indexes/multi/test_constructors.py +++ b/pandas/tests/indexes/multi/test_constructors.py @@ -7,8 +7,6 @@ import numpy as np import pytest -from pandas._libs.tslib import Timestamp - from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike import pandas as pd @@ -16,6 +14,7 @@ Index, MultiIndex, Series, + Timestamp, date_range, ) import pandas._testing as tm diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 53d3187ee5664..1e9348dc410d7 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -10,7 +10,6 @@ import numpy as np import pytest -from pandas._libs.tslib import Timestamp from pandas.compat import ( IS64, np_datetime64_compat, @@ -29,6 +28,7 @@ RangeIndex, Series, TimedeltaIndex, + Timestamp, UInt64Index, date_range, isna, diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index b6c565bdeace5..c0337d1ad3ffe 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -14,7 +14,6 @@ import pytz import pandas._libs.json as ujson -from pandas._libs.tslib import Timestamp from pandas.compat 
import ( IS64, is_platform_windows, @@ -28,6 +27,7 @@ NaT, Series, Timedelta, + Timestamp, date_range, ) import pandas._testing as tm diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index 297cbbe66cb6b..572bc09c96886 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -11,7 +11,6 @@ import numpy as np import pytest -from pandas._libs.tslib import Timestamp from pandas.errors import ( EmptyDataError, ParserError, @@ -21,6 +20,7 @@ DataFrame, Index, Series, + Timestamp, compat, ) import pandas._testing as tm diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 257dce7d28972..c7b5efa5bf0c9 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -19,7 +19,6 @@ import pytest import pytz -from pandas._libs.tslib import Timestamp from pandas._libs.tslibs import parsing from pandas._libs.tslibs.parsing import parse_datetime_string from pandas.compat import ( @@ -34,6 +33,7 @@ Index, MultiIndex, Series, + Timestamp, ) import pandas._testing as tm from pandas.core.indexes.datetimes import date_range diff --git a/pandas/tests/io/parser/usecols/test_parse_dates.py b/pandas/tests/io/parser/usecols/test_parse_dates.py index 7f813b8733061..44ea3866dd793 100644 --- a/pandas/tests/io/parser/usecols/test_parse_dates.py +++ b/pandas/tests/io/parser/usecols/test_parse_dates.py @@ -6,11 +6,10 @@ import pytest -from pandas._libs.tslib import Timestamp - from pandas import ( DataFrame, Index, + Timestamp, ) import pandas._testing as tm diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 91f6c100419b6..cefbea529e366 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -1157,7 +1157,7 @@ def test_unit(self, cache): tm.assert_index_equal(result, expected) 
msg = "cannot convert input 11111111 with the unit 'D'" - with pytest.raises(tslib.OutOfBoundsDatetime, match=msg): + with pytest.raises(OutOfBoundsDatetime, match=msg): to_datetime(values, unit="D", errors="raise", cache=cache) values = [1420043460000, iNaT, NaT, np.nan, "NaT"] @@ -1171,7 +1171,7 @@ def test_unit(self, cache): tm.assert_index_equal(result, expected) msg = "cannot convert input 1420043460000 with the unit 's'" - with pytest.raises(tslib.OutOfBoundsDatetime, match=msg): + with pytest.raises(OutOfBoundsDatetime, match=msg): to_datetime(values, errors="raise", unit="s", cache=cache) # if we have a string, then we raise a ValueError @@ -1179,7 +1179,7 @@ def test_unit(self, cache): for val in ["foo", Timestamp("20130101")]: try: to_datetime(val, errors="raise", unit="s", cache=cache) - except tslib.OutOfBoundsDatetime as err: + except OutOfBoundsDatetime as err: raise AssertionError("incorrect exception raised") from err except ValueError: pass @@ -2347,7 +2347,7 @@ def test_epoch(self, units, epochs, epoch_1960, units_from_epochs): ("random_string", ValueError), ("epoch", ValueError), ("13-24-1990", ValueError), - (datetime(1, 1, 1), tslib.OutOfBoundsDatetime), + (datetime(1, 1, 1), OutOfBoundsDatetime), ], ) def test_invalid_origins(self, origin, exc, units, units_from_epochs):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40473
2021-03-16T20:37:52Z
2021-03-18T09:01:29Z
2021-03-18T09:01:29Z
2021-03-18T14:12:59Z
compat: sqlalchemy deprecations
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index fb08abb6fea45..e3347468828d1 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -9,6 +9,7 @@ datetime, time, ) +from distutils.version import LooseVersion from functools import partial import re from typing import ( @@ -77,6 +78,16 @@ def _is_sqlalchemy_connectable(con): return False +def _gt14() -> bool: + """ + Check if sqlalchemy.__version__ is at least 1.4.0, when several + deprecations were made. + """ + import sqlalchemy + + return LooseVersion(sqlalchemy.__version__) >= LooseVersion("1.4.0") + + def _convert_params(sql, params): """Convert SQL and params args to DBAPI2.0 compliant format.""" args = [sql] @@ -823,7 +834,10 @@ def sql_schema(self): def _execute_create(self): # Inserting table into database, add to MetaData object - self.table = self.table.tometadata(self.pd_sql.meta) + if _gt14(): + self.table = self.table.to_metadata(self.pd_sql.meta) + else: + self.table = self.table.tometadata(self.pd_sql.meta) self.table.create() def create(self): @@ -1596,9 +1610,17 @@ def to_sql( # Only check when name is not a number and name is not lower case engine = self.connectable.engine with self.connectable.connect() as conn: - table_names = engine.table_names( - schema=schema or self.meta.schema, connection=conn - ) + if _gt14(): + from sqlalchemy import inspect + + insp = inspect(conn) + table_names = insp.get_table_names( + schema=schema or self.meta.schema + ) + else: + table_names = engine.table_names( + schema=schema or self.meta.schema, connection=conn + ) if name not in table_names: msg = ( f"The provided table name '{name}' is not found exactly as " @@ -1613,9 +1635,15 @@ def tables(self): return self.meta.tables def has_table(self, name: str, schema: Optional[str] = None): - return self.connectable.run_callable( - self.connectable.dialect.has_table, name, schema or self.meta.schema - ) + if _gt14(): + import sqlalchemy as sa + + insp = sa.inspect(self.connectable) + return insp.has_table(name, 
schema or self.meta.schema) + else: + return self.connectable.run_callable( + self.connectable.dialect.has_table, name, schema or self.meta.schema + ) def get_table(self, table_name: str, schema: Optional[str] = None): schema = schema or self.meta.schema diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index e57030a4bf125..7d923e57834ea 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -52,12 +52,14 @@ import pandas.io.sql as sql from pandas.io.sql import ( + _gt14, read_sql_query, read_sql_table, ) try: import sqlalchemy + from sqlalchemy import inspect from sqlalchemy.ext import declarative from sqlalchemy.orm import session as sa_session import sqlalchemy.schema @@ -1487,7 +1489,11 @@ def test_create_table(self): pandasSQL = sql.SQLDatabase(temp_conn) pandasSQL.to_sql(temp_frame, "temp_frame") - assert temp_conn.has_table("temp_frame") + if _gt14(): + insp = inspect(temp_conn) + assert insp.has_table("temp_frame") + else: + assert temp_conn.has_table("temp_frame") def test_drop_table(self): temp_conn = self.connect() @@ -1499,11 +1505,18 @@ def test_drop_table(self): pandasSQL = sql.SQLDatabase(temp_conn) pandasSQL.to_sql(temp_frame, "temp_frame") - assert temp_conn.has_table("temp_frame") + if _gt14(): + insp = inspect(temp_conn) + assert insp.has_table("temp_frame") + else: + assert temp_conn.has_table("temp_frame") pandasSQL.drop_table("temp_frame") - assert not temp_conn.has_table("temp_frame") + if _gt14(): + assert not insp.has_table("temp_frame") + else: + assert not temp_conn.has_table("temp_frame") def test_roundtrip(self): self._roundtrip() @@ -1843,9 +1856,10 @@ def test_nan_string(self): tm.assert_frame_equal(result, df) def _get_index_columns(self, tbl_name): - from sqlalchemy.engine import reflection + from sqlalchemy import inspect + + insp = inspect(self.conn) - insp = reflection.Inspector.from_engine(self.conn) ixs = insp.get_indexes(tbl_name) ixs = [i["column_names"] for i in ixs] return ixs
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40471
2021-03-16T18:50:39Z
2021-03-16T20:30:53Z
2021-03-16T20:30:53Z
2021-03-17T11:58:13Z
CI: pre-commit fixups
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index f86d8755acb22..f3889ff360aa8 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -42,7 +42,6 @@ from pandas.core.dtypes.common import ( DT64NS_DTYPE, TD64NS_DTYPE, - is_categorical_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, @@ -53,7 +52,10 @@ pandas_dtype, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype -from pandas.core.dtypes.generic import ABCMultiIndex +from pandas.core.dtypes.generic import ( + ABCCategorical, + ABCMultiIndex, +) from pandas.core.dtypes.missing import isna from pandas.core import nanops @@ -970,7 +972,7 @@ def sequence_to_td64ns( elif not isinstance(data, (np.ndarray, ExtensionArray)): # GH#24539 e.g. xarray, dask object data = np.asarray(data) - elif is_categorical_dtype(data.dtype): + elif isinstance(data, ABCCategorical): data = data.categories.take(data.codes, fill_value=NaT)._values copy = False diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 1ee366d452f5a..1a7e2d1d820f7 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -1372,9 +1372,9 @@ def array_likes(request): data = memoryview(arr) elif name == "array": # stdlib array - from array import array + from array import array as array_stdlib - data = array("i", arr) + data = array_stdlib("i", arr) elif name == "dask": import dask.array
Fixes 2/3 of the failures on master that im aware of. sqlalchemy is the one left out.
https://api.github.com/repos/pandas-dev/pandas/pulls/40468
2021-03-16T17:01:16Z
2021-03-16T20:30:09Z
2021-03-16T20:30:09Z
2021-03-16T20:45:01Z
TYP: stronger typing in libindex
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index f6f36f6ad523b..9159fa03c12c0 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -259,11 +259,11 @@ cdef class IndexEngine: self.monotonic_inc = 0 self.monotonic_dec = 0 - def get_indexer(self, values): + def get_indexer(self, ndarray values): self._ensure_mapping_populated() return self.mapping.lookup(values) - def get_indexer_non_unique(self, targets): + def get_indexer_non_unique(self, ndarray targets): """ Return an indexer suitable for taking from a non unique index return the labels in the same order as the target @@ -451,11 +451,11 @@ cdef class DatetimeEngine(Int64Engine): except KeyError: raise KeyError(val) - def get_indexer_non_unique(self, targets): + def get_indexer_non_unique(self, ndarray targets): # we may get datetime64[ns] or timedelta64[ns], cast these to int64 return super().get_indexer_non_unique(targets.view("i8")) - def get_indexer(self, values): + def get_indexer(self, ndarray values): self._ensure_mapping_populated() if values.dtype != self._get_box_dtype(): return np.repeat(-1, len(values)).astype('i4') @@ -594,7 +594,7 @@ cdef class BaseMultiIndexCodesEngine: in zip(self.levels, zip(*target))] return self._codes_to_ints(np.array(level_codes, dtype='uint64').T) - def get_indexer_no_fill(self, object target) -> np.ndarray: + def get_indexer(self, ndarray[object] target) -> np.ndarray: """ Returns an array giving the positions of each value of `target` in `self.values`, where -1 represents a value in `target` which does not @@ -602,7 +602,7 @@ cdef class BaseMultiIndexCodesEngine: Parameters ---------- - target : list-like of keys + target : ndarray[object] Each key is a tuple, with a label for each level of the index Returns @@ -613,8 +613,8 @@ cdef class BaseMultiIndexCodesEngine: lab_ints = self._extract_level_codes(target) return self._base.get_indexer(self, lab_ints) - def get_indexer(self, object target, object values = None, - object method = None, 
object limit = None) -> np.ndarray: + def get_indexer_with_fill(self, ndarray target, ndarray values, + str method, object limit) -> np.ndarray: """ Returns an array giving the positions of each value of `target` in `values`, where -1 represents a value in `target` which does not @@ -630,15 +630,15 @@ cdef class BaseMultiIndexCodesEngine: Parameters ---------- - target: list-like of tuples + target: ndarray[object] of tuples need not be sorted, but all must have the same length, which must be the same as the length of all tuples in `values` - values : list-like of tuples + values : ndarray[object] of tuples must be sorted and all have the same length. Should be the set of the MultiIndex's values. Needed only if `method` is not None method: string "backfill" or "pad" - limit: int, optional + limit: int or None if provided, limit the number of fills to this value Returns @@ -646,9 +646,6 @@ cdef class BaseMultiIndexCodesEngine: np.ndarray[int64_t, ndim=1] of the indexer of `target` into `values`, filled with the `method` (and optionally `limit`) specified """ - if method is None: - return self.get_indexer_no_fill(target) - assert method in ("backfill", "pad") cdef: int64_t i, j, next_code @@ -658,8 +655,8 @@ cdef class BaseMultiIndexCodesEngine: ndarray[int64_t, ndim=1] new_codes, new_target_codes ndarray[int64_t, ndim=1] sorted_indexer - target_order = np.argsort(target.values).astype('int64') - target_values = target.values[target_order] + target_order = np.argsort(target).astype('int64') + target_values = target[target_order] num_values, num_target_values = len(values), len(target_values) new_codes, new_target_codes = ( np.empty((num_values,)).astype('int64'), @@ -718,7 +715,7 @@ cdef class BaseMultiIndexCodesEngine: return self._base.get_loc(self, lab_int) - def get_indexer_non_unique(self, object target): + def get_indexer_non_unique(self, ndarray target): # This needs to be overridden just because the default one works on # target._values, and target can be 
itself a MultiIndex. diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 12b343ab5d895..3a468758ab3fd 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3378,7 +3378,11 @@ def get_loc(self, key, method=None, tolerance=None): @Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs) @final def get_indexer( - self, target, method=None, limit=None, tolerance=None + self, + target, + method: Optional[str_t] = None, + limit: Optional[int] = None, + tolerance=None, ) -> np.ndarray: method = missing.clean_reindex_fill_method(method) @@ -3403,7 +3407,11 @@ def get_indexer( return self._get_indexer(target, method, limit, tolerance) def _get_indexer( - self, target: Index, method=None, limit=None, tolerance=None + self, + target: Index, + method: Optional[str_t] = None, + limit: Optional[int] = None, + tolerance=None, ) -> np.ndarray: if tolerance is not None: tolerance = self._convert_tolerance(tolerance, target) @@ -3467,7 +3475,7 @@ def _convert_tolerance( @final def _get_fill_indexer( - self, target: Index, method: str_t, limit=None, tolerance=None + self, target: Index, method: str_t, limit: Optional[int] = None, tolerance=None ) -> np.ndarray: target_values = target._get_engine_target() @@ -3487,7 +3495,7 @@ def _get_fill_indexer( @final def _get_fill_indexer_searchsorted( - self, target: Index, method: str_t, limit=None + self, target: Index, method: str_t, limit: Optional[int] = None ) -> np.ndarray: """ Fallback pad/backfill get_indexer that works for monotonic decreasing @@ -3520,7 +3528,9 @@ def _get_fill_indexer_searchsorted( return indexer @final - def _get_nearest_indexer(self, target: Index, limit, tolerance) -> np.ndarray: + def _get_nearest_indexer( + self, target: Index, limit: Optional[int], tolerance + ) -> np.ndarray: """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or diff --git 
a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 39f8e0b6e431e..f372db5287604 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -492,7 +492,11 @@ def _maybe_cast_indexer(self, key) -> int: return self._data._unbox_scalar(key) def _get_indexer( - self, target: Index, method=None, limit=None, tolerance=None + self, + target: Index, + method: Optional[str] = None, + limit: Optional[int] = None, + tolerance=None, ) -> np.ndarray: if self.equals(target): diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 0d89e75c097c1..97492f35232e3 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2676,7 +2676,11 @@ def _get_partial_string_timestamp_match_key(self, key): return key def _get_indexer( - self, target: Index, method=None, limit=None, tolerance=None + self, + target: Index, + method: Optional[str] = None, + limit: Optional[int] = None, + tolerance=None, ) -> np.ndarray: # empty indexer @@ -2699,8 +2703,8 @@ def _get_indexer( raise NotImplementedError( "tolerance not implemented yet for MultiIndex" ) - indexer = self._engine.get_indexer( - values=self._values, target=target, method=method, limit=limit + indexer = self._engine.get_indexer_with_fill( + target=target._values, values=self._values, method=method, limit=limit ) elif method == "nearest": raise NotImplementedError( @@ -2708,7 +2712,7 @@ def _get_indexer( "for MultiIndex; see GitHub issue 9365" ) else: - indexer = self._engine.get_indexer(target) + indexer = self._engine.get_indexer(target._values) return ensure_platform_int(indexer) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 010568af4982c..456d87766bdb7 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -396,7 +396,13 @@ def get_loc(self, key, method=None, tolerance=None): raise KeyError(key) return super().get_loc(key, method=method, tolerance=tolerance) - def _get_indexer(self, 
target: Index, method=None, limit=None, tolerance=None): + def _get_indexer( + self, + target: Index, + method: Optional[str] = None, + limit: Optional[int] = None, + tolerance=None, + ): if com.any_not_none(method, tolerance, limit): return super()._get_indexer( target, method=method, tolerance=tolerance, limit=limit
and more consistent naming/args for MultiIndex engine. Preliminary before making index.pyi
https://api.github.com/repos/pandas-dev/pandas/pulls/40465
2021-03-16T14:43:33Z
2021-03-17T10:39:10Z
2021-03-17T10:39:10Z
2021-03-17T14:08:53Z
BUG/TST: run and fix all arithmetic tests with+without numexpr
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 387d8b463b8b4..67b68ce7365cc 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -2,7 +2,7 @@ Functions for arithmetic and comparison operations on NumPy arrays and ExtensionArrays. """ -from datetime import timedelta +import datetime from functools import partial import operator from typing import Any @@ -10,11 +10,13 @@ import numpy as np from pandas._libs import ( + NaT, Timedelta, Timestamp, lib, ops as libops, ) +from pandas._libs.tslibs import BaseOffset from pandas._typing import ( ArrayLike, Shape, @@ -154,8 +156,14 @@ def _na_arithmetic_op(left, right, op, is_cmp: bool = False): ------ TypeError : invalid operation """ + if isinstance(right, str): + # can never use numexpr + func = op + else: + func = partial(expressions.evaluate, op) + try: - result = expressions.evaluate(op, left, right) + result = func(left, right) except TypeError: if is_object_dtype(left) or is_object_dtype(right) and not is_cmp: # For object dtype, fallback to a masked operation (only operating @@ -201,8 +209,13 @@ def arithmetic_op(left: ArrayLike, right: Any, op): # casts integer dtypes to timedelta64 when operating with timedelta64 - GH#22390) right = _maybe_upcast_for_op(right, left.shape) - if should_extension_dispatch(left, right) or isinstance(right, Timedelta): - # Timedelta is included because numexpr will fail on it, see GH#31457 + if ( + should_extension_dispatch(left, right) + or isinstance(right, (Timedelta, BaseOffset, Timestamp)) + or right is NaT + ): + # Timedelta/Timestamp and other custom scalars are included in the check + # because numexpr will fail on it, see GH#31457 res_values = op(left, right) else: res_values = _na_arithmetic_op(left, right, op) @@ -246,7 +259,10 @@ def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike: "Lengths must match to compare", lvalues.shape, rvalues.shape ) - if should_extension_dispatch(lvalues, rvalues): + if 
should_extension_dispatch(lvalues, rvalues) or ( + (isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT) + and not is_object_dtype(lvalues.dtype) + ): # Call the method on lvalues res_values = op(lvalues, rvalues) @@ -261,7 +277,7 @@ def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike: # GH#36377 going through the numexpr path would incorrectly raise return invalid_comparison(lvalues, rvalues, op) - elif is_object_dtype(lvalues.dtype): + elif is_object_dtype(lvalues.dtype) or isinstance(rvalues, str): res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues) else: @@ -438,11 +454,14 @@ def _maybe_upcast_for_op(obj, shape: Shape): Be careful to call this *after* determining the `name` attribute to be attached to the result of the arithmetic operation. """ - if type(obj) is timedelta: + if type(obj) is datetime.timedelta: # GH#22390 cast up to Timedelta to rely on Timedelta # implementation; otherwise operation against numeric-dtype # raises TypeError return Timedelta(obj) + elif type(obj) is datetime.datetime: + # cast up to Timestamp to rely on Timestamp implementation, see Timedelta above + return Timestamp(obj) elif isinstance(obj, np.datetime64): # GH#28080 numpy casts integer-dtype to datetime64 when doing # array[int] + datetime64, which we do not allow diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py index d90592c68e351..1e97db152c294 100644 --- a/pandas/tests/arithmetic/conftest.py +++ b/pandas/tests/arithmetic/conftest.py @@ -9,6 +9,18 @@ UInt64Index, ) import pandas._testing as tm +from pandas.core.computation import expressions as expr + + +@pytest.fixture( + autouse=True, scope="module", params=[0, 1000000], ids=["numexpr", "python"] +) +def switch_numexpr_min_elements(request): + _MIN_ELEMENTS = expr._MIN_ELEMENTS + expr._MIN_ELEMENTS = request.param + yield request.param + expr._MIN_ELEMENTS = _MIN_ELEMENTS + # ------------------------------------------------------------------ # 
Helper Functions diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index bdd954c1e2222..9e1d13eac5039 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -27,6 +27,7 @@ ) import pandas._testing as tm from pandas.core import ops +from pandas.core.computation import expressions as expr @pytest.fixture(params=[Index, Series, tm.to_array]) @@ -391,7 +392,7 @@ def test_div_negative_zero(self, zero, numeric_idx, op): # ------------------------------------------------------------------ @pytest.mark.parametrize("dtype1", [np.int64, np.float64, np.uint64]) - def test_ser_div_ser(self, dtype1, any_real_dtype): + def test_ser_div_ser(self, switch_numexpr_min_elements, dtype1, any_real_dtype): # no longer do integer div for any ops, but deal with the 0's dtype2 = any_real_dtype @@ -405,6 +406,11 @@ def test_ser_div_ser(self, dtype1, any_real_dtype): name=None, ) expected.iloc[0:3] = np.inf + if first.dtype == "int64" and second.dtype == "float32": + # when using numexpr, the casting rules are slightly different + # and int64/float32 combo results in float32 instead of float64 + if expr.USE_NUMEXPR and switch_numexpr_min_elements == 0: + expected = expected.astype("float32") result = first / second tm.assert_series_equal(result, expected) @@ -890,7 +896,13 @@ def test_series_frame_radd_bug(self): # really raise this time now = pd.Timestamp.now().to_pydatetime() - msg = "unsupported operand type" + msg = "|".join( + [ + "unsupported operand type", + # wrong error message, see https://github.com/numpy/numpy/issues/18832 + "Concatenation operation", + ] + ) with pytest.raises(TypeError, match=msg): now + ts diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index c6816fa6481f4..4c31d15541412 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -174,9 +174,19 @@ def test_timestamp_compare(self): with 
pytest.raises(TypeError, match=msg): right_f(pd.Timestamp("20010109"), df) # nats - expected = left_f(df, pd.Timestamp("nat")) - result = right_f(pd.Timestamp("nat"), df) - tm.assert_frame_equal(result, expected) + if left in ["eq", "ne"]: + expected = left_f(df, pd.Timestamp("nat")) + result = right_f(pd.Timestamp("nat"), df) + tm.assert_frame_equal(result, expected) + else: + msg = ( + "'(<|>)=?' not supported between " + "instances of 'numpy.ndarray' and 'NaTType'" + ) + with pytest.raises(TypeError, match=msg): + left_f(df, pd.Timestamp("nat")) + with pytest.raises(TypeError, match=msg): + right_f(pd.Timestamp("nat"), df) def test_mixed_comparison(self): # GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
Closes #40462 This PR adds an auto-used fixture for `pandas/tests/arithmetic` that sets the `expressions._MIN_ELEMENTS` to 0, to force taking the numexpr path, even if our test data are small (which would otherwise never exercise the numexpr code path) Further, it makes some fixes to get the tests passing with this better coverage. The fixes are a bit quick-and-dirty, but at least already show what is needed to get things working.
https://api.github.com/repos/pandas-dev/pandas/pulls/40463
2021-03-16T10:40:58Z
2021-04-26T21:57:35Z
2021-04-26T21:57:35Z
2021-04-27T06:38:45Z
TST: csv parser segfaulting on malformed input.
diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index b4ac419fba30c..297cbbe66cb6b 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -753,3 +753,12 @@ def test_encoding_surrogatepass(all_parsers): tm.assert_frame_equal(df, expected) with pytest.raises(UnicodeDecodeError, match="'utf-8' codec can't decode byte"): parser.read_csv(path) + + +def test_malformed_second_line(all_parsers): + # see GH14782 + parser = all_parsers + data = "\na\nb\n" + result = parser.read_csv(StringIO(data), skip_blank_lines=False, header=1) + expected = DataFrame({"a": ["b"]}) + tm.assert_frame_equal(result, expected)
xref #14782 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40458
2021-03-16T03:40:50Z
2021-03-16T20:31:41Z
2021-03-16T20:31:41Z
2021-03-16T21:45:30Z
CI: fix failing script/tests
diff --git a/Makefile b/Makefile index f47c50032f83c..1fdd3cfdcf027 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY : develop build clean clean_pyc doc lint-diff black +.PHONY : develop build clean clean_pyc doc lint-diff black test-scripts all: develop @@ -25,3 +25,6 @@ doc: cd doc; \ python make.py clean; \ python make.py html + +test-scripts: + pytest scripts diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/scripts/tests/test_inconsistent_namespace_check.py b/scripts/tests/test_inconsistent_namespace_check.py index cc3509af5b138..9562a30ba0ffd 100644 --- a/scripts/tests/test_inconsistent_namespace_check.py +++ b/scripts/tests/test_inconsistent_namespace_check.py @@ -1,6 +1,6 @@ import pytest -from scripts.check_for_inconsistent_pandas_namespace import ( +from ..check_for_inconsistent_pandas_namespace import ( check_for_inconsistent_pandas_namespace, ) diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index 74819db7b878c..7e4c68ddc183b 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -2,7 +2,8 @@ import textwrap import pytest -import validate_docstrings + +from .. 
import validate_docstrings class BadDocstrings: @@ -162,7 +163,9 @@ def test_bad_class(self, capsys): ( "BadDocstrings", "indentation_is_not_a_multiple_of_four", - ("flake8 error: E111 indentation is not a multiple of four",), + # with flake8 3.9.0, the message ends with four spaces, + # whereas in earlier versions, it ended with "four" + ("flake8 error: E111 indentation is not a multiple of ",), ), ( "BadDocstrings", diff --git a/scripts/tests/test_validate_unwanted_patterns.py b/scripts/tests/test_validate_unwanted_patterns.py index 947666a730ee9..ef93fd1d21981 100644 --- a/scripts/tests/test_validate_unwanted_patterns.py +++ b/scripts/tests/test_validate_unwanted_patterns.py @@ -1,7 +1,8 @@ import io import pytest -import validate_unwanted_patterns + +from .. import validate_unwanted_patterns class TestBarePytestRaises:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40457
2021-03-16T02:21:25Z
2021-03-16T15:32:18Z
2021-03-16T15:32:18Z
2021-03-25T11:22:27Z
REF: back DatetimeBlock, TimedeltaBlock by DTA/TDA
diff --git a/pandas/core/array_algos/quantile.py b/pandas/core/array_algos/quantile.py index 5f9e67a484d24..efa36a5bd3ae9 100644 --- a/pandas/core/array_algos/quantile.py +++ b/pandas/core/array_algos/quantile.py @@ -133,8 +133,14 @@ def _quantile_ea_compat( if not is_sparse(orig.dtype): # shape[0] should be 1 as long as EAs are 1D - assert result.shape == (1, len(qs)), result.shape - result = type(orig)._from_factorized(result[0], orig) + + if orig.ndim == 2: + # i.e. DatetimeArray + result = type(orig)._from_factorized(result, orig) + + else: + assert result.shape == (1, len(qs)), result.shape + result = type(orig)._from_factorized(result[0], orig) # error: Incompatible return value type (got "ndarray", expected "ExtensionArray") return result # type: ignore[return-value] diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py index 6dfdc99f4fd9c..8318a02d5d214 100644 --- a/pandas/core/array_algos/take.py +++ b/pandas/core/array_algos/take.py @@ -3,6 +3,7 @@ import functools from typing import ( TYPE_CHECKING, + cast, overload, ) @@ -21,6 +22,7 @@ from pandas.core.construction import ensure_wrapped_if_datetimelike if TYPE_CHECKING: + from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.arrays.base import ExtensionArray @@ -89,7 +91,12 @@ def take_nd( if not isinstance(arr, np.ndarray): # i.e. ExtensionArray, - # includes for EA to catch DatetimeArray, TimedeltaArray + if arr.ndim == 2: + # e.g. 
DatetimeArray, TimedeltArray + arr = cast("NDArrayBackedExtensionArray", arr) + return arr.take( + indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis + ) return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) arr = np.asarray(arr) diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 678e532f05772..427b3106ea10c 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -24,7 +24,10 @@ cache_readonly, doc, ) -from pandas.util._validators import validate_fillna_kwargs +from pandas.util._validators import ( + validate_bool_kwarg, + validate_fillna_kwargs, +) from pandas.core.dtypes.common import is_dtype_equal from pandas.core.dtypes.missing import array_equivalent @@ -39,6 +42,7 @@ from pandas.core.arrays.base import ExtensionArray from pandas.core.construction import extract_array from pandas.core.indexers import check_array_indexer +from pandas.core.sorting import nargminmax NDArrayBackedExtensionArrayT = TypeVar( "NDArrayBackedExtensionArrayT", bound="NDArrayBackedExtensionArray" @@ -189,6 +193,22 @@ def equals(self, other) -> bool: def _values_for_argsort(self): return self._ndarray + # Signature of "argmin" incompatible with supertype "ExtensionArray" + def argmin(self, axis: int = 0, skipna: bool = True): # type:ignore[override] + # override base class by adding axis keyword + validate_bool_kwarg(skipna, "skipna") + if not skipna and self.isna().any(): + raise NotImplementedError + return nargminmax(self, "argmin", axis=axis) + + # Signature of "argmax" incompatible with supertype "ExtensionArray" + def argmax(self, axis: int = 0, skipna: bool = True): # type:ignore[override] + # override base class by adding axis keyword + validate_bool_kwarg(skipna, "skipna") + if not skipna and self.isna().any(): + raise NotImplementedError + return nargminmax(self, "argmax", axis=axis) + def copy(self: NDArrayBackedExtensionArrayT) -> NDArrayBackedExtensionArrayT: new_data = 
self._ndarray.copy() return self._from_backing_data(new_data) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 46501c97cf38a..510bdfcb0079f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9544,6 +9544,9 @@ def func(values: np.ndarray): def blk_func(values, axis=1): if isinstance(values, ExtensionArray): + if values.ndim == 2: + # i.e. DatetimeArray, TimedeltaArray + return values._reduce(name, axis=1, skipna=skipna, **kwds) return values._reduce(name, skipna=skipna, **kwds) else: return op(values, axis=axis, skipna=skipna, **kwds) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 746e27c31f423..5442f90a25580 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -72,6 +72,7 @@ maybe_fill, ) +from pandas.core.arrays import ExtensionArray from pandas.core.base import SelectionMixin import pandas.core.common as com from pandas.core.frame import DataFrame @@ -267,7 +268,9 @@ def apply(self, f: F, data: FrameOrSeries, axis: int = 0): group_keys = self._get_group_keys() result_values = None - if data.ndim == 2 and np.any(data.dtypes.apply(is_extension_array_dtype)): + if data.ndim == 2 and any( + isinstance(x, ExtensionArray) for x in data._iter_column_arrays() + ): # calling splitter.fast_apply will raise TypeError via apply_frame_axis0 # if we pass EA instead of ndarray # TODO: can we have a workaround for EAs backed by ndarray? 
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 950d229c45f9e..dacacb4329d12 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -493,9 +493,12 @@ def apply_with_block(self: T, f, align_keys=None, swap_axis=True, **kwargs) -> T if isinstance(applied, list): applied = applied[0] arr = applied.values - if self.ndim == 2: - if isinstance(arr, np.ndarray): - arr = arr[0, :] + if self.ndim == 2 and arr.ndim == 2: + assert len(arr) == 1 + # error: Invalid index type "Tuple[int, slice]" for + # "Union[ndarray, ExtensionArray]"; expected type + # "Union[int, slice, ndarray]" + arr = arr[0, :] # type: ignore[index] result_arrays.append(arr) return type(self)(result_arrays, self._axes) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 09e214237b736..a77ea61d9e6de 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -27,7 +27,6 @@ writers, ) from pandas._libs.internals import BlockPlacement -from pandas._libs.tslibs import conversion from pandas._typing import ( ArrayLike, Dtype, @@ -47,7 +46,6 @@ maybe_downcast_numeric, maybe_downcast_to_dtype, maybe_upcast, - sanitize_to_nanoseconds, soft_convert_objects, ) from pandas.core.dtypes.common import ( @@ -938,7 +936,11 @@ def setitem(self, indexer, value): return self.coerce_to_target_dtype(value).setitem(indexer, value) if self.dtype.kind in ["m", "M"]: - arr = self.array_values.T + arr = self.values + if self.ndim > 1: + # Dont transpose with ndim=1 bc we would fail to invalidate + # arr.freq + arr = arr.T arr[indexer] = value return self @@ -1172,6 +1174,7 @@ def _interpolate_with_fill( limit_area=limit_area, ) + values = maybe_coerce_values(values) blocks = [self.make_block_same_class(values)] return self._maybe_downcast(blocks, downcast) @@ -1227,6 +1230,7 @@ def func(yvalues: np.ndarray) -> np.ndarray: # interp each column independently interp_values = 
np.apply_along_axis(func, axis, data) + interp_values = maybe_coerce_values(interp_values) blocks = [self.make_block_same_class(interp_values)] return self._maybe_downcast(blocks, downcast) @@ -1788,27 +1792,32 @@ class NDArrayBackedExtensionBlock(HybridMixin, Block): Block backed by an NDArrayBackedExtensionArray """ + values: NDArrayBackedExtensionArray + + @property + def is_view(self) -> bool: + """ return a boolean if I am possibly a view """ + # check the ndarray values of the DatetimeIndex values + return self.values._ndarray.base is not None + def internal_values(self): # Override to return DatetimeArray and TimedeltaArray - return self.array_values + return self.values def get_values(self, dtype: Optional[DtypeObj] = None) -> np.ndarray: """ return object dtype as boxed values, such as Timestamps/Timedelta """ - values = self.array_values + values = self.values if is_object_dtype(dtype): - # DTA/TDA constructor and astype can handle 2D - # error: "Callable[..., Any]" has no attribute "astype" - values = values.astype(object) # type: ignore[attr-defined] + values = values.astype(object) # TODO(EA2D): reshape not needed with 2D EAs return np.asarray(values).reshape(self.shape) def iget(self, key): # GH#31649 we need to wrap scalars in Timestamp/Timedelta # TODO(EA2D): this can be removed if we ever have 2D EA - # error: "Callable[..., Any]" has no attribute "reshape" - return self.array_values.reshape(self.shape)[key] # type: ignore[attr-defined] + return self.values.reshape(self.shape)[key] def putmask(self, mask, new) -> List[Block]: mask = extract_bool_array(mask) @@ -1817,16 +1826,13 @@ def putmask(self, mask, new) -> List[Block]: return self.astype(object).putmask(mask, new) # TODO(EA2D): reshape unnecessary with 2D EAs - # error: "Callable[..., Any]" has no attribute "reshape" - arr = self.array_values.reshape(self.shape) # type: ignore[attr-defined] - arr = cast("NDArrayBackedExtensionArray", arr) + arr = self.values.reshape(self.shape) 
arr.T.putmask(mask, new) return [self] def where(self, other, cond, errors="raise") -> List[Block]: # TODO(EA2D): reshape unnecessary with 2D EAs - # error: "Callable[..., Any]" has no attribute "reshape" - arr = self.array_values.reshape(self.shape) # type: ignore[attr-defined] + arr = self.values.reshape(self.shape) cond = extract_bool_array(cond) @@ -1837,7 +1843,6 @@ def where(self, other, cond, errors="raise") -> List[Block]: # TODO(EA2D): reshape not needed with 2D EAs res_values = res_values.reshape(self.values.shape) - res_values = maybe_coerce_values(res_values) nb = self.make_block_same_class(res_values) return [nb] @@ -1862,19 +1867,15 @@ def diff(self, n: int, axis: int = 0) -> List[Block]: by apply. """ # TODO(EA2D): reshape not necessary with 2D EAs - # error: "Callable[..., Any]" has no attribute "reshape" - values = self.array_values.reshape(self.shape) # type: ignore[attr-defined] + values = self.values.reshape(self.shape) new_values = values - values.shift(n, axis=axis) - new_values = maybe_coerce_values(new_values) return [self.make_block(new_values)] def shift(self, periods: int, axis: int = 0, fill_value: Any = None) -> List[Block]: - # TODO(EA2D) this is unnecessary if these blocks are backed by 2D EA - # error: "Callable[..., Any]" has no attribute "reshape" - values = self.array_values.reshape(self.shape) # type: ignore[attr-defined] + # TODO(EA2D) this is unnecessary if these blocks are backed by 2D EAs + values = self.values.reshape(self.shape) new_values = values.shift(periods, fill_value=fill_value, axis=axis) - new_values = maybe_coerce_values(new_values) return [self.make_block_same_class(new_values)] def fillna( @@ -1887,38 +1888,27 @@ def fillna( # TODO: don't special-case td64 return self.astype(object).fillna(value, limit, inplace, downcast) - values = self.array_values - # error: "Callable[..., Any]" has no attribute "copy" - values = values if inplace else values.copy() # type: ignore[attr-defined] - # error: "Callable[..., Any]" 
has no attribute "fillna" - new_values = values.fillna( # type: ignore[attr-defined] - value=value, limit=limit - ) - new_values = maybe_coerce_values(new_values) + values = self.values + values = values if inplace else values.copy() + new_values = values.fillna(value=value, limit=limit) return [self.make_block_same_class(values=new_values)] class DatetimeLikeBlockMixin(NDArrayBackedExtensionBlock): """Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock.""" + values: Union[DatetimeArray, TimedeltaArray] + is_numeric = False @cache_readonly def array_values(self): - return ensure_wrapped_if_datetimelike(self.values) + return self.values class DatetimeBlock(DatetimeLikeBlockMixin): __slots__ = () - def set_inplace(self, locs, values): - """ - See Block.set.__doc__ - """ - values = conversion.ensure_datetime64ns(values, copy=False) - - self.values[locs] = values - class DatetimeTZBlock(ExtensionBlock, DatetimeLikeBlockMixin): """ implement a datetime64 block with a tz attribute """ @@ -1936,13 +1926,10 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeLikeBlockMixin): putmask = DatetimeLikeBlockMixin.putmask fillna = DatetimeLikeBlockMixin.fillna - array_values = ExtensionBlock.array_values - - @property - def is_view(self) -> bool: - """ return a boolean if I am possibly a view """ - # check the ndarray values of the DatetimeIndex values - return self.values._data.base is not None + # error: Incompatible types in assignment (expression has type + # "Callable[[NDArrayBackedExtensionBlock], bool]", base class "ExtensionBlock" + # defined the type as "bool") [assignment] + is_view = NDArrayBackedExtensionBlock.is_view # type: ignore[assignment] class TimeDeltaBlock(DatetimeLikeBlockMixin): @@ -2029,15 +2016,11 @@ def maybe_coerce_values(values) -> ArrayLike: values = extract_array(values, extract_numpy=True) if isinstance(values, np.ndarray): - values = sanitize_to_nanoseconds(values) + values = ensure_wrapped_if_datetimelike(values) if 
issubclass(values.dtype.type, str): values = np.array(values, dtype=object) - elif isinstance(values.dtype, np.dtype): - # i.e. not datetime64tz, extract DTA/TDA -> ndarray - values = values._data - return values diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index d5e549ec874da..02d582c439ea2 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -423,10 +423,17 @@ def _concatenate_join_units( concat_values = concat_values.copy() else: concat_values = concat_values.copy() - elif any(isinstance(t, ExtensionArray) for t in to_concat): + elif any(isinstance(t, ExtensionArray) and t.ndim == 1 for t in to_concat): # concatting with at least one EA means we are concatting a single column # the non-EA values are 2D arrays with shape (1, n) - to_concat = [t if isinstance(t, ExtensionArray) else t[0, :] for t in to_concat] + # error: Invalid index type "Tuple[int, slice]" for + # "Union[ExtensionArray, ndarray]"; expected type "Union[int, slice, ndarray]" + to_concat = [ + t + if (isinstance(t, ExtensionArray) and t.ndim == 1) + else t[0, :] # type: ignore[index] + for t in to_concat + ] concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True) concat_values = ensure_block_shape(concat_values, 2) diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 856bd7d159c71..6024c083fcc6b 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -398,7 +398,7 @@ def nargsort( return indexer -def nargminmax(values, method: str): +def nargminmax(values, method: str, axis: int = 0): """ Implementation of np.argmin/argmax but for ExtensionArray and which handles missing values. 
@@ -407,6 +407,7 @@ def nargminmax(values, method: str): ---------- values : ExtensionArray method : {"argmax", "argmin"} + axis: int, default 0 Returns ------- @@ -418,7 +419,23 @@ def nargminmax(values, method: str): mask = np.asarray(isna(values)) values = values._values_for_argsort() - idx = np.arange(len(values)) + if values.ndim > 1: + if mask.any(): + if axis == 1: + zipped = zip(values, mask) + else: + zipped = zip(values.T, mask.T) + return np.array([_nanargminmax(v, m, func) for v, m in zipped]) + return func(values, axis=axis) + + return _nanargminmax(values, mask, func) + + +def _nanargminmax(values, mask, func) -> int: + """ + See nanargminmax.__doc__. + """ + idx = np.arange(values.shape[0]) non_nans = values[~mask] non_nan_idx = idx[~mask] diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 99931123b0c81..8658bb654b787 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -3963,7 +3963,9 @@ def _create_axes( typ = klass._get_atom(data_converted) kind = _dtype_to_kind(data_converted.dtype.name) - tz = _get_tz(data_converted.tz) if hasattr(data_converted, "tz") else None + tz = None + if getattr(data_converted, "tz", None) is not None: + tz = _get_tz(data_converted.tz) meta = metadata = ordered = None if is_categorical_dtype(data_converted.dtype): diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index f75b3800f623f..215b51dd88ef4 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -1968,6 +1968,7 @@ def test_operators_datetimelike_with_timezones(self): td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H")) td2 = td1.copy() td2.iloc[1] = np.nan + assert td2._values.freq is None result = dt1 + td1[0] exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index a4fedd9a4c5da..6c1161294dd17 100644 --- 
a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -30,6 +30,7 @@ import numpy as np +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import pandas_dtype import pandas as pd @@ -217,11 +218,9 @@ def _values_for_factorize(self): return frozen, () def _values_for_argsort(self): - # Disable NumPy's shape inference by including an empty tuple... - # If all the elements of self are the same size P, NumPy will - # cast them to an (N, P) array, instead of an (N,) array of tuples. - frozen = [()] + [tuple(x.items()) for x in self] - return np.array(frozen, dtype=object)[1:] + # Bypass NumPy's shape inference to get a (N,) array of tuples. + frozen = [tuple(x.items()) for x in self] + return construct_1d_object_array_from_listlike(frozen) def make_data(): diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index b294c97409951..616405c01cc2a 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1003,6 +1003,43 @@ def test_idxmax_mixed_dtype(self): expected = Series([0, 2, 0], index=[1, 2, 3]) tm.assert_series_equal(result, expected) + # with NaTs + df.loc[0, 3] = pd.NaT + result = df.idxmax() + expected = Series([1, 0, 2], index=[1, 2, 3]) + tm.assert_series_equal(result, expected) + + result = df.idxmin() + expected = Series([0, 2, 1], index=[1, 2, 3]) + tm.assert_series_equal(result, expected) + + # with multi-column dt64 block + df[4] = dti[::-1] + df._consolidate_inplace() + + result = df.idxmax() + expected = Series([1, 0, 2, 0], index=[1, 2, 3, 4]) + tm.assert_series_equal(result, expected) + + result = df.idxmin() + expected = Series([0, 2, 1, 2], index=[1, 2, 3, 4]) + tm.assert_series_equal(result, expected) + + def test_idxmax_dt64_multicolumn_axis1(self): + dti = date_range("2016-01-01", periods=3) + df = DataFrame({3: dti, 4: dti[::-1]}) + df.iloc[0, 0] = pd.NaT + + 
df._consolidate_inplace() + + result = df.idxmax(axis=1) + expected = Series([4, 3, 3]) + tm.assert_series_equal(result, expected) + + result = df.idxmin(axis=1) + expected = Series([4, 3, 4]) + tm.assert_series_equal(result, expected) + # ---------------------------------------------------------------------- # Logical reductions diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index eb54887cea277..117612696df11 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -1008,11 +1008,8 @@ def test_apply_function_with_indexing_return_column(): tm.assert_frame_equal(result, expected) -def test_apply_with_timezones_aware(using_array_manager, request): +def test_apply_with_timezones_aware(): # GH: 27212 - if not using_array_manager: - request.node.add_marker(pytest.mark.xfail(reason="GH-34998")) - dates = ["2001-01-01"] * 2 + ["2001-01-02"] * 2 + ["2001-01-03"] * 2 index_no_tz = pd.DatetimeIndex(dates) index_tz = pd.DatetimeIndex(dates, tz="UTC") diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 12247e2445295..de508b8cd78ec 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -840,12 +840,7 @@ def test_omit_nuisance(df): # won't work with axis = 1 grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1) - msg = "|".join( - [ - "reduction operation 'sum' not allowed for this dtype", - "'DatetimeArray' does not implement reduction 'sum'", - ] - ) + msg = "'DatetimeArray' does not implement reduction 'sum'" with pytest.raises(TypeError, match=msg): grouped.agg(lambda x: x.sum(0, numeric_only=False)) diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index fc06b85b1f954..c242623520b75 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -27,7 +27,10 @@ ) import pandas._testing as tm import pandas.core.algorithms as 
algos -from pandas.core.arrays import SparseArray +from pandas.core.arrays import ( + DatetimeArray, + SparseArray, +) from pandas.core.internals import ( BlockManager, SingleBlockManager, @@ -438,13 +441,26 @@ def test_copy(self, mgr): cp = mgr.copy(deep=True) for blk, cp_blk in zip(mgr.blocks, cp.blocks): + bvals = blk.values + cpvals = cp_blk.values + + tm.assert_equal(cpvals, bvals) + + if isinstance(cpvals, np.ndarray): + lbase = cpvals.base + rbase = bvals.base + else: + lbase = cpvals._ndarray.base + rbase = bvals._ndarray.base + # copy assertion we either have a None for a base or in case of # some blocks it is an array (e.g. datetimetz), but was copied - tm.assert_equal(cp_blk.values, blk.values) - if not isinstance(cp_blk.values, np.ndarray): - assert cp_blk.values._data.base is not blk.values._data.base + if isinstance(cpvals, DatetimeArray): + assert (lbase is None and rbase is None) or (lbase is not rbase) + elif not isinstance(cpvals, np.ndarray): + assert lbase is not rbase else: - assert cp_blk.values.base is None and blk.values.base is None + assert lbase is None and rbase is None def test_sparse(self): mgr = create_mgr("a: sparse-1; b: sparse-2")
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry Broken off from #40149. Like #40149, this backs DTBlock/TDBlock by (sometimes 2D) DTA/TDA. Unlike #40149, this does _not_ make the DTA backing DatetimeTZBlock sometimes-2D.
https://api.github.com/repos/pandas-dev/pandas/pulls/40456
2021-03-16T01:19:45Z
2021-03-30T12:51:50Z
2021-03-30T12:51:49Z
2021-03-30T15:08:57Z
TYP: stubs for reshape, ops, ops_dispatch, hashing
diff --git a/pandas/_libs/hashing.pyi b/pandas/_libs/hashing.pyi new file mode 100644 index 0000000000000..2844ec9b06557 --- /dev/null +++ b/pandas/_libs/hashing.pyi @@ -0,0 +1,7 @@ +import numpy as np + +def hash_object_array( + arr: np.ndarray, # np.ndarray[object] + key: str, + encoding: str = ..., +) -> np.ndarray: ... # np.ndarray[np.uint64] diff --git a/pandas/_libs/ops.pyi b/pandas/_libs/ops.pyi new file mode 100644 index 0000000000000..b4f42f217a5db --- /dev/null +++ b/pandas/_libs/ops.pyi @@ -0,0 +1,43 @@ +from typing import ( + Any, + Callable, +) + +import numpy as np + +_BinOp = Callable[[Any, Any], Any] +_BoolOp = Callable[[Any, Any], bool] + + +def scalar_compare( + values: np.ndarray, # object[:] + val: object, + op: _BoolOp, # {operator.eq, operator.ne, ...} +) -> np.ndarray: ... # np.ndarray[bool] + +def vec_compare( + left: np.ndarray, # np.ndarray[object] + right: np.ndarray, # np.ndarray[object] + op: _BoolOp, # {operator.eq, operator.ne, ...} +) -> np.ndarray: ... # np.ndarray[bool] + + +def scalar_binop( + values: np.ndarray, # object[:] + val: object, + op: _BinOp, # binary operator +) -> np.ndarray: ... + + +def vec_binop( + left: np.ndarray, # object[:] + right: np.ndarray, # object[:] + op: _BinOp, # binary operator +) -> np.ndarray: ... + + +def maybe_convert_bool( + arr: np.ndarray, # np.ndarray[object] + true_values=..., + false_values=... +) -> np.ndarray: ... diff --git a/pandas/_libs/ops_dispatch.pyi b/pandas/_libs/ops_dispatch.pyi new file mode 100644 index 0000000000000..91b5a4dbaaebc --- /dev/null +++ b/pandas/_libs/ops_dispatch.pyi @@ -0,0 +1,5 @@ +import numpy as np + +def maybe_dispatch_ufunc_to_dunder_op( + self, ufunc: np.ufunc, method: str, *inputs, **kwargs +): ... 
diff --git a/pandas/_libs/reshape.pyi b/pandas/_libs/reshape.pyi new file mode 100644 index 0000000000000..7aaa18a7feff2 --- /dev/null +++ b/pandas/_libs/reshape.pyi @@ -0,0 +1,19 @@ +import numpy as np + +def unstack( + values: np.ndarray, # reshape_t[:, :] + mask: np.ndarray, # const uint8_t[:] + stride: int, + length: int, + width: int, + new_values: np.ndarray, # reshape_t[:, :] + new_mask: np.ndarray, # uint8_t[:, :] +) -> None: ... + + +def explode( + values: np.ndarray, # np.ndarray[object] +) -> tuple[ + np.ndarray, # np.ndarray[object] + np.ndarray, # np.ndarray[np.int64] +]: ...
manually curated, extra specificity in comments it is supported directly
https://api.github.com/repos/pandas-dev/pandas/pulls/40455
2021-03-16T01:06:12Z
2021-03-17T10:05:45Z
2021-03-17T10:05:45Z
2021-03-17T14:07:36Z
CLN: make `cell_context` DefaultDict like `ctx` - simplify code
diff --git a/asv_bench/benchmarks/io/style.py b/asv_bench/benchmarks/io/style.py index 4fc07bbabda06..6c0ca6fac6ec3 100644 --- a/asv_bench/benchmarks/io/style.py +++ b/asv_bench/benchmarks/io/style.py @@ -3,7 +3,7 @@ from pandas import DataFrame -class RenderApply: +class Render: params = [[12, 24, 36], [12, 120]] param_names = ["cols", "rows"] @@ -14,15 +14,21 @@ def setup(self, cols, rows): columns=[f"float_{i+1}" for i in range(cols)], index=[f"row_{i+1}" for i in range(rows)], ) - self._style_apply() - def time_render(self, cols, rows): + def time_apply_render(self, cols, rows): + self._style_apply() self.st.render() - def peakmem_apply(self, cols, rows): + def peakmem_apply_render(self, cols, rows): self._style_apply() + self.st.render() - def peakmem_render(self, cols, rows): + def time_classes_render(self, cols, rows): + self._style_classes() + self.st.render() + + def peakmem_classes_render(self, cols, rows): + self._style_classes() self.st.render() def _style_apply(self): @@ -32,3 +38,8 @@ def _apply_func(s): ] self.st = self.df.style.apply(_apply_func, axis=1) + + def _style_classes(self): + classes = self.df.applymap(lambda v: ("cls-1" if v > 0 else "")) + classes.index, classes.columns = self.df.index, self.df.columns + self.st = self.df.style.set_td_classes(classes) diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index fef6d3b2b0aca..282a0dba8ac03 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -193,7 +193,7 @@ def __init__( self.hidden_index: bool = False self.hidden_columns: Sequence[int] = [] self.ctx: DefaultDict[Tuple[int, int], CSSList] = defaultdict(list) - self.cell_context: Dict[str, Any] = {} + self.cell_context: DefaultDict[Tuple[int, int], str] = defaultdict(str) self._todo: List[Tuple[Callable, Tuple, Dict]] = [] self.tooltips: Optional[_Tooltips] = None def_precision = get_option("display.precision") @@ -420,19 +420,11 @@ def _translate(self): if clabels: for c, value in 
enumerate(clabels[r]): - cs = [ - COL_HEADING_CLASS, - f"level{r}", - f"col{c}", - ] - cs.extend( - cell_context.get("col_headings", {}).get(r, {}).get(c, []) - ) es = { "type": "th", "value": value, "display_value": value, - "class": " ".join(cs), + "class": f"{COL_HEADING_CLASS} level{r} col{c}", "is_visible": _is_visible(c, r, col_lengths), } colspan = col_lengths.get((r, c), 0) @@ -492,7 +484,6 @@ def _translate(self): row_es.append(es) for c, value in enumerate(row_tup[1:]): - cs = [DATA_CLASS, f"row{r}", f"col{c}"] formatter = self._display_funcs[(r, c)] row_dict = { "type": "td", @@ -505,12 +496,14 @@ def _translate(self): # only add an id if the cell has a style props: CSSList = [] if self.cell_ids or (r, c) in ctx: - row_dict["id"] = "_".join(cs[1:]) + row_dict["id"] = f"row{r}_col{c}" props.extend(ctx[r, c]) # add custom classes from cell context - cs.extend(cell_context.get("data", {}).get(r, {}).get(c, [])) - row_dict["class"] = " ".join(cs) + cls = "" + if (r, c) in cell_context: + cls = " " + cell_context[r, c] + row_dict["class"] = f"{DATA_CLASS} row{r} col{c}{cls}" row_es.append(row_dict) if props: # (), [] won't be in cellstyle_map, cellstyle respectively @@ -736,15 +729,10 @@ def set_td_classes(self, classes: DataFrame) -> Styler: """ classes = classes.reindex_like(self.data) - mask = (classes.isna()) | (classes.eq("")) - self.cell_context["data"] = { - r: { - c: [str(classes.iloc[r, c])] - for c, cn in enumerate(classes.columns) - if not mask.iloc[r, c] - } - for r, rn in enumerate(classes.index) - } + for r, row_tup in enumerate(classes.itertuples()): + for c, value in enumerate(row_tup[1:]): + if not (pd.isna(value) or value == ""): + self.cell_context[(r, c)] = str(value) return self @@ -859,7 +847,7 @@ def clear(self) -> None: """ self.ctx.clear() self.tooltips = None - self.cell_context = {} + self.cell_context.clear() self._todo = [] def _compute(self):
Making the variables and code in `Styler` consistent with each other and consistent in patterns so it is easier to work with (for me!) and newcomers. Also removing redundant code `cell_context.get("col_headings", {})` would **never** return anything other `{}`
https://api.github.com/repos/pandas-dev/pandas/pulls/40453
2021-03-15T20:04:18Z
2021-03-22T12:59:31Z
2021-03-22T12:59:31Z
2021-03-22T14:04:57Z
TYP: fix ignores
diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx index 6577f3604d14b..6adda1fe92044 100644 --- a/pandas/_libs/writers.pyx +++ b/pandas/_libs/writers.pyx @@ -34,7 +34,7 @@ def write_csv_rows( data_index : ndarray nlevels : int cols : ndarray - writer : object + writer : _csv.writer """ # In crude testing, N>100 yields little marginal improvement cdef: diff --git a/pandas/core/array_algos/quantile.py b/pandas/core/array_algos/quantile.py index f140ee08aef05..eb96c14286715 100644 --- a/pandas/core/array_algos/quantile.py +++ b/pandas/core/array_algos/quantile.py @@ -142,17 +142,10 @@ def quantile_ea_compat( mask = np.asarray(values.isna()) mask = np.atleast_2d(mask) - # error: Incompatible types in assignment (expression has type "ndarray", variable - # has type "ExtensionArray") - values, fill_value = values._values_for_factorize() # type: ignore[assignment] - # error: No overload variant of "atleast_2d" matches argument type "ExtensionArray" - values = np.atleast_2d(values) # type: ignore[call-overload] - - # error: Argument 1 to "quantile_with_mask" has incompatible type "ExtensionArray"; - # expected "ndarray" - result = quantile_with_mask( - values, mask, fill_value, qs, interpolation, axis # type: ignore[arg-type] - ) + arr, fill_value = values._values_for_factorize() + arr = np.atleast_2d(arr) + + result = quantile_with_mask(arr, mask, fill_value, qs, interpolation, axis) if not is_sparse(orig.dtype): # shape[0] should be 1 as long as EAs are 1D diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py index 0bdfc8fdb95a5..201b9fdcc51cc 100644 --- a/pandas/core/array_algos/replace.py +++ b/pandas/core/array_algos/replace.py @@ -152,8 +152,6 @@ def re_replacer(s): f = np.vectorize(re_replacer, otypes=[values.dtype]) if mask is None: - # error: Invalid index type "slice" for "ExtensionArray"; expected type - # "Union[int, ndarray]" - values[:] = f(values) # type: ignore[index] + values[:] = f(values) else: values[mask] = 
f(values[mask]) diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py index 31cbadb0e442b..c1abd8bbf39d0 100644 --- a/pandas/core/array_algos/take.py +++ b/pandas/core/array_algos/take.py @@ -177,9 +177,6 @@ def take_1d( Note: similarly to `take_nd`, this function assumes that the indexer is a valid(ated) indexer with no out of bound indices. - - TODO(ArrayManager): mainly useful for ArrayManager, otherwise can potentially - be removed again if we don't end up with ArrayManager. """ if not isinstance(arr, np.ndarray): # ExtensionArray -> dispatch to their method diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 68909e30650c7..c45528d657404 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -326,7 +326,7 @@ def __getitem__( """ raise AbstractMethodError(self) - def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None: + def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None: """ Set one or more values inplace. diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 96bba51f34c08..4a032c60d386d 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -7,9 +7,11 @@ tzinfo, ) from typing import ( + TYPE_CHECKING, Optional, Union, cast, + overload, ) import warnings @@ -79,6 +81,9 @@ Tick, ) +if TYPE_CHECKING: + from typing import Literal + _midnight = time(0, 0) @@ -1909,6 +1914,20 @@ def std( # Constructor Helpers +@overload +def sequence_to_datetimes( + data, allow_object: Literal[False] = ..., require_iso8601: bool = ... +) -> DatetimeArray: + ... + + +@overload +def sequence_to_datetimes( + data, allow_object: Literal[True] = ..., require_iso8601: bool = ... +) -> Union[np.ndarray, DatetimeArray]: + ... 
+ + def sequence_to_datetimes( data, allow_object: bool = False, require_iso8601: bool = False ) -> Union[np.ndarray, DatetimeArray]: diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 75d9fcd3b4965..7251faee333bb 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -9,6 +9,7 @@ Tuple, Type, Union, + cast, ) import numpy as np @@ -485,7 +486,7 @@ def _cmp_method(self, other, op): # TODO(ARROW-9429): Add a .to_numpy() to ChunkedArray return BooleanArray._from_sequence(result.to_pandas().values) - def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None: + def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None: """Set one or more values inplace. Parameters @@ -509,6 +510,8 @@ def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None: key = check_array_indexer(self, key) if is_integer(key): + key = cast(int, key) + if not is_scalar(value): raise ValueError("Must pass scalars with scalar indexer") elif isna(value): @@ -518,8 +521,7 @@ def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None: # Slice data and insert in-between new_data = [ - # error: Slice index must be an integer or None - *self._data[0:key].chunks, # type: ignore[misc] + *self._data[0:key].chunks, pa.array([value], type=pa.string()), *self._data[(key + 1) :].chunks, ] @@ -530,11 +532,11 @@ def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None: # This is probably extremely slow. 
# Convert all possible input key types to an array of integers - if is_bool_dtype(key): + if isinstance(key, slice): + key_array = np.array(range(len(self))[key]) + elif is_bool_dtype(key): # TODO(ARROW-9430): Directly support setitem(booleans) key_array = np.argwhere(key).flatten() - elif isinstance(key, slice): - key_array = np.array(range(len(self))[key]) else: # TODO(ARROW-9431): Directly support setitem(integers) key_array = np.asanyarray(key) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index be535495de8d0..7a2175a364a8a 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -44,7 +44,6 @@ ) from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas._typing import ( - AnyArrayLike, ArrayLike, Dtype, DtypeObj, @@ -407,27 +406,16 @@ def maybe_cast_result( assert not is_scalar(result) - if ( - is_extension_array_dtype(dtype) - and not is_categorical_dtype(dtype) - and dtype.kind != "M" - ): - # We have to special case categorical so as not to upcast - # things like counts back to categorical - - # error: Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has no - # attribute "construct_array_type" - cls = dtype.construct_array_type() # type: ignore[union-attr] - # error: Argument "dtype" to "maybe_cast_to_extension_array" has incompatible - # type "Union[dtype[Any], ExtensionDtype]"; expected "Optional[ExtensionDtype]" - result = maybe_cast_to_extension_array( - cls, result, dtype=dtype # type: ignore[arg-type] - ) + if isinstance(dtype, ExtensionDtype): + if not is_categorical_dtype(dtype) and dtype.kind != "M": + # We have to special case categorical so as not to upcast + # things like counts back to categorical - elif numeric_only and is_numeric_dtype(dtype) or not numeric_only: - # error: Argument 2 to "maybe_downcast_to_dtype" has incompatible type - # "Union[dtype[Any], ExtensionDtype]"; expected "Union[str, dtype[Any]]" - result = maybe_downcast_to_dtype(result, dtype) # type: 
ignore[arg-type] + cls = dtype.construct_array_type() + result = maybe_cast_to_extension_array(cls, result, dtype=dtype) + + elif (numeric_only and is_numeric_dtype(dtype)) or not numeric_only: + result = maybe_downcast_to_dtype(result, dtype) return result @@ -549,17 +537,23 @@ def maybe_upcast_putmask(result: np.ndarray, mask: np.ndarray) -> np.ndarray: new_dtype = ensure_dtype_can_hold_na(result.dtype) if new_dtype != result.dtype: - # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has incompatible - # type "Union[dtype[Any], ExtensionDtype]"; expected "Union[dtype[Any], - # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, - # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" - result = result.astype(new_dtype, copy=True) # type: ignore[arg-type] + result = result.astype(new_dtype, copy=True) np.place(result, mask, np.nan) return result +@overload +def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype: + ... + + +@overload +def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype: + ... + + def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj: """ If we have a dtype that cannot hold NA values, find the best match that can. 
@@ -636,9 +630,7 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan): kinds = ["i", "u", "f", "c", "m", "M"] if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in kinds: - # error: Incompatible types in assignment (expression has type - # "Union[dtype[Any], ExtensionDtype]", variable has type "dtype[Any]") - dtype = ensure_dtype_can_hold_na(dtype) # type: ignore[assignment] + dtype = ensure_dtype_can_hold_na(dtype) fv = na_value_for_dtype(dtype) return dtype, fv @@ -1471,7 +1463,7 @@ def soft_convert_objects( def convert_dtypes( - input_array: AnyArrayLike, + input_array: ArrayLike, convert_string: bool = True, convert_integer: bool = True, convert_boolean: bool = True, @@ -1483,7 +1475,7 @@ def convert_dtypes( Parameters ---------- - input_array : ExtensionArray, Index, Series or np.ndarray + input_array : ExtensionArray or np.ndarray convert_string : bool, default True Whether object dtypes should be converted to ``StringDtype()``. convert_integer : bool, default True @@ -1707,15 +1699,10 @@ def maybe_cast_to_datetime( # GH 25843: Remove tz information since the dtype # didn't specify one - # error: Item "ndarray" of "Union[ndarray, DatetimeArray]" - # has no attribute "tz" - if dta.tz is not None: # type: ignore[union-attr] + if dta.tz is not None: # equiv: dta.view(dtype) # Note: NOT equivalent to dta.astype(dtype) - - # error: Item "ndarray" of "Union[ndarray, - # DatetimeArray]" has no attribute "tz_localize" - dta = dta.tz_localize(None) # type: ignore[union-attr] + dta = dta.tz_localize(None) value = dta elif is_datetime64tz: dtype = cast(DatetimeTZDtype, dtype) @@ -1725,38 +1712,19 @@ def maybe_cast_to_datetime( # be localized to the timezone. 
is_dt_string = is_string_dtype(value.dtype) dta = sequence_to_datetimes(value, allow_object=False) - # error: Item "ndarray" of "Union[ndarray, DatetimeArray]" - # has no attribute "tz" - if dta.tz is not None: # type: ignore[union-attr] - # error: Argument 1 to "astype" of - # "_ArrayOrScalarCommon" has incompatible type - # "Union[dtype[Any], ExtensionDtype, None]"; expected - # "Union[dtype[Any], None, type, _SupportsDType, str, - # Union[Tuple[Any, int], Tuple[Any, Union[int, - # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, - # Any]]]" - value = dta.astype( - dtype, copy=False # type: ignore[arg-type] - ) + if dta.tz is not None: + value = dta.astype(dtype, copy=False) elif is_dt_string: # Strings here are naive, so directly localize # equiv: dta.astype(dtype) # though deprecated - # error: Item "ndarray" of "Union[ndarray, - # DatetimeArray]" has no attribute "tz_localize" - value = dta.tz_localize( # type: ignore[union-attr] - dtype.tz - ) + value = dta.tz_localize(dtype.tz) else: # Numeric values are UTC at this point, # so localize and convert # equiv: Series(dta).astype(dtype) # though deprecated - # error: Item "ndarray" of "Union[ndarray, - # DatetimeArray]" has no attribute "tz_localize" - value = dta.tz_localize( # type: ignore[union-attr] - "UTC" - ).tz_convert(dtype.tz) + value = dta.tz_localize("UTC").tz_convert(dtype.tz) elif is_timedelta64: # if successful, we get a ndarray[td64ns] value, _ = sequence_to_td64ns(value) @@ -1789,14 +1757,12 @@ def maybe_cast_to_datetime( elif value.dtype == object: value = maybe_infer_to_datetimelike(value) - elif not isinstance(value, ABCExtensionArray): + elif isinstance(value, list): # only do this if we have an array and the dtype of the array is not # setup already we are not an integer/object, so don't bother with this # conversion - # error: Argument 1 to "maybe_infer_to_datetimelike" has incompatible type - # "Union[ExtensionArray, List[Any]]"; expected "Union[ndarray, List[Any]]" - value = 
maybe_infer_to_datetimelike(value) # type: ignore[arg-type] + value = maybe_infer_to_datetimelike(value) return value @@ -1974,10 +1940,8 @@ def construct_1d_arraylike_from_scalar( except OutOfBoundsDatetime: dtype = np.dtype(object) - if is_extension_array_dtype(dtype): - # error: Item "dtype" of "Union[dtype, ExtensionDtype]" has no - # attribute "construct_array_type" - cls = dtype.construct_array_type() # type: ignore[union-attr] + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() subarr = cls._from_sequence([value] * length, dtype=dtype) else: @@ -1994,11 +1958,7 @@ def construct_1d_arraylike_from_scalar( elif dtype.kind in ["M", "m"]: value = maybe_unbox_datetimelike(value, dtype) - # error: Argument "dtype" to "empty" has incompatible type - # "Union[dtype, ExtensionDtype]"; expected "Union[dtype, None, type, - # _SupportsDtype, str, Tuple[Any, int], Tuple[Any, Union[int, - # Sequence[int]]], List[Any], _DtypeDict, Tuple[Any, Any]]" - subarr = np.empty(length, dtype=dtype) # type: ignore[arg-type] + subarr = np.empty(length, dtype=dtype) subarr.fill(value) return subarr diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 9a72dee8d87ca..59d6f9a51ed43 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -38,6 +38,7 @@ is_string_like_dtype, needs_i8_conversion, ) +from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCExtensionArray, @@ -232,49 +233,29 @@ def _isna_array(values: ArrayLike, inf_as_na: bool = False): """ dtype = values.dtype - if is_extension_array_dtype(dtype): + if not isinstance(values, np.ndarray): + # i.e. 
ExtensionArray if inf_as_na and is_categorical_dtype(dtype): - # error: Item "ndarray" of "Union[ExtensionArray, ndarray]" has no attribute - # "to_numpy" - result = libmissing.isnaobj_old( - values.to_numpy() # type: ignore[union-attr] - ) + result = libmissing.isnaobj_old(values.to_numpy()) else: - # error: Item "ndarray" of "Union[ExtensionArray, ndarray]" has no attribute - # "isna" - result = values.isna() # type: ignore[union-attr] + result = values.isna() elif is_string_dtype(dtype): - # error: Argument 1 to "_isna_string_dtype" has incompatible type - # "ExtensionArray"; expected "ndarray" - # error: Argument 2 to "_isna_string_dtype" has incompatible type - # "ExtensionDtype"; expected "dtype[Any]" - result = _isna_string_dtype( - values, dtype, inf_as_na=inf_as_na # type: ignore[arg-type] - ) + result = _isna_string_dtype(values, inf_as_na=inf_as_na) elif needs_i8_conversion(dtype): # this is the NaT pattern result = values.view("i8") == iNaT else: if inf_as_na: - # error: Argument 1 to "__call__" of "ufunc" has incompatible type - # "ExtensionArray"; expected "Union[Union[int, float, complex, str, bytes, - # generic], Sequence[Union[int, float, complex, str, bytes, generic]], - # Sequence[Sequence[Any]], _SupportsArray]" - result = ~np.isfinite(values) # type: ignore[arg-type] + result = ~np.isfinite(values) else: - # error: Argument 1 to "__call__" of "ufunc" has incompatible type - # "ExtensionArray"; expected "Union[Union[int, float, complex, str, bytes, - # generic], Sequence[Union[int, float, complex, str, bytes, generic]], - # Sequence[Sequence[Any]], _SupportsArray]" - result = np.isnan(values) # type: ignore[arg-type] + result = np.isnan(values) return result -def _isna_string_dtype( - values: np.ndarray, dtype: np.dtype, inf_as_na: bool -) -> np.ndarray: +def _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> np.ndarray: # Working around NumPy ticket 1542 + dtype = values.dtype shape = values.shape if is_string_like_dtype(dtype): @@ 
-592,10 +573,8 @@ def na_value_for_dtype(dtype: DtypeObj, compat: bool = True): numpy.datetime64('NaT') """ - if is_extension_array_dtype(dtype): - # error: Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has no - # attribute "na_value" - return dtype.na_value # type: ignore[union-attr] + if isinstance(dtype, ExtensionDtype): + return dtype.na_value elif needs_i8_conversion(dtype): return dtype.type("NaT", "ns") elif is_float_dtype(dtype): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9fdd979ce8eca..2374cc0b6a8fa 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -719,9 +719,7 @@ def __init__( values, columns, index, columns, dtype=None, typ=manager ) else: - # error: Incompatible types in assignment (expression has type - # "ndarray", variable has type "List[ExtensionArray]") - values = construct_2d_arraylike_from_scalar( # type: ignore[assignment] + arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), @@ -730,11 +728,10 @@ def __init__( ) mgr = ndarray_to_mgr( - # error: "List[ExtensionArray]" has no attribute "dtype" - values, + arr2d, index, columns, - dtype=values.dtype, # type: ignore[attr-defined] + dtype=arr2d.dtype, copy=False, typ=manager, ) diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 002d989330160..4a08e733b770c 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -399,15 +399,15 @@ def dict_to_mgr( # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): + nan_dtype: DtypeObj + if dtype is None or ( isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.flexible) ): # GH#1783 nan_dtype = np.dtype("object") else: - # error: Incompatible types in assignment (expression has type - # "Union[dtype, ExtensionDtype]", variable has type "dtype") - nan_dtype = dtype # type: ignore[assignment] + nan_dtype = dtype val = construct_1d_arraylike_from_scalar(np.nan, len(index), 
nan_dtype) arrays.loc[missing] = [val] * missing.sum() @@ -727,27 +727,18 @@ def to_arrays( return arrays, columns if isinstance(data[0], (list, tuple)): - content = _list_to_arrays(data) + arr = _list_to_arrays(data) elif isinstance(data[0], abc.Mapping): - content, columns = _list_of_dict_to_arrays(data, columns) + arr, columns = _list_of_dict_to_arrays(data, columns) elif isinstance(data[0], ABCSeries): - content, columns = _list_of_series_to_arrays(data, columns) + arr, columns = _list_of_series_to_arrays(data, columns) else: # last ditch effort data = [tuple(x) for x in data] - content = _list_to_arrays(data) + arr = _list_to_arrays(data) - # error: Incompatible types in assignment (expression has type "List[ndarray]", - # variable has type "List[Union[Union[str, int, float, bool], Union[Any, Any, Any, - # Any]]]") - content, columns = _finalize_columns_and_data( # type: ignore[assignment] - content, columns, dtype - ) - # error: Incompatible return value type (got "Tuple[ndarray, Index]", expected - # "Tuple[List[ExtensionArray], Index]") - # error: Incompatible return value type (got "Tuple[ndarray, Index]", expected - # "Tuple[List[ndarray], Index]") - return content, columns # type: ignore[return-value] + content, columns = _finalize_columns_and_data(arr, columns, dtype) + return content, columns def _list_to_arrays(data: List[Union[Tuple, List]]) -> np.ndarray: @@ -838,38 +829,22 @@ def _finalize_columns_and_data( content: np.ndarray, # ndim == 2 columns: Optional[Index], dtype: Optional[DtypeObj], -) -> Tuple[List[np.ndarray], Index]: +) -> Tuple[List[ArrayLike], Index]: """ Ensure we have valid columns, cast object dtypes if possible. 
""" - # error: Incompatible types in assignment (expression has type "List[Any]", variable - # has type "ndarray") - content = list(content.T) # type: ignore[assignment] + contents = list(content.T) try: - # error: Argument 1 to "_validate_or_indexify_columns" has incompatible type - # "ndarray"; expected "List[Any]" - columns = _validate_or_indexify_columns( - content, columns # type: ignore[arg-type] - ) + columns = _validate_or_indexify_columns(contents, columns) except AssertionError as err: # GH#26429 do not raise user-facing AssertionError raise ValueError(err) from err - if len(content) and content[0].dtype == np.object_: - # error: Incompatible types in assignment (expression has type - # "List[Union[Union[str, int, float, bool], Union[Any, Any, Any, Any]]]", - # variable has type "ndarray") - # error: Argument 1 to "_convert_object_array" has incompatible type "ndarray"; - # expected "List[Union[Union[str, int, float, bool], Union[Any, Any, Any, - # Any]]]" - content = _convert_object_array( # type: ignore[assignment] - content, dtype=dtype # type: ignore[arg-type] - ) - # error: Incompatible return value type (got "Tuple[ndarray, Union[Index, - # List[Union[str, int]]]]", expected "Tuple[List[ndarray], Union[Index, - # List[Union[str, int]]]]") - return content, columns # type: ignore[return-value] + if len(contents) and contents[0].dtype == np.object_: + contents = _convert_object_array(contents, dtype=dtype) + + return contents, columns def _validate_or_indexify_columns( diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index 5e45d36e188a2..87be5c0997072 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -25,7 +25,6 @@ from pandas.core.dtypes.common import ( is_categorical_dtype, - is_extension_array_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( @@ -119,9 +118,7 @@ def hash_pandas_object( h = hash_array(obj._values, encoding, hash_key, categorize).astype( "uint64", copy=False ) - # error: 
Incompatible types in assignment (expression has type "Series", - # variable has type "ndarray") - h = Series(h, index=obj, dtype="uint64", copy=False) # type: ignore[assignment] + ser = Series(h, index=obj, dtype="uint64", copy=False) elif isinstance(obj, ABCSeries): h = hash_array(obj._values, encoding, hash_key, categorize).astype( @@ -141,11 +138,7 @@ def hash_pandas_object( arrays = itertools.chain([h], index_iter) h = combine_hash_arrays(arrays, 2) - # error: Incompatible types in assignment (expression has type "Series", - # variable has type "ndarray") - h = Series( # type: ignore[assignment] - h, index=obj.index, dtype="uint64", copy=False - ) + ser = Series(h, index=obj.index, dtype="uint64", copy=False) elif isinstance(obj, ABCDataFrame): hashes = (hash_array(series._values) for _, series in obj.items()) @@ -168,15 +161,11 @@ def hash_pandas_object( hashes = (x for x in _hashes) h = combine_hash_arrays(hashes, num_items) - # error: Incompatible types in assignment (expression has type "Series", - # variable has type "ndarray") - h = Series( # type: ignore[assignment] - h, index=obj.index, dtype="uint64", copy=False - ) + ser = Series(h, index=obj.index, dtype="uint64", copy=False) else: raise TypeError(f"Unexpected type for hashing {type(obj)}") - # error: Incompatible return value type (got "ndarray", expected "Series") - return h # type: ignore[return-value] + + return ser def hash_tuples( @@ -297,15 +286,11 @@ def hash_array( if is_categorical_dtype(dtype): vals = cast("Categorical", vals) return _hash_categorical(vals, encoding, hash_key) - elif is_extension_array_dtype(dtype): - # pandas/core/util/hashing.py:301: error: Item "ndarray" of - # "Union[ExtensionArray, ndarray]" has no attribute "_values_for_factorize" - # [union-attr] - vals, _ = vals._values_for_factorize() # type: ignore[union-attr] - - # error: Argument 1 to "_hash_ndarray" has incompatible type "ExtensionArray"; - # expected "ndarray" - return _hash_ndarray(vals, encoding, hash_key, 
categorize) # type: ignore[arg-type] + elif not isinstance(vals, np.ndarray): + # i.e. ExtensionArray + vals, _ = vals._values_for_factorize() + + return _hash_ndarray(vals, encoding, hash_key, categorize) def _hash_ndarray(
https://api.github.com/repos/pandas-dev/pandas/pulls/40452
2021-03-15T19:53:24Z
2021-03-17T10:00:16Z
2021-03-17T10:00:16Z
2021-03-17T14:09:24Z
Fix StringArray.astype for category dtype
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 63902b53ea36d..155486953f4ba 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -570,6 +570,7 @@ Conversion - Bug in creating a :class:`DataFrame` from an empty ``np.recarray`` not retaining the original dtypes (:issue:`40121`) - Bug in :class:`DataFrame` failing to raise ``TypeError`` when constructing from a ``frozenset`` (:issue:`40163`) - Bug in :class:`Index` construction silently ignoring a passed ``dtype`` when the data cannot be cast to that dtype (:issue:`21311`) +- Bug in :meth:`StringArray.astype` falling back to numpy and raising when converting to ``dtype='categorical'`` (:issue:`40450`) - Bug in :class:`DataFrame` construction with a dictionary containing an arraylike with ``ExtensionDtype`` and ``copy=True`` failing to make a copy (:issue:`38939`) - diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 0a2893ac49a49..666afb65e19ff 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -327,6 +327,9 @@ def astype(self, dtype, copy=True): arr[mask] = "0" values = arr.astype(dtype.numpy_dtype) return FloatingArray(values, mask, copy=False) + elif isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + return cls._from_sequence(self, dtype=dtype, copy=copy) elif np.issubdtype(dtype, np.floating): arr = self._ndarray.copy() mask = self.isna() diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py index 4e068690c41e5..d23c44733949a 100644 --- a/pandas/tests/series/methods/test_astype.py +++ b/pandas/tests/series/methods/test_astype.py @@ -350,6 +350,38 @@ def test_astype_bytes(self): assert result.dtypes == np.dtype("S3") +class TestAstypeString: + @pytest.mark.parametrize( + "data, dtype", + [ + ([True, NA], "boolean"), + (["A", NA], "category"), + (["2020-10-10", "2020-10-10"], "datetime64[ns]"), + (["2020-10-10", 
"2020-10-10", NaT], "datetime64[ns]"), + ( + ["2012-01-01 00:00:00-05:00", NaT], + "datetime64[ns, US/Eastern]", + ), + ([1, None], "UInt16"), + (["1/1/2021", "2/1/2021"], "period[M]"), + (["1/1/2021", "2/1/2021", NaT], "period[M]"), + (["1 Day", "59 Days", NaT], "timedelta64[ns]"), + # currently no way to parse IntervalArray from a list of strings + ], + ) + def test_astype_string_to_extension_dtype_roundtrip(self, data, dtype, request): + if dtype == "boolean" or ( + dtype in ("period[M]", "datetime64[ns]", "timedelta64[ns]") and NaT in data + ): + mark = pytest.mark.xfail( + reason="TODO StringArray.astype() with missing values #GH40566" + ) + request.node.add_marker(mark) + # GH-40351 + s = Series(data, dtype=dtype) + tm.assert_series_equal(s, s.astype("string").astype(dtype)) + + class TestAstypeCategorical: def test_astype_categorical_to_other(self): cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
- [X] closes #40351 - [X] tests added / passed - [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry This was failing due to `elif np.issubdtype(dtype, np.floating)` [here](https://github.com/pandas-dev/pandas/blob/master/pandas/core/arrays/string_.py#L330), which fails for the pandas dtypes. StringArrays are now being cast to Categorical just as they were before https://github.com/pandas-dev/pandas/pull/38530, when the casting still happened inside `Block`.
https://api.github.com/repos/pandas-dev/pandas/pulls/40450
2021-03-15T18:19:50Z
2021-04-02T19:09:57Z
2021-04-02T19:09:57Z
2021-04-02T19:10:02Z
PERF: no need to check for DataFrame in pandas.core.computation.expressions
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 0dbe5e8d83741..ae1928b8535f9 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -19,8 +19,6 @@ from pandas._typing import FuncType -from pandas.core.dtypes.generic import ABCDataFrame - from pandas.core.computation.check import NUMEXPR_INSTALLED from pandas.core.ops import roperator @@ -83,14 +81,8 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): # check for dtype compatibility dtypes: Set[str] = set() for o in [a, b]: - # Series implements dtypes, check for dimension count as well - if hasattr(o, "dtypes") and o.ndim > 1: - s = o.dtypes.value_counts() - if len(s) > 1: - return False - dtypes |= set(s.index.astype(str)) # ndarray and Series Case - elif hasattr(o, "dtype"): + if hasattr(o, "dtype"): dtypes |= {o.dtype.name} # allowed are a superset @@ -190,8 +182,6 @@ def _where_numexpr(cond, a, b): def _has_bool_dtype(x): - if isinstance(x, ABCDataFrame): - return "bool" in x.dtypes try: return x.dtype == bool except AttributeError: diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 30f88ba5e76f6..43b119e7e1087 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -12,7 +12,7 @@ ) from pandas.core.computation import expressions as expr -_frame = DataFrame(np.random.randn(10000, 4), columns=list("ABCD"), dtype="float64") +_frame = DataFrame(np.random.randn(10001, 4), columns=list("ABCD"), dtype="float64") _frame2 = DataFrame(np.random.randn(100, 4), columns=list("ABCD"), dtype="float64") _mixed = DataFrame( { @@ -36,6 +36,11 @@ _integer2 = DataFrame( np.random.randint(1, 100, size=(101, 4)), columns=list("ABCD"), dtype="int64" ) +_array = _frame["A"].values.copy() +_array2 = _frame2["A"].values.copy() + +_array_mixed = _mixed["D"].values.copy() +_array_mixed2 = _mixed2["D"].values.copy() @pytest.mark.skipif(not expr.USE_NUMEXPR, reason="not 
using numexpr") @@ -43,7 +48,9 @@ class TestExpressions: def setup_method(self, method): self.frame = _frame.copy() + self.array = _array.copy() self.frame2 = _frame2.copy() + self.array2 = _array2.copy() self.mixed = _mixed.copy() self.mixed2 = _mixed2.copy() self._MIN_ELEMENTS = expr._MIN_ELEMENTS @@ -134,25 +141,19 @@ def test_invalid(self): # no op result = expr._can_use_numexpr( - operator.add, None, self.frame, self.frame, "evaluate" - ) - assert not result - - # mixed - result = expr._can_use_numexpr( - operator.add, "+", self.mixed, self.frame, "evaluate" + operator.add, None, self.array, self.array, "evaluate" ) assert not result # min elements result = expr._can_use_numexpr( - operator.add, "+", self.frame2, self.frame2, "evaluate" + operator.add, "+", self.array2, self.array2, "evaluate" ) assert not result # ok, we only check on first part of expression result = expr._can_use_numexpr( - operator.add, "+", self.frame, self.frame2, "evaluate" + operator.add, "+", self.array, self.array2, "evaluate" ) assert result @@ -160,7 +161,9 @@ def test_invalid(self): "opname,op_str", [("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")], ) - @pytest.mark.parametrize("left,right", [(_frame, _frame2), (_mixed, _mixed2)]) + @pytest.mark.parametrize( + "left,right", [(_array, _array2), (_array_mixed, _array_mixed2)] + ) def test_binary_ops(self, opname, op_str, left, right): def testit(): @@ -170,16 +173,9 @@ def testit(): op = getattr(operator, opname) - result = expr._can_use_numexpr(op, op_str, left, left, "evaluate") - assert result != left._is_mixed_type - result = expr.evaluate(op, left, left, use_numexpr=True) expected = expr.evaluate(op, left, left, use_numexpr=False) - - if isinstance(result, DataFrame): - tm.assert_frame_equal(result, expected) - else: - tm.assert_numpy_array_equal(result, expected.values) + tm.assert_numpy_array_equal(result, expected) result = expr._can_use_numexpr(op, op_str, right, right, "evaluate") assert not 
result @@ -203,7 +199,9 @@ def testit(): ("ne", "!="), ], ) - @pytest.mark.parametrize("left,right", [(_frame, _frame2), (_mixed, _mixed2)]) + @pytest.mark.parametrize( + "left,right", [(_array, _array2), (_array_mixed, _array_mixed2)] + ) def test_comparison_ops(self, opname, op_str, left, right): def testit(): f12 = left + 1 @@ -211,15 +209,9 @@ def testit(): op = getattr(operator, opname) - result = expr._can_use_numexpr(op, op_str, left, f12, "evaluate") - assert result != left._is_mixed_type - result = expr.evaluate(op, left, f12, use_numexpr=True) expected = expr.evaluate(op, left, f12, use_numexpr=False) - if isinstance(result, DataFrame): - tm.assert_frame_equal(result, expected) - else: - tm.assert_numpy_array_equal(result, expected.values) + tm.assert_numpy_array_equal(result, expected) result = expr._can_use_numexpr(op, op_str, right, f22, "evaluate") assert not result
In practice, this case is only covered by specific tests using DataFrame in `test_expressions.py`, not from actual usage through ops (so I just updated the failing tests to not use any DataFrame as input to those checks). Although our test suite might not be the best suited to check that, since many tests are small and thus don't use the numexpr path... (fixing that in https://github.com/pandas-dev/pandas/pull/40463)
https://api.github.com/repos/pandas-dev/pandas/pulls/40445
2021-03-15T11:12:27Z
2021-03-23T13:46:02Z
2021-03-23T13:46:02Z
2021-03-23T13:59:52Z
REF: move roperators to pandas.core
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index d74ef16765ef9..3b1774ade6f85 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -118,7 +118,7 @@ import sys import pandas blocklist = {'bs4', 'gcsfs', 'html5lib', 'http', 'ipython', 'jinja2', 'hypothesis', - 'lxml', 'matplotlib', 'numexpr', 'openpyxl', 'py', 'pytest', 's3fs', 'scipy', + 'lxml', 'matplotlib', 'openpyxl', 'py', 'pytest', 's3fs', 'scipy', 'tables', 'urllib.request', 'xlrd', 'xlsxwriter', 'xlwt'} # GH#28227 for some of these check for top-level modules, while others are diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c13eb3f109354..d7861e5157b66 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -103,6 +103,7 @@ ) from pandas.core.base import PandasObject import pandas.core.common as com +import pandas.core.computation.expressions as expressions from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, @@ -1307,8 +1308,6 @@ def where(self, other, cond, errors="raise") -> List[Block]: ------- List[Block] """ - import pandas.core.computation.expressions as expressions - assert cond.ndim == self.ndim assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 8ace64fedacb9..4ebcd6533af2e 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -29,7 +29,10 @@ ) from pandas.core.dtypes.missing import isna -from pandas.core import algorithms +from pandas.core import ( + algorithms, + roperator, +) from pandas.core.ops.array_ops import ( # noqa:F401 arithmetic_op, comp_method_OBJECT_ARRAY, @@ -53,7 +56,7 @@ kleene_xor, ) from pandas.core.ops.methods import add_flex_arithmetic_methods # noqa:F401 -from pandas.core.ops.roperator import ( # noqa:F401 +from pandas.core.roperator import ( # noqa:F401 radd, rand_, rdiv, @@ -319,7 +322,7 @@ def should_reindex_frame_op( """ assert isinstance(left, 
ABCDataFrame) - if op is operator.pow or op is rpow: + if op is operator.pow or op is roperator.rpow: # GH#32685 pow has special semantics for operating with null values return False diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 333bdbf57bab3..1d7c16de0c05d 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -44,11 +44,14 @@ notna, ) +import pandas.core.computation.expressions as expressions from pandas.core.construction import ensure_wrapped_if_datetimelike -from pandas.core.ops import missing +from pandas.core.ops import ( + missing, + roperator, +) from pandas.core.ops.dispatch import should_extension_dispatch from pandas.core.ops.invalid import invalid_comparison -from pandas.core.ops.roperator import rpow def comp_method_OBJECT_ARRAY(op, x, y): @@ -120,7 +123,7 @@ def _masked_arith_op(x: np.ndarray, y, op): # 1 ** np.nan is 1. So we have to unmask those. if op is pow: mask = np.where(x == 1, False, mask) - elif op is rpow: + elif op is roperator.rpow: mask = np.where(y == 1, False, mask) if mask.any(): @@ -152,8 +155,6 @@ def _na_arithmetic_op(left, right, op, is_cmp: bool = False): ------ TypeError : invalid operation """ - import pandas.core.computation.expressions as expressions - try: result = expressions.evaluate(op, left, right) except TypeError: diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py index 700c4a946e2b2..df22919ed19f1 100644 --- a/pandas/core/ops/methods.py +++ b/pandas/core/ops/methods.py @@ -8,16 +8,7 @@ ABCSeries, ) -from pandas.core.ops.roperator import ( - radd, - rdivmod, - rfloordiv, - rmod, - rmul, - rpow, - rsub, - rtruediv, -) +from pandas.core.ops import roperator def _get_method_wrappers(cls): @@ -89,19 +80,19 @@ def _create_methods(cls, arith_method, comp_method): new_methods.update( { "add": arith_method(operator.add), - "radd": arith_method(radd), + "radd": arith_method(roperator.radd), "sub": arith_method(operator.sub), "mul": 
arith_method(operator.mul), "truediv": arith_method(operator.truediv), "floordiv": arith_method(operator.floordiv), "mod": arith_method(operator.mod), "pow": arith_method(operator.pow), - "rmul": arith_method(rmul), - "rsub": arith_method(rsub), - "rtruediv": arith_method(rtruediv), - "rfloordiv": arith_method(rfloordiv), - "rpow": arith_method(rpow), - "rmod": arith_method(rmod), + "rmul": arith_method(roperator.rmul), + "rsub": arith_method(roperator.rsub), + "rtruediv": arith_method(roperator.rtruediv), + "rfloordiv": arith_method(roperator.rfloordiv), + "rpow": arith_method(roperator.rpow), + "rmod": arith_method(roperator.rmod), } ) new_methods["div"] = new_methods["truediv"] @@ -109,7 +100,7 @@ def _create_methods(cls, arith_method, comp_method): if have_divmod: # divmod doesn't have an op that is supported by numexpr new_methods["divmod"] = arith_method(divmod) - new_methods["rdivmod"] = arith_method(rdivmod) + new_methods["rdivmod"] = arith_method(roperator.rdivmod) new_methods.update( { diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py index 20b7510c33160..ea6223765523d 100644 --- a/pandas/core/ops/missing.py +++ b/pandas/core/ops/missing.py @@ -31,11 +31,7 @@ is_scalar, ) -from pandas.core.ops.roperator import ( - rdivmod, - rfloordiv, - rmod, -) +from pandas.core.ops import roperator def fill_zeros(result, x, y): @@ -167,7 +163,7 @@ def dispatch_fill_zeros(op, left, right, result): mask_zero_div_zero(left, right, result[0]), fill_zeros(result[1], left, right), ) - elif op is rdivmod: + elif op is roperator.rdivmod: result = ( mask_zero_div_zero(right, left, result[0]), fill_zeros(result[1], right, left), @@ -176,12 +172,12 @@ def dispatch_fill_zeros(op, left, right, result): # Note: no need to do this for truediv; in py3 numpy behaves the way # we want. 
result = mask_zero_div_zero(left, right, result) - elif op is rfloordiv: + elif op is roperator.rfloordiv: # Note: no need to do this for rtruediv; in py3 numpy behaves the way # we want. result = mask_zero_div_zero(right, left, result) elif op is operator.mod: result = fill_zeros(result, left, right) - elif op is rmod: + elif op is roperator.rmod: result = fill_zeros(result, right, left) return result diff --git a/pandas/core/ops/roperator.py b/pandas/core/roperator.py similarity index 100% rename from pandas/core/ops/roperator.py rename to pandas/core/roperator.py
This PR does 2 things: - change `pandas.core.ops.roperator` imports to all import the module and use `roperator.<rop>` instead of importing the actual functions (this makes the usage similar as for the non-reversed operators with `import operator`) - move `roperator.py` from pandas/core/ops to pandas/core, so it can be imported in `pandas.core.computation.expressions` without relying on the `pandas.core.ops` module, which in its turn means we can move the inline import of `pandas.core.computation.expressions` in `pandas.core.ops.array_ops` to a top-level import (better for performance for a function that is called repeatedly) I think the first change is useful anyway (can also do it as a separate PR), for the second point, a possible alternative would be to move `pandas.core.computation.expressions` from the computation/ module to the ops/ module One consequence of this, though: by always importing this in `array_ops`, it gets imported on `import pandas` time, and thus `numexpr` always gets imported at that point (while now it's only a delayed import when doing a first operation). Now, I checked with `python -X importtime -c "import numpy, pandas, numexpr"`, and after already having import numpy (and pandas), also import numexpr is only a small addition (most of the import of numexpr is import numpy). Based on running that a few times, it seems to adds 1.5-2 % to the total pandas import time.
https://api.github.com/repos/pandas-dev/pandas/pulls/40444
2021-03-15T10:40:05Z
2021-03-23T16:47:38Z
2021-03-23T16:47:38Z
2021-03-24T07:26:28Z
[ArrayManager] DataFrame constructor from ndarray
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 83ecdbce5fa80..fdfaffbda97b4 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -34,6 +34,7 @@ from pandas.core.dtypes.common import ( is_1d_only_ea_dtype, is_datetime64tz_dtype, + is_datetime_or_timedelta_dtype, is_dtype_equal, is_extension_array_dtype, is_integer_dtype, @@ -60,6 +61,7 @@ TimedeltaArray, ) from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, extract_array, sanitize_array, ) @@ -316,10 +318,30 @@ def ndarray_to_mgr( index, columns = _get_axes( values.shape[0], values.shape[1], index=index, columns=columns ) - values = values.T _check_values_indices_shape_match(values, index, columns) + if typ == "array": + + if issubclass(values.dtype.type, str): + values = np.array(values, dtype=object) + + if dtype is None and is_object_dtype(values.dtype): + arrays = [ + ensure_wrapped_if_datetimelike( + maybe_infer_to_datetimelike(values[:, i].copy()) + ) + for i in range(values.shape[1]) + ] + else: + if is_datetime_or_timedelta_dtype(values.dtype): + values = ensure_wrapped_if_datetimelike(values) + arrays = [values[:, i].copy() for i in range(values.shape[1])] + + return ArrayManager(arrays, [index, columns], verify_integrity=False) + + values = values.T + # if we don't have a dtype specified, then try to convert objects # on the entire block; this is to convert if we have datetimelike's # embedded in an object type @@ -358,13 +380,13 @@ def _check_values_indices_shape_match( Check that the shape implied by our axes matches the actual shape of the data. """ - if values.shape[0] != len(columns): + if values.shape[1] != len(columns) or values.shape[0] != len(index): # Could let this raise in Block constructor, but we get a more # helpful exception message this way. 
- if values.shape[1] == 0: + if values.shape[0] == 0: raise ValueError("Empty data passed with indices specified.") - passed = values.T.shape + passed = values.shape implied = (len(index), len(columns)) raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}") diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py index 544960113fafc..1583b3f91bea2 100644 --- a/pandas/tests/frame/methods/test_astype.py +++ b/pandas/tests/frame/methods/test_astype.py @@ -428,11 +428,27 @@ def test_astype_to_incorrect_datetimelike(self, unit): other = f"m8[{unit}]" df = DataFrame(np.array([[1, 2, 3]], dtype=dtype)) - msg = fr"Cannot cast DatetimeArray to dtype timedelta64\[{unit}\]" + msg = "|".join( + [ + # BlockManager path + fr"Cannot cast DatetimeArray to dtype timedelta64\[{unit}\]", + # ArrayManager path + "cannot astype a datetimelike from " + fr"\[datetime64\[ns\]\] to \[timedelta64\[{unit}\]\]", + ] + ) with pytest.raises(TypeError, match=msg): df.astype(other) - msg = fr"Cannot cast TimedeltaArray to dtype datetime64\[{unit}\]" + msg = "|".join( + [ + # BlockManager path + fr"Cannot cast TimedeltaArray to dtype datetime64\[{unit}\]", + # ArrayManager path + "cannot astype a timedelta from " + fr"\[timedelta64\[ns\]\] to \[datetime64\[{unit}\]\]", + ] + ) df = DataFrame(np.array([[1, 2, 3]], dtype=other)) with pytest.raises(TypeError, match=msg): df.astype(dtype) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index c565567754da0..1e10f8869ec5f 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -46,6 +46,7 @@ ) import pandas._testing as tm from pandas.arrays import ( + DatetimeArray, IntervalArray, PeriodArray, SparseArray, @@ -2569,6 +2570,13 @@ def test_construction_from_set_raises(self, typ): with pytest.raises(TypeError, match=msg): Series(values) + def test_construction_from_ndarray_datetimelike(self): + # 
ensure the underlying arrays are properly wrapped as EA when + # constructed from 2D ndarray + arr = np.arange(0, 12, dtype="datetime64[ns]").reshape(4, 3) + df = DataFrame(arr) + assert all(isinstance(arr, DatetimeArray) for arr in df._mgr.arrays) + def get1(obj): if isinstance(obj, Series):
xref https://github.com/pandas-dev/pandas/issues/39146 Currently, the `DataFrame(ndarray)` construction for ArrayManager still went through BlockManager and then `mgr_to_mgr` conversion, which is less efficient. This PR adds a check in `ndarray_to_mgr` for directly creating an ArrayManager. ~It's still WIP, because there are still some validation steps that are now done inside BlockManager/Block inits, that need to be factored out / shared / added to ArrayManager path.~
https://api.github.com/repos/pandas-dev/pandas/pulls/40441
2021-03-15T09:30:56Z
2021-04-26T09:34:19Z
2021-04-26T09:34:19Z
2021-04-26T17:42:54Z
⬆️ UPGRADE: Autoupdate pre-commit config
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 99ba2feadbca7..aa8c2b74d7a7e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ minimum_pre_commit_version: 2.9.2 exclude: ^LICENSES/|\.(html|csv|svg)$ repos: - repo: https://github.com/MarcoGorelli/absolufy-imports - rev: v0.2.1 + rev: v0.3.0 hooks: - id: absolufy-imports files: ^pandas/ @@ -33,7 +33,7 @@ repos: exclude: ^pandas/_libs/src/(klib|headers)/ args: [--quiet, '--extensions=c,h', '--headers=h', --recursive, '--filter=-readability/casting,-runtime/int,-build/include_subdir'] - repo: https://gitlab.com/pycqa/flake8 - rev: 3.8.4 + rev: 3.9.0 hooks: - id: flake8 additional_dependencies: [flake8-comprehensions>=3.1.0] diff --git a/pandas/_typing.py b/pandas/_typing.py index 3e584774e539a..e95dff2e69ff0 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -49,7 +49,7 @@ from pandas import Interval from pandas.core.arrays.base import ExtensionArray from pandas.core.frame import DataFrame - from pandas.core.generic import NDFrame # noqa: F401 + from pandas.core.generic import NDFrame from pandas.core.groupby.generic import ( DataFrameGroupBy, SeriesGroupBy,
<!-- START pr-commits --> <!-- END pr-commits --> ## Base PullRequest default branch (https://github.com/pandas-dev/pandas/tree/master) ## Command results <details> <summary>Details: </summary> <details> <summary><em>add path</em></summary> ```Shell /home/runner/work/_actions/technote-space/create-pr-action/v2/node_modules/npm-check-updates/bin ``` </details> <details> <summary><em>pip install pre-commit</em></summary> ```Shell Collecting pre-commit Downloading pre_commit-2.11.1-py2.py3-none-any.whl (187 kB) Collecting identify>=1.0.0 Downloading identify-2.1.3-py2.py3-none-any.whl (98 kB) Collecting toml Using cached toml-0.10.2-py2.py3-none-any.whl (16 kB) Collecting pyyaml>=5.1 Downloading PyYAML-5.4.1-cp39-cp39-manylinux1_x86_64.whl (630 kB) Collecting cfgv>=2.0.0 Using cached cfgv-3.2.0-py2.py3-none-any.whl (7.3 kB) Collecting nodeenv>=0.11.1 Using cached nodeenv-1.5.0-py2.py3-none-any.whl (21 kB) Collecting virtualenv>=20.0.8 Downloading virtualenv-20.4.2-py2.py3-none-any.whl (7.2 MB) Collecting distlib<1,>=0.3.1 Using cached distlib-0.3.1-py2.py3-none-any.whl (335 kB) Collecting six<2,>=1.9.0 Using cached six-1.15.0-py2.py3-none-any.whl (10 kB) Collecting filelock<4,>=3.0.0 Using cached filelock-3.0.12-py3-none-any.whl (7.6 kB) Collecting appdirs<2,>=1.4.3 Using cached appdirs-1.4.4-py2.py3-none-any.whl (9.6 kB) Installing collected packages: six, filelock, distlib, appdirs, virtualenv, toml, pyyaml, nodeenv, identify, cfgv, pre-commit Successfully installed appdirs-1.4.4 cfgv-3.2.0 distlib-0.3.1 filelock-3.0.12 identify-2.1.3 nodeenv-1.5.0 pre-commit-2.11.1 pyyaml-5.4.1 six-1.15.0 toml-0.10.2 virtualenv-20.4.2 ``` </details> <details> <summary><em>pre-commit autoupdate || (exit 0);</em></summary> ```Shell Updating https://github.com/MarcoGorelli/absolufy-imports ... [INFO] Initializing environment for https://github.com/MarcoGorelli/absolufy-imports. updating v0.2.2 -> v0.3.0. Updating https://github.com/python/black ... already up to date. 
Updating https://github.com/codespell-project/codespell ... [INFO] Initializing environment for https://github.com/codespell-project/codespell. already up to date. Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks. already up to date. Updating https://github.com/cpplint/cpplint ... [INFO] Initializing environment for https://github.com/cpplint/cpplint. =====> /home/runner/.cache/pre-commit/repoj060n6sd/.pre-commit-hooks.yaml does not exist Updating https://gitlab.com/pycqa/flake8 ... [INFO] Initializing environment for https://gitlab.com/pycqa/flake8. updating 3.8.4 -> 3.9.0. Updating https://github.com/PyCQA/isort ... [INFO] Initializing environment for https://github.com/PyCQA/isort. already up to date. Updating https://github.com/MarcoGorelli/no-string-hints ... [INFO] Initializing environment for https://github.com/MarcoGorelli/no-string-hints. Cannot update because the update target is missing these hooks: no-string-hints Updating https://github.com/asottile/pyupgrade ... [INFO] Initializing environment for https://github.com/asottile/pyupgrade. already up to date. Updating https://github.com/pre-commit/pygrep-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pygrep-hooks. already up to date. Updating https://github.com/asottile/yesqa ... already up to date. ``` </details> <details> <summary><em>pre-commit run -a || (exit 0);</em></summary> ```Shell [INFO] Initializing environment for https://github.com/cpplint/cpplint. [INFO] Initializing environment for https://gitlab.com/pycqa/flake8:flake8-comprehensions>=3.1.0. [INFO] Initializing environment for https://github.com/MarcoGorelli/no-string-hints. [INFO] Installing environment for https://github.com/MarcoGorelli/absolufy-imports. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/python/black. 
[INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/codespell-project/codespell. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/cpplint/cpplint. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://gitlab.com/pycqa/flake8. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://gitlab.com/pycqa/flake8. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/PyCQA/isort. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/MarcoGorelli/no-string-hints. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/asottile/pyupgrade. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/asottile/yesqa. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for local. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for local. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for local. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... 
absolufy-imports.......................................................................................Passed black..................................................................................................Passed codespell..............................................................................................Passed Fix End of Files.......................................................................................Passed Trim Trailing Whitespace...............................................................................Passed cpplint................................................................................................Passed flake8.................................................................................................Passed flake8 (cython)........................................................................................Passed flake8 (cython template)...............................................................................Passed isort..................................................................................................Passed no-string-hints........................................................................................Passed pyupgrade..............................................................................................Passed rst ``code`` is two backticks..........................................................................Passed rst directives end with two colons.....................................................................Passed rst ``inline code`` next to normal text................................................................Passed Strip unnecessary `# noqa`s............................................................................Failed - hook id: yesqa - exit code: 1 - files were modified by this hook Rewriting pandas/_typing.py flake8-rst.............................................................................................Passed Check for use of Union[Series, 
DataFrame] instead of FrameOrSeriesUnion alias..........................Passed Check for inconsistent use of pandas namespace in tests................................................Passed Check for incorrect code block or IPython directives...................................................Passed Check code for instances of os.remove..................................................................Passed Check code for instances of pd.api.types...............................................................Passed Check for non-standard imports.........................................................................Passed Check for non-standard imports in test suite...........................................................Passed Check for non-standard numpy.random-related imports excluding pandas/_testing.py.......................Passed Check for use of np.bool instead of np.bool_...........................................................Passed Check for use of np.object instead of np.object_.......................................................Passed Generate pip dependency from conda.....................................................................Passed Validate correct capitalization among titles in documentation..........................................Passed Check for use of foo.__class__ instead of type(foo)....................................................Passed Check for use of bare pytest raises....................................................................Passed Check for use of private functions across modules......................................................Passed Check for import of private attributes across modules..................................................Passed Check for use of pytest.xfail..........................................................................Passed Check for use of not concatenated strings..............................................................Passed Check for strings with wrong placed 
spaces.............................................................Passed Check for outdated annotation syntax and missing error codes...........................................Passed ``` </details> </details> ## Changed files <details> <summary>Changed 2 files: </summary> - .pre-commit-config.yaml - pandas/_typing.py </details> <hr> [:octocat: Repo](https://github.com/technote-space/create-pr-action) | [:memo: Issues](https://github.com/technote-space/create-pr-action/issues) | [:department_store: Marketplace](https://github.com/marketplace/actions/create-pr-action)
https://api.github.com/repos/pandas-dev/pandas/pulls/40439
2021-03-15T07:24:18Z
2021-03-15T08:36:28Z
2021-03-15T08:36:28Z
2021-03-16T14:28:45Z
DOC: Minor formatting in user_guide/window
diff --git a/doc/source/user_guide/window.rst b/doc/source/user_guide/window.rst index 4a560f6b23932..d09c1ab9a1409 100644 --- a/doc/source/user_guide/window.rst +++ b/doc/source/user_guide/window.rst @@ -168,7 +168,7 @@ parameter: ============= ==================== Value Behavior ============= ==================== -``right'`` close right endpoint +``'right'`` close right endpoint ``'left'`` close left endpoint ``'both'`` close both endpoints ``'neither'`` open endpoints @@ -214,7 +214,7 @@ ending indices of the windows. Additionally, ``num_values``, ``min_periods``, `` and will automatically be passed to ``get_window_bounds`` and the defined method must always accept these arguments. -For example, if we have the following :class:``DataFrame``: +For example, if we have the following :class:`DataFrame` .. ipython:: python @@ -370,8 +370,8 @@ two :class:`Series` or any combination of :class:`DataFrame`/:class:`Series` or with the passed Series, thus returning a DataFrame. * :class:`DataFrame`/:class:`DataFrame`: by default compute the statistic for matching column names, returning a DataFrame. If the keyword argument ``pairwise=True`` is - passed then computes the statistic for each pair of columns, returning a - ``MultiIndexed DataFrame`` whose ``index`` are the dates in question (see :ref:`the next section + passed then computes the statistic for each pair of columns, returning a :class:`DataFrame` with a + :class:`MultiIndex` whose values are the dates in question (see :ref:`the next section <window.corr_pairwise>`). For example:
https://api.github.com/repos/pandas-dev/pandas/pulls/40438
2021-03-15T04:09:18Z
2021-03-15T07:44:12Z
2021-03-15T07:44:12Z
2021-03-15T17:23:03Z
ENH: `escape` html argument in Styler.format
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index a9a5041e4a410..87f80ee29a95f 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -135,6 +135,7 @@ Other enhancements - :meth:`.Styler.set_table_styles` amended to optionally allow certain css-string input arguments (:issue:`39564`) - :meth:`.Styler.apply` now more consistently accepts ndarray function returns, i.e. in all cases for ``axis`` is ``0, 1 or None`` (:issue:`39359`) - :meth:`.Styler.apply` and :meth:`.Styler.applymap` now raise errors if wrong format CSS is passed on render (:issue:`39660`) +- :meth:`.Styler.format` adds keyword argument ``escape`` for optional HTML escaping (:issue:`40437`) - Builtin highlighting methods in :class:`Styler` have a more consistent signature and css customisability (:issue:`40242`) - :meth:`Series.loc.__getitem__` and :meth:`Series.loc.__setitem__` with :class:`MultiIndex` now raising helpful error message when indexer has too many dimensions (:issue:`35349`) - :meth:`pandas.read_stata` and :class:`StataReader` support reading data from compressed files. diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 3abb39d2194c0..60fb854928f72 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -48,6 +48,7 @@ from pandas.core.indexes.api import Index jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.") +from markupsafe import escape as escape_func # markupsafe is jinja2 dependency BaseFormatter = Union[str, Callable] ExtFormatter = Union[BaseFormatter, Dict[Any, Optional[BaseFormatter]]] @@ -113,6 +114,12 @@ class Styler: .. versionadded:: 1.2.0 + escape : bool, default False + Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in cell display + strings with HTML-safe sequences. + + ... 
versionadded:: 1.3.0 + Attributes ---------- env : Jinja2 jinja2.Environment @@ -169,6 +176,7 @@ def __init__( cell_ids: bool = True, na_rep: Optional[str] = None, uuid_len: int = 5, + escape: bool = False, ): # validate ordered args if isinstance(data, pd.Series): @@ -202,7 +210,7 @@ def __init__( ] = defaultdict(lambda: partial(_default_formatter, precision=def_precision)) self.precision = precision # can be removed on set_precision depr cycle self.na_rep = na_rep # can be removed on set_na_rep depr cycle - self.format(formatter=None, precision=precision, na_rep=na_rep) + self.format(formatter=None, precision=precision, na_rep=na_rep, escape=escape) def _repr_html_(self) -> str: """ @@ -549,6 +557,7 @@ def format( subset: Optional[Union[slice, Sequence[Any]]] = None, na_rep: Optional[str] = None, precision: Optional[int] = None, + escape: bool = False, ) -> Styler: """ Format the text display value of cells. @@ -572,6 +581,12 @@ def format( .. versionadded:: 1.3.0 + escape : bool, default False + Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in cell display + string with HTML-safe sequences. Escaping is done before ``formatter``. + + .. versionadded:: 1.3.0 + Returns ------- self : Styler @@ -611,7 +626,7 @@ def format( 0 MISS 1.000 A 1 2.000 MISS 3.000 - Using a format specification on consistent column dtypes + Using a ``formatter`` specification on consistent column dtypes >>> df.style.format('{:.2f}', na_rep='MISS', subset=[0,1]) 0 1 2 @@ -634,15 +649,34 @@ def format( 0 MISS 1.00 A 1 2.0 PASS 3.00 - Using a callable formatting function + Using a callable ``formatter`` function. >>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT' >>> df.style.format({0: '{:.1f}', 2: func}, precision=4, na_rep='MISS') 0 1 2 0 MISS 1.0000 STRING 1 2.0 MISS FLOAT - """ - if all((formatter is None, subset is None, precision is None, na_rep is None)): + + Using a ``formatter`` with HTML ``escape`` and ``na_rep``. 
+ + >>> df = pd.DataFrame([['<div></div>', '"A&B"', None]]) + >>> s = df.style.format('<a href="a.com/{0}">{0}</a>', escape=True, na_rep="NA") + >>> s.render() + ... + <td .. ><a href="a.com/&lt;div&gt;&lt;/div&gt;">&lt;div&gt;&lt;/div&gt;</a></td> + <td .. ><a href="a.com/&#34;A&amp;B&#34;">&#34;A&amp;B&#34;</a></td> + <td .. >NA</td> + ... + """ + if all( + ( + formatter is None, + subset is None, + precision is None, + na_rep is None, + escape is False, + ) + ): self._display_funcs.clear() return self # clear the formatter / revert to default and avoid looping @@ -660,7 +694,7 @@ def format( except KeyError: format_func = None format_func = _maybe_wrap_formatter( - format_func, na_rep=na_rep, precision=precision + format_func, na_rep=na_rep, precision=precision, escape=escape ) for row, value in data[[col]].itertuples(): @@ -2154,6 +2188,7 @@ def _maybe_wrap_formatter( formatter: Optional[BaseFormatter] = None, na_rep: Optional[str] = None, precision: Optional[int] = None, + escape: bool = False, ) -> Callable: """ Allows formatters to be expressed as str, callable or None, where None returns @@ -2170,10 +2205,19 @@ def _maybe_wrap_formatter( else: raise TypeError(f"'formatter' expected str or callable, got {type(formatter)}") + def _str_escape(x, escape: bool): + """if escaping: only use on str, else return input""" + if escape and isinstance(x, str): + return escape_func(x) + else: + return x + + display_func = lambda x: formatter_func(partial(_str_escape, escape=escape)(x)) + if na_rep is None: - return formatter_func + return display_func else: - return lambda x: na_rep if pd.isna(x) else formatter_func(x) + return lambda x: na_rep if pd.isna(x) else display_func(x) def _maybe_convert_css_to_tuples(style: CSSProperties) -> CSSList: diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py index 977b92e217868..cb1db27099cd1 100644 --- a/pandas/tests/io/formats/style/test_style.py +++ 
b/pandas/tests/io/formats/style/test_style.py @@ -647,6 +647,36 @@ def test_format_clear(self): self.styler.format() assert (0, 0) not in self.styler._display_funcs # formatter cleared to default + def test_format_escape(self): + df = DataFrame([['<>&"']]) + s = Styler(df, uuid_len=0).format("X&{0}>X", escape=False) + expected = '<td id="T__row0_col0" class="data row0 col0" >X&<>&">X</td>' + assert expected in s.render() + + # only the value should be escaped before passing to the formatter + s = Styler(df, uuid_len=0).format("X&{0}>X", escape=True) + ex = '<td id="T__row0_col0" class="data row0 col0" >X&&lt;&gt;&amp;&#34;>X</td>' + assert ex in s.render() + + def test_format_escape_na_rep(self): + # tests the na_rep is not escaped + df = DataFrame([['<>&"', None]]) + s = Styler(df, uuid_len=0).format("X&{0}>X", escape=True, na_rep="&") + ex = '<td id="T__row0_col0" class="data row0 col0" >X&&lt;&gt;&amp;&#34;>X</td>' + expected2 = '<td id="T__row0_col1" class="data row0 col1" >&</td>' + assert ex in s.render() + assert expected2 in s.render() + + def test_format_escape_floats(self): + # test given formatter for number format is not impacted by escape + s = self.df.style.format("{:.1f}", escape=True) + for expected in [">0.0<", ">1.0<", ">-1.2<", ">-0.6<"]: + assert expected in s.render() + # tests precision of floats is not impacted by escape + s = self.df.style.format(precision=1, escape=True) + for expected in [">0<", ">1<", ">-1.2<", ">-0.6<"]: + assert expected in s.render() + def test_nonunique_raises(self): df = DataFrame([[1, 2]], columns=["A", "A"]) msg = "style is not supported for non-unique indices."
- [x] closes #40388 Adds an HTML `escape` parameter to `Styler.format`, which acts to escape the data value **before** it is passed to a formatter. For example, `Styler.format('<a href="a.com/{0}">{0}</a>', escape=True)` will not escape the HTML as part of the formatting function, it will only escape the value `{0}` going into the formatter.
https://api.github.com/repos/pandas-dev/pandas/pulls/40437
2021-03-14T22:42:47Z
2021-03-23T17:03:14Z
2021-03-23T17:03:14Z
2021-03-23T17:32:29Z
TYP: fix type ignores
diff --git a/pandas/_testing/_io.py b/pandas/_testing/_io.py index e327f48f9a888..c77b9172e2091 100644 --- a/pandas/_testing/_io.py +++ b/pandas/_testing/_io.py @@ -93,9 +93,7 @@ def dec(f): is_decorating = not kwargs and len(args) == 1 and callable(args[0]) if is_decorating: f = args[0] - # error: Incompatible types in assignment (expression has type - # "List[<nothing>]", variable has type "Tuple[Any, ...]") - args = [] # type: ignore[assignment] + args = () return dec(f) else: return dec diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py index e551f05efa31b..03fb6a68bf2f2 100644 --- a/pandas/compat/numpy/__init__.py +++ b/pandas/compat/numpy/__init__.py @@ -27,41 +27,39 @@ _tz_regex = re.compile("[+-]0000$") -def tz_replacer(s): - if isinstance(s, str): - if s.endswith("Z"): - s = s[:-1] - elif _tz_regex.search(s): - s = s[:-5] - return s +def _tz_replacer(tstring): + if isinstance(tstring, str): + if tstring.endswith("Z"): + tstring = tstring[:-1] + elif _tz_regex.search(tstring): + tstring = tstring[:-5] + return tstring -def np_datetime64_compat(s, *args, **kwargs): +def np_datetime64_compat(tstring: str, unit: str = "ns"): """ provide compat for construction of strings to numpy datetime64's with tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation warning, when need to pass '2015-01-01 09:00:00' """ - s = tz_replacer(s) - # error: No overload variant of "datetime64" matches argument types "Any", - # "Tuple[Any, ...]", "Dict[str, Any]" - return np.datetime64(s, *args, **kwargs) # type: ignore[call-overload] + tstring = _tz_replacer(tstring) + return np.datetime64(tstring, unit) -def np_array_datetime64_compat(arr, *args, **kwargs): +def np_array_datetime64_compat(arr, dtype="M8[ns]"): """ provide compat for construction of an array of strings to a np.array(..., dtype=np.datetime64(..)) tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation warning, when need to pass '2015-01-01 09:00:00' """ - # 
is_list_like + # is_list_like; can't import as it would be circular if hasattr(arr, "__iter__") and not isinstance(arr, (str, bytes)): - arr = [tz_replacer(s) for s in arr] + arr = [_tz_replacer(s) for s in arr] else: - arr = tz_replacer(arr) + arr = _tz_replacer(arr) - return np.array(arr, *args, **kwargs) + return np.array(arr, dtype=dtype) __all__ = [ diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 614a637f2d904..b2d30f3540e77 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -14,9 +14,9 @@ from pandas.core.dtypes.common import ( is_categorical_dtype, is_dtype_equal, - is_extension_array_dtype, is_sparse, ) +from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCCategoricalIndex, ABCSeries, @@ -64,14 +64,13 @@ def _cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike: # are not coming from Index/Series._values), eg in BlockManager.quantile arr = ensure_wrapped_if_datetimelike(arr) - if is_extension_array_dtype(dtype) and isinstance(arr, np.ndarray): - # numpy's astype cannot handle ExtensionDtypes - return pd_array(arr, dtype=dtype, copy=False) - # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has incompatible type - # "Union[dtype[Any], ExtensionDtype]"; expected "Union[dtype[Any], None, type, - # _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], - # List[Any], _DTypeDict, Tuple[Any, Any]]]" - return arr.astype(dtype, copy=False) # type: ignore[arg-type] + if isinstance(dtype, ExtensionDtype): + if isinstance(arr, np.ndarray): + # numpy's astype cannot handle ExtensionDtypes + return pd_array(arr, dtype=dtype, copy=False) + return arr.astype(dtype, copy=False) + + return arr.astype(dtype, copy=False) def concat_compat(to_concat, axis: int = 0, ea_compat_axis: bool = False): @@ -115,7 +114,7 @@ def is_nonempty(x) -> bool: all_empty = not len(non_empties) single_dtype = len({x.dtype for x in to_concat}) == 
1 - any_ea = any(is_extension_array_dtype(x.dtype) for x in to_concat) + any_ea = any(isinstance(x.dtype, ExtensionDtype) for x in to_concat) if any_ea: # we ignore axis here, as internally concatting with EAs is always @@ -354,7 +353,7 @@ def _concat_datetime(to_concat, axis=0): result = type(to_concat[0])._concat_same_type(to_concat, axis=axis) - if result.ndim == 2 and is_extension_array_dtype(result.dtype): + if result.ndim == 2 and isinstance(result.dtype, ExtensionDtype): # TODO(EA2D): kludge not necessary with 2D EAs assert result.shape[0] == 1 result = result[0] diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index d44d2a564fb78..3d3b8b745d4ab 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -83,7 +83,7 @@ class PandasExtensionDtype(ExtensionDtype): num = 100 shape: Tuple[int, ...] = () itemsize = 8 - base = None + base: Optional[DtypeObj] = None isbuiltin = 0 isnative = 0 _cache: Dict[str_type, PandasExtensionDtype] = {} @@ -180,9 +180,7 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): type: Type[CategoricalDtypeType] = CategoricalDtypeType kind: str_type = "O" str = "|O08" - # error: Incompatible types in assignment (expression has type "dtype", - # base class "PandasExtensionDtype" defined the type as "None") - base = np.dtype("O") # type: ignore[assignment] + base = np.dtype("O") _metadata = ("categories", "ordered") _cache: Dict[str_type, PandasExtensionDtype] = {} @@ -676,9 +674,7 @@ class DatetimeTZDtype(PandasExtensionDtype): kind: str_type = "M" str = "|M8[ns]" num = 101 - # error: Incompatible types in assignment (expression has type "dtype", - # base class "PandasExtensionDtype" defined the type as "None") - base = np.dtype("M8[ns]") # type: ignore[assignment] + base = np.dtype("M8[ns]") na_value = NaT _metadata = ("unit", "tz") _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") @@ -844,9 +840,7 @@ class PeriodDtype(dtypes.PeriodDtypeBase, 
PandasExtensionDtype): type: Type[Period] = Period kind: str_type = "O" str = "|O08" - # error: Incompatible types in assignment (expression has type "dtype", - # base class "PandasExtensionDtype" defined the type as "None") - base = np.dtype("O") # type: ignore[assignment] + base = np.dtype("O") num = 102 _metadata = ("freq",) _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]") @@ -1046,9 +1040,7 @@ class IntervalDtype(PandasExtensionDtype): name = "interval" kind: str_type = "O" str = "|O08" - # error: Incompatible types in assignment (expression has type "dtype", - # base class "PandasExtensionDtype" defined the type as "None") - base = np.dtype("O") # type: ignore[assignment] + base = np.dtype("O") num = 103 _metadata = ( "subtype", diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index b6f476d864011..0c2110de731c4 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -292,12 +292,9 @@ def _convert_arr_indexer(self, keyarr): if is_integer_dtype(keyarr) or ( lib.infer_dtype(keyarr, skipna=False) == "integer" ): - dtype = np.uint64 + dtype = np.dtype(np.uint64) - # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type - # "Optional[Type[unsignedinteger[Any]]]"; expected "Union[str, dtype[Any], - # None]" - return com.asarray_tuplesafe(keyarr, dtype=dtype) # type: ignore[arg-type] + return com.asarray_tuplesafe(keyarr, dtype=dtype) _float64_descr_args = { diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index e446786802239..3970d2f23f867 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -6,6 +6,7 @@ from typing import ( TYPE_CHECKING, Any, + Callable, Hashable, List, Optional, @@ -900,7 +901,7 @@ def _arith_method(self, other, op): ]: return op(self._int64index, other) - step = False + step: Optional[Callable] = None if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]: step = op @@ -913,8 +914,7 @@ def _arith_method(self, 
other, op): # apply if we have an override if step: with np.errstate(all="ignore"): - # error: "bool" not callable - rstep = step(left.step, right) # type: ignore[operator] + rstep = step(left.step, right) # we don't have a representable op # so return a base index diff --git a/pandas/core/internals/api.py b/pandas/core/internals/api.py index be0828f5303b8..468c338e215fe 100644 --- a/pandas/core/internals/api.py +++ b/pandas/core/internals/api.py @@ -13,7 +13,10 @@ from pandas._libs.internals import BlockPlacement from pandas._typing import Dtype -from pandas.core.dtypes.common import is_datetime64tz_dtype +from pandas.core.dtypes.common import ( + is_datetime64tz_dtype, + pandas_dtype, +) from pandas.core.arrays import DatetimeArray from pandas.core.internals.blocks import ( @@ -39,11 +42,10 @@ def make_block( - Block.make_block_same_class - Block.__init__ """ - # error: Argument 2 to "extract_pandas_array" has incompatible type - # "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], Type[int], - # Type[complex], Type[bool], Type[object], None]"; expected "Union[dtype[Any], - # ExtensionDtype, None]" - values, dtype = extract_pandas_array(values, dtype, ndim) # type: ignore[arg-type] + if dtype is not None: + dtype = pandas_dtype(dtype) + + values, dtype = extract_pandas_array(values, dtype, ndim) if klass is None: dtype = dtype or values.dtype diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index a4d79284c45fd..45f275664b206 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -28,6 +28,7 @@ DtypeObj, F, Scalar, + Shape, ) from pandas.compat._optional import import_optional_dependency @@ -650,22 +651,16 @@ def nanmean( values, skipna, fill_value=0, mask=mask ) dtype_sum = dtype_max - dtype_count = np.float64 + dtype_count = np.dtype(np.float64) # not using needs_i8_conversion because that includes period if dtype.kind in ["m", "M"]: - # error: Incompatible types in assignment (expression has type "Type[float64]", - # variable 
has type "dtype[Any]") - dtype_sum = np.float64 # type: ignore[assignment] + dtype_sum = np.dtype(np.float64) elif is_integer_dtype(dtype): - # error: Incompatible types in assignment (expression has type "Type[float64]", - # variable has type "dtype[Any]") - dtype_sum = np.float64 # type: ignore[assignment] + dtype_sum = np.dtype(np.float64) elif is_float_dtype(dtype): dtype_sum = dtype - # error: Incompatible types in assignment (expression has type "dtype[Any]", - # variable has type "Type[float64]") - dtype_count = dtype # type: ignore[assignment] + dtype_count = dtype count = _get_counts(values.shape, mask, axis, dtype=dtype_count) the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum)) @@ -787,7 +782,7 @@ def get_empty_reduction_result( def _get_counts_nanvar( - value_counts: Tuple[int], + values_shape: Shape, mask: Optional[np.ndarray], axis: Optional[int], ddof: int, @@ -799,7 +794,7 @@ def _get_counts_nanvar( Parameters ---------- - values_shape : Tuple[int] + values_shape : Tuple[int, ...] 
shape tuple from values ndarray, used if mask is None mask : Optional[ndarray[bool]] locations in values that should be considered missing @@ -816,7 +811,7 @@ def _get_counts_nanvar( d : scalar or array """ dtype = get_dtype(dtype) - count = _get_counts(value_counts, mask, axis, dtype=dtype) + count = _get_counts(values_shape, mask, axis, dtype=dtype) # error: Unsupported operand types for - ("int" and "generic") # error: Unsupported operand types for - ("float" and "generic") d = count - dtype.type(ddof) # type: ignore[operator] @@ -991,11 +986,7 @@ def nansem( if not is_float_dtype(values.dtype): values = values.astype("f8") - # error: Argument 1 to "_get_counts_nanvar" has incompatible type - # "Tuple[int, ...]"; expected "Tuple[int]" - count, _ = _get_counts_nanvar( - values.shape, mask, axis, ddof, values.dtype # type: ignore[arg-type] - ) + count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype) var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof) return np.sqrt(var) / np.sqrt(count) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index a768ec8ad4eb3..3514fbc8c6293 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -82,8 +82,11 @@ notna, ) -from pandas.core.arrays.datetimes import DatetimeArray -from pandas.core.arrays.timedeltas import TimedeltaArray +from pandas.core.arrays import ( + Categorical, + DatetimeArray, + TimedeltaArray, +) from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.construction import extract_array @@ -106,7 +109,6 @@ if TYPE_CHECKING: from pandas import ( - Categorical, DataFrame, Series, ) @@ -1565,12 +1567,9 @@ def _format_strings(self) -> List[str]: # no attribute "_formatter" formatter = values._formatter(boxed=True) # type: ignore[union-attr] - if is_categorical_dtype(values.dtype): + if isinstance(values, Categorical): # Categorical is special for now, so that we can preserve tzinfo - - # error: Item "ExtensionArray" 
of "Union[Any, ExtensionArray]" has no - # attribute "_internal_get_values" - array = values._internal_get_values() # type: ignore[union-attr] + array = values._internal_get_values() else: array = np.asarray(values)
https://api.github.com/repos/pandas-dev/pandas/pulls/40434
2021-03-14T19:56:05Z
2021-03-15T11:35:46Z
2021-03-15T11:35:46Z
2021-03-15T14:13:35Z
TYP: stubs for tslibs
diff --git a/pandas/_libs/tslibs/ccalendar.pyi b/pandas/_libs/tslibs/ccalendar.pyi new file mode 100644 index 0000000000000..500a0423bc9cf --- /dev/null +++ b/pandas/_libs/tslibs/ccalendar.pyi @@ -0,0 +1,13 @@ + +DAYS: list[str] +MONTH_ALIASES: dict[int, str] +MONTH_NUMBERS: dict[str, int] +MONTHS: list[str] +int_to_weekday: dict[int, str] + +def get_firstbday(year: int, month: int) -> int: ... +def get_lastbday(year: int, month: int) -> int: ... +def get_day_of_year(year: int, month: int, day: int) -> int: ... +def get_iso_calendar(year: int, month: int, day: int) -> tuple[int, int, int]: ... +def get_week_of_year(year: int, month: int, day: int) -> int: ... +def get_days_in_month(year: int, month: int) -> int: ... diff --git a/pandas/_libs/tslibs/strptime.pyi b/pandas/_libs/tslibs/strptime.pyi new file mode 100644 index 0000000000000..3748c169bb1c6 --- /dev/null +++ b/pandas/_libs/tslibs/strptime.pyi @@ -0,0 +1,11 @@ +from typing import Optional + +import numpy as np + +def array_strptime( + values: np.ndarray, # np.ndarray[object] + fmt: Optional[str], + exact: bool = True, + errors: str = "raise" +) -> tuple[np.ndarray, np.ndarray]: ... +# first ndarray is M8[ns], second is object ndarray of Optional[tzinfo] diff --git a/pandas/_libs/tslibs/timezones.pyi b/pandas/_libs/tslibs/timezones.pyi new file mode 100644 index 0000000000000..04a1b391dc30a --- /dev/null +++ b/pandas/_libs/tslibs/timezones.pyi @@ -0,0 +1,32 @@ +from datetime import ( + datetime, + tzinfo, +) +from typing import ( + Callable, + Optional, + Union, +) + +import numpy as np + +# imported from dateutil.tz +dateutil_gettz: Callable[[str], tzinfo] + + +def tz_standardize(tz: tzinfo) -> tzinfo: ... + +def tz_compare(start: Optional[tzinfo], end: Optional[tzinfo]) -> bool: ... + +def infer_tzinfo( + start: Optional[datetime], end: Optional[datetime], +) -> Optional[tzinfo]: ... + +# ndarrays returned are both int64_t +def get_dst_info(tz: tzinfo) -> tuple[np.ndarray, np.ndarray, str]: ... 
+ +def maybe_get_tz(tz: Optional[Union[str, int, np.int64, tzinfo]]) -> Optional[tzinfo]: ... + +def get_timezone(tz: tzinfo) -> Union[tzinfo, str]: ... + +def is_utc(tz: Optional[tzinfo]) -> bool: ... diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 92065e1c3d4c5..0809033b02934 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -67,6 +67,7 @@ cdef inline bint treat_tz_as_dateutil(tzinfo tz): return hasattr(tz, '_trans_list') and hasattr(tz, '_trans_idx') +# Returns str or tzinfo object cpdef inline object get_timezone(tzinfo tz): """ We need to do several things here: @@ -80,6 +81,8 @@ cpdef inline object get_timezone(tzinfo tz): the tz name. It needs to be a string so that we can serialize it with UJSON/pytables. maybe_get_tz (below) is the inverse of this process. """ + if tz is None: + raise TypeError("tz argument cannot be None") if is_utc(tz): return tz else: @@ -364,6 +367,8 @@ cpdef bint tz_compare(tzinfo start, tzinfo end): elif is_utc(end): # Ensure we don't treat tzlocal as equal to UTC when running in UTC return False + elif start is None or end is None: + return start is None and end is None return get_timezone(start) == get_timezone(end) diff --git a/pandas/_libs/tslibs/tzconversion.pyi b/pandas/_libs/tslibs/tzconversion.pyi new file mode 100644 index 0000000000000..f47885a2e3306 --- /dev/null +++ b/pandas/_libs/tslibs/tzconversion.pyi @@ -0,0 +1,25 @@ +from datetime import ( + timedelta, + tzinfo, +) +from typing import ( + Iterable, + Optional, + Union, +) + +import numpy as np + +def tz_convert_from_utc( + vals: np.ndarray, # const int64_t[:] + tz: tzinfo, +) -> np.ndarray: ... # np.ndarray[np.int64] + +def tz_convert_from_utc_single(val: np.int64, tz: tzinfo) -> np.int64: ... 
+ +def tz_localize_to_utc( + vals: np.ndarray, # np.ndarray[np.int64] + tz: Optional[tzinfo], + ambiguous: Optional[Union[str, bool, Iterable[bool]]] = None, + nonexistent: Optional[Union[str, timedelta, np.timedelta64]] = None, +) -> np.ndarray: ... # np.ndarray[np.int64] diff --git a/pandas/_libs/tslibs/vectorized.pyi b/pandas/_libs/tslibs/vectorized.pyi new file mode 100644 index 0000000000000..6ed1e10ef2353 --- /dev/null +++ b/pandas/_libs/tslibs/vectorized.pyi @@ -0,0 +1,47 @@ +""" +For cython types that cannot be represented precisely, closest-available +python equivalents are used, and the precise types kept as adjacent comments. +""" +from datetime import tzinfo +from typing import ( + Optional, + Union, +) + +import numpy as np + +from pandas._libs.tslibs.dtypes import Resolution +from pandas._libs.tslibs.offsets import BaseOffset + +def dt64arr_to_periodarr( + stamps: np.ndarray, # const int64_t[:] + freq: int, + tz: Optional[tzinfo], +) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1] + + +def is_date_array_normalized( + stamps: np.ndarray, # const int64_t[:] + tz: Optional[tzinfo] = None, +) -> bool: ... + + +def normalize_i8_timestamps( + stamps: np.ndarray, # const int64_t[:] + tz: Optional[tzinfo], +) -> np.ndarray: ... # np.ndarray[np.int64] + + +def get_resolution( + stamps: np.ndarray, # const int64_t[:] + tz: Optional[tzinfo] = None, +) -> Resolution: ... + + +def ints_to_pydatetime( + arr: np.ndarray, # const int64_t[:}] + tz: Optional[tzinfo] = None, + freq: Optional[Union[str, BaseOffset]] = None, + fold: bool = False, + box: str = "datetime", +) -> np.ndarray: ... 
# np.ndarray[object] diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 9822356d11d7c..ba238e2b789f0 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -285,7 +285,7 @@ def _convert_listlike_datetimes( name: Hashable = None, tz: Optional[Timezone] = None, unit: Optional[str] = None, - errors: Optional[str] = None, + errors: str = "raise", infer_datetime_format: bool = False, dayfirst: Optional[bool] = None, yearfirst: Optional[bool] = None, @@ -428,7 +428,7 @@ def _array_strptime_with_fallback( tz, fmt: str, exact: bool, - errors: Optional[str], + errors: str, infer_datetime_format: bool, ) -> Optional[Index]: """ @@ -476,7 +476,7 @@ def _to_datetime_with_format( tz, fmt: str, exact: bool, - errors: Optional[str], + errors: str, infer_datetime_format: bool, ) -> Optional[Index]: """
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40433
2021-03-14T18:17:06Z
2021-03-16T13:18:21Z
2021-03-16T13:18:21Z
2021-03-16T14:26:50Z
CLN: remove unused file opening and mmap code from parsers.pyx
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index baf5633db0cb3..a11bf370412d2 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -284,21 +284,10 @@ cdef extern from "parser/tokenizer.h": cdef extern from "parser/io.h": - void *new_mmap(char *fname) - int del_mmap(void *src) - void* buffer_mmap_bytes(void *source, size_t nbytes, - size_t *bytes_read, int *status) - - void *new_file_source(char *fname, size_t buffer_size) except NULL - void *new_rd_source(object obj) except NULL - int del_file_source(void *src) int del_rd_source(void *src) - void* buffer_file_bytes(void *source, size_t nbytes, - size_t *bytes_read, int *status) - void* buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read, int *status, const char *encoding_errors) diff --git a/pandas/_libs/src/parser/io.c b/pandas/_libs/src/parser/io.c index 449f0b55bff70..2ed0cef3cdc58 100644 --- a/pandas/_libs/src/parser/io.c +++ b/pandas/_libs/src/parser/io.c @@ -9,83 +9,10 @@ The full license is in the LICENSE file, distributed with this software. #include "io.h" -#include <sys/stat.h> -#include <fcntl.h> - -#ifndef O_BINARY -#define O_BINARY 0 -#endif // O_BINARY - -#ifdef _WIN32 -#define USE_WIN_UTF16 -#include <Windows.h> -#endif - /* On-disk FILE, uncompressed */ -void *new_file_source(char *fname, size_t buffer_size) { - file_source *fs = (file_source *)malloc(sizeof(file_source)); - if (fs == NULL) { - PyErr_NoMemory(); - return NULL; - } - -#ifdef USE_WIN_UTF16 - // Fix gh-15086 properly - convert UTF8 to UTF16 that Windows widechar API - // accepts. This is needed because UTF8 might _not_ be convertible to MBCS - // for some conditions, as MBCS is locale-dependent, and not all unicode - // symbols can be expressed in it. 
- { - wchar_t* wname = NULL; - int required = MultiByteToWideChar(CP_UTF8, 0, fname, -1, NULL, 0); - if (required == 0) { - free(fs); - PyErr_SetFromWindowsErr(0); - return NULL; - } - wname = (wchar_t*)malloc(required * sizeof(wchar_t)); - if (wname == NULL) { - free(fs); - PyErr_NoMemory(); - return NULL; - } - if (MultiByteToWideChar(CP_UTF8, 0, fname, -1, wname, required) < - required) { - free(wname); - free(fs); - PyErr_SetFromWindowsErr(0); - return NULL; - } - fs->fd = _wopen(wname, O_RDONLY | O_BINARY); - free(wname); - } -#else - fs->fd = open(fname, O_RDONLY | O_BINARY); -#endif - if (fs->fd == -1) { - free(fs); - PyErr_SetFromErrnoWithFilename(PyExc_OSError, fname); - return NULL; - } - - // Only allocate this heap memory if we are not memory-mapping the file - fs->buffer = (char *)malloc((buffer_size + 1) * sizeof(char)); - - if (fs->buffer == NULL) { - close(fs->fd); - free(fs); - PyErr_NoMemory(); - return NULL; - } - - memset(fs->buffer, '\0', buffer_size + 1); - fs->size = buffer_size; - - return (void *)fs; -} - void *new_rd_source(PyObject *obj) { rd_source *rds = (rd_source *)malloc(sizeof(rd_source)); @@ -108,17 +35,6 @@ void *new_rd_source(PyObject *obj) { */ -int del_file_source(void *ptr) { - file_source *fs = ptr; - if (fs == NULL) return 0; - - free(fs->buffer); - close(fs->fd); - free(fs); - - return 0; -} - int del_rd_source(void *rds) { Py_XDECREF(RDS(rds)->obj); Py_XDECREF(RDS(rds)->buffer); @@ -133,35 +49,6 @@ int del_rd_source(void *rds) { */ -void *buffer_file_bytes(void *source, size_t nbytes, size_t *bytes_read, - int *status) { - file_source *fs = FS(source); - ssize_t rv; - - if (nbytes > fs->size) { - nbytes = fs->size; - } - - rv = read(fs->fd, fs->buffer, nbytes); - switch (rv) { - case -1: - *status = CALLING_READ_FAILED; - *bytes_read = 0; - return NULL; - case 0: - *status = REACHED_EOF; - *bytes_read = 0; - return NULL; - default: - *status = 0; - *bytes_read = rv; - fs->buffer[rv] = '\0'; - break; - } - - return (void 
*)fs->buffer; -} - void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read, int *status, const char *encoding_errors) { PyGILState_STATE state; @@ -218,98 +105,3 @@ void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read, return retval; } - -#ifdef HAVE_MMAP - -#include <sys/mman.h> - -void *new_mmap(char *fname) { - memory_map *mm; - struct stat stat; - size_t filesize; - - mm = (memory_map *)malloc(sizeof(memory_map)); - if (mm == NULL) { - return NULL; - } - mm->fd = open(fname, O_RDONLY | O_BINARY); - if (mm->fd == -1) { - free(mm); - return NULL; - } - - if (fstat(mm->fd, &stat) == -1) { - close(mm->fd); - free(mm); - return NULL; - } - filesize = stat.st_size; /* XXX This might be 32 bits. */ - - mm->memmap = mmap(NULL, filesize, PROT_READ, MAP_SHARED, mm->fd, 0); - if (mm->memmap == MAP_FAILED) { - close(mm->fd); - free(mm); - return NULL; - } - - mm->size = (off_t)filesize; - mm->position = 0; - - return mm; -} - -int del_mmap(void *ptr) { - memory_map *mm = ptr; - - if (mm == NULL) return 0; - - munmap(mm->memmap, mm->size); - close(mm->fd); - free(mm); - - return 0; -} - -void *buffer_mmap_bytes(void *source, size_t nbytes, size_t *bytes_read, - int *status) { - void *retval; - memory_map *src = source; - size_t remaining = src->size - src->position; - - if (remaining == 0) { - *bytes_read = 0; - *status = REACHED_EOF; - return NULL; - } - - if (nbytes > remaining) { - nbytes = remaining; - } - - retval = src->memmap + src->position; - - /* advance position in mmap data structure */ - src->position += nbytes; - - *bytes_read = nbytes; - *status = 0; - - return retval; -} - -#else - -/* kludgy */ - -void *new_mmap(char *fname) { return NULL; } - -int del_mmap(void *src) { return 0; } - -/* don't use this! 
*/ - -void *buffer_mmap_bytes(void *source, size_t nbytes, size_t *bytes_read, - int *status) { - return NULL; -} - -#endif // HAVE_MMAP diff --git a/pandas/_libs/src/parser/io.h b/pandas/_libs/src/parser/io.h index dbe757b458c54..f0e8b01855304 100644 --- a/pandas/_libs/src/parser/io.h +++ b/pandas/_libs/src/parser/io.h @@ -14,37 +14,8 @@ The full license is in the LICENSE file, distributed with this software. #include <Python.h> #include "tokenizer.h" -typedef struct _file_source { - /* The file being read. */ - int fd; - - char *buffer; - size_t size; -} file_source; - #define FS(source) ((file_source *)source) -#if !defined(_WIN32) && !defined(HAVE_MMAP) -#define HAVE_MMAP -#endif // HAVE_MMAP - -typedef struct _memory_map { - int fd; - - /* Size of the file, in bytes. */ - char *memmap; - size_t size; - - size_t position; -} memory_map; - -void *new_mmap(char *fname); - -int del_mmap(void *src); - -void *buffer_mmap_bytes(void *source, size_t nbytes, size_t *bytes_read, - int *status); - typedef struct _rd_source { PyObject *obj; PyObject *buffer; @@ -53,16 +24,10 @@ typedef struct _rd_source { #define RDS(source) ((rd_source *)source) -void *new_file_source(char *fname, size_t buffer_size); - void *new_rd_source(PyObject *obj); -int del_file_source(void *src); int del_rd_source(void *src); -void *buffer_file_bytes(void *source, size_t nbytes, size_t *bytes_read, - int *status); - void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read, int *status, const char *encoding_errors);
The removed code has never been used since #36997. `TextReader` is only ever called with file handles.
https://api.github.com/repos/pandas-dev/pandas/pulls/40431
2021-03-14T15:50:33Z
2021-03-15T01:08:49Z
2021-03-15T01:08:49Z
2021-03-15T01:08:54Z
DEPR: **kwargs in ExcelWriter
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index fad734a0e39ad..9aa27dd8e4223 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -370,6 +370,7 @@ Deprecations - Deprecated :meth:`.Styler.set_na_rep` and :meth:`.Styler.set_precision` in favour of :meth:`.Styler.format` with ``na_rep`` and ``precision`` as existing and new input arguments respectively (:issue:`40134`, :issue:`40425`) - Deprecated allowing partial failure in :meth:`Series.transform` and :meth:`DataFrame.transform` when ``func`` is list-like or dict-like; will raise if any function fails on a column in a future version (:issue:`40211`) - Deprecated support for ``np.ma.mrecords.MaskedRecords`` in the :class:`DataFrame` constructor, pass ``{name: data[name] for name in data.dtype.names}`` instead (:issue:`40363`) +- Deprecated the use of ``**kwargs`` in :class:`.ExcelWriter`; use the keyword argument ``engine_kwargs`` instead (:issue:`40430`) .. --------------------------------------------------------------------------- diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 2bf70a18e810f..c0904c0393af6 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -667,6 +667,16 @@ class ExcelWriter(metaclass=abc.ABCMeta): be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". .. versionadded:: 1.2.0 + engine_kwargs : dict, optional + Keyword arguments to be passed into the engine. + + .. versionadded:: 1.3.0 + **kwargs : dict, optional + Keyword arguments to be passed into the engine. + + .. deprecated:: 1.3.0 + + Use engine_kwargs instead. Attributes ---------- @@ -745,7 +755,26 @@ class ExcelWriter(metaclass=abc.ABCMeta): # You also need to register the class with ``register_writer()``. # Technically, ExcelWriter implementations don't need to subclass # ExcelWriter. 
- def __new__(cls, path, engine=None, **kwargs): + def __new__( + cls, + path: Union[FilePathOrBuffer, ExcelWriter], + engine=None, + date_format=None, + datetime_format=None, + mode: str = "w", + storage_options: StorageOptions = None, + engine_kwargs: Optional[Dict] = None, + **kwargs, + ): + if kwargs: + if engine_kwargs is not None: + raise ValueError("Cannot use both engine_kwargs and **kwargs") + warnings.warn( + "Use of **kwargs is deprecated, use engine_kwargs instead.", + FutureWarning, + stacklevel=2, + ) + # only switch class if generic(ExcelWriter) if cls is ExcelWriter: @@ -835,7 +864,8 @@ def __init__( datetime_format=None, mode: str = "w", storage_options: StorageOptions = None, - **engine_kwargs, + engine_kwargs: Optional[Dict] = None, + **kwargs, ): # validate that this engine can handle the extension if isinstance(path, str): diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py index d00e600b4e5d4..bfd1bcf466a7a 100644 --- a/pandas/io/excel/_odswriter.py +++ b/pandas/io/excel/_odswriter.py @@ -26,19 +26,22 @@ def __init__( self, path: str, engine: Optional[str] = None, + date_format=None, + datetime_format=None, mode: str = "w", storage_options: StorageOptions = None, - **engine_kwargs, + engine_kwargs: Optional[Dict[str, Any]] = None, ): from odf.opendocument import OpenDocumentSpreadsheet - engine_kwargs["engine"] = engine - if mode == "a": raise ValueError("Append mode is not supported with odf!") super().__init__( - path, mode=mode, storage_options=storage_options, **engine_kwargs + path, + mode=mode, + storage_options=storage_options, + engine_kwargs=engine_kwargs, ) self.book = OpenDocumentSpreadsheet() diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index be2c9b919a5c3..72950db72e067 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -3,6 +3,7 @@ import mmap from typing import ( TYPE_CHECKING, + Any, Dict, List, Optional, @@ -35,15 +36,20 @@ def __init__( self, 
path, engine=None, + date_format=None, + datetime_format=None, mode: str = "w", storage_options: StorageOptions = None, - **engine_kwargs, + engine_kwargs: Optional[Dict[str, Any]] = None, ): # Use the openpyxl module as the Excel writer. from openpyxl.workbook import Workbook super().__init__( - path, mode=mode, storage_options=storage_options, **engine_kwargs + path, + mode=mode, + storage_options=storage_options, + engine_kwargs=engine_kwargs, ) # ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py index 849572cff813a..6e1b064534707 100644 --- a/pandas/io/excel/_xlsxwriter.py +++ b/pandas/io/excel/_xlsxwriter.py @@ -1,6 +1,8 @@ from typing import ( + Any, Dict, List, + Optional, Tuple, ) @@ -175,11 +177,13 @@ def __init__( datetime_format=None, mode: str = "w", storage_options: StorageOptions = None, - **engine_kwargs, + engine_kwargs: Optional[Dict[str, Any]] = None, ): # Use the xlsxwriter module as the Excel writer. from xlsxwriter import Workbook + engine_kwargs = engine_kwargs or {} + if mode == "a": raise ValueError("Append mode is not supported with xlsxwriter!") @@ -190,7 +194,7 @@ def __init__( datetime_format=datetime_format, mode=mode, storage_options=storage_options, - **engine_kwargs, + engine_kwargs=engine_kwargs, ) self.book = Workbook(self.handles.handle, **engine_kwargs) diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py index a8386242faf72..776baf66536b1 100644 --- a/pandas/io/excel/_xlwt.py +++ b/pandas/io/excel/_xlwt.py @@ -1,6 +1,8 @@ from typing import ( TYPE_CHECKING, + Any, Dict, + Optional, ) import pandas._libs.json as json @@ -21,21 +23,24 @@ def __init__( self, path, engine=None, + date_format=None, + datetime_format=None, encoding=None, mode: str = "w", storage_options: StorageOptions = None, - **engine_kwargs, + engine_kwargs: Optional[Dict[str, Any]] = None, ): # Use the xlwt module as the Excel writer. 
import xlwt - engine_kwargs["engine"] = engine - if mode == "a": raise ValueError("Append mode is not supported with xlwt!") super().__init__( - path, mode=mode, storage_options=storage_options, **engine_kwargs + path, + mode=mode, + storage_options=storage_options, + engine_kwargs=engine_kwargs, ) if encoding is None: diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index 3a1c93bdfee29..cce8c3d01025d 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -6,6 +6,7 @@ from functools import partial from io import BytesIO import os +import re import numpy as np import pytest @@ -1382,6 +1383,40 @@ def check_called(func): with tm.ensure_clean("something.xls") as filepath: check_called(lambda: df.to_excel(filepath, engine="dummy")) + @pytest.mark.parametrize( + "ext", + [ + pytest.param(".xlsx", marks=td.skip_if_no("xlsxwriter")), + pytest.param(".xlsx", marks=td.skip_if_no("openpyxl")), + pytest.param(".ods", marks=td.skip_if_no("odf")), + ], + ) + def test_kwargs_deprecated(self, ext): + # GH 40430 + msg = re.escape("Use of **kwargs is deprecated") + with tm.assert_produces_warning(FutureWarning, match=msg): + with tm.ensure_clean(ext) as path: + try: + with ExcelWriter(path, kwarg=1): + pass + except TypeError: + pass + + @pytest.mark.parametrize( + "ext", + [ + pytest.param(".xlsx", marks=td.skip_if_no("xlsxwriter")), + pytest.param(".xlsx", marks=td.skip_if_no("openpyxl")), + pytest.param(".ods", marks=td.skip_if_no("odf")), + ], + ) + def test_engine_kwargs_and_kwargs_raises(self, ext): + # GH 40430 + msg = re.escape("Cannot use both engine_kwargs and **kwargs") + with pytest.raises(ValueError, match=msg): + with ExcelWriter("", engine_kwargs={"a": 1}, b=2): + pass + @td.skip_if_no("xlrd") @td.skip_if_no("openpyxl")
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Currently ExcelWriter has **engine_kwargs to pass through to the engine, [although this is undocumented](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.ExcelWriter.html). All other places in pandas where engine_kwargs is used, they are a regular argument. This PR - Renames **engine_kwargs to **kwargs (not an API change) - Adds **kwargs to the docstring of ExcelWriter - Deprecates **kwargs - Introduces engine_kwargs as a regular argument for their replacement
https://api.github.com/repos/pandas-dev/pandas/pulls/40430
2021-03-14T14:17:50Z
2021-03-17T21:17:56Z
2021-03-17T21:17:56Z
2021-03-17T23:35:16Z
CLN: Don't modify state in FrameApply.agg
diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 57147461284fb..3a2c2d7124963 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -633,28 +633,28 @@ def agg(self): obj = self.obj axis = self.axis - # TODO: Avoid having to change state - self.obj = self.obj if self.axis == 0 else self.obj.T - self.axis = 0 - - result = None try: - result = super().agg() + if axis == 1: + result = FrameRowApply( + obj.T, + self.orig_f, + self.raw, + self.result_type, + self.args, + self.kwargs, + ).agg() + result = result.T if result is not None else result + else: + result = super().agg() except TypeError as err: exc = TypeError( "DataFrame constructor called with " f"incompatible data and dtype: {err}" ) raise exc from err - finally: - self.obj = obj - self.axis = axis - - if axis == 1: - result = result.T if result is not None else result if result is None: - result = self.obj.apply(self.orig_f, axis, args=self.args, **self.kwargs) + result = obj.apply(self.orig_f, axis, args=self.args, **self.kwargs) return result
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/40428
2021-03-14T11:16:36Z
2021-03-14T23:55:02Z
2021-03-14T23:55:02Z
2021-03-15T02:27:44Z
CLN/TST: Parametrize/deduplicate test_invalid_arg
diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py index 2499c90535469..690d6bed0cb9b 100644 --- a/pandas/tests/apply/test_invalid_arg.py +++ b/pandas/tests/apply/test_invalid_arg.py @@ -91,27 +91,15 @@ def test_nested_renamer(box, method, func): getattr(obj, method)(func) -def test_series_agg_nested_renamer(): - s = Series(range(6), dtype="int64", name="series") - msg = "nested renamer is not supported" - with pytest.raises(SpecificationError, match=msg): - s.agg({"foo": ["min", "max"]}) - - -def test_multiple_aggregators_with_dict_api(): - +@pytest.mark.parametrize( + "renamer", + [{"foo": ["min", "max"]}, {"foo": ["min", "max"], "bar": ["sum", "mean"]}], +) +def test_series_nested_renamer(renamer): s = Series(range(6), dtype="int64", name="series") - # nested renaming msg = "nested renamer is not supported" with pytest.raises(SpecificationError, match=msg): - s.agg({"foo": ["min", "max"], "bar": ["sum", "mean"]}) - - -def test_transform_nested_renamer(): - # GH 35964 - match = "nested renamer is not supported" - with pytest.raises(SpecificationError, match=match): - Series([1]).transform({"A": {"B": ["sum"]}}) + s.agg(renamer) def test_agg_dict_nested_renaming_depr_agg(): @@ -156,14 +144,6 @@ def test_missing_column(method, func): getattr(obj, method)(func) -def test_transform_missing_columns(axis): - # GH#35964 - df = DataFrame({"A": [1, 2], "B": [3, 4]}) - match = re.escape("Column(s) ['C'] do not exist") - with pytest.raises(KeyError, match=match): - df.transform({"C": "cumsum"}) - - def test_transform_mixed_column_name_dtypes(): # GH39025 df = DataFrame({"a": ["1"]}) @@ -328,14 +308,8 @@ def test_transform_and_agg_err_agg(axis, float_frame): with np.errstate(all="ignore"): float_frame.agg(["max", "sqrt"], axis=axis) - df = DataFrame({"A": range(5), "B": 5}) - - def f(): - with np.errstate(all="ignore"): - df.agg({"A": ["abs", "sum"], "B": ["mean", "max"]}, axis=axis) - -def 
test_transform_and_agg_error_agg(string_series): +def test_transform_and_agg_err_series(string_series): # we are trying to transform with an aggregator msg = "cannot combine transform and aggregation" with pytest.raises(ValueError, match=msg): @@ -348,7 +322,7 @@ def test_transform_and_agg_error_agg(string_series): string_series.agg({"foo": np.sqrt, "bar": "sum"}) -def test_transform_and_agg_err_transform(axis, float_frame): +def test_transform_and_agg_err_frame(axis, float_frame): # GH 35964 # cannot both transform and agg msg = "Function did not transform"
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - Combined test_series_agg_nested_renamer and test_series_nested_renamer via parametrization - test_transform_nested_renamer is already a part of test_nested_renamer - test_transform_missing_columns is essentially a duplicate of test_missing_column
https://api.github.com/repos/pandas-dev/pandas/pulls/40426
2021-03-14T10:19:06Z
2021-03-15T00:29:46Z
2021-03-15T00:29:46Z
2021-03-15T02:27:55Z
PERF: make Styler default formatter arguments statics instead of repeated dynamic
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 56a5412d4ecfc..3acd9f3548702 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -367,7 +367,7 @@ Deprecations - Deprecated :meth:`core.window.ewm.ExponentialMovingWindow.vol` (:issue:`39220`) - Using ``.astype`` to convert between ``datetime64[ns]`` dtype and :class:`DatetimeTZDtype` is deprecated and will raise in a future version, use ``obj.tz_localize`` or ``obj.dt.tz_localize`` instead (:issue:`38622`) - Deprecated casting ``datetime.date`` objects to ``datetime64`` when used as ``fill_value`` in :meth:`DataFrame.unstack`, :meth:`DataFrame.shift`, :meth:`Series.shift`, and :meth:`DataFrame.reindex`, pass ``pd.Timestamp(dateobj)`` instead (:issue:`39767`) -- Deprecated :meth:`.Styler.set_na_rep` and :meth:`.Styler.set_precision` in favour of :meth:`.Styler.format` with ``na_rep`` and ``precision`` as existing and new input arguments respectively (:issue:`40134`) +- Deprecated :meth:`.Styler.set_na_rep` and :meth:`.Styler.set_precision` in favour of :meth:`.Styler.format` with ``na_rep`` and ``precision`` as existing and new input arguments respectively (:issue:`40134`, :issue:`40425`) - Deprecated allowing partial failure in :meth:`Series.transform` and :meth:`DataFrame.transform` when ``func`` is list-like or dict-like; will raise if any function fails on a column in a future version (:issue:`40211`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index cc5f3164385cb..2ede8789b164e 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -196,9 +196,10 @@ def __init__( self.cell_context: Dict[str, Any] = {} self._todo: List[Tuple[Callable, Tuple, Dict]] = [] self.tooltips: Optional[_Tooltips] = None + def_precision = get_option("display.precision") self._display_funcs: DefaultDict[ # maps (row, col) -> formatting function Tuple[int, int], Callable[[Any], str] - ] = defaultdict(lambda: partial(_default_formatter, precision=None)) + ] = defaultdict(lambda: partial(_default_formatter, precision=def_precision)) self.precision = precision # can be removed on set_precision depr cycle self.na_rep = na_rep # can be removed on set_na_rep depr cycle self.format(formatter=None, precision=precision, na_rep=na_rep) @@ -2127,7 +2128,7 @@ def _get_level_lengths(index, hidden_elements=None): return non_zero_lengths -def _default_formatter(x: Any, precision: Optional[int] = None) -> Any: +def _default_formatter(x: Any, precision: int) -> Any: """ Format the display of a value @@ -2135,7 +2136,7 @@ def _default_formatter(x: Any, precision: Optional[int] = None) -> Any: ---------- x : Any Input variable to be formatted - precision : Int, optional + precision : Int Floating point precision used if ``x`` is float or complex. Returns @@ -2143,8 +2144,6 @@ def _default_formatter(x: Any, precision: Optional[int] = None) -> Any: value : Any Matches input type, or string if input is float or complex. 
""" - if precision is None: - precision = get_option("display.precision") if isinstance(x, (float, complex)): return f"{x:.{precision}f}" return x @@ -2165,6 +2164,7 @@ def _maybe_wrap_formatter( elif callable(formatter): formatter_func = formatter elif formatter is None: + precision = get_option("display.precision") if precision is None else precision formatter_func = partial(_default_formatter, precision=precision) else: raise TypeError(f"'formatter' expected str or callable, got {type(formatter)}")
This PR is a minor fix/improvement to a recent PR which deprecated `set_na_rep` and `set_precision`. #40134 For a dataframe with 100,000 floats the current Styler default formatter will make 100,000 `get_option` lookups for the pandas precision. Doing this calculation once prevents all these lookups and saves just over 10% render time for larger tables. ``` rows THIS PR ------ ------------------------- cols 12 120 ====== ============ ============ 12 11.8±0.2ms 40.8±0.8ms 24 22.5±0.3ms 69.2±2ms 36 32.3±0.6ms 96.9±1ms ====== ============ ============ ``` ``` rows MASTER ------ ------------------------- cols 12 120 ====== ============ ============ 12 12.9±0.4ms 43.4±1ms 24 22.3±0.1ms 75.2±0.7ms 36 32.3±0.3ms 105±0.5ms ====== ============ ============ ```
https://api.github.com/repos/pandas-dev/pandas/pulls/40425
2021-03-14T09:12:31Z
2021-03-14T23:50:51Z
2021-03-14T23:50:51Z
2021-03-15T14:27:55Z
BUG: read_csv with specified kwargs
diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt index 67c7ce150132a..0f2c9c4756987 100644 --- a/doc/source/whatsnew/v0.23.2.txt +++ b/doc/source/whatsnew/v0.23.2.txt @@ -64,6 +64,7 @@ Bug Fixes **I/O** +- Bug in :func:`read_csv` that caused it to incorrectly raise an error when ``nrows=0``, ``low_memory=True``, and ``index_col`` was not ``None`` (:issue:`21141`) - - diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 2c8f98732c92f..65df2bffb4abf 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -3209,12 +3209,22 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None): col = columns[k] if is_integer(k) else k dtype[col] = v - if index_col is None or index_col is False: + # Even though we have no data, the "index" of the empty DataFrame + # could for example still be an empty MultiIndex. Thus, we need to + # check whether we have any index columns specified, via either: + # + # 1) index_col (column indices) + # 2) index_names (column names) + # + # Both must be non-null to ensure a successful construction. Otherwise, + # we have to create a generic emtpy Index. 
+ if (index_col is None or index_col is False) or index_names is None: index = Index([]) else: data = [Series([], dtype=dtype[name]) for name in index_names] index = _ensure_index_from_sequences(data, names=index_names) index_col.sort() + for i, n in enumerate(index_col): columns.pop(n - i) diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index 2b7ff1f5a9879..b39122e5e7906 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -238,6 +238,21 @@ def test_csv_mixed_type(self): out = self.read_csv(StringIO(data)) tm.assert_frame_equal(out, expected) + def test_read_csv_low_memory_no_rows_with_index(self): + if self.engine == "c" and not self.low_memory: + pytest.skip("This is a low-memory specific test") + + # see gh-21141 + data = """A,B,C +1,1,1,2 +2,2,3,4 +3,3,4,5 +""" + out = self.read_csv(StringIO(data), low_memory=True, + index_col=0, nrows=0) + expected = DataFrame(columns=["A", "B", "C"]) + tm.assert_frame_equal(out, expected) + def test_read_csv_dataframe(self): df = self.read_csv(self.csv1, index_col=0, parse_dates=True) df2 = self.read_table(self.csv1, sep=',', index_col=0,
- [+] closes #21141 - [+] tests added / passed - [+] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Solves the issue 21141.
https://api.github.com/repos/pandas-dev/pandas/pulls/21176
2018-05-22T20:36:04Z
2018-06-19T11:26:49Z
2018-06-19T11:26:49Z
2018-06-29T14:58:23Z
Fix nonzero of a SparseArray
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index b5d0532c6dfa3..18edc290768c8 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1437,6 +1437,7 @@ Sparse - Bug in ``DataFrame.groupby`` not including ``fill_value`` in the groups for non-NA ``fill_value`` when grouping by a sparse column (:issue:`5078`) - Bug in unary inversion operator (``~``) on a ``SparseSeries`` with boolean values. The performance of this has also been improved (:issue:`22835`) - Bug in :meth:`SparseArary.unique` not returning the unique values (:issue:`19595`) +- Bug in :meth:`SparseArray.nonzero` and :meth:`SparseDataFrame.dropna` returning shifted/incorrect results (:issue:`21172`) Build Changes ^^^^^^^^^^^^^ diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index 0e5a8280cc467..619cd05128ddb 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -784,6 +784,23 @@ def test_fillna_overlap(self): exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64) tm.assert_sp_array_equal(res, exp) + def test_nonzero(self): + # Tests regression #21172. 
+ sa = pd.SparseArray([ + float('nan'), + float('nan'), + 1, 0, 0, + 2, 0, 0, 0, + 3, 0, 0 + ]) + expected = np.array([2, 5, 9], dtype=np.int32) + result, = sa.nonzero() + tm.assert_numpy_array_equal(expected, result) + + sa = pd.SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0]) + result, = sa.nonzero() + tm.assert_numpy_array_equal(expected, result) + class TestSparseArrayAnalytics(object): diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index dd73ec69c3b9a..f802598542cb9 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -1360,3 +1360,16 @@ def test_assign_with_sparse_frame(self): for column in res.columns: assert type(res[column]) is SparseSeries + + @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.parametrize("how", ["all", "any"]) + def test_dropna(self, inplace, how): + # Tests regression #21172. + expected = pd.SparseDataFrame({"F2": [0, 1]}) + input_df = pd.SparseDataFrame( + {"F1": [float('nan'), float('nan')], "F2": [0, 1]} + ) + result_df = input_df.dropna(axis=1, inplace=inplace, how=how) + if inplace: + result_df = input_df + tm.assert_sp_frame_equal(expected, result_df)
The nonzero operation returned the nonzero locations of the underlying index. However we need to get the nonzero locations in the real array. For this operation to be faster an inverse index structure would be beneficial or it could be implemented using binary search. ```python sa = pd.SparseArray([float('nan'), float('nan'), 1, 0, 0, 2, 0, 0, 0, 3, 0, 0]) ``` returned `0, 3, 7`. The index is shifted by two because of the two first `NaN`s and that's why the `0, 3, 7` are returned. The correct result would be `2, 5, 9` and is found in the method. For the above sample the code works. However for other implementations of `SparseIndex` it could be broken. - [x] closes #21172 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21175
2018-05-22T20:27:37Z
2018-11-17T22:15:58Z
2018-11-17T22:15:57Z
2018-11-17T22:16:02Z
Remove deprecated Slepian test
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index d8e90ae0e1b35..74f2c977e0db2 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -41,7 +41,7 @@ def win_types(request): return request.param -@pytest.fixture(params=['kaiser', 'gaussian', 'general_gaussian', 'slepian']) +@pytest.fixture(params=['kaiser', 'gaussian', 'general_gaussian']) def win_types_special(request): return request.param @@ -1079,8 +1079,7 @@ def test_cmov_window_special(self, win_types_special): kwds = { 'kaiser': {'beta': 1.}, 'gaussian': {'std': 1.}, - 'general_gaussian': {'power': 2., 'width': 2.}, - 'slepian': {'width': 0.5}} + 'general_gaussian': {'power': 2., 'width': 2.}} vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) @@ -1090,8 +1089,6 @@ def test_cmov_window_special(self, win_types_special): 13.65671, 12.01002, np.nan, np.nan], 'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589, 11.73161, 13.08516, 12.95111, 12.74577, np.nan, np.nan], - 'slepian': [np.nan, np.nan, 9.81073, 10.89359, 11.70284, 12.88331, - 12.96079, 12.77008, np.nan, np.nan], 'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161, 12.75129, 12.90702, 12.83757, np.nan, np.nan] }
Partially addresses #21137.
https://api.github.com/repos/pandas-dev/pandas/pulls/21173
2018-05-22T16:22:41Z
2018-05-23T10:35:43Z
2018-05-23T10:35:43Z
2018-05-23T17:26:16Z
Small typo in deprecation message added in PR #21060
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 4c237da7b6d0e..e2b0b33053f83 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1246,7 +1246,7 @@ class Timedelta(_Timedelta): deprecated. Use 'array // timedelta.value' instead. If you want to obtain epochs from an array of timestamps, you can rather use - 'array - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")'. + '(array - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")'. """) warnings.warn(msg, FutureWarning) return other // self.value
https://api.github.com/repos/pandas-dev/pandas/pulls/21170
2018-05-22T10:31:47Z
2018-05-22T18:32:37Z
2018-05-22T18:32:37Z
2018-05-22T18:32:58Z
BUG: Enable stata files to be written to buffers
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index a071d7f3f5534..a7ba0dfbbd1c4 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -83,7 +83,7 @@ Indexing I/O ^^^ -- +- Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`) - Plotting diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0a07e85401638..1d8f225bd4342 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1774,8 +1774,11 @@ def to_stata(self, fname, convert_dates=None, write_index=True, Parameters ---------- - fname : str or buffer - String path of file-like object. + fname : path (string), buffer or path object + string, path object (pathlib.Path or py._path.local.LocalPath) or + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + data has been written. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. 
Options are 'tc', diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 8f91c7a497e2d..2797924985c70 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1758,11 +1758,25 @@ def value_labels(self): return self.value_label_dict -def _open_file_binary_write(fname, encoding): +def _open_file_binary_write(fname): + """ + Open a binary file or no-op if file-like + + Parameters + ---------- + fname : string path, path object or buffer + + Returns + ------- + file : file-like object + File object supporting write + own : bool + True if the file was created, otherwise False + """ if hasattr(fname, 'write'): # if 'b' not in fname.mode: - return fname - return open(fname, "wb") + return fname, False + return open(fname, "wb"), True def _set_endianness(endianness): @@ -1899,7 +1913,9 @@ class StataWriter(StataParser): ---------- fname : path (string), buffer or path object string, path object (pathlib.Path or py._path.local.LocalPath) or - object implementing a binary write() functions. + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + is written. .. versionadded:: 0.23.0 support for pathlib, py.path. 
@@ -1970,6 +1986,7 @@ def __init__(self, fname, data, convert_dates=None, write_index=True, self._time_stamp = time_stamp self._data_label = data_label self._variable_labels = variable_labels + self._own_file = True # attach nobs, nvars, data, varlist, typlist self._prepare_pandas(data) @@ -2183,9 +2200,7 @@ def _prepare_pandas(self, data): self.fmtlist[key] = self._convert_dates[key] def write_file(self): - self._file = _open_file_binary_write( - self._fname, self._encoding or self._default_encoding - ) + self._file, self._own_file = _open_file_binary_write(self._fname) try: self._write_header(time_stamp=self._time_stamp, data_label=self._data_label) @@ -2205,6 +2220,23 @@ def write_file(self): self._write_file_close_tag() self._write_map() finally: + self._close() + + def _close(self): + """ + Close the file if it was created by the writer. + + If a buffer or file-like object was passed in, for example a GzipFile, + then leave this file open for the caller to close. In either case, + attempt to flush the file contents to ensure they are written to disk + (if supported) + """ + # Some file-like objects might not support flush + try: + self._file.flush() + except AttributeError: + pass + if self._own_file: self._file.close() def _write_map(self): @@ -2374,7 +2406,7 @@ def _prepare_data(self): def _write_data(self): data = self.data - data.tofile(self._file) + self._file.write(data.tobytes()) def _null_terminate(self, s, as_string=False): null_byte = '\x00' @@ -2641,7 +2673,9 @@ class StataWriter117(StataWriter): ---------- fname : path (string), buffer or path object string, path object (pathlib.Path or py._path.local.LocalPath) or - object implementing a binary write() functions. + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + is written. 
data : DataFrame Input to save convert_dates : dict @@ -2879,7 +2913,7 @@ def _write_data(self): self._update_map('data') data = self.data self._file.write(b'<data>') - data.tofile(self._file) + self._file.write(data.tobytes()) self._file.write(b'</data>') def _write_strls(self): diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 110b790a65037..f3a465da4e87f 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -2,6 +2,8 @@ # pylint: disable=E1101 import datetime as dt +import io +import gzip import os import struct import warnings @@ -1473,3 +1475,28 @@ def test_invalid_date_conversion(self): with pytest.raises(ValueError): original.to_stata(path, convert_dates={'wrong_name': 'tc'}) + + @pytest.mark.parametrize('version', [114, 117]) + def test_nonfile_writing(self, version): + # GH 21041 + bio = io.BytesIO() + df = tm.makeDataFrame() + df.index.name = 'index' + with tm.ensure_clean() as path: + df.to_stata(bio, version=version) + bio.seek(0) + with open(path, 'wb') as dta: + dta.write(bio.read()) + reread = pd.read_stata(path, index_col='index') + tm.assert_frame_equal(df, reread) + + def test_gzip_writing(self): + # writing version 117 requires seek and cannot be used with gzip + df = tm.makeDataFrame() + df.index.name = 'index' + with tm.ensure_clean() as path: + with gzip.GzipFile(path, 'wb') as gz: + df.to_stata(gz, version=114) + with gzip.GzipFile(path, 'rb') as gz: + reread = pd.read_stata(gz, index_col='index') + tm.assert_frame_equal(df, reread)
Enable support for general file-like objects when exporting stata files - [x] closes #21041 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21169
2018-05-22T09:02:36Z
2018-05-24T22:11:46Z
2018-05-24T22:11:46Z
2018-06-12T14:46:44Z
DOC: move mention of #21115 from bugs to enhancements
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index f204fce3a525f..44f7280d5535f 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -15,6 +15,8 @@ and bug fixes. We recommend that all users upgrade to this version. New features ~~~~~~~~~~~~ +- :meth:`Index.droplevel` is now implemented also for flat indexes, for compatibility with MultiIndex (:issue:`21115`) + .. _whatsnew_0231.deprecations: @@ -75,7 +77,6 @@ Indexing ^^^^^^^^ - Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`) -- :meth:`Index.droplevel` is now implemented also for flat indexes, for compatibility with MultiIndex (:issue:`21115`) - I/O
Very trivial followup to https://github.com/pandas-dev/pandas/pull/21116#discussion_r189738781
https://api.github.com/repos/pandas-dev/pandas/pulls/21165
2018-05-22T05:20:42Z
2018-05-22T10:19:52Z
2018-05-22T10:19:52Z
2018-07-08T08:31:43Z
BUG: Fix nested_to_record with None values in nested levels
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 974527624a312..e29cb0a5a2626 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -97,6 +97,7 @@ I/O - Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`) - Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`) +- Bug when :meth:`pandas.io.json.json_normalize` was called with ``None`` values in nested levels in JSON (:issue:`21158`) - Bug in :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` causes encoding error when compression and encoding are specified (:issue:`21241`, :issue:`21118`) - Bug in :meth:`read_stata` and :class:`StataReader` which did not correctly decode utf-8 strings on Python 3 from Stata 14 files (dta version 118) (:issue:`21244`) - diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py index 549204abd3caf..17393d458e746 100644 --- a/pandas/io/json/normalize.py +++ b/pandas/io/json/normalize.py @@ -80,7 +80,7 @@ def nested_to_record(ds, prefix="", sep=".", level=0): if level != 0: # so we skip copying for top level, common case v = new_d.pop(k) new_d[newkey] = v - if v is None: # pop the key if the value is None + elif v is None: # pop the key if the value is None new_d.pop(k) continue else: diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index 0fabaf747b6de..dc34ba81f679d 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -375,3 +375,59 @@ def test_nonetype_dropping(self): 'info.last_updated': '26/05/2012'}] assert result == expected + + def test_nonetype_top_level_bottom_level(self): + # GH21158: If inner level json has a key with a null value + # make sure it doesnt do a new_d.pop twice and except + data = { + "id": None, + "location": { + "country": { + "state": { + "id": 
None, + "town.info": { + "id": None, + "region": None, + "x": 49.151580810546875, + "y": -33.148521423339844, + "z": 27.572303771972656}}} + } + } + result = nested_to_record(data) + expected = { + 'location.country.state.id': None, + 'location.country.state.town.info.id': None, + 'location.country.state.town.info.region': None, + 'location.country.state.town.info.x': 49.151580810546875, + 'location.country.state.town.info.y': -33.148521423339844, + 'location.country.state.town.info.z': 27.572303771972656} + assert result == expected + + def test_nonetype_multiple_levels(self): + # GH21158: If inner level json has a key with a null value + # make sure it doesnt do a new_d.pop twice and except + data = { + "id": None, + "location": { + "id": None, + "country": { + "id": None, + "state": { + "id": None, + "town.info": { + "region": None, + "x": 49.151580810546875, + "y": -33.148521423339844, + "z": 27.572303771972656}}} + } + } + result = nested_to_record(data) + expected = { + 'location.id': None, + 'location.country.id': None, + 'location.country.state.id': None, + 'location.country.state.town.info.region': None, + 'location.country.state.town.info.x': 49.151580810546875, + 'location.country.state.town.info.y': -33.148521423339844, + 'location.country.state.town.info.z': 27.572303771972656} + assert result == expected
- [ ] closes #21158 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Continue after pop so you dont pop with the same key twice in a row
https://api.github.com/repos/pandas-dev/pandas/pulls/21164
2018-05-22T04:39:53Z
2018-06-07T15:58:48Z
2018-06-07T15:58:48Z
2018-06-12T16:30:33Z
Cleanup clipboard tests
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index 98c0effabec84..80fddd50fc9a8 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -9,10 +9,11 @@ from pandas import DataFrame from pandas import read_clipboard from pandas import get_option +from pandas.compat import PY2 from pandas.util import testing as tm from pandas.util.testing import makeCustomDataframe as mkdf from pandas.io.clipboard.exceptions import PyperclipException -from pandas.io.clipboard import clipboard_set +from pandas.io.clipboard import clipboard_set, clipboard_get try: @@ -22,73 +23,134 @@ _DEPS_INSTALLED = 0 +def build_kwargs(sep, excel): + kwargs = {} + if excel != 'default': + kwargs['excel'] = excel + if sep != 'default': + kwargs['sep'] = sep + return kwargs + + +@pytest.fixture(params=['delims', 'utf8', 'string', 'long', 'nonascii', + 'colwidth', 'mixed', 'float', 'int']) +def df(request): + data_type = request.param + + if data_type == 'delims': + return pd.DataFrame({'a': ['"a,\t"b|c', 'd\tef´'], + 'b': ['hi\'j', 'k\'\'lm']}) + elif data_type == 'utf8': + return pd.DataFrame({'a': ['µasd', 'Ωœ∑´'], + 'b': ['øπ∆˚¬', 'œ∑´®']}) + elif data_type == 'string': + return mkdf(5, 3, c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + elif data_type == 'long': + max_rows = get_option('display.max_rows') + return mkdf(max_rows + 1, 3, + data_gen_f=lambda *args: randint(2), + c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + elif data_type == 'nonascii': + return pd.DataFrame({'en': 'in English'.split(), + 'es': 'en español'.split()}) + elif data_type == 'colwidth': + _cw = get_option('display.max_colwidth') + 1 + return mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw, + c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + elif data_type == 'mixed': + return DataFrame({'a': np.arange(1.0, 6.0) + 0.01, + 'b': np.arange(1, 6), + 'c': list('abcde')}) + elif 
data_type == 'float': + return mkdf(5, 3, data_gen_f=lambda r, c: float(r) + 0.01, + c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + elif data_type == 'int': + return mkdf(5, 3, data_gen_f=lambda *args: randint(2), + c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + else: + raise ValueError + + @pytest.mark.single @pytest.mark.skipif(not _DEPS_INSTALLED, reason="clipboard primitives not installed") class TestClipboard(object): - - @classmethod - def setup_class(cls): - cls.data = {} - cls.data['string'] = mkdf(5, 3, c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - cls.data['int'] = mkdf(5, 3, data_gen_f=lambda *args: randint(2), - c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - cls.data['float'] = mkdf(5, 3, - data_gen_f=lambda r, c: float(r) + 0.01, - c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - cls.data['mixed'] = DataFrame({'a': np.arange(1.0, 6.0) + 0.01, - 'b': np.arange(1, 6), - 'c': list('abcde')}) - - # Test columns exceeding "max_colwidth" (GH8305) - _cw = get_option('display.max_colwidth') + 1 - cls.data['colwidth'] = mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw, - c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - # Test GH-5346 - max_rows = get_option('display.max_rows') - cls.data['longdf'] = mkdf(max_rows + 1, 3, - data_gen_f=lambda *args: randint(2), - c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - # Test for non-ascii text: GH9263 - cls.data['nonascii'] = pd.DataFrame({'en': 'in English'.split(), - 'es': 'en español'.split()}) - # unicode round trip test for GH 13747, GH 12529 - cls.data['utf8'] = pd.DataFrame({'a': ['µasd', 'Ωœ∑´'], - 'b': ['øπ∆˚¬', 'œ∑´®']}) - cls.data_types = list(cls.data.keys()) - - @classmethod - def teardown_class(cls): - del cls.data_types, cls.data - - def check_round_trip_frame(self, data_type, excel=None, sep=None, + def 
check_round_trip_frame(self, data, excel=None, sep=None, encoding=None): - data = self.data[data_type] data.to_clipboard(excel=excel, sep=sep, encoding=encoding) - if sep is not None: - result = read_clipboard(sep=sep, index_col=0, encoding=encoding) - else: - result = read_clipboard(encoding=encoding) + result = read_clipboard(sep=sep or '\t', index_col=0, + encoding=encoding) tm.assert_frame_equal(data, result, check_dtype=False) - def test_round_trip_frame_sep(self): - for dt in self.data_types: - self.check_round_trip_frame(dt, sep=',') - self.check_round_trip_frame(dt, sep=r'\s+') - self.check_round_trip_frame(dt, sep='|') - - def test_round_trip_frame_string(self): - for dt in self.data_types: - self.check_round_trip_frame(dt, excel=False) - - def test_round_trip_frame(self): - for dt in self.data_types: - self.check_round_trip_frame(dt) + # Test that default arguments copy as tab delimited + @pytest.mark.xfail(reason='to_clipboard defaults to space delim. ' + 'Issue in #21104, Fixed in #21111') + def test_round_trip_frame(self, df): + self.check_round_trip_frame(df) + + # Test that explicit delimiters are respected + @pytest.mark.parametrize('sep', ['\t', ',', '|']) + def test_round_trip_frame_sep(self, df, sep): + self.check_round_trip_frame(df, sep=sep) + + # Test white space separator + @pytest.mark.xfail(reason="Fails on 'delims' df because quote escapes " + "aren't handled correctly in default c engine. Fixed " + "in #21111 by defaulting to python engine for " + "whitespace separator") + def test_round_trip_frame_string(self, df): + df.to_clipboard(excel=False, sep=None) + result = read_clipboard() + assert df.to_string() == result.to_string() + assert df.shape == result.shape + + # Two character separator is not supported in to_clipboard + # Test that multi-character separators are not silently passed + @pytest.mark.xfail(reason="Not yet implemented. 
Fixed in #21111") + def test_excel_sep_warning(self, df): + with tm.assert_produces_warning(): + df.to_clipboard(excel=True, sep=r'\t') + + # Separator is ignored when excel=False and should produce a warning + @pytest.mark.xfail(reason="Not yet implemented. Fixed in #21111") + def test_copy_delim_warning(self, df): + with tm.assert_produces_warning(): + df.to_clipboard(excel=False, sep='\t') + + # Tests that the default behavior of to_clipboard is tab + # delimited and excel="True" + @pytest.mark.xfail(reason="to_clipboard defaults to space delim. Issue in " + "#21104, Fixed in #21111") + @pytest.mark.parametrize('sep', ['\t', None, 'default']) + @pytest.mark.parametrize('excel', [True, None, 'default']) + def test_clipboard_copy_tabs_default(self, sep, excel, df): + kwargs = build_kwargs(sep, excel) + df.to_clipboard(**kwargs) + if PY2: + # to_clipboard copies unicode, to_csv produces bytes. This is + # expected behavior + assert clipboard_get().encode('utf-8') == df.to_csv(sep='\t') + else: + assert clipboard_get() == df.to_csv(sep='\t') + + # Tests reading of white space separated tables + @pytest.mark.xfail(reason="Fails on 'delims' df because quote escapes " + "aren't handled correctly. in default c engine. 
Fixed " + "in #21111 by defaulting to python engine for " + "whitespace separator") + @pytest.mark.parametrize('sep', [None, 'default']) + @pytest.mark.parametrize('excel', [False]) + def test_clipboard_copy_strings(self, sep, excel, df): + kwargs = build_kwargs(sep, excel) + df.to_clipboard(**kwargs) + result = read_clipboard(sep=r'\s+') + assert result.to_string() == df.to_string() + assert df.shape == result.shape def test_read_clipboard_infer_excel(self): # gh-19010: avoid warnings @@ -124,15 +186,15 @@ def test_read_clipboard_infer_excel(self): tm.assert_frame_equal(res, exp) - def test_invalid_encoding(self): + def test_invalid_encoding(self, df): # test case for testing invalid encoding - data = self.data['string'] with pytest.raises(ValueError): - data.to_clipboard(encoding='ascii') + df.to_clipboard(encoding='ascii') with pytest.raises(NotImplementedError): pd.read_clipboard(encoding='ascii') - def test_round_trip_valid_encodings(self): - for enc in ['UTF-8', 'utf-8', 'utf8']: - for dt in self.data_types: - self.check_round_trip_frame(dt, encoding=enc) + @pytest.mark.xfail(reason='to_clipboard defaults to space delim. ' + 'Issue in #21104, Fixed in #21111') + @pytest.mark.parametrize('enc', ['UTF-8', 'utf-8', 'utf8']) + def test_round_trip_valid_encodings(self, enc, df): + self.check_round_trip_frame(df, encoding=enc)
As requested in #21111, I've refactored the original clipboard tests as well as adding my own. The set of tests have been improved to include testing of: - failure of tab delimited tables (#21104) - errors relating to copying and reading space-delimited tables - copy and pasting tables where the text contains quotes or delimiters As a result, these tests will fail on the current master branch, but should succeed on the commit in #21111 @WillAyd
https://api.github.com/repos/pandas-dev/pandas/pulls/21163
2018-05-22T02:52:55Z
2018-06-26T22:19:42Z
2018-06-26T22:19:42Z
2018-07-02T15:48:18Z
BUG: Fix interval_range when start/periods or end/periods are specified with float start/end
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 44f7280d5535f..a071d7f3f5534 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -77,6 +77,7 @@ Indexing ^^^^^^^^ - Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`) +- Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`) - I/O diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 408a8cc435b63..8f8d8760583ce 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1572,6 +1572,10 @@ def interval_range(start=None, end=None, periods=None, freq=None, periods += 1 if is_number(endpoint): + # force consistency between start/end/freq (lower end if freq skips it) + if com._all_not_none(start, end, freq): + end -= (end - start) % freq + # compute the period/start/end if unspecified (at most one) if periods is None: periods = int((end - start) // freq) + 1 @@ -1580,10 +1584,6 @@ def interval_range(start=None, end=None, periods=None, freq=None, elif end is None: end = start + (periods - 1) * freq - # force end to be consistent with freq (lower if freq skips end) - if freq is not None: - end -= end % freq - breaks = np.linspace(start, end, periods) if all(is_integer(x) for x in com._not_none(start, end, freq)): # np.linspace always produces float output diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py index 0fadfcf0c7f28..29fe2b0185662 100644 --- a/pandas/tests/indexes/interval/test_interval_range.py +++ b/pandas/tests/indexes/interval/test_interval_range.py @@ -110,6 +110,8 @@ def test_constructor_timedelta(self, closed, name, freq, periods): @pytest.mark.parametrize('start, end, freq, expected_endpoint', [ (0, 10, 3, 9), + (0, 10, 1.5, 9), + (0.5, 10, 3, 9.5), 
(Timedelta('0D'), Timedelta('10D'), '2D4H', Timedelta('8D16H')), (Timestamp('2018-01-01'), Timestamp('2018-02-09'), @@ -125,6 +127,22 @@ def test_early_truncation(self, start, end, freq, expected_endpoint): result_endpoint = result.right[-1] assert result_endpoint == expected_endpoint + @pytest.mark.parametrize('start, end, freq', [ + (0.5, None, None), + (None, 4.5, None), + (0.5, None, 1.5), + (None, 6.5, 1.5)]) + def test_no_invalid_float_truncation(self, start, end, freq): + # GH 21161 + if freq is None: + breaks = [0.5, 1.5, 2.5, 3.5, 4.5] + else: + breaks = [0.5, 2.0, 3.5, 5.0, 6.5] + expected = IntervalIndex.from_breaks(breaks) + + result = interval_range(start=start, end=end, periods=4, freq=freq) + tm.assert_index_equal(result, expected) + @pytest.mark.parametrize('start, mid, end', [ (Timestamp('2018-03-10', tz='US/Eastern'), Timestamp('2018-03-10 23:30:00', tz='US/Eastern'),
- [X] closes #21161 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Should be good for 0.23.1; the code changes only hit the `interval_range` function, which is outside the `IntervalIndex` class, so should be independent of any enhancements related to `IntervalIndex` that we might want to push to 0.24.0.
https://api.github.com/repos/pandas-dev/pandas/pulls/21162
2018-05-22T00:42:16Z
2018-05-23T04:22:15Z
2018-05-23T04:22:14Z
2018-06-08T17:14:49Z
ENH: Integer NA Extension Array
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index a0076118a28a7..4d5c34b883d18 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -13,6 +13,7 @@ v0.24.0 (Month XX, 2018) New features ~~~~~~~~~~~~ + - ``ExcelWriter`` now accepts ``mode`` as a keyword argument, enabling append to existing workbooks when using the ``openpyxl`` engine (:issue:`3441`) .. _whatsnew_0240.enhancements.extension_array_operators: @@ -31,6 +32,62 @@ See the :ref:`ExtensionArray Operator Support <extending.extension.operator>` documentation section for details on both ways of adding operator support. +.. _whatsnew_0240.enhancements.intna: + +Optional Integer NA Support +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pandas has gained the ability to hold integer dtypes with missing values. This long requested feature is enabled through the use of :ref:`extension types <extending.extension-types>`. +Here is an example of the usage. + +We can construct a ``Series`` with the specified dtype. The dtype string ``Int64`` is a pandas ``ExtensionDtype``. Specifying a list or array using the traditional missing value +marker of ``np.nan`` will infer to integer dtype. The display of the ``Series`` will also use the ``NaN`` to indicate missing values in string outputs. (:issue:`20700`, :issue:`20747`) + +.. ipython:: python + + s = pd.Series([1, 2, np.nan], dtype='Int64') + s + + +Operations on these dtypes will propagate ``NaN`` as other pandas operations. + +.. ipython:: python + + # arithmetic + s + 1 + + # comparison + s == 1 + + # indexing + s.iloc[1:3] + + # operate with other dtypes + s + s.iloc[1:3].astype('Int8') + + # coerce when needed + s + 0.01 + +These dtypes can operate as part of of ``DataFrame``. + +.. ipython:: python + + df = pd.DataFrame({'A': s, 'B': [1, 1, 3], 'C': list('aab')}) + df + df.dtypes + + +These dtypes can be merged & reshaped & casted. + +.. 
ipython:: python + + pd.concat([df[['A']], df[['B', 'C']]], axis=1).dtypes + df['A'].astype(float) + +.. warning:: + + The Integer NA support currently uses the captilized dtype version, e.g. ``Int8`` as compared to the traditional ``int8``. This may be changed at a future date. + .. _whatsnew_0240.enhancements.read_html: ``read_html`` Enhancements @@ -256,6 +313,7 @@ Previous Behavior: ExtensionType Changes ^^^^^^^^^^^^^^^^^^^^^ +- ``ExtensionArray`` has gained the abstract methods ``.dropna()`` (:issue:`21185`) - ``ExtensionDtype`` has gained the ability to instantiate from string dtypes, e.g. ``decimal`` would instantiate a registered ``DecimalDtype``; furthermore the ``ExtensionDtype`` has gained the method ``construct_array_type`` (:issue:`21185`) - The ``ExtensionArray`` constructor, ``_from_sequence`` now take the keyword arg ``copy=False`` (:issue:`21185`) diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py index 83a781cdd38fd..9132c74091410 100644 --- a/pandas/core/arrays/__init__.py +++ b/pandas/core/arrays/__init__.py @@ -1,7 +1,10 @@ from .base import (ExtensionArray, # noqa + ExtensionOpsMixin, ExtensionScalarOpsMixin) from .categorical import Categorical # noqa from .datetimes import DatetimeArrayMixin # noqa from .interval import IntervalArray # noqa from .period import PeriodArrayMixin # noqa from .timedeltas import TimedeltaArrayMixin # noqa +from .integer import ( # noqa + IntegerArray, to_integer_array) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index fe4e461b0bd4f..01ed085dd2b9f 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -12,8 +12,8 @@ from pandas.errors import AbstractMethodError from pandas.compat.numpy import function as nv from pandas.compat import set_function_name, PY3 -from pandas.core.dtypes.common import is_list_like from pandas.core import ops +from pandas.core.dtypes.common import is_list_like _not_implemented_message = "{} does not implement {}." 
@@ -88,7 +88,7 @@ class ExtensionArray(object): # Constructors # ------------------------------------------------------------------------ @classmethod - def _from_sequence(cls, scalars, copy=False): + def _from_sequence(cls, scalars, dtype=None, copy=False): """Construct a new ExtensionArray from a sequence of scalars. Parameters @@ -96,8 +96,11 @@ def _from_sequence(cls, scalars, copy=False): scalars : Sequence Each element will be an instance of the scalar type for this array, ``cls.dtype.type``. + dtype : dtype, optional + Construct for this particular dtype. This should be a Dtype + compatible with the ExtensionArray. copy : boolean, default False - if True, copy the underlying data + If True, copy the underlying data. Returns ------- ExtensionArray @@ -378,7 +381,7 @@ def fillna(self, value=None, method=None, limit=None): func = pad_1d if method == 'pad' else backfill_1d new_values = func(self.astype(object), limit=limit, mask=mask) - new_values = self._from_sequence(new_values) + new_values = self._from_sequence(new_values, dtype=self.dtype) else: # fill with value new_values = self.copy() @@ -407,7 +410,7 @@ def unique(self): from pandas import unique uniques = unique(self.astype(object)) - return self._from_sequence(uniques) + return self._from_sequence(uniques, dtype=self.dtype) def _values_for_factorize(self): # type: () -> Tuple[ndarray, Any] @@ -559,7 +562,7 @@ def take(self, indices, allow_fill=False, fill_value=None): result = take(data, indices, fill_value=fill_value, allow_fill=allow_fill) - return self._from_sequence(result) + return self._from_sequence(result, dtype=self.dtype) """ # Implementer note: The `fill_value` parameter should be a user-facing # value, an instance of self.dtype.type. 
When passed `fill_value=None`, diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 973a8af76bb07..0d73b2c60d76d 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -488,8 +488,8 @@ def _constructor(self): return Categorical @classmethod - def _from_sequence(cls, scalars): - return Categorical(scalars) + def _from_sequence(cls, scalars, dtype=None, copy=False): + return Categorical(scalars, dtype=dtype) def copy(self): """ Copy constructor. """ diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py new file mode 100644 index 0000000000000..c126117060c3d --- /dev/null +++ b/pandas/core/arrays/integer.py @@ -0,0 +1,599 @@ +import sys +import warnings +import copy +import numpy as np + +from pandas._libs.lib import infer_dtype +from pandas.util._decorators import cache_readonly +from pandas.compat import u, range +from pandas.compat import set_function_name + +from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass +from pandas.core.dtypes.common import ( + is_integer, is_scalar, is_float, + is_float_dtype, + is_integer_dtype, + is_object_dtype, + is_list_like) +from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.dtypes import registry +from pandas.core.dtypes.missing import isna, notna + +from pandas.io.formats.printing import ( + format_object_summary, format_object_attrs, default_pprint) + + +class _IntegerDtype(ExtensionDtype): + """ + An ExtensionDtype to hold a single size & kind of integer dtype. + + These specific implementations are subclasses of the non-public + _IntegerDtype. For example we have Int8Dtype to represnt signed int 8s. + + The attributes name & type are set when these subclasses are created. 
+ """ + name = None + type = None + na_value = np.nan + + @cache_readonly + def is_signed_integer(self): + return self.kind == 'i' + + @cache_readonly + def is_unsigned_integer(self): + return self.kind == 'u' + + @cache_readonly + def numpy_dtype(self): + """ Return an instance of our numpy dtype """ + return np.dtype(self.type) + + @cache_readonly + def kind(self): + return self.numpy_dtype.kind + + @classmethod + def construct_array_type(cls): + """Return the array type associated with this dtype + + Returns + ------- + type + """ + return IntegerArray + + @classmethod + def construct_from_string(cls, string): + """ + Construction from a string, raise a TypeError if not + possible + """ + if string == cls.name: + return cls() + raise TypeError("Cannot construct a '{}' from " + "'{}'".format(cls, string)) + + +def to_integer_array(values, dtype=None): + """ + Infer and return an integer array of the values. + + Parameters + ---------- + values : 1D list-like + dtype : dtype, optional + dtype to coerce + + Returns + ------- + IntegerArray + + Raises + ------ + TypeError if incompatible types + """ + return IntegerArray(values, dtype=dtype, copy=False) + + +def safe_cast(values, dtype, copy): + """ + Safely cast the values to the dtype if they + are equivalent, meaning floats must be equivalent to the + ints. 
+ + """ + + try: + return values.astype(dtype, casting='safe', copy=copy) + except TypeError: + + casted = values.astype(dtype, copy=copy) + if (casted == values).all(): + return casted + + raise TypeError("cannot safely cast non-equivalent {} to {}".format( + values.dtype, np.dtype(dtype))) + + +def coerce_to_array(values, dtype, mask=None, copy=False): + """ + Coerce the input values array to numpy arrays with a mask + + Parameters + ---------- + values : 1D list-like + dtype : integer dtype + mask : boolean 1D array, optional + copy : boolean, default False + if True, copy the input + + Returns + ------- + tuple of (values, mask) + """ + if dtype is not None: + if not issubclass(type(dtype), _IntegerDtype): + try: + dtype = _dtypes[str(np.dtype(dtype))] + except KeyError: + raise ValueError("invalid dtype specified {}".format(dtype)) + + if isinstance(values, IntegerArray): + values, mask = values._data, values._mask + if dtype is not None: + values = values.astype(dtype.numpy_dtype, copy=False) + + if copy: + values = values.copy() + mask = mask.copy() + return values, mask + + values = np.array(values, copy=copy) + if is_object_dtype(values): + inferred_type = infer_dtype(values) + if inferred_type not in ['floating', 'integer', + 'mixed-integer', 'mixed-integer-float']: + raise TypeError("{} cannot be converted to an IntegerDtype".format( + values.dtype)) + + elif not (is_integer_dtype(values) or is_float_dtype(values)): + raise TypeError("{} cannot be converted to an IntegerDtype".format( + values.dtype)) + + if mask is None: + mask = isna(values) + else: + assert len(mask) == len(values) + + if not values.ndim == 1: + raise TypeError("values must be a 1D list-like") + if not mask.ndim == 1: + raise TypeError("mask must be a 1D list-like") + + # infer dtype if needed + if dtype is None: + if is_integer_dtype(values): + dtype = values.dtype + else: + dtype = np.dtype('int64') + else: + dtype = dtype.type + + # if we are float, let's make sure that we can + # 
safely cast + + # we copy as need to coerce here + if mask.any(): + values = values.copy() + values[mask] = 1 + values = safe_cast(values, dtype, copy=False) + else: + values = safe_cast(values, dtype, copy=False) + + return values, mask + + +class IntegerArray(ExtensionArray, ExtensionOpsMixin): + """ + We represent an IntegerArray with 2 numpy arrays + - data: contains a numpy integer array of the appropriate dtype + - mask: a boolean array holding a mask on the data, False is missing + """ + + @cache_readonly + def dtype(self): + return _dtypes[str(self._data.dtype)] + + def __init__(self, values, mask=None, dtype=None, copy=False): + """ + Parameters + ---------- + values : 1D list-like / IntegerArray + mask : 1D list-like, optional + dtype : subclass of _IntegerDtype, optional + copy : bool, default False + + Returns + ------- + IntegerArray + """ + self._data, self._mask = coerce_to_array( + values, dtype=dtype, mask=mask, copy=copy) + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + return cls(scalars, dtype=dtype, copy=copy) + + @classmethod + def _from_factorized(cls, values, original): + return cls(values, dtype=original.dtype) + + def __getitem__(self, item): + if is_integer(item): + if self._mask[item]: + return self.dtype.na_value + return self._data[item] + return type(self)(self._data[item], + mask=self._mask[item], + dtype=self.dtype) + + def _coerce_to_ndarray(self): + """ + coerce to an ndarary of object dtype + """ + + # TODO(jreback) make this better + data = self._data.astype(object) + data[self._mask] = self._na_value + return data + + def __array__(self, dtype=None): + """ + the array interface, return my values + We return an object array here to preserve our scalar values + """ + return self._coerce_to_ndarray() + + def __iter__(self): + """Iterate over elements of the array. + + """ + # This needs to be implemented so that pandas recognizes extension + # arrays as list-like. 
The default implementation makes successive + # calls to ``__getitem__``, which may be slower than necessary. + for i in range(len(self)): + if self._mask[i]: + yield self.dtype.na_value + else: + yield self._data[i] + + def _formatting_values(self): + # type: () -> np.ndarray + return self._coerce_to_ndarray() + + def take(self, indexer, allow_fill=False, fill_value=None): + from pandas.api.extensions import take + + # we always fill with 1 internally + # to avoid upcasting + data_fill_value = 1 if isna(fill_value) else fill_value + result = take(self._data, indexer, fill_value=data_fill_value, + allow_fill=allow_fill) + + mask = take(self._mask, indexer, fill_value=True, + allow_fill=allow_fill) + + # if we are filling + # we only fill where the indexer is null + # not existing missing values + # TODO(jreback) what if we have a non-na float as a fill value? + if allow_fill and notna(fill_value): + fill_mask = np.asarray(indexer) == -1 + result[fill_mask] = fill_value + mask = mask ^ fill_mask + + return type(self)(result, mask=mask, dtype=self.dtype, copy=False) + + def copy(self, deep=False): + data, mask = self._data, self._mask + if deep: + data = copy.deepcopy(data) + mask = copy.deepcopy(mask) + else: + data = data.copy() + mask = mask.copy() + return type(self)(data, mask, dtype=self.dtype, copy=False) + + def __setitem__(self, key, value): + _is_scalar = is_scalar(value) + if _is_scalar: + value = [value] + value, mask = coerce_to_array(value, dtype=self.dtype) + + if _is_scalar: + value = value[0] + mask = mask[0] + + self._data[key] = value + self._mask[key] = mask + + def __len__(self): + return len(self._data) + + def __repr__(self): + """ + Return a string representation for this object. + + Invoked by unicode(df) in py2 only. Yields a Unicode String in both + py2/py3. 
+ """ + klass = self.__class__.__name__ + data = format_object_summary(self, default_pprint, False) + attrs = format_object_attrs(self) + space = " " + + prepr = (u(",%s") % + space).join(u("%s=%s") % (k, v) for k, v in attrs) + + res = u("%s(%s%s)") % (klass, data, prepr) + + return res + + @property + def nbytes(self): + return self._data.nbytes + self._mask.nbytes + + def isna(self): + return self._mask + + @property + def _na_value(self): + return np.nan + + @classmethod + def _concat_same_type(cls, to_concat): + data = np.concatenate([x._data for x in to_concat]) + mask = np.concatenate([x._mask for x in to_concat]) + return cls(data, mask=mask, dtype=to_concat[0].dtype) + + def astype(self, dtype, copy=True): + """Cast to a NumPy array or IntegerArray with 'dtype'. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + copy : bool, default True + Whether to copy the data, even if not necessary. If False, + a copy is made only if the old dtype does not match the + new dtype. + + Returns + ------- + array : ndarray or IntegerArray + NumPy ndarray or IntergerArray with 'dtype' for its dtype. + + Raises + ------ + TypeError + if incompatible type with an IntegerDtype, equivalent of same_kind + casting + """ + + # if we are astyping to an existing IntegerDtype we can fastpath + if isinstance(dtype, _IntegerDtype): + result = self._data.astype(dtype.numpy_dtype, + casting='same_kind', copy=False) + return type(self)(result, mask=self._mask, + dtype=dtype, copy=False) + + # coerce + data = self._coerce_to_ndarray() + return data.astype(dtype=dtype, copy=False) + + @property + def _ndarray_values(self): + # type: () -> np.ndarray + """Internal pandas method for lossy conversion to a NumPy ndarray. + + This method is not part of the pandas interface. + + The expectation is that this is cheap to compute, and is primarily + used for interacting with our indexers. 
+ """ + return self._data + + def value_counts(self, dropna=True): + """ + Returns a Series containing counts of each category. + + Every category will have an entry, even those with a count of 0. + + Parameters + ---------- + dropna : boolean, default True + Don't include counts of NaN. + + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + + """ + + from pandas import Index, Series + + # compute counts on the data with no nans + data = self._data[~self._mask] + value_counts = Index(data).value_counts() + array = value_counts.values + + # TODO(extension) + # if we have allow Index to hold an ExtensionArray + # this is easier + index = value_counts.index.astype(object) + + # if we want nans, count the mask + if not dropna: + + # TODO(extension) + # appending to an Index *always* infers + # w/o passing the dtype + array = np.append(array, [self._mask.sum()]) + index = Index(np.concatenate( + [index.values, + np.array([np.nan], dtype=object)]), dtype=object) + + return Series(array, index=index) + + def _values_for_argsort(self): + # type: () -> ndarray + """Return values for sorting. + + Returns + ------- + ndarray + The transformed values should maintain the ordering between values + within the array. 
+ + See Also + -------- + ExtensionArray.argsort + """ + data = self._data.copy() + data[self._mask] = data.min() - 1 + return data + + @classmethod + def _create_comparison_method(cls, op): + def cmp_method(self, other): + + op_name = op.__name__ + mask = None + if isinstance(other, IntegerArray): + other, mask = other._data, other._mask + elif is_list_like(other): + other = np.asarray(other) + if other.ndim > 0 and len(self) != len(other): + raise ValueError('Lengths must match to compare') + + # numpy will show a DeprecationWarning on invalid elementwise + # comparisons, this will raise in the future + with warnings.catch_warnings(record=True): + with np.errstate(all='ignore'): + result = op(self._data, other) + + # nans propagate + if mask is None: + mask = self._mask + else: + mask = self._mask | mask + + result[mask] = True if op_name == 'ne' else False + return result + + name = '__{name}__'.format(name=op.__name__) + return set_function_name(cmp_method, name, cls) + + def _maybe_mask_result(self, result, mask, other, op_name): + """ + Parameters + ---------- + result : array-like + mask : array-like bool + other : scalar or array-like + op_name : str + """ + + # may need to fill infs + # and mask wraparound + if is_float_dtype(result): + mask |= (result == np.inf) | (result == -np.inf) + + # if we have a float operand we are by-definition + # a float result + # or our op is a divide + if ((is_float_dtype(other) or is_float(other)) or + (op_name in ['rtruediv', 'truediv', 'rdiv', 'div'])): + result[mask] = np.nan + return result + + return type(self)(result, mask=mask, dtype=self.dtype, copy=False) + + @classmethod + def _create_arithmetic_method(cls, op): + def integer_arithmetic_method(self, other): + + op_name = op.__name__ + mask = None + if isinstance(other, (ABCSeries, ABCIndexClass)): + other = getattr(other, 'values', other) + + if isinstance(other, IntegerArray): + other, mask = other._data, other._mask + elif getattr(other, 'ndim', 0) > 1: + raise 
NotImplementedError( + "can only perform ops with 1-d structures") + elif is_list_like(other): + other = np.asarray(other) + if not other.ndim: + other = other.item() + elif other.ndim == 1: + if not (is_float_dtype(other) or is_integer_dtype(other)): + raise TypeError( + "can only perform ops with numeric values") + else: + if not (is_float(other) or is_integer(other)): + raise TypeError("can only perform ops with numeric values") + + # nans propagate + if mask is None: + mask = self._mask + else: + mask = self._mask | mask + + with np.errstate(all='ignore'): + result = op(self._data, other) + + # divmod returns a tuple + if op_name == 'divmod': + div, mod = result + return (self._maybe_mask_result(div, mask, other, 'floordiv'), + self._maybe_mask_result(mod, mask, other, 'mod')) + + return self._maybe_mask_result(result, mask, other, op_name) + + name = '__{name}__'.format(name=op.__name__) + return set_function_name(integer_arithmetic_method, name, cls) + + +IntegerArray._add_arithmetic_ops() +IntegerArray._add_comparison_ops() + + +module = sys.modules[__name__] + + +# create the Dtype +_dtypes = {} +for dtype in ['int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64']: + + if dtype.startswith('u'): + name = "U{}".format(dtype[1:].capitalize()) + else: + name = dtype.capitalize() + classname = "{}Dtype".format(name) + attributes_dict = {'type': getattr(np, dtype), + 'name': name} + dtype_type = type(classname, (_IntegerDtype, ), attributes_dict) + setattr(module, classname, dtype_type) + + # register + registry.register(dtype_type) + _dtypes[dtype] = dtype_type() diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index c915b272aee8b..2c8853dec4f69 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -191,8 +191,8 @@ def _simple_new(cls, left, right, closed=None, return result @classmethod - def _from_sequence(cls, scalars): - return cls(scalars) + def _from_sequence(cls, scalars, 
dtype=None, copy=False): + return cls(scalars, dtype=dtype, copy=copy) @classmethod def _from_factorized(cls, values, original): diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 8675d3be06287..ead7b39309f5e 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -651,7 +651,8 @@ def astype_nansafe(arr, dtype, copy=True): # dispatch on extension dtype if needed if is_extension_array_dtype(dtype): - return dtype.array_type._from_sequence(arr, copy=copy) + return dtype.construct_array_type()._from_sequence( + arr, dtype=dtype, copy=copy) if not isinstance(dtype, np.dtype): dtype = pandas_dtype(dtype) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 5a2f91d775fb2..355bf58540219 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1714,6 +1714,11 @@ def is_extension_array_dtype(arr_or_dtype): if isinstance(arr_or_dtype, (ABCIndexClass, ABCSeries)): arr_or_dtype = arr_or_dtype._values + try: + arr_or_dtype = pandas_dtype(arr_or_dtype) + except TypeError: + pass + return isinstance(arr_or_dtype, (ExtensionDtype, ExtensionArray)) @@ -1804,6 +1809,9 @@ def _get_dtype(arr_or_dtype): TypeError : The passed in object is None. """ + # TODO(extension) + # replace with pandas_dtype + if arr_or_dtype is None: raise TypeError("Cannot deduce dtype from null object") if isinstance(arr_or_dtype, np.dtype): @@ -1851,6 +1859,8 @@ def _get_dtype_type(arr_or_dtype): passed in array or dtype object. 
""" + # TODO(extension) + # replace with pandas_dtype if isinstance(arr_or_dtype, np.dtype): return arr_or_dtype.type elif isinstance(arr_or_dtype, type): @@ -1976,7 +1986,17 @@ def pandas_dtype(dtype): Returns ------- np.dtype or a pandas dtype + + Raises + ------ + TypeError if not a dtype + """ + # short-circuit + if isinstance(dtype, np.ndarray): + return dtype.dtype + elif isinstance(dtype, np.dtype): + return dtype # registered extension types result = registry.find(dtype) @@ -1984,13 +2004,19 @@ def pandas_dtype(dtype): return result # un-registered extension types - if isinstance(dtype, ExtensionDtype): + elif isinstance(dtype, ExtensionDtype): return dtype + # try a numpy dtype + # raise a consistent TypeError if failed try: npdtype = np.dtype(dtype) - except (TypeError, ValueError): - raise + except Exception: + # we don't want to force a repr of the non-string + if not isinstance(dtype, string_types): + raise TypeError("data type not understood") + raise TypeError("data type '{}' not understood".format( + dtype)) # Any invalid dtype (such as pd.Timestamp) should raise an error. # np.dtype(invalid_type).kind = 0 for such objects. 
However, this will @@ -2000,6 +2026,6 @@ def pandas_dtype(dtype): if dtype in [object, np.object_, 'object', 'O']: return npdtype elif npdtype.kind == 'O': - raise TypeError('dtype {dtype} not understood'.format(dtype=dtype)) + raise TypeError("dtype '{}' not understood".format(dtype)) return npdtype diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 4a41b14cee071..5768fd361c3db 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -534,6 +534,7 @@ def _concat_index_asobject(to_concat, name=None): to_concat = [x._values if isinstance(x, Index) else x for x in to_concat] + return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 83b70baf4065b..93982b4466a7f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -44,6 +44,7 @@ is_datetime64_any_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, + is_extension_array_dtype, is_hashable, is_iterator, is_list_like, is_scalar) @@ -260,19 +261,33 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, name=name) # categorical - if is_categorical_dtype(data) or is_categorical_dtype(dtype): + elif is_categorical_dtype(data) or is_categorical_dtype(dtype): from .category import CategoricalIndex return CategoricalIndex(data, dtype=dtype, copy=copy, name=name, **kwargs) # interval - if ((is_interval_dtype(data) or is_interval_dtype(dtype)) and - not is_object_dtype(dtype)): + elif ((is_interval_dtype(data) or is_interval_dtype(dtype)) and + not is_object_dtype(dtype)): from .interval import IntervalIndex closed = kwargs.get('closed', None) return IntervalIndex(data, dtype=dtype, name=name, copy=copy, closed=closed) + # extension dtype + elif is_extension_array_dtype(data) or is_extension_array_dtype(dtype): + data = np.asarray(data) + if not (dtype is None or is_object_dtype(dtype)): + + # coerce to the provided dtype + data = 
dtype.construct_array_type()( + data, dtype=dtype, copy=False) + + # coerce to the object dtype + data = data.astype(object) + return Index(data, dtype=object, copy=copy, name=name, + **kwargs) + # index-like elif isinstance(data, (np.ndarray, Index, ABCSeries)): @@ -1170,10 +1185,15 @@ def _to_embed(self, keep_tz=False, dtype=None): def astype(self, dtype, copy=True): if is_dtype_equal(self.dtype, dtype): return self.copy() if copy else self + elif is_categorical_dtype(dtype): from .category import CategoricalIndex return CategoricalIndex(self.values, name=self.name, dtype=dtype, copy=copy) + + elif is_extension_array_dtype(dtype): + return Index(np.asarray(self), dtype=dtype, copy=copy) + try: if is_datetime64tz_dtype(dtype): from pandas import DatetimeIndex diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 5a5418dcc1e7f..5a87a8368dc88 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -39,7 +39,8 @@ is_re, is_re_compilable, is_scalar, - _get_dtype) + _get_dtype, + pandas_dtype) from pandas.core.dtypes.cast import ( maybe_downcast_to_dtype, maybe_upcast, @@ -631,9 +632,10 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, return self.make_block(Categorical(self.values, dtype=dtype)) + # convert dtypes if needed + dtype = pandas_dtype(dtype) + # astype processing - if not is_extension_array_dtype(dtype): - dtype = np.dtype(dtype) if is_dtype_equal(self.dtype, dtype): if copy: return self.copy() diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 16820dcbb55bc..a46c19e2d399c 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -638,7 +638,8 @@ def fill_zeros(result, x, y, name, fill): # if we have a fill of inf, then sign it correctly # (GH 6178 and PR 9308) if np.isinf(fill): - signs = np.sign(y if name.startswith(('r', '__r')) else x) + signs = y if name.startswith(('r', '__r')) else x + signs = np.sign(signs.astype('float', copy=False)) negative_inf_mask = (signs.ravel() < 
0) & mask np.putmask(result, negative_inf_mask, -fill) diff --git a/pandas/core/ops.py b/pandas/core/ops.py index bccc5a587bd83..a8c1b954a61b7 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -13,7 +13,7 @@ import numpy as np import pandas as pd -from pandas._libs import algos as libalgos, ops as libops +from pandas._libs import lib, algos as libalgos, ops as libops from pandas import compat from pandas.util._decorators import Appender @@ -135,6 +135,13 @@ def rfloordiv(left, right): def rmod(left, right): + # check if right is a string as % is the string + # formatting operation; this is a TypeError + # otherwise perform the op + if isinstance(right, compat.string_types): + raise TypeError("{typ} cannot perform the operation mod".format( + typ=type(left).__name__)) + return right % left @@ -1018,7 +1025,7 @@ def _align_method_SERIES(left, right, align_asobject=False): return left, right -def _construct_result(left, result, index, name, dtype): +def _construct_result(left, result, index, name, dtype=None): """ If the raw op result has a non-None name (e.g. it is an Index object) and the name argument is None, then passing name to the constructor will @@ -1030,7 +1037,7 @@ def _construct_result(left, result, index, name, dtype): return out -def _construct_divmod_result(left, result, index, name, dtype): +def _construct_divmod_result(left, result, index, name, dtype=None): """divmod returns a tuple of like indexed series instead of a single series. 
""" constructor = left._constructor @@ -1048,16 +1055,39 @@ def dispatch_to_extension_op(op, left, right): # The op calls will raise TypeError if the op is not defined # on the ExtensionArray + # TODO(jreback) + # we need to listify to avoid ndarray, or non-same-type extension array + # dispatching + if is_extension_array_dtype(left): - res_values = op(left.values, right) + + new_left = left.values + if isinstance(right, np.ndarray): + + # handle numpy scalars, this is a PITA + # TODO(jreback) + new_right = lib.item_from_zerodim(right) + if is_scalar(new_right): + new_right = [new_right] + new_right = list(new_right) + elif is_extension_array_dtype(right) and type(left) != type(right): + new_right = list(new_right) + else: + new_right = right + else: - # We know that left is not ExtensionArray and is Series and right is - # ExtensionArray. Want to force ExtensionArray op to get called - res_values = op(list(left.values), right.values) + new_left = list(left.values) + new_right = right + + res_values = op(new_left, new_right) res_name = get_op_result_name(left, right) - return left._constructor(res_values, index=left.index, - name=res_name) + + if op.__name__ == 'divmod': + return _construct_divmod_result( + left, res_values, left.index, res_name) + + return _construct_result(left, res_values, left.index, res_name) def _arith_method_SERIES(cls, op, special): @@ -1074,7 +1104,6 @@ def _arith_method_SERIES(cls, op, special): def na_op(x, y): import pandas.core.computation.expressions as expressions - try: result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) except TypeError: @@ -1095,6 +1124,20 @@ def na_op(x, y): return result def safe_na_op(lvalues, rvalues): + """ + return the result of evaluating na_op on the passed in values + + try coercion to object type if the native types are not compatible + + Parameters + ---------- + lvalues : array-like + rvalues : array-like + + Raises + ------ + TypeError: invalid operation + """ try: with 
np.errstate(all='ignore'): return na_op(lvalues, rvalues) @@ -1105,14 +1148,21 @@ def safe_na_op(lvalues, rvalues): raise def wrapper(left, right): - if isinstance(right, ABCDataFrame): return NotImplemented left, right = _align_method_SERIES(left, right) res_name = get_op_result_name(left, right) - if is_datetime64_dtype(left) or is_datetime64tz_dtype(left): + if is_categorical_dtype(left): + raise TypeError("{typ} cannot perform the operation " + "{op}".format(typ=type(left).__name__, op=str_rep)) + + elif (is_extension_array_dtype(left) or + is_extension_array_dtype(right)): + return dispatch_to_extension_op(op, left, right) + + elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left): result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex) return construct_result(left, result, index=left.index, name=res_name, @@ -1124,15 +1174,6 @@ def wrapper(left, right): index=left.index, name=res_name, dtype=result.dtype) - elif is_categorical_dtype(left): - raise TypeError("{typ} cannot perform the operation " - "{op}".format(typ=type(left).__name__, op=str_rep)) - - elif (is_extension_array_dtype(left) or - (is_extension_array_dtype(right) and - not is_categorical_dtype(right))): - return dispatch_to_extension_op(op, left, right) - lvalues = left.values rvalues = right if isinstance(rvalues, ABCSeries): @@ -1204,6 +1245,9 @@ def _comp_method_SERIES(cls, op, special): masker = _gen_eval_kwargs(op_name).get('masker', False) def na_op(x, y): + # TODO: + # should have guarantess on what x, y can be type-wise + # Extension Dtypes are not called here # dispatch to the categorical if we have a categorical # in either operand @@ -1312,7 +1356,7 @@ def wrapper(self, other, axis=None): elif (is_extension_array_dtype(self) or (is_extension_array_dtype(other) and - not is_categorical_dtype(other))): + not is_scalar(other))): return dispatch_to_extension_op(op, self, other) elif isinstance(other, ABCSeries): diff --git a/pandas/core/series.py b/pandas/core/series.py 
index 77445159129f2..3571e908fc6a7 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -236,13 +236,8 @@ def __init__(self, data=None, index=None, dtype=None, name=None, '`index` argument. `copy` must ' 'be False.') - elif is_extension_array_dtype(data) and dtype is not None: - if not data.dtype.is_dtype(dtype): - raise ValueError("Cannot specify a dtype '{}' with an " - "extension array of a different " - "dtype ('{}').".format(dtype, - data.dtype)) - + elif is_extension_array_dtype(data): + pass elif (isinstance(data, types.GeneratorType) or (compat.PY3 and isinstance(data, map))): data = list(data) @@ -4096,7 +4091,7 @@ def _try_cast(arr, take_fast_path): elif is_extension_array_dtype(dtype): # create an extension array from its dtype array_type = dtype.construct_array_type() - subarr = array_type(subarr, copy=copy) + subarr = array_type(subarr, dtype=dtype, copy=copy) elif dtype is not None and raise_cast_failure: raise @@ -4133,10 +4128,7 @@ def _try_cast(arr, take_fast_path): subarr = data if dtype is not None and not data.dtype.is_dtype(dtype): - msg = ("Cannot coerce extension array to dtype '{typ}'. 
" - "Do the coercion before passing to the constructor " - "instead.".format(typ=dtype)) - raise ValueError(msg) + subarr = data.astype(dtype) if copy: subarr = data.copy() diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py index 640b894e2245f..b6b81bb941a59 100644 --- a/pandas/tests/extension/base/__init__.py +++ b/pandas/tests/extension/base/__init__.py @@ -47,7 +47,7 @@ class TestMyDtype(BaseDtypeTests): from .groupby import BaseGroupbyTests # noqa from .interface import BaseInterfaceTests # noqa from .methods import BaseMethodsTests # noqa -from .ops import BaseArithmeticOpsTests, BaseComparisonOpsTests # noqa +from .ops import BaseArithmeticOpsTests, BaseComparisonOpsTests, BaseOpsUtil # noqa from .missing import BaseMissingTests # noqa from .reshaping import BaseReshapingTests # noqa from .setitem import BaseSetitemTests # noqa diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py index 52a12816c8722..2125458e8a0ba 100644 --- a/pandas/tests/extension/base/dtype.py +++ b/pandas/tests/extension/base/dtype.py @@ -54,3 +54,28 @@ def test_array_type(self, data, dtype): def test_array_type_with_arg(self, data, dtype): with pytest.raises(NotImplementedError): dtype.construct_array_type('foo') + + def test_check_dtype(self, data): + dtype = data.dtype + + # check equivalency for using .dtypes + df = pd.DataFrame({'A': pd.Series(data, dtype=dtype), + 'B': data, + 'C': 'foo', 'D': 1}) + + # np.dtype('int64') == 'Int64' == 'int64' + # so can't distinguish + if dtype.name == 'Int64': + expected = pd.Series([True, True, False, True], + index=list('ABCD')) + else: + expected = pd.Series([True, True, False, False], + index=list('ABCD')) + + result = df.dtypes == str(dtype) + self.assert_series_equal(result, expected) + + expected = pd.Series([True, True, False, False], + index=list('ABCD')) + result = df.dtypes.apply(str) == str(dtype) + self.assert_series_equal(result, expected) diff --git 
a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py index e9df49780f119..886a0f66b5f66 100644 --- a/pandas/tests/extension/base/getitem.py +++ b/pandas/tests/extension/base/getitem.py @@ -226,12 +226,14 @@ def test_reindex(self, data, na_value): n = len(data) result = s.reindex([-1, 0, n]) expected = pd.Series( - data._from_sequence([na_value, data[0], na_value]), + data._from_sequence([na_value, data[0], na_value], + dtype=s.dtype), index=[-1, 0, n]) self.assert_series_equal(result, expected) result = s.reindex([n, n + 1]) - expected = pd.Series(data._from_sequence([na_value, na_value]), + expected = pd.Series(data._from_sequence([na_value, na_value], + dtype=s.dtype), index=[n, n + 1]) self.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index 659b9757ac1e3..f7bfdb8ec218a 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -3,10 +3,12 @@ import operator import pandas as pd +from pandas.core import ops from .base import BaseExtensionTests class BaseOpsUtil(BaseExtensionTests): + def get_op_from_name(self, op_name): short_opname = op_name.strip('_') try: @@ -32,17 +34,38 @@ def _check_op(self, s, op, other, exc=NotImplementedError): with pytest.raises(exc): op(s, other) + def _check_divmod_op(self, s, op, other, exc=NotImplementedError): + # divmod has multiple return values, so check separatly + if exc is None: + result_div, result_mod = op(s, other) + if op is divmod: + expected_div, expected_mod = s // other, s % other + else: + expected_div, expected_mod = other // s, other % s + self.assert_series_equal(result_div, expected_div) + self.assert_series_equal(result_mod, expected_mod) + else: + with pytest.raises(exc): + divmod(s, other) + class BaseArithmeticOpsTests(BaseOpsUtil): """Various Series and DataFrame arithmetic ops methods.""" - def test_arith_scalar(self, data, all_arithmetic_operators): - # scalar + def 
test_arith_series_with_scalar(self, data, all_arithmetic_operators): + # series & scalar op_name = all_arithmetic_operators s = pd.Series(data) self.check_opname(s, op_name, s.iloc[0], exc=TypeError) - def test_arith_array(self, data, all_arithmetic_operators): + @pytest.mark.xfail(run=False, reason="_reduce needs implementation") + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): + # frame & scalar + op_name = all_arithmetic_operators + df = pd.DataFrame({'A': data}) + self.check_opname(df, op_name, data[0], exc=TypeError) + + def test_arith_series_with_array(self, data, all_arithmetic_operators): # ndarray & other series op_name = all_arithmetic_operators s = pd.Series(data) @@ -50,8 +73,8 @@ def test_arith_array(self, data, all_arithmetic_operators): def test_divmod(self, data): s = pd.Series(data) - self._check_op(s, divmod, 1, exc=TypeError) - self._check_op(1, divmod, s, exc=TypeError) + self._check_divmod_op(s, divmod, 1, exc=TypeError) + self._check_divmod_op(1, ops.rdivmod, s, exc=TypeError) def test_error(self, data, all_arithmetic_operators): # invalid ops diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py index c83726c5278a5..0340289e0b674 100644 --- a/pandas/tests/extension/base/reshaping.py +++ b/pandas/tests/extension/base/reshaping.py @@ -82,7 +82,8 @@ def test_concat_columns(self, data, na_value): # non-aligned df2 = pd.DataFrame({'B': [1, 2, 3]}, index=[1, 2, 3]) expected = pd.DataFrame({ - 'A': data._from_sequence(list(data[:3]) + [na_value]), + 'A': data._from_sequence(list(data[:3]) + [na_value], + dtype=data.dtype), 'B': [np.nan, 1, 2, 3]}) result = pd.concat([df1, df2], axis=1) @@ -96,8 +97,10 @@ def test_align(self, data, na_value): r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3])) # Assumes that the ctor can take a list of scalars of the type - e1 = pd.Series(data._from_sequence(list(a) + [na_value])) - e2 = pd.Series(data._from_sequence([na_value] + list(b))) + 
e1 = pd.Series(data._from_sequence(list(a) + [na_value], + dtype=data.dtype)) + e2 = pd.Series(data._from_sequence([na_value] + list(b), + dtype=data.dtype)) self.assert_series_equal(r1, e1) self.assert_series_equal(r2, e2) @@ -109,8 +112,10 @@ def test_align_frame(self, data, na_value): ) # Assumes that the ctor can take a list of scalars of the type - e1 = pd.DataFrame({'A': data._from_sequence(list(a) + [na_value])}) - e2 = pd.DataFrame({'A': data._from_sequence([na_value] + list(b))}) + e1 = pd.DataFrame({'A': data._from_sequence(list(a) + [na_value], + dtype=data.dtype)}) + e2 = pd.DataFrame({'A': data._from_sequence([na_value] + list(b), + dtype=data.dtype)}) self.assert_frame_equal(r1, e1) self.assert_frame_equal(r2, e2) @@ -120,7 +125,8 @@ def test_align_series_frame(self, data, na_value): df = pd.DataFrame({"col": np.arange(len(ser) + 1)}) r1, r2 = ser.align(df) - e1 = pd.Series(data._from_sequence(list(data) + [na_value]), + e1 = pd.Series(data._from_sequence(list(data) + [na_value], + dtype=data.dtype), name=ser.name) self.assert_series_equal(r1, e1) @@ -153,7 +159,8 @@ def test_merge(self, data, na_value): res = pd.merge(df1, df2) exp = pd.DataFrame( {'int1': [1, 1, 2], 'int2': [1, 2, 3], 'key': [0, 0, 1], - 'ext': data._from_sequence([data[0], data[0], data[1]])}) + 'ext': data._from_sequence([data[0], data[0], data[1]], + dtype=data.dtype)}) self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']]) res = pd.merge(df1, df2, how='outer') @@ -161,5 +168,6 @@ def test_merge(self, data, na_value): {'int1': [1, 1, 2, 3, np.nan], 'int2': [1, 2, 3, np.nan, 4], 'key': [0, 0, 1, 2, 3], 'ext': data._from_sequence( - [data[0], data[0], data[1], data[2], na_value])}) + [data[0], data[0], data[1], data[2], na_value], + dtype=data.dtype)}) self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']]) diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py index 715e8bd40a2d0..76f6b03907ef8 
100644 --- a/pandas/tests/extension/category/test_categorical.py +++ b/pandas/tests/extension/category/test_categorical.py @@ -189,11 +189,12 @@ class TestCasting(base.BaseCastingTests): class TestArithmeticOps(base.BaseArithmeticOpsTests): - def test_arith_scalar(self, data, all_arithmetic_operators): + def test_arith_series_with_scalar(self, data, all_arithmetic_operators): op_name = all_arithmetic_operators if op_name != '__rmod__': - super(TestArithmeticOps, self).test_arith_scalar(data, op_name) + super(TestArithmeticOps, self).test_arith_series_with_scalar( + data, op_name) else: pytest.skip('rmod never called when string is first argument') diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index 33adebbbe5780..108b8874b3ac5 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -1,6 +1,5 @@ import decimal import numbers -import random import sys import numpy as np @@ -38,7 +37,7 @@ def construct_from_string(cls, string): class DecimalArray(ExtensionArray, ExtensionScalarOpsMixin): dtype = DecimalDtype() - def __init__(self, values, copy=False): + def __init__(self, values, dtype=None, copy=False): for val in values: if not isinstance(val, self.dtype.type): raise TypeError("All values must be of type " + @@ -54,7 +53,7 @@ def __init__(self, values, copy=False): # self._values = self.values = self.data @classmethod - def _from_sequence(cls, scalars, copy=False): + def _from_sequence(cls, scalars, dtype=None, copy=False): return cls(scalars) @classmethod @@ -117,7 +116,3 @@ def _concat_same_type(cls, to_concat): DecimalArray._add_arithmetic_ops() DecimalArray._add_comparison_ops() - - -def make_data(): - return [decimal.Decimal(random.random()) for _ in range(100)] diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 0832e9f7d08df..bc7237f263b1d 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ 
b/pandas/tests/extension/decimal/test_decimal.py @@ -1,5 +1,6 @@ import decimal +import random import numpy as np import pandas as pd import pandas.util.testing as tm @@ -7,7 +8,11 @@ from pandas.tests.extension import base -from .array import DecimalDtype, DecimalArray, make_data +from .array import DecimalDtype, DecimalArray + + +def make_data(): + return [decimal.Decimal(random.random()) for _ in range(100)] @pytest.fixture @@ -176,35 +181,28 @@ def test_series_constructor_coerce_data_to_extension_dtype_raises(): pd.Series([0, 1, 2], dtype=DecimalDtype()) -def test_series_constructor_with_same_dtype_ok(): +def test_series_constructor_with_dtype(): arr = DecimalArray([decimal.Decimal('10.0')]) result = pd.Series(arr, dtype=DecimalDtype()) expected = pd.Series(arr) tm.assert_series_equal(result, expected) - -def test_series_constructor_coerce_extension_array_to_dtype_raises(): - arr = DecimalArray([decimal.Decimal('10.0')]) - xpr = r"Cannot specify a dtype 'int64' .* \('decimal'\)." - - with tm.assert_raises_regex(ValueError, xpr): - pd.Series(arr, dtype='int64') + result = pd.Series(arr, dtype='int64') + expected = pd.Series([10]) + tm.assert_series_equal(result, expected) -def test_dataframe_constructor_with_same_dtype_ok(): +def test_dataframe_constructor_with_dtype(): arr = DecimalArray([decimal.Decimal('10.0')]) result = pd.DataFrame({"A": arr}, dtype=DecimalDtype()) expected = pd.DataFrame({"A": arr}) tm.assert_frame_equal(result, expected) - -def test_dataframe_constructor_with_different_dtype_raises(): arr = DecimalArray([decimal.Decimal('10.0')]) - - xpr = "Cannot coerce extension array to dtype 'int64'. 
" - with tm.assert_raises_regex(ValueError, xpr): - pd.DataFrame({"A": arr}, dtype='int64') + result = pd.DataFrame({"A": arr}, dtype='int64') + expected = pd.DataFrame({"A": [10]}) + tm.assert_frame_equal(result, expected) class TestArithmeticOps(BaseDecimal, base.BaseArithmeticOpsTests): @@ -213,7 +211,7 @@ def check_opname(self, s, op_name, other, exc=None): super(TestArithmeticOps, self).check_opname(s, op_name, other, exc=None) - def test_arith_array(self, data, all_arithmetic_operators): + def test_arith_series_with_array(self, data, all_arithmetic_operators): op_name = all_arithmetic_operators s = pd.Series(data) diff --git a/pandas/tests/extension/integer/__init__.py b/pandas/tests/extension/integer/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/extension/integer/test_integer.py b/pandas/tests/extension/integer/test_integer.py new file mode 100644 index 0000000000000..451f7488bd38a --- /dev/null +++ b/pandas/tests/extension/integer/test_integer.py @@ -0,0 +1,700 @@ +import numpy as np +import pandas as pd +import pandas.util.testing as tm +import pytest + +from pandas.tests.extension import base +from pandas.api.types import ( + is_integer, is_scalar, is_float, is_float_dtype) +from pandas.core.dtypes.generic import ABCIndexClass + +from pandas.core.arrays import ( + to_integer_array, IntegerArray) +from pandas.core.arrays.integer import ( + Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, + UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype) + + +def make_data(): + return (list(range(8)) + + [np.nan] + + list(range(10, 98)) + + [np.nan] + + [99, 100]) + + +@pytest.fixture(params=[Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, + UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype]) +def dtype(request): + return request.param() + + +@pytest.fixture +def data(dtype): + return IntegerArray(make_data(), dtype=dtype) + + +@pytest.fixture +def data_missing(dtype): + return IntegerArray([np.nan, 1], dtype=dtype) + + 
+@pytest.fixture +def data_repeated(data): + def gen(count): + for _ in range(count): + yield data + yield gen + + +@pytest.fixture +def data_for_sorting(dtype): + return IntegerArray([1, 2, 0], dtype=dtype) + + +@pytest.fixture +def data_missing_for_sorting(dtype): + return IntegerArray([1, np.nan, 0], dtype=dtype) + + +@pytest.fixture +def na_cmp(): + # we are np.nan + return lambda x, y: np.isnan(x) and np.isnan(y) + + +@pytest.fixture +def na_value(): + return np.nan + + +@pytest.fixture +def data_for_grouping(dtype): + b = 1 + a = 0 + c = 2 + na = np.nan + return IntegerArray([b, b, na, na, a, a, b, c], dtype=dtype) + + +def test_dtypes(dtype): + # smoke tests on auto dtype construction + + if dtype.is_signed_integer: + assert np.dtype(dtype.type).kind == 'i' + else: + assert np.dtype(dtype.type).kind == 'u' + assert dtype.name is not None + + +class BaseInteger(object): + + def assert_index_equal(self, left, right, *args, **kwargs): + + left_na = left.isna() + right_na = right.isna() + + tm.assert_numpy_array_equal(left_na, right_na) + return tm.assert_index_equal(left[~left_na], + right[~right_na], + *args, **kwargs) + + def assert_series_equal(self, left, right, *args, **kwargs): + + left_na = left.isna() + right_na = right.isna() + + tm.assert_series_equal(left_na, right_na) + return tm.assert_series_equal(left[~left_na], + right[~right_na], + *args, **kwargs) + + def assert_frame_equal(self, left, right, *args, **kwargs): + # TODO(EA): select_dtypes + tm.assert_index_equal( + left.columns, right.columns, + exact=kwargs.get('check_column_type', 'equiv'), + check_names=kwargs.get('check_names', True), + check_exact=kwargs.get('check_exact', False), + check_categorical=kwargs.get('check_categorical', True), + obj='{obj}.columns'.format(obj=kwargs.get('obj', 'DataFrame'))) + + integers = (left.dtypes == 'integer').index + + for col in integers: + self.assert_series_equal(left[col], right[col], + *args, **kwargs) + + left = left.drop(columns=integers) + right 
= right.drop(columns=integers) + tm.assert_frame_equal(left, right, *args, **kwargs) + + +class TestDtype(BaseInteger, base.BaseDtypeTests): + + @pytest.mark.skip(reason="using multiple dtypes") + def test_is_dtype_unboxes_dtype(self): + # we have multiple dtypes, so skip + pass + + def test_array_type_with_arg(self, data, dtype): + assert dtype.construct_array_type() is IntegerArray + + +class TestArithmeticOps(BaseInteger, base.BaseArithmeticOpsTests): + + def _check_divmod_op(self, s, op, other, exc=None): + super(TestArithmeticOps, self)._check_divmod_op(s, op, other, None) + + def _check_op(self, s, op_name, other, exc=None): + op = self.get_op_from_name(op_name) + result = op(s, other) + + # compute expected + mask = s.isna() + + # other array is an Integer + if isinstance(other, IntegerArray): + omask = getattr(other, 'mask', None) + mask = getattr(other, 'data', other) + if omask is not None: + mask |= omask + + # float result type or float op + if ((is_float_dtype(other) or is_float(other) or + op_name in ['__rtruediv__', '__truediv__', + '__rdiv__', '__div__'])): + rs = s.astype('float') + expected = op(rs, other) + self._check_op_float(result, expected, mask, s, op_name, other) + + # integer result type + else: + rs = pd.Series(s.values._data) + expected = op(rs, other) + self._check_op_integer(result, expected, mask, s, op_name, other) + + def _check_op_float(self, result, expected, mask, s, op_name, other): + # check comparisions that are resulting in float dtypes + + expected[mask] = np.nan + self.assert_series_equal(result, expected) + + def _check_op_integer(self, result, expected, mask, s, op_name, other): + # check comparisions that are resulting in integer dtypes + + # to compare properly, we convert the expected + # to float, mask to nans and convert infs + # if we have uints then we process as uints + # then conert to float + # and we ultimately want to create a IntArray + # for comparisons + + fill_value = 0 + + # mod/rmod turn floating 0 into 
NaN while + # integer works as expected (no nan) + if op_name in ['__mod__', '__rmod__']: + if is_scalar(other): + if other == 0: + expected[s.values == 0] = 0 + else: + expected = expected.fillna(0) + else: + expected[(s.values == 0) & + ((expected == 0) | expected.isna())] = 0 + + try: + expected[(expected == np.inf) | (expected == -np.inf)] = fill_value + original = expected + expected = expected.astype(s.dtype) + + except ValueError: + + expected = expected.astype(float) + expected[(expected == np.inf) | (expected == -np.inf)] = fill_value + original = expected + expected = expected.astype(s.dtype) + + expected[mask] = np.nan + + # assert that the expected astype is ok + # (skip for unsigned as they have wrap around) + if not s.dtype.is_unsigned_integer: + original = pd.Series(original) + + # we need to fill with 0's to emulate what an astype('int') does + # (truncation) for certain ops + if op_name in ['__rtruediv__', '__rdiv__']: + mask |= original.isna() + original = original.fillna(0).astype('int') + + original = original.astype('float') + original[mask] = np.nan + self.assert_series_equal(original, expected.astype('float')) + + # assert our expected result + self.assert_series_equal(result, expected) + + def test_arith_integer_array(self, data, all_arithmetic_operators): + # we operate with a rhs of an integer array + + op = all_arithmetic_operators + + s = pd.Series(data) + rhs = pd.Series([1] * len(data), dtype=data.dtype) + rhs.iloc[-1] = np.nan + + self._check_op(s, op, rhs) + + def test_arith_series_with_scalar(self, data, all_arithmetic_operators): + # scalar + op = all_arithmetic_operators + + s = pd.Series(data) + self._check_op(s, op, 1, exc=TypeError) + + @pytest.mark.xfail(run=False, reason="_reduce needs implementation") + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): + # frame & scalar + op = all_arithmetic_operators + + df = pd.DataFrame({'A': data}) + self._check_op(df, op, 1, exc=TypeError) + + def 
test_arith_series_with_array(self, data, all_arithmetic_operators): + # ndarray & other series + op = all_arithmetic_operators + + s = pd.Series(data) + other = np.ones(len(s), dtype=s.dtype.type) + self._check_op(s, op, other, exc=TypeError) + + def test_arith_coerce_scalar(self, data, all_arithmetic_operators): + + op = all_arithmetic_operators + s = pd.Series(data) + + other = 0.01 + self._check_op(s, op, other) + + @pytest.mark.parametrize("other", [1., 1.0, np.array(1.), np.array([1.])]) + def test_arithmetic_conversion(self, all_arithmetic_operators, other): + # if we have a float operand we should have a float result + # if if that is equal to an integer + op = self.get_op_from_name(all_arithmetic_operators) + + s = pd.Series([1, 2, 3], dtype='Int64') + result = op(s, other) + assert result.dtype is np.dtype('float') + + def test_error(self, data, all_arithmetic_operators): + # invalid ops + + op = all_arithmetic_operators + s = pd.Series(data) + ops = getattr(s, op) + opa = getattr(data, op) + + # invalid scalars + with pytest.raises(TypeError): + ops('foo') + with pytest.raises(TypeError): + ops(pd.Timestamp('20180101')) + + # invalid array-likes + with pytest.raises(TypeError): + ops(pd.Series('foo', index=s.index)) + + if op != '__rpow__': + # TODO(extension) + # rpow with a datetimelike coerces the integer array incorrectly + with pytest.raises(TypeError): + ops(pd.Series(pd.date_range('20180101', periods=len(s)))) + + # 2d + with pytest.raises(NotImplementedError): + opa(pd.DataFrame({'A': s})) + with pytest.raises(NotImplementedError): + opa(np.arange(len(s)).reshape(-1, len(s))) + + +class TestComparisonOps(BaseInteger, base.BaseComparisonOpsTests): + + def _compare_other(self, s, data, op_name, other): + op = self.get_op_from_name(op_name) + + # array + result = op(s, other) + expected = pd.Series(op(data._data, other)) + + # fill the nan locations + expected[data._mask] = True if op_name == '__ne__' else False + + tm.assert_series_equal(result, 
expected) + + # series + s = pd.Series(data) + result = op(s, other) + + expected = pd.Series(data._data) + expected = op(expected, other) + + # fill the nan locations + expected[data._mask] = True if op_name == '__ne__' else False + + tm.assert_series_equal(result, expected) + + +class TestInterface(BaseInteger, base.BaseInterfaceTests): + + def test_repr_array(self, data): + result = repr(data) + + # not long + assert '...' not in result + + assert 'dtype=' in result + assert 'IntegerArray' in result + + def test_repr_array_long(self, data): + # some arrays may be able to assert a ... in the repr + with pd.option_context('display.max_seq_items', 1): + result = repr(data) + + assert '...' in result + assert 'length' in result + + +class TestConstructors(BaseInteger, base.BaseConstructorsTests): + + def test_from_dtype_from_float(self, data): + # construct from our dtype & string dtype + dtype = data.dtype + + # from float + expected = pd.Series(data) + result = pd.Series(np.array(data).astype('float'), dtype=str(dtype)) + self.assert_series_equal(result, expected) + + # from int / list + expected = pd.Series(data) + result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) + self.assert_series_equal(result, expected) + + # from int / array + expected = pd.Series(data).dropna().reset_index(drop=True) + dropped = np.array(data.dropna()).astype(np.dtype((dtype.type))) + result = pd.Series(dropped, dtype=str(dtype)) + self.assert_series_equal(result, expected) + + +class TestReshaping(BaseInteger, base.BaseReshapingTests): + + def test_concat_mixed_dtypes(self, data): + # https://github.com/pandas-dev/pandas/issues/20762 + df1 = pd.DataFrame({'A': data[:3]}) + df2 = pd.DataFrame({"A": [1, 2, 3]}) + df3 = pd.DataFrame({"A": ['a', 'b', 'c']}).astype('category') + df4 = pd.DataFrame({"A": pd.SparseArray([1, 2, 3])}) + dfs = [df1, df2, df3, df4] + + # dataframes + result = pd.concat(dfs) + expected = pd.concat([x.astype(object) for x in dfs]) + 
self.assert_frame_equal(result, expected) + + # series + result = pd.concat([x['A'] for x in dfs]) + expected = pd.concat([x['A'].astype(object) for x in dfs]) + self.assert_series_equal(result, expected) + + result = pd.concat([df1, df2]) + expected = pd.concat([df1.astype('object'), df2.astype('object')]) + self.assert_frame_equal(result, expected) + + # concat of an Integer and Int coerces to object dtype + # TODO(jreback) once integrated this would + # be a result of Integer + result = pd.concat([df1['A'], df2['A']]) + expected = pd.concat([df1['A'].astype('object'), + df2['A'].astype('object')]) + self.assert_series_equal(result, expected) + + +class TestGetitem(BaseInteger, base.BaseGetitemTests): + pass + + +class TestMissing(BaseInteger, base.BaseMissingTests): + pass + + +class TestMethods(BaseInteger, base.BaseMethodsTests): + + @pytest.mark.parametrize('dropna', [True, False]) + def test_value_counts(self, all_data, dropna): + all_data = all_data[:10] + if dropna: + other = np.array(all_data[~all_data.isna()]) + else: + other = all_data + + result = pd.Series(all_data).value_counts(dropna=dropna).sort_index() + expected = pd.Series(other).value_counts( + dropna=dropna).sort_index() + expected.index = expected.index.astype(all_data.dtype) + + self.assert_series_equal(result, expected) + + def test_combine_add(self, data_repeated): + # GH 20825 + orig_data1, orig_data2 = data_repeated(2) + s1 = pd.Series(orig_data1) + s2 = pd.Series(orig_data2) + + # fundamentally this is not a great operation + # as overflow / underflow can easily happen here + # e.g. 
int8 + int8 + def scalar_add(a, b): + + # TODO; should really be a type specific NA + if pd.isna(a) or pd.isna(b): + return np.nan + if is_integer(a): + a = int(a) + elif is_integer(b): + b = int(b) + return a + b + + result = s1.combine(s2, scalar_add) + expected = pd.Series( + orig_data1._from_sequence([scalar_add(a, b) for (a, b) in + zip(orig_data1, + orig_data2)])) + self.assert_series_equal(result, expected) + + val = s1.iloc[0] + result = s1.combine(val, lambda x1, x2: x1 + x2) + expected = pd.Series( + orig_data1._from_sequence([a + val for a in list(orig_data1)])) + self.assert_series_equal(result, expected) + + +class TestCasting(BaseInteger, base.BaseCastingTests): + + @pytest.mark.parametrize('dropna', [True, False]) + def test_construct_index(self, all_data, dropna): + # ensure that we do not coerce to Float64Index, rather + # keep as Index + + all_data = all_data[:10] + if dropna: + other = np.array(all_data[~all_data.isna()]) + else: + other = all_data + + result = pd.Index(IntegerArray(other, + dtype=all_data.dtype)) + expected = pd.Index(other, dtype=object) + + self.assert_index_equal(result, expected) + + @pytest.mark.parametrize('dropna', [True, False]) + def test_astype_index(self, all_data, dropna): + # as an int/uint index to Index + + all_data = all_data[:10] + if dropna: + other = all_data[~all_data.isna()] + else: + other = all_data + + dtype = all_data.dtype + idx = pd.Index(np.array(other)) + assert isinstance(idx, ABCIndexClass) + + result = idx.astype(dtype) + expected = idx.astype(object).astype(dtype) + self.assert_index_equal(result, expected) + + def test_astype(self, all_data): + all_data = all_data[:10] + + ints = all_data[~all_data.isna()] + mixed = all_data + dtype = Int8Dtype() + + # coerce to same type - ints + s = pd.Series(ints) + result = s.astype(all_data.dtype) + expected = pd.Series(ints) + self.assert_series_equal(result, expected) + + # coerce to same other - ints + s = pd.Series(ints) + result = s.astype(dtype) + 
expected = pd.Series(ints, dtype=dtype) + self.assert_series_equal(result, expected) + + # coerce to same numpy_dtype - ints + s = pd.Series(ints) + result = s.astype(all_data.dtype.numpy_dtype) + expected = pd.Series(ints._data.astype( + all_data.dtype.numpy_dtype)) + tm.assert_series_equal(result, expected) + + # coerce to same type - mixed + s = pd.Series(mixed) + result = s.astype(all_data.dtype) + expected = pd.Series(mixed) + self.assert_series_equal(result, expected) + + # coerce to same other - mixed + s = pd.Series(mixed) + result = s.astype(dtype) + expected = pd.Series(mixed, dtype=dtype) + self.assert_series_equal(result, expected) + + # coerce to same numpy_dtype - mixed + s = pd.Series(mixed) + with pytest.raises(ValueError): + s.astype(all_data.dtype.numpy_dtype) + + # coerce to object + s = pd.Series(mixed) + result = s.astype('object') + expected = pd.Series(np.asarray(mixed)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('dtype', [Int8Dtype(), 'Int8']) + def test_astype_specific_casting(self, dtype): + s = pd.Series([1, 2, 3], dtype='Int64') + result = s.astype(dtype) + expected = pd.Series([1, 2, 3], dtype='Int8') + self.assert_series_equal(result, expected) + + s = pd.Series([1, 2, 3, None], dtype='Int64') + result = s.astype(dtype) + expected = pd.Series([1, 2, 3, None], dtype='Int8') + self.assert_series_equal(result, expected) + + def test_construct_cast_invalid(self, dtype): + + msg = "cannot safely" + arr = [1.2, 2.3, 3.7] + with tm.assert_raises_regex(TypeError, msg): + IntegerArray(arr, dtype=dtype) + + with tm.assert_raises_regex(TypeError, msg): + pd.Series(arr).astype(dtype) + + arr = [1.2, 2.3, 3.7, np.nan] + with tm.assert_raises_regex(TypeError, msg): + IntegerArray(arr, dtype=dtype) + + with tm.assert_raises_regex(TypeError, msg): + pd.Series(arr).astype(dtype) + + +class TestGroupby(BaseInteger, base.BaseGroupbyTests): + + @pytest.mark.xfail(reason="groupby not working") + def 
test_groupby_extension_no_sort(self, data_for_grouping): + super(TestGroupby, self).test_groupby_extension_no_sort( + data_for_grouping) + + @pytest.mark.xfail(reason="groupby not working") + @pytest.mark.parametrize('as_index', [True, False]) + def test_groupby_extension_agg(self, as_index, data_for_grouping): + super(TestGroupby, self).test_groupby_extension_agg( + as_index, data_for_grouping) + + +def test_frame_repr(data_missing): + + df = pd.DataFrame({'A': data_missing}) + result = repr(df) + expected = ' A\n0 NaN\n1 1' + assert result == expected + + +def test_conversions(data_missing): + + # astype to object series + df = pd.DataFrame({'A': data_missing}) + result = df['A'].astype('object') + expected = pd.Series(np.array([np.nan, 1], dtype=object), name='A') + tm.assert_series_equal(result, expected) + + # convert to object ndarray + # we assert that we are exactly equal + # including type conversions of scalars + result = df['A'].astype('object').values + expected = np.array([np.nan, 1], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + for r, e in zip(result, expected): + if pd.isnull(r): + assert pd.isnull(e) + elif is_integer(r): + # PY2 can be int or long + assert r == e + assert is_integer(e) + else: + assert r == e + assert type(r) == type(e) + + +@pytest.mark.parametrize( + 'values', + [ + ['foo', 'bar'], + 'foo', + 1, + 1.0, + pd.date_range('20130101', periods=2), + np.array(['foo'])]) +def test_to_integer_array_error(values): + # error in converting existing arrays to IntegerArrays + with pytest.raises(TypeError): + to_integer_array(values) + + +@pytest.mark.parametrize( + 'values, to_dtype, result_dtype', + [ + (np.array([1], dtype='int64'), None, Int64Dtype), + (np.array([1, np.nan]), None, Int64Dtype), + (np.array([1, np.nan]), 'int8', Int8Dtype)]) +def test_to_integer_array(values, to_dtype, result_dtype): + # convert existing arrays to IntegerArrays + result = to_integer_array(values, dtype=to_dtype) + expected = 
IntegerArray(values, dtype=result_dtype()) + tm.assert_extension_array_equal(result, expected) + + +def test_cross_type_arithmetic(): + + df = pd.DataFrame({'A': pd.Series([1, 2, np.nan], dtype='Int64'), + 'B': pd.Series([1, np.nan, 3], dtype='UInt8'), + 'C': [1, 2, 3]}) + + result = df.A + df.C + expected = pd.Series([2, 4, np.nan], dtype='Int64') + tm.assert_series_equal(result, expected) + + result = (df.A + df.C) * 3 == 12 + expected = pd.Series([False, True, False]) + tm.assert_series_equal(result, expected) + + result = df.A + df.B + expected = pd.Series([2, np.nan, np.nan], dtype='Int64') + tm.assert_series_equal(result, expected) + + +# TODO(jreback) - these need testing / are broken + +# shift + +# set_index (destroys type) diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index 160bf259e1e32..34c397252a8bb 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -13,8 +13,6 @@ import collections import itertools import numbers -import random -import string import sys import numpy as np @@ -54,7 +52,7 @@ def construct_from_string(cls, string): class JSONArray(ExtensionArray): dtype = JSONDtype() - def __init__(self, values, copy=False): + def __init__(self, values, dtype=None, copy=False): for val in values: if not isinstance(val, self.dtype.type): raise TypeError("All values must be of type " + @@ -69,7 +67,7 @@ def __init__(self, values, copy=False): # self._values = self.values = self.data @classmethod - def _from_sequence(cls, scalars, copy=False): + def _from_sequence(cls, scalars, dtype=None, copy=False): return cls(scalars) @classmethod @@ -180,10 +178,3 @@ def _values_for_argsort(self): # cast them to an (N, P) array, instead of an (N,) array of tuples. frozen = [()] + list(tuple(x.items()) for x in self) return np.array(frozen, dtype=object)[1:] - - -def make_data(): - # TODO: Use a regular dict. 
See _NDFrameIndexer._setitem_with_indexer - return [collections.UserDict([ - (random.choice(string.ascii_letters), random.randint(0, 100)) - for _ in range(random.randint(0, 10))]) for _ in range(100)] diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py index 7eeaf7946663e..520c303f1990b 100644 --- a/pandas/tests/extension/json/test_json.py +++ b/pandas/tests/extension/json/test_json.py @@ -1,5 +1,7 @@ import operator import collections +import random +import string import pytest @@ -8,11 +10,18 @@ from pandas.compat import PY2, PY36 from pandas.tests.extension import base -from .array import JSONArray, JSONDtype, make_data +from .array import JSONArray, JSONDtype pytestmark = pytest.mark.skipif(PY2, reason="Py2 doesn't have a UserDict") +def make_data(): + # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer + return [collections.UserDict([ + (random.choice(string.ascii_letters), random.randint(0, 100)) + for _ in range(random.randint(0, 10))]) for _ in range(100)] + + @pytest.fixture def dtype(): return JSONDtype() @@ -203,7 +212,8 @@ def test_combine_add(self, data_repeated): class TestCasting(BaseJSON, base.BaseCastingTests): - @pytest.mark.xfail + + @pytest.mark.xfail(reason="failing on np.array(self, dtype=str)") def test_astype_str(self): """This currently fails in NumPy on np.array(self, dtype=str) with diff --git a/pandas/tests/extension/test_common.py b/pandas/tests/extension/test_common.py index 44b818be84e31..b6223ea96d7dd 100644 --- a/pandas/tests/extension/test_common.py +++ b/pandas/tests/extension/test_common.py @@ -22,7 +22,16 @@ def __array__(self, dtype): @property def dtype(self): - return self.data.dtype + return DummyDtype() + + def astype(self, dtype, copy=True): + # we don't support anything but a single dtype + if isinstance(dtype, DummyDtype): + if copy: + return type(self)(self.data) + return self + + return np.array(self, dtype=dtype, copy=copy) class 
TestExtensionArrayDtype(object): @@ -61,10 +70,10 @@ def test_astype_no_copy(): arr = DummyArray(np.array([1, 2, 3], dtype=np.int64)) result = arr.astype(arr.dtype, copy=False) - assert arr.data is result + assert arr is result result = arr.astype(arr.dtype) - assert arr.data is not result + assert arr is not result @pytest.mark.parametrize('dtype', [ diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py index b3a4bfa878c3f..1e96ac730a0eb 100644 --- a/pandas/tests/indexes/interval/test_astype.py +++ b/pandas/tests/indexes/interval/test_astype.py @@ -56,7 +56,7 @@ def test_astype_cannot_cast(self, index, dtype): index.astype(dtype) def test_astype_invalid_dtype(self, index): - msg = 'data type "fake_dtype" not understood' + msg = "data type 'fake_dtype' not understood" with tm.assert_raises_regex(TypeError, msg): index.astype('fake_dtype') diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py index d46e19ef56dd0..8c9d0459eff55 100644 --- a/pandas/tests/indexes/interval/test_construction.py +++ b/pandas/tests/indexes/interval/test_construction.py @@ -139,7 +139,7 @@ def test_generic_errors(self, constructor): constructor(dtype='int64', **filler) # invalid dtype - msg = 'data type "invalid" not understood' + msg = "data type 'invalid' not understood" with tm.assert_raises_regex(TypeError, msg): constructor(dtype='invalid', **filler) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index fe224436c52e6..e95e41bbdeefa 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -226,10 +226,13 @@ def test_constructor_categorical(self): res = Series(cat) tm.assert_categorical_equal(res.values, cat) + # can cast to a new dtype + result = Series(pd.Categorical([1, 2, 3]), + dtype='int64') + expected = pd.Series([1, 2, 3], dtype='int64') + tm.assert_series_equal(result, 
expected) + # GH12574 - pytest.raises( - ValueError, lambda: Series(pd.Categorical([1, 2, 3]), - dtype='int64')) cat = Series(pd.Categorical([1, 2, 3]), dtype='category') assert is_categorical_dtype(cat) assert is_categorical_dtype(cat.dtype)
closes #20700 closes #20747 ``` In [1]: df = pd.DataFrame({ 'A': pd.Series([1, 2, np.nan], dtype='Int64'), 'B': pd.Series([1, np.nan, 3], dtype='UInt8'), 'C': [1, 2, 3]}) In [2]: df Out[2]: A B C 0 1 1 1 1 2 NaN 2 2 NaN 3 3 In [3]: df.dtypes Out[3]: A Int64 B UInt8 C int64 dtype: object In [4]: df.A + df.B Out[4]: 0 2 1 NaN 2 NaN dtype: Int64 In [5]: df.A + df.C Out[5]: 0 2 1 4 2 NaN dtype: Int64 In [6]: (df.A + df.C) * 3 Out[6]: 0 6 1 12 2 NaN dtype: Int64 In [7]: (df.A + df.C) * 3 == 1 Out[7]: 0 False 1 False 2 False dtype: bool In [8]: (df.A + df.C) * 3 == 12 Out[8]: 0 False 1 True 2 False dtype: bool ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21160
2018-05-22T00:00:03Z
2018-07-20T20:44:33Z
2018-07-20T20:44:33Z
2018-07-20T22:21:06Z
TST: Escape invalid escape characters
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 81d775157cf62..5d50c45fe7eca 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -241,7 +241,7 @@ def str_count(arr, pat, flags=0): Escape ``'$'`` to find the literal dollar sign. >>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat']) - >>> s.str.count('\$') + >>> s.str.count('\\$') 0 1 1 0 2 1 @@ -358,7 +358,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): Returning any digit using regular expression. - >>> s1.str.contains('\d', regex=True) + >>> s1.str.contains('\\d', regex=True) 0 False 1 False 2 False
Partially addresses #21137.
https://api.github.com/repos/pandas-dev/pandas/pulls/21154
2018-05-21T16:53:25Z
2018-05-21T23:11:16Z
2018-05-21T23:11:16Z
2018-05-22T00:33:29Z
TST: Remove .ix deprecation warnings
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 6d74ce54faa94..5459b6910e11a 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -397,11 +397,13 @@ def test_getitem_setitem_ix_negative_integers(self): df = DataFrame(np.random.randn(8, 4)) # ix does label-based indexing when having an integer index - with pytest.raises(KeyError): - df.ix[[-1]] + with catch_warnings(record=True): + with pytest.raises(KeyError): + df.ix[[-1]] - with pytest.raises(KeyError): - df.ix[:, [-1]] + with catch_warnings(record=True): + with pytest.raises(KeyError): + df.ix[:, [-1]] # #1942 a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)]) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 7a17408d4468f..9c992770fc64c 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -197,7 +197,7 @@ def test_dups_fancy_indexing(self): # List containing only missing label dfnu = DataFrame(np.random.randn(5, 3), index=list('AABCD')) with pytest.raises(KeyError): - dfnu.ix[['E']] + dfnu.loc[['E']] # ToDo: check_index_type can be True after GH 11497
Partially addresses #21137.
https://api.github.com/repos/pandas-dev/pandas/pulls/21148
2018-05-21T07:42:41Z
2018-05-21T10:34:40Z
2018-05-21T10:34:40Z
2018-05-21T10:43:48Z
BUG: set keyword argument so zipfile actually compresses
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 4876678baaa6e..c02d988a7bc63 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -83,6 +83,7 @@ Indexing I/O ^^^ +- Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`) - Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`) - diff --git a/pandas/io/common.py b/pandas/io/common.py index 0827216975f15..a492b7c0b8e8e 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -5,7 +5,7 @@ import codecs import mmap from contextlib import contextmanager, closing -from zipfile import ZipFile +import zipfile from pandas.compat import StringIO, BytesIO, string_types, text_type from pandas import compat @@ -428,7 +428,7 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None, return f, handles -class BytesZipFile(ZipFile, BytesIO): +class BytesZipFile(zipfile.ZipFile, BytesIO): """ Wrapper for standard library class ZipFile and allow the returned file-like handle to accept byte strings via `write` method. @@ -437,10 +437,10 @@ class BytesZipFile(ZipFile, BytesIO): bytes strings into a member of the archive. 
""" # GH 17778 - def __init__(self, file, mode='r', **kwargs): + def __init__(self, file, mode, compression=zipfile.ZIP_DEFLATED, **kwargs): if mode in ['wb', 'rb']: mode = mode.replace('b', '') - super(BytesZipFile, self).__init__(file, mode, **kwargs) + super(BytesZipFile, self).__init__(file, mode, compression, **kwargs) def write(self, data): super(BytesZipFile, self).writestr(self.filename, data) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 0b329f64dafa3..bb7ee1b911fee 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,12 +1,13 @@ # -*- coding: utf-8 -*- import pytest +import os import collections from functools import partial import numpy as np -from pandas import Series, Timestamp +from pandas import Series, DataFrame, Timestamp from pandas.compat import range, lmap import pandas.core.common as com from pandas.core import ops @@ -222,3 +223,21 @@ def test_standardize_mapping(): dd = collections.defaultdict(list) assert isinstance(com.standardize_mapping(dd), partial) + + +@pytest.mark.parametrize('obj', [ + DataFrame(100 * [[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + columns=['X', 'Y', 'Z']), + Series(100 * [0.123456, 0.234567, 0.567567], name='X')]) +@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv']) +def test_compression_size(obj, method, compression): + if not compression: + pytest.skip("only test compression case.") + + with tm.ensure_clean() as filename: + getattr(obj, method)(filename, compression=compression) + compressed = os.path.getsize(filename) + getattr(obj, method)(filename, compression=None) + uncompressed = os.path.getsize(filename) + assert uncompressed > compressed
- [x] closes https://github.com/pandas-dev/pandas/issues/17778 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry zipfile.ZipFile has default compression mode `zipfile.ZIP_STORED`. It creates an uncompressed archive member. Whilst it doesn't cause issue, it is a strange default to have given users would want to compress files. In order for zip compression to actually reduce file size, keyword argument `compression=zipfile.ZIP_DEFLATED` is added.
https://api.github.com/repos/pandas-dev/pandas/pulls/21144
2018-05-20T16:16:35Z
2018-05-29T10:41:28Z
2018-05-29T10:41:28Z
2018-06-08T17:18:38Z